repo_name (string, lengths 8-130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence)
---|---|---|---|---|
jbeezley/SMQTK | [
"fc9404b69150ef44f24423844bc80735c0c2b669"
] | [
"python/smqtk/bin/make_train_test_sets.py"
] | [
"#!/usr/bin/env python\nimport argparse\nimport csv\nimport itertools\nimport os\nimport re\n\nimport numpy\nimport six\nfrom sklearn.model_selection import StratifiedShuffleSplit\n\n\nclass KeyToFilepath(argparse.Action):\n \"\"\"\n Custom argparse action for parsing out positional class-to-filepath\n arguments.\n \"\"\"\n re_key2path = re.compile('(\\w+)=(.+)', flags=re.UNICODE)\n\n # noinspection PyUnusedLocal,PyShadowingBuiltins\n def __init__(self, option_strings, dest, nargs=None, const=None,\n default=None, type=None, choices=None, required=False,\n help=None, metavar=None):\n \"\"\"\n Custom constructor to enforce that `nargs` is always `+`.\n \"\"\"\n super(KeyToFilepath, self).__init__(option_strings,\n dest, \"+\",\n const, default, type,\n choices, required,\n help, metavar)\n\n # noinspection PyShadowingNames\n def __call__(self, parser, namespace, values, option_string=None):\n d = dict()\n for a in values:\n m = self.re_key2path.match(a)\n if not m:\n raise ValueError(\"Invalid argument syntax: '%s'\" % a)\n cls_name = m.group(1)\n filepath = m.group(2)\n if not os.path.isfile(filepath):\n raise ValueError(\n \"Invalid filepath '%s' given for argument: '%s'\"\n % (filepath, a)\n )\n # Read in UIDs from lines in CSV file\n d[cls_name] = filepath\n setattr(namespace, self.dest, d)\n\n\ndef cli_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('cls_to_cmdProcessedCsv',\n nargs='+',\n help=\"Series of `label=filepath` arguments where we \"\n \"interpret the string value before the `=` sign \"\n \"as the class label and the value after to be the \"\n \"path to the `compute_many_descriptors` output \"\n \"CSV of files successfully processed.\",\n action=KeyToFilepath)\n parser.add_argument('-t', '--test-percent',\n type=float,\n default=0.3,\n help=\"Percentage of images per class to split for \"\n \"testing. Should be in the [0,1] range. Selects \"\n \"~30%% by default.\")\n parser.add_argument('--rand-state', type=int, default=0,\n help='Random state initialization integer. Default is '\n '0.')\n parser.add_argument('--output-base',\n default=\"classifier\",\n help=\"String base to output files. We will generate 3 \"\n \"files: '<>.all_uuids.csv', '<>.train_uuids.csv' \"\n \"and '<>.test_uuids.csv'. 
\"\n \"Default is 'classifier'.\")\n return parser\n\n\ndef main():\n args = cli_parser().parse_args()\n\n TEST_PERCENT = args.test_percent\n RAND_STATE = args.rand_state\n OUTPUT_BASE = args.output_base\n CLS_TO_FILEPATH = args.cls_to_cmdProcessedCsv\n\n # Parse CSV files associated to classes\n cls_uuids = {}\n for cls, filepath in six.iteritems(CLS_TO_FILEPATH):\n cls_uuids[cls] = sorted({r[1] for r in csv.reader(open(filepath))})\n\n cls_list = sorted(cls_uuids)\n all_label, all_uuids = \\\n zip(*[(cls_name, uuid)\n for cls_name in cls_list\n for uuid in cls_uuids[cls_name]])\n # Transform into numpy array for multi-index access later\n all_label = numpy.array(all_label)\n all_uuids = numpy.array(all_uuids)\n\n # ``n_splits=1`` -- Only make one train/test split\n sss = StratifiedShuffleSplit(n_splits=1, test_size=TEST_PERCENT,\n random_state=RAND_STATE)\n\n # Get array of index position values of ``all_uuids`` of uuids to use for\n # train and test sets, respectively.\n train_index, test_index = \\\n iter(sss.split(numpy.zeros(len(all_label)), all_label)).next()\n uuids_train, uuids_test = all_uuids[train_index], all_uuids[test_index]\n label_train, label_test = all_label[train_index], all_label[test_index]\n\n print(\"Train:\")\n for cls_label in cls_list:\n cnt = label_train.tolist().count(cls_label)\n print(\"- %s:\\t%d\\t(~%.2f %% of total class examples)\"\n % (cls_label, cnt, float(cnt) / len(cls_uuids[cls_label]) * 100))\n print(\"Test:\")\n for cls_label in cls_list:\n cnt = label_test.tolist().count(cls_label)\n print(\"- %s:\\t%d\\t(~%.2f %% of total class examples)\"\n % (cls_label, cnt, float(cnt) / len(cls_uuids[cls_label]) * 100))\n\n # Save out files for use with ``classifier_model_validation``\n with open('%s.all_uuids.csv' % OUTPUT_BASE, 'w') as f:\n w = csv.writer(f)\n for uuid, label in itertools.izip(all_uuids, all_label):\n w.writerow([uuid, label])\n\n with open('%s.train_uuids.csv' % OUTPUT_BASE, 'w') as f:\n w = csv.writer(f)\n for uuid, label in itertools.izip(uuids_train, label_train):\n w.writerow([uuid, label])\n\n with open('%s.test_uuids.csv' % OUTPUT_BASE, 'w') as f:\n w = csv.writer(f)\n for uuid, label in itertools.izip(uuids_test, label_test):\n w.writerow([uuid, label])\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.array",
"sklearn.model_selection.StratifiedShuffleSplit"
]
] |
GeoscienceAustralia/geophys_utils | [
"e5580f582f3e42f347d71b140dd9213f784e2fde"
] | [
"geophys_utils/netcdf_converter/aseg_gdf_utils.py"
] | [
"'''\nFunctions to work with ASEG-GDF format string\nRefer to https://www.aseg.org.au/sites/default/files/pdf/ASEG-GDF2-REV4.pdf for further information\n\nCreated on 19 Jun. 2018\n\n@author: u76345\n'''\n\nimport re\nimport numpy as np\nfrom collections import OrderedDict\nfrom math import ceil, log10\nimport logging\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO) # Logging level for this module\n\ndef dfrexp(f):\n '''\n Decimal version of frexp or np.frexp function to return mantissa & exponent\n @param f: Floating point scalar or array\n @return fman: Scalar or array decimal mantissa between 0.0 and 1.0 \n @return fexp: Scalar or array decimal exponent\n '''\n # Compute decimal exponent\n if type(f) == np.ndarray:\n fexp = np.zeros(shape=f.shape, dtype='int32')\n fexp[f != 0] = np.ceil(np.log10(np.abs(f[f != 0]))).astype('int32')\n else: # Scalar\n fexp = int(ceil(log10(abs(f)))) if f != 0 else 0\n \n # Compute decimal mantissa between 0.0 and 1.0\n fman = f/10.0**fexp\n \n logger.debug('fman: {}'.format(fman))\n logger.debug('fexp: {}'.format(fexp))\n \n return fman, fexp\n\n\n# Approximate maximum number of significant decimal figures for each signed datatype\nSIG_FIGS = OrderedDict([('uint8', 4), # 128\n ('uint16', 10), # 32768\n ('uint32', 19), # 2147483648 - should be 9, but made 10 because int64 is unsupported\n ('uint64', 30), # 9223372036854775808 - Not supported in netCDF3 or netCDF4-Classic\n ('int8', 2), # 128\n ('int16', 4), # 32768\n ('int32', 10), # 2147483648 - should be 9, but made 10 because int64 is unsupported\n ('int64', 19), # 9223372036854775808 - Not supported in netCDF3 or netCDF4-Classic\n # https://en.wikipedia.org/wiki/Floating-point_arithmetic#IEEE_754:_floating_point_in_modern_computers\n ('float32', 7), # 7.2\n ('float64', 35) # 15.9 - should be 16, but made 35 to support unrealistic precision specifications\n ]\n )\n\nDTYPE_REDUCTION_LISTS = [['int64', 'int32', 'int16', 'int8'], # Integer dtypes\n ['uint64', 'uint32', 'uint16', 'uint8'], # Unsigned integer dtypes\n ['float64', 'float32'] #, 'int16', 'int8'] # Floating point dtypes - do NOT try casting to integer types\n ]\n \nASEG_DTYPE_CODE_MAPPING = {'uint8': 'I',\n 'uint16': 'I',\n 'uint32': 'I',\n 'uint64': 'I',\n 'int8': 'I',\n 'int16': 'I',\n 'int32': 'I',\n 'int64': 'I',\n 'float32': 'E', # real in exponent form\n 'float64': 'D', # double precision real in exponent form\n 'str': 'A'\n }\n\ndef decode_aseg_gdf_format(aseg_gdf_format):\n '''\n Function to decode ASEG-GDF format string\n @param aseg_gdf_format: ASEG-GDF format string\n\n @return columns: Number of columns (i.e. 1 for 1D data, or read from format string for 2D data)\n @return aseg_dtype_code: ASEG-GDF data type character, e.g. 
\"F\" or \"I\"\n @return width_specifier: Width of field in number of characters read from format string\n @return decimal_places: Number of fractional digits read from format string \n '''\n if not aseg_gdf_format:\n raise BaseException('No ASEG-GDF format string to decode') \n\n match = re.match('(\\d+)*(\\w)(\\d+)\\.*(\\d+)*', aseg_gdf_format)\n \n if not match:\n raise BaseException('Invalid ASEG-GDF format string {}'.format(aseg_gdf_format)) \n \n columns = int(match.group(1)) if match.group(1) is not None else 1\n aseg_dtype_code = match.group(2).upper()\n width_specifier = int(match.group(3))\n decimal_places = int(match.group(4)) if match.group(4) is not None else 0\n \n logger.debug('aseg_gdf_format: {}, columns: {}, aseg_dtype_code: {}, width_specifier: {}, decimal_places: {}'.format(aseg_gdf_format, \n columns, \n aseg_dtype_code, \n width_specifier, \n decimal_places\n )\n ) \n return columns, aseg_dtype_code, width_specifier, decimal_places \n\ndef aseg_gdf_format2dtype(aseg_gdf_format):\n '''\n Function to return Python data type string and other precision information from ASEG-GDF format string\n @param aseg_gdf_format: ASEG-GDF format string\n\n @return dtype: Data type string, e.g. int8 or float32\n @return columns: Number of columns (i.e. 1 for 1D data, or read from format string for 2D data)\n @return width_specifier: Width of field in number of characters read from format string\n @return decimal_places: Number of fractional digits read from format string \n '''\n columns, aseg_dtype_code, width_specifier, decimal_places = decode_aseg_gdf_format(aseg_gdf_format)\n dtype = None # Initially unknown datatype\n \n # Determine type and size for required significant figures\n # Integer type - N.B: Only signed types available\n if aseg_dtype_code == 'I':\n assert not decimal_places, 'Integer format cannot be defined with fractional digits'\n for test_dtype, sig_figs in SIG_FIGS.items():\n if test_dtype.startswith('int') and sig_figs >= width_specifier:\n dtype = test_dtype\n break\n assert dtype, 'Invalid width_specifier of {}'.format(width_specifier) \n \n # Floating point type - use approximate sig. figs. to determine length\n #TODO: Remove 'A' after string field handling has been properly implemented\n elif aseg_dtype_code in ['D', 'E', 'F']: # Floating point\n for test_dtype, sig_figs in SIG_FIGS.items():\n if test_dtype.startswith('float') and sig_figs >= width_specifier-2: # Allow for sign and decimal place\n dtype = test_dtype\n break\n assert dtype, 'Invalid floating point format of {}.{}'.format(width_specifier, decimal_places) \n \n elif aseg_dtype_code == 'A':\n assert not decimal_places, 'String format cannot be defined with fractional digits'\n dtype = '<U{}'.format(width_specifier) # Numpy fixed-length string type\n \n else:\n raise BaseException('Unhandled ASEG-GDF dtype code {}'.format(aseg_dtype_code))\n \n logger.debug('aseg_dtype_code: {}, columns: {}, width_specifier: {}, decimal_places: {}'.format(dtype, \n columns, \n width_specifier, \n decimal_places\n )\n ) \n return dtype, columns, width_specifier, decimal_places\n\n\ndef variable2aseg_gdf_format(array_variable, decimal_places=None):\n '''\n Function to return ASEG-GDF format string and other info from data array or netCDF array variable\n @param array_variable: data array or netCDF array variable\n @param decimal_places: Number of decimal places to respect, or None for value derived from datatype and values\n \n @return aseg_gdf_format: ASEG-GDF format string\n @return dtype: Data type string, e.g. 
int8 or float32\n @return columns: Number of columns (i.e. 1 for 1D data, or second dimension size for 2D data)\n @return width_specifier: Width of field in number of characters\n @return decimal_places: Number of fractional digits (derived from datatype sig. figs - width_specifier)\n @param python_format: Python Formatter string for fixed-width output\n '''\n if len(array_variable.shape) <= 1: # 1D variable or scalar\n columns = 1\n elif len(array_variable.shape) == 2: # 2D variable\n columns = array_variable.shape[1]\n else:\n raise BaseException('Unable to handle arrays with dimensionality > 2')\n \n data_array = array_variable[:]\n \n # Try to determine the dtype string from the variable and data_array type\n if not len(array_variable.shape): # Scalar\n dtype = type(data_array).__name__\n if dtype == 'str':\n width_specifier = len(data_array) + 1\n decimal_places = 0 \n elif dtype == 'ndarray': # Single-element array\n dtype = str(array_variable.dtype)\n data = np.asscalar(data_array)\n\n sig_figs = SIG_FIGS[dtype] + 1 # Look up approximate significant figures and add 1\n sign_width = 1 if data < 0 else 0\n integer_digits = ceil(log10(np.abs(data) + 1.0))\n else: # Array\n dtype = str(array_variable.dtype)\n if dtype in ['str', \"<class 'str'>\"]: # String array or string array variable\n dtype = 'str'\n width_specifier = max([len(string.strip()) for string in data_array]) + 1\n decimal_places = 0\n \n else: # Numeric datatype array\n # Include fill value if required\n if type(data_array) == np.ma.core.MaskedArray:\n logger.debug('Array is masked. Including fill value.')\n data_array = data_array.data\n \n sig_figs = SIG_FIGS[dtype] + 1 # Look up approximate significant figures and add 1\n sign_width = 1 if np.nanmin(data_array) < 0 else 0\n integer_digits = ceil(log10(np.nanmax(np.abs(data_array)) + 1.0))\n \n aseg_dtype_code = ASEG_DTYPE_CODE_MAPPING.get(dtype)\n assert aseg_dtype_code, 'Unhandled dtype {}'.format(dtype)\n \n if aseg_dtype_code == 'I': # Integer\n decimal_places = 0\n width_specifier = integer_digits + sign_width + 1\n aseg_gdf_format = 'I{}'.format(width_specifier)\n python_format = '{' + ':>{:d}.{:d}f'.format(width_specifier, decimal_places) + '}'\n\n elif aseg_dtype_code in ['F', 'D', 'E']: # Floating point\n # If array_variable is a netCDF variable with a \"format\" attribute, use stored format string to determine decimal_places\n if decimal_places is not None:\n decimal_places = min(decimal_places, abs(sig_figs-integer_digits))\n logger.debug('decimal_places set to {} from decimal_places {}'.format(decimal_places, decimal_places))\n elif hasattr(array_variable, 'aseg_gdf_format'): \n _columns, _aseg_dtype_code, _integer_digits, decimal_places = decode_aseg_gdf_format(array_variable.aseg_gdf_format)\n decimal_places = min(decimal_places, abs(sig_figs-integer_digits))\n logger.debug('decimal_places set to {} from variable attribute aseg_gdf_format {}'.format(decimal_places, array_variable.aseg_gdf_format))\n else: # No aseg_gdf_format variable attribute\n decimal_places = abs(sig_figs-integer_digits) # Allow for full precision of datatype\n logger.debug('decimal_places set to {} from sig_figs {} and integer_digits {}'.format(decimal_places, sig_figs, integer_digits))\n \n width_specifier = min(sign_width + integer_digits + decimal_places + 2,\n sign_width + sig_figs + 2\n )\n \n aseg_gdf_format = '{}{}.{}'.format(aseg_dtype_code, width_specifier, decimal_places)\n if aseg_dtype_code == 'F': # Floating point notation\n python_format = '{' + 
':>{:d}.{:d}f'.format(width_specifier, decimal_places) + '}' # Add 1 to width for decimal point\n else: # Exponential notation for 'D' or 'E'\n python_format = '{' + ':>{:d}.{:d}E'.format(width_specifier, decimal_places) + '}' # Add 1 to width for decimal point\n\n elif aseg_dtype_code == 'A': # String\n if hasattr(array_variable, 'aseg_gdf_format'):\n _columns, _aseg_dtype_code, width_specifier, decimal_places = decode_aseg_gdf_format(array_variable.aseg_gdf_format)\n aseg_gdf_format = array_variable.aseg_gdf_format\n else:\n aseg_gdf_format = 'A{}'.format(width_specifier)\n \n python_format = '{' + ':>{:d}s'.format(width_specifier) + '}'\n else:\n raise BaseException('Unhandled ASEG-GDF dtype code {}'.format(aseg_dtype_code))\n \n # Pre-pend column count to start of aseg_gdf_format\n if columns > 1:\n aseg_gdf_format = '{}{}'.format(columns, aseg_gdf_format)\n \n return aseg_gdf_format, dtype, columns, width_specifier, decimal_places, python_format\n\n\ndef fix_field_precision(data_array, current_dtype, decimal_places, no_data_mask=[], fill_value=None):\n '''\n Function to return revised ASEG-GDF format string and other info from data array or netCDF array variable\n after correcting datatype for excessive precision specification, or None if there is no precision change.\n Arrays are copied to smaller representations and then the difference with the original is checked to\n ensure that any difference is less than precision of the specified number of fractional digits.\n Note that fill_value is also considered but potentially modified only if data precision is changed\n @param data_array: data array - assumed to be of dtype float64 for raw data\n @param current_dtype: Current data type string, e.g. int8 or float32\n @param decimal_places: Number of fractional digits for precision checking\n @param fill_value: fill value or None\n \n Returns None if no precision change required.\n @return aseg_gdf_format: ASEG-GDF format string\n @return dtype: Data type string, e.g. int8 or float32\n @return columns: Number of columns (i.e. 1 for 1D data, or second dimension size for 2D data)\n @return width_specifier: Width of field in number of characters\n @return decimal_places: Number of fractional digits (derived from datatype sig. 
figs - width_specifier)\n @return python_format: Python Formatter string for fixed-width output\n @return fill_value: Potentially modified fill value\n '''\n logger.debug('data_array: {}, current_dtype: {}, decimal_places: {}'.format(data_array, current_dtype, decimal_places))\n \n try:\n data_mantissa, data_exponent = dfrexp(data_array)\n except:\n logger.debug('Unable to compute data_mantissa & data_exponent')\n return\n \n for dtype_reduction_list in DTYPE_REDUCTION_LISTS:\n try:\n current_dtype_index = dtype_reduction_list.index(current_dtype)\n\n # Try types from smallest to largest\n for smaller_dtype in dtype_reduction_list[:current_dtype_index:-1]: \n smaller_array = data_array.astype(smaller_dtype)\n difference_array = data_array - smaller_array\n logger.debug('current_dtype: {}\\nsmaller_dtype: {}\\narray_variable\\n{}\\nsmaller_array\\n{}\\n\\\ndifference_array\\n{}\\ndecimal_places: {}\\ndifference count: {}\\ndifference values: '.format(current_dtype, \n smaller_dtype, \n data_array, \n smaller_array, \n difference_array, \n decimal_places, \n np.count_nonzero(difference_array >= pow(10, -decimal_places)), \n difference_array[difference_array != 0]\n )\n )\n logger.debug('Maximum error converting from {} to {}: {}'.format(current_dtype,\n smaller_dtype,\n np.nanmax(np.abs(difference_array))))\n smaller_mantissa, smaller_exponent = dfrexp(smaller_array)\n if np.any(np.logical_or((smaller_exponent != data_exponent), \n (np.abs(data_mantissa - smaller_mantissa) >= pow(10, -decimal_places)))):\n # Differences found - try larger datatype\n continue\n else:\n logger.debug('Maximum mantissa difference: {}'.format(np.nanmax(np.abs(data_mantissa - smaller_mantissa))))\n logger.debug('Maximum exponent difference: {}'.format(np.nanmax(np.abs(smaller_exponent - data_exponent))))\n aseg_gdf_format, dtype, columns, width_specifier, decimal_places, python_format = variable2aseg_gdf_format(smaller_array, decimal_places)\n\n if fill_value is not None:\n # Use reduced precision fill_value if available and unambiguous\n if np.any(no_data_mask):\n reduced_precision_fill_value = data_array[no_data_mask][0] \n \n # Check for ambiguity introduced by reduced precision\n if np.any(data_array[~no_data_mask] == reduced_precision_fill_value):\n logger.debug('Reduced precision fill value of {} is ambiguous'.format(reduced_precision_fill_value))\n continue # Can't use this datatype - try the next bigger one\n elif fill_value != reduced_precision_fill_value:\n logger.debug('fill_value precision reduced from {} to {}'.format(fill_value, \n reduced_precision_fill_value)\n )\n fill_value = reduced_precision_fill_value\n \n fill_value = truncate(fill_value, data_array, no_data_mask, width_specifier, decimal_places)\n \n return aseg_gdf_format, dtype, columns, width_specifier, decimal_places, python_format, fill_value\n\n \n except ValueError: # current_dtype not in dtype_reduction_list\n continue\n\n\ndef truncate(fill_value, data_array, no_data_mask, width_specifier, decimal_places):\n '''\n Function to truncate fill_value to <width_specifier>.<decimal_places> rather than rounding for neater output later on\n\n @param fill_value: Original fill value\n @param data_array: Array containing data\n @param no_data_mask: Boolean mask array (true for valid data)\n @param width_specifier: Width of field in number of characters\n @param decimal_places: Number of fractional digits for precision checking\n \n @return fill_value: Potentially modified fill value\n '''\n try:\n truncated_fill_value = None\n \n integer_digits 
= width_specifier - decimal_places\n if fill_value < 0:\n integer_digits -= 1 # Allow for sign\n if decimal_places > 0:\n integer_digits -= 1 # Allow for decimal point\n \n fill_value_str = (fill_value)\n assert 'e' not in fill_value_str.lower(), 'Unable to truncate value in exponential notation'\n pattern = re.compile('(-?)\\d*?(\\d{0,' + '{}'.format(integer_digits) + '}\\.\\d{0,' + '{}'.format(decimal_places) + '})')\n search = re.search(pattern, fill_value_str)\n truncated_fill_value = float(search.group(1)+search.group(2))\n # Check for any ambiguity introduced by truncation\n assert not np.any(data_array[~no_data_mask] == truncated_fill_value), 'Truncated fill value of {} is ambiguous'.format(truncated_fill_value)\n if fill_value != truncated_fill_value:\n logger.debug('fill_value truncated from {} to {}'.format(fill_value, truncated_fill_value))\n return truncated_fill_value\n except Exception as e:\n logger.debug('Unable to truncate fill value from {} to {} ({}). Keeping original value.'.format(fill_value, truncated_fill_value, e))\n return fill_value\n"
] | [
[
"numpy.zeros",
"numpy.asscalar",
"numpy.any",
"numpy.abs",
"numpy.nanmin"
]
] |
saurabhclusterone/deepchem | [
"29bcf0fbf29a74c264a553237627ad3573a4b09d"
] | [
"deepchem/utils/save.py"
] | [
"\"\"\"\nSimple utils to save and load from disk.\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\n# TODO(rbharath): Use standard joblib once old-data has been regenerated.\nimport joblib\nfrom sklearn.externals import joblib as old_joblib\nimport gzip\nimport json\nimport pickle\nimport pandas as pd\nimport numpy as np\nimport os\nimport deepchem\nfrom rdkit import Chem\nimport warnings\nfrom deepchem.utils.genomics import encode_bio_sequence as encode_sequence, encode_fasta_sequence as fasta_sequence, seq_one_hot_encode as seq_one_hotencode\n\n\ndef log(string, verbose=True):\n \"\"\"Print string if verbose.\"\"\"\n if verbose:\n print(string)\n\n\ndef save_to_disk(dataset, filename, compress=3):\n \"\"\"Save a dataset to file.\"\"\"\n joblib.dump(dataset, filename, compress=compress)\n\n\ndef get_input_type(input_file):\n \"\"\"Get type of input file. Must be csv/pkl.gz/sdf file.\"\"\"\n filename, file_extension = os.path.splitext(input_file)\n # If gzipped, need to compute extension again\n if file_extension == \".gz\":\n filename, file_extension = os.path.splitext(filename)\n if file_extension == \".csv\":\n return \"csv\"\n elif file_extension == \".pkl\":\n return \"pandas-pickle\"\n elif file_extension == \".joblib\":\n return \"pandas-joblib\"\n elif file_extension == \".sdf\":\n return \"sdf\"\n else:\n raise ValueError(\"Unrecognized extension %s\" % file_extension)\n\n\ndef load_data(input_files, shard_size=None, verbose=True):\n \"\"\"Loads data from disk.\n\n For CSV files, supports sharded loading for large files.\n \"\"\"\n if not len(input_files):\n return\n input_type = get_input_type(input_files[0])\n if input_type == \"sdf\":\n if shard_size is not None:\n log(\"Ignoring shard_size for sdf input.\", verbose)\n for value in load_sdf_files(input_files):\n yield value\n elif input_type == \"csv\":\n for value in load_csv_files(input_files, shard_size, verbose=verbose):\n yield value\n elif input_type == \"pandas-pickle\":\n for input_file in input_files:\n yield load_pickle_from_disk(input_file)\n\n\ndef load_sdf_files(input_files, clean_mols):\n \"\"\"Load SDF file into dataframe.\"\"\"\n dataframes = []\n for input_file in input_files:\n # Tasks are stored in .sdf.csv file\n raw_df = next(load_csv_files([input_file + \".csv\"], shard_size=None))\n # Structures are stored in .sdf file\n print(\"Reading structures from %s.\" % input_file)\n suppl = Chem.SDMolSupplier(str(input_file), clean_mols, False, False)\n df_rows = []\n for ind, mol in enumerate(suppl):\n if mol is not None:\n smiles = Chem.MolToSmiles(mol)\n df_rows.append([ind, smiles, mol])\n mol_df = pd.DataFrame(df_rows, columns=('mol_id', 'smiles', 'mol'))\n dataframes.append(pd.concat([mol_df, raw_df], axis=1, join='inner'))\n return dataframes\n\n\ndef load_csv_files(filenames, shard_size=None, verbose=True):\n \"\"\"Load data as pandas dataframe.\"\"\"\n # First line of user-specified CSV *must* be header.\n shard_num = 1\n for filename in filenames:\n if shard_size is None:\n yield pd.read_csv(filename)\n else:\n log(\"About to start loading CSV from %s\" % filename, verbose)\n for df in pd.read_csv(filename, chunksize=shard_size):\n log(\"Loading shard %d of size %s.\" % (shard_num, str(shard_size)),\n verbose)\n df = df.replace(np.nan, str(\"\"), regex=True)\n shard_num += 1\n yield df\n\n\ndef seq_one_hot_encode(sequences, letters='ATCGN'):\n \"\"\"One hot encodes list of genomic sequences.\n\n Sequences encoded have shape 
(N_sequences, N_letters, sequence_length, 1).\n These sequences will be processed as images with one color channel.\n\n Parameters\n ----------\n sequences: np.ndarray\n Array of genetic sequences\n letters: str\n String with the set of possible letters in the sequences.\n\n Raises\n ------\n ValueError:\n If sequences are of different lengths.\n\n Returns\n -------\n np.ndarray: Shape (N_sequences, N_letters, sequence_length, 1).\n \"\"\"\n warnings.warn(\n \"This Function has been deprecated and now resides in deepchem.utils.genomics \",\n DeprecationWarning)\n return seq_one_hotencode(sequences, letters=letters)\n\n\ndef encode_fasta_sequence(fname):\n \"\"\"\n Loads fasta file and returns an array of one-hot sequences.\n\n Parameters\n ----------\n fname: str\n Filename of fasta file.\n\n Returns\n -------\n np.ndarray: Shape (N_sequences, 5, sequence_length, 1).\n \"\"\"\n warnings.warn(\n \"This Function has been deprecated and now resides in deepchem.utils.genomics\",\n DeprecationWarning)\n\n return fasta_sequence(fname)\n\n\ndef encode_bio_sequence(fname, file_type=\"fasta\", letters=\"ATCGN\"):\n \"\"\"\n Loads a sequence file and returns an array of one-hot sequences.\n\n Parameters\n ----------\n fname: str\n Filename of fasta file.\n file_type: str\n The type of file encoding to process, e.g. fasta or fastq, this\n is passed to Biopython.SeqIO.parse.\n letters: str\n The set of letters that the sequences consist of, e.g. ATCG.\n\n Returns\n -------\n np.ndarray: Shape (N_sequences, N_letters, sequence_length, 1).\n \"\"\"\n warnings.warn(\n \"This Function has been deprecated and now resides in deepchem.utils.genomics \",\n DeprecationWarning)\n return encode_sequence(fname, file_type=file_type, letters=letters)\n\n\ndef save_metadata(tasks, metadata_df, data_dir):\n \"\"\"\n Saves the metadata for a DiskDataset\n Parameters\n ----------\n tasks: list of str\n Tasks of DiskDataset\n metadata_df: pd.DataFrame\n data_dir: str\n Directory to store metadata\n Returns\n -------\n \"\"\"\n if isinstance(tasks, np.ndarray):\n tasks = tasks.tolist()\n metadata_filename = os.path.join(data_dir, \"metadata.csv.gzip\")\n tasks_filename = os.path.join(data_dir, \"tasks.json\")\n with open(tasks_filename, 'w') as fout:\n json.dump(tasks, fout)\n metadata_df.to_csv(metadata_filename, index=False, compression='gzip')\n\n\ndef load_from_disk(filename):\n \"\"\"Load a dataset from file.\"\"\"\n name = filename\n if os.path.splitext(name)[1] == \".gz\":\n name = os.path.splitext(name)[0]\n if os.path.splitext(name)[1] == \".pkl\":\n return load_pickle_from_disk(filename)\n elif os.path.splitext(name)[1] == \".joblib\":\n try:\n return joblib.load(filename)\n except KeyError:\n # Try older joblib version for legacy files.\n return old_joblib.load(filename)\n except ValueError:\n return old_joblib.load(filename)\n elif os.path.splitext(name)[1] == \".csv\":\n # First line of user-specified CSV *must* be header.\n df = pd.read_csv(filename, header=0)\n df = df.replace(np.nan, str(\"\"), regex=True)\n return df\n else:\n raise ValueError(\"Unrecognized filetype for %s\" % filename)\n\n\ndef load_sharded_csv(filenames):\n \"\"\"Load a dataset from multiple files. 
Each file MUST have same column headers\"\"\"\n dataframes = []\n for name in filenames:\n placeholder_name = name\n if os.path.splitext(name)[1] == \".gz\":\n name = os.path.splitext(name)[0]\n if os.path.splitext(name)[1] == \".csv\":\n # First line of user-specified CSV *must* be header.\n df = pd.read_csv(placeholder_name, header=0)\n df = df.replace(np.nan, str(\"\"), regex=True)\n dataframes.append(df)\n else:\n raise ValueError(\"Unrecognized filetype for %s\" % filename)\n\n # combine dataframes\n combined_df = dataframes[0]\n for i in range(0, len(dataframes) - 1):\n combined_df = combined_df.append(dataframes[i + 1])\n combined_df = combined_df.reset_index(drop=True)\n return combined_df\n\n\ndef load_pickle_from_disk(filename):\n \"\"\"Load dataset from pickle file.\"\"\"\n if \".gz\" in filename:\n with gzip.open(filename, \"rb\") as f:\n df = pickle.load(f)\n else:\n with open(filename, \"rb\") as f:\n df = pickle.load(f)\n return df\n\n\ndef load_dataset_from_disk(save_dir):\n \"\"\"\n Parameters\n ----------\n save_dir: str\n\n Returns\n -------\n loaded: bool\n Whether the load succeeded\n all_dataset: (dc.data.Dataset, dc.data.Dataset, dc.data.Dataset)\n The train, valid, test datasets\n transformers: list of dc.trans.Transformer\n The transformers used for this dataset\n\n \"\"\"\n\n train_dir = os.path.join(save_dir, \"train_dir\")\n valid_dir = os.path.join(save_dir, \"valid_dir\")\n test_dir = os.path.join(save_dir, \"test_dir\")\n if not os.path.exists(train_dir) or not os.path.exists(\n valid_dir) or not os.path.exists(test_dir):\n return False, None, list()\n loaded = True\n train = deepchem.data.DiskDataset(train_dir)\n valid = deepchem.data.DiskDataset(valid_dir)\n test = deepchem.data.DiskDataset(test_dir)\n all_dataset = (train, valid, test)\n with open(os.path.join(save_dir, \"transformers.pkl\"), 'rb') as f:\n transformers = pickle.load(f)\n return loaded, all_dataset, transformers\n\n\ndef save_dataset_to_disk(save_dir, train, valid, test, transformers):\n train_dir = os.path.join(save_dir, \"train_dir\")\n valid_dir = os.path.join(save_dir, \"valid_dir\")\n test_dir = os.path.join(save_dir, \"test_dir\")\n train.move(train_dir)\n valid.move(valid_dir)\n test.move(test_dir)\n with open(os.path.join(save_dir, \"transformers.pkl\"), 'wb') as f:\n pickle.dump(transformers, f)\n return None\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame",
"pandas.concat",
"sklearn.externals.joblib.load"
]
] |
HUuxiaobin/Face-Super-Resolution-Guided-by-3D-Facial-Priors | [
"987e7c74d33d26cc5e9d1c0e395a06519a31792f"
] | [
"3Dface_priors/facial_landmark.py"
] | [
"# import the necessary packages\nfrom imutils import face_utils\nimport numpy as np\nimport argparse\nimport imutils\nimport dlib\nimport cv2\n#python facial_landmarks.py --shape-predictor shape_predictor_68_face_landmarks.dat --image test1.jpg\ndef rect_to_bb(rect):\n# take a bounding predicted by dlib and convert it\n# to the format (x, y, w, h) as we would normally do\n# with OpenCV\n x = rect.left()\n y = rect.top()\n w = rect.right() - x\n h = rect.bottom() - y\n # return a tuple of (x, y, w, h)\n return (x, y, w, h)\n\ndef shape_to_np(shape, dtype=\"int\"):\n# initialize the list of (x, y)-coordinates\n coords = np.zeros((68, 2), dtype=dtype)\n# loop over the 68 facial landmarks and convert them\n# to a 2-tuple of (x, y)-coordinates\n for i in range(0, 68):\n coords[i] = (shape.part(i).x, shape.part(i).y)\n# return the list of (x, y)-coordinates\n return coords\n\n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-p\", \"--shape-predictor\", required=True, help=\"path to facial landmark predictor\")\nap.add_argument(\"-i\", \"--image\", required=True, help=\"path to input image\")\nargs = vars(ap.parse_args())\n\n# initialize dlib's face detector (HOG-based) and then create\n# the facial landmark predictor\ndetector = dlib.get_frontal_face_detector()\npredictor = dlib.shape_predictor(args[\"shape_predictor\"])\n\nimage = cv2.imread(args[\"image\"])\nimage = imutils.resize(image, width=500)\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n# detect faces in the grayscale image\nrects = detector(gray, 1)\n\n# loop over the face detections\nfor (i, rect) in enumerate(rects):\n# determine the facial landmarks for the face region, then\n# convert the facial landmark (x, y)-coordinates to a NumPy\n# array\n shape = predictor(gray, rect)\n shape = face_utils.shape_to_np(shape)\n# convert dlib's rectangle to a OpenCV-style bounding box\n# [i.e., (x, y, w, h)], then draw the face bounding box\n (x, y, w, h) = face_utils.rect_to_bb(rect)\n cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)\n # show the face number\n cv2.putText(image, \"Face #{}\".format(i + 1), (x - 10, y - 10),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)\n# loop over the (x, y)-coordinates for the facial landmarks\n# and draw them on the image\n for (x, y) in shape:\n cv2.circle(image, (x, y), 1, (0, 0, 255), -1)\n# show the output image with the face detections + facial landmarks\ncv2.imshow(\"Output\", image)\ncv2.waitKey(0)"
] | [
[
"numpy.zeros"
]
] |
koskotG/ebonite | [
"b01b662c43709d152940f488574d78ff25f89ecf"
] | [
"tests/ext/sklearn/test_model.py"
] | [
"import numpy as np\nimport pytest\nfrom sklearn.linear_model import LinearRegression, LogisticRegression\n\nfrom ebonite.core.analyzer.model import ModelAnalyzer\nfrom ebonite.ext.sklearn import SklearnModelWrapper\n\n\[email protected]\ndef inp_data():\n return [[1, 2, 3], [3, 2, 1]]\n\n\[email protected]\ndef out_data():\n return [1, 2]\n\n\[email protected]\ndef classifier(inp_data, out_data):\n lr = LogisticRegression()\n lr.fit(inp_data, out_data)\n return lr\n\n\[email protected]\ndef regressor(inp_data, out_data):\n lr = LinearRegression()\n lr.fit(inp_data, out_data)\n return lr\n\n\[email protected]('model', ['classifier', 'regressor'])\ndef test_hook(model, inp_data, request):\n model = request.getfixturevalue(model)\n wrapper = ModelAnalyzer.analyze(model, input_data=inp_data)\n\n assert isinstance(wrapper, SklearnModelWrapper)\n\n\[email protected]('model', ['classifier', 'regressor'])\ndef test_wrapper__predict(model, inp_data, request):\n model = request.getfixturevalue(model)\n wrapper = ModelAnalyzer.analyze(model, input_data=inp_data)\n\n np.testing.assert_array_almost_equal(model.predict(inp_data), wrapper.call_method('predict', inp_data))\n\n\ndef test_wrapper__clf_predict_proba(classifier, inp_data):\n wrapper = ModelAnalyzer.analyze(classifier, input_data=inp_data)\n\n np.testing.assert_array_almost_equal(classifier.predict_proba(inp_data),\n wrapper.call_method('predict_proba', inp_data))\n\n\ndef test_wrapper__reg_predict_proba(regressor, inp_data):\n wrapper = ModelAnalyzer.analyze(regressor, input_data=inp_data)\n\n with pytest.raises(ValueError):\n wrapper.call_method('predict_proba', inp_data)\n\n\[email protected]('model', ['classifier', 'regressor'])\ndef test_wrapper__dump_load(tmpdir, model, inp_data, request):\n model = request.getfixturevalue(model)\n wrapper = ModelAnalyzer.analyze(model, input_data=inp_data)\n\n expected_requirements = {'sklearn', 'numpy'}\n assert set(wrapper.requirements.modules) == expected_requirements\n\n with wrapper.dump() as d:\n d.materialize(tmpdir)\n wrapper.unbind()\n with pytest.raises(ValueError):\n wrapper.call_method('predict', inp_data)\n\n wrapper.load(tmpdir)\n np.testing.assert_array_almost_equal(model.predict(inp_data), wrapper.call_method('predict', inp_data))\n assert set(wrapper.requirements.modules) == expected_requirements\n"
] | [
[
"sklearn.linear_model.LinearRegression",
"sklearn.linear_model.LogisticRegression"
]
] |
derEitel/patch_individual_filter_layer | [
"ecd7b3ace759e10ceda8c39ebe1190a7bc27f223"
] | [
"nitorch/nitorch/initialization.py"
] | [
"# Initialize weights\nfrom torch.nn import init, Conv3d, BatchNorm3d, Linear\n\n\ndef xavier(x):\n \"\"\"Wrapper for torch.nn.init.xavier method.\n\n Parameters\n ----------\n x : torch.tensor\n Input tensor to be initialized. See torch.nn.init.py for more information\n\n Returns\n -------\n torch.tensor\n Initialized tensor\n\n \"\"\"\n return init.xavier_normal_(x)\n\n\ndef xavier_uniform(x):\n \"\"\"Wrapper for torch.nn.init.xavier_uniform method.\n\n Parameters\n ----------\n x : torch.tensor\n Input tensor to be initialized. See torch.nn.init.py for more information\n\n Returns\n -------\n torch.tensor\n Initialized tensor\n\n \"\"\"\n return init.xavier_uniform_(x)\n\n\ndef he(x):\n \"\"\"Wrapper for torch.nn.init.kaiming_normal_ method.\n\n Parameters\n ----------\n x : torch.tensor\n Input tensor to be initialized. See torch.nn.init.py for more information\n\n Returns\n -------\n torch.tensor\n Initialized tensor\n\n \"\"\"\n return init.kaiming_normal_(x)\n\n\ndef he_uniform(x):\n \"\"\"Wrapper for torch.nn.init.kaiming_uniform_ method.\n\n Parameters\n ----------\n x : torch.tensor\n Input tensor to be initialized. See torch.nn.init.py for more information\n\n Returns\n -------\n torch.tensor\n Initialized tensor\n\n \"\"\"\n return init.kaiming_uniform_(x)\n\n\ndef weights_init(m, func=he_uniform):\n \"\"\"Performs weight initialization for a layer.\n\n Parameters\n ----------\n m\n The layer which weights should be initialized.\n func\n The function to use to initialize weights.\n\n Returns\n -------\n m\n Weight initialized layer.\n\n \"\"\"\n if isinstance(m, Conv3d):\n func(m.weight.data)\n if m.bias is not None:\n init.constant_(m.bias, 0)\n elif isinstance(m, BatchNorm3d):\n m.reset_parameters()\n elif isinstance(m, Linear):\n m.reset_parameters()\n"
] | [
[
"torch.nn.init.kaiming_normal_",
"torch.nn.init.xavier_uniform_",
"torch.nn.init.constant_",
"torch.nn.init.xavier_normal_",
"torch.nn.init.kaiming_uniform_"
]
] |
piersharding/astropy | [
"9680cd546aa9063758f2c23c836ca79a7c8f1eb1"
] | [
"astropy/io/votable/tree.py"
] | [
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n# TODO: Test FITS parsing\n\n# STDLIB\nimport io\nimport re\nimport gzip\nimport base64\nimport codecs\nimport urllib.request\n\n# THIRD-PARTY\nimport numpy as np\nfrom numpy import ma\n\n# LOCAL\nfrom astropy.io import fits\nfrom astropy import __version__ as astropy_version\nfrom astropy.utils.collections import HomogeneousList\nfrom astropy.utils.xml.writer import XMLWriter\n\nfrom . import converters\nfrom .exceptions import (warn_or_raise, vo_warn, vo_raise, vo_reraise,\n warn_unknown_attrs, W06, W07, W08, W09, W10, W11, W12,\n W13, W15, W17, W18, W19, W20, W21, W22, W26, W27, W28,\n W29, W32, W33, W35, W36, W37, W38, W40, W41, W42, W43,\n W44, W45, W50, W52, W53, W54, E06, E08, E09, E10, E11,\n E12, E13, E15, E16, E17, E18, E19, E20, E21, E22, E23,\n E25)\nfrom . import ucd as ucd_mod\nfrom . import util\nfrom . import xmlutil\n\ntry:\n from . import tablewriter\n _has_c_tabledata_writer = True\nexcept ImportError:\n _has_c_tabledata_writer = False\n\n\n__all__ = [\n 'Link', 'Info', 'Values', 'Field', 'Param', 'CooSys', 'TimeSys',\n 'FieldRef', 'ParamRef', 'Group', 'Table', 'Resource',\n 'VOTableFile'\n ]\n\n\n# The default number of rows to read in each chunk before converting\n# to an array.\nDEFAULT_CHUNK_SIZE = 256\nRESIZE_AMOUNT = 1.5\n\n######################################################################\n# FACTORY FUNCTIONS\n\n\ndef _resize(masked, new_size):\n \"\"\"\n Masked arrays can not be resized inplace, and `np.resize` and\n `ma.resize` are both incompatible with structured arrays.\n Therefore, we do all this.\n \"\"\"\n new_array = ma.zeros((new_size,), dtype=masked.dtype)\n length = min(len(masked), new_size)\n new_array[:length] = masked[:length]\n\n return new_array\n\n\ndef _lookup_by_attr_factory(attr, unique, iterator, element_name, doc):\n \"\"\"\n Creates a function useful for looking up an element by a given\n attribute.\n\n Parameters\n ----------\n attr : str\n The attribute name\n\n unique : bool\n Should be `True` if the attribute is unique and therefore this\n should return only one value. Otherwise, returns a list of\n values.\n\n iterator : generator\n A generator that iterates over some arbitrary set of elements\n\n element_name : str\n The XML element name of the elements being iterated over (used\n for error messages only).\n\n doc : str\n A docstring to apply to the generated function.\n\n Returns\n -------\n factory : function\n A function that looks up an element by the given attribute.\n \"\"\"\n\n def lookup_by_attr(self, ref, before=None):\n \"\"\"\n Given a string *ref*, finds the first element in the iterator\n where the given attribute == *ref*. If *before* is provided,\n will stop searching at the object *before*. 
This is\n important, since \"forward references\" are not allowed in the\n VOTABLE format.\n \"\"\"\n for element in getattr(self, iterator)():\n if element is before:\n if getattr(element, attr, None) == ref:\n vo_raise(\n f\"{element_name} references itself\",\n element._config, element._pos, KeyError)\n break\n if getattr(element, attr, None) == ref:\n yield element\n\n def lookup_by_attr_unique(self, ref, before=None):\n for element in lookup_by_attr(self, ref, before=before):\n return element\n raise KeyError(\n \"No {} with {} '{}' found before the referencing {}\".format(\n element_name, attr, ref, element_name))\n\n if unique:\n lookup_by_attr_unique.__doc__ = doc\n return lookup_by_attr_unique\n else:\n lookup_by_attr.__doc__ = doc\n return lookup_by_attr\n\n\ndef _lookup_by_id_or_name_factory(iterator, element_name, doc):\n \"\"\"\n Like `_lookup_by_attr_factory`, but looks in both the \"ID\" and\n \"name\" attributes.\n \"\"\"\n\n def lookup_by_id_or_name(self, ref, before=None):\n \"\"\"\n Given an key *ref*, finds the first element in the iterator\n with the attribute ID == *ref* or name == *ref*. If *before*\n is provided, will stop searching at the object *before*. This\n is important, since \"forward references\" are not allowed in\n the VOTABLE format.\n \"\"\"\n for element in getattr(self, iterator)():\n if element is before:\n if ref in (element.ID, element.name):\n vo_raise(\n f\"{element_name} references itself\",\n element._config, element._pos, KeyError)\n break\n if ref in (element.ID, element.name):\n return element\n raise KeyError(\n \"No {} with ID or name '{}' found before the referencing {}\".format(\n element_name, ref, element_name))\n\n lookup_by_id_or_name.__doc__ = doc\n return lookup_by_id_or_name\n\n\ndef _get_default_unit_format(config):\n \"\"\"\n Get the default unit format as specified in the VOTable spec.\n \"\"\"\n # The unit format changed between VOTable versions 1.3 and 1.4,\n # see issue #10791.\n if config['version_1_4_or_later']:\n return 'vounit'\n else:\n return 'cds'\n\n\ndef _get_unit_format(config):\n \"\"\"\n Get the unit format based on the configuration.\n \"\"\"\n if config.get('unit_format') is None:\n format = _get_default_unit_format(config)\n else:\n format = config['unit_format']\n return format\n\n\n######################################################################\n# ATTRIBUTE CHECKERS\ndef check_astroyear(year, field, config=None, pos=None):\n \"\"\"\n Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if\n *year* is not a valid astronomical year as defined by the VOTABLE\n standard.\n\n Parameters\n ----------\n year : str\n An astronomical year string\n\n field : str\n The name of the field this year was found in (used for error\n message)\n\n config, pos : optional\n Information about the source of the value\n \"\"\"\n if (year is not None and\n re.match(r\"^[JB]?[0-9]+([.][0-9]*)?$\", year) is None):\n warn_or_raise(W07, W07, (field, year), config, pos)\n return False\n return True\n\n\ndef check_string(string, attr_name, config=None, pos=None):\n \"\"\"\n Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if\n *string* is not a string or Unicode string.\n\n Parameters\n ----------\n string : str\n An astronomical year string\n\n attr_name : str\n The name of the field this year was found in (used for error\n message)\n\n config, pos : optional\n Information about the source of the value\n \"\"\"\n if string is not None and not isinstance(string, str):\n warn_or_raise(W08, W08, attr_name, config, 
pos)\n return False\n return True\n\n\ndef resolve_id(ID, id, config=None, pos=None):\n if ID is None and id is not None:\n warn_or_raise(W09, W09, (), config, pos)\n return id\n return ID\n\n\ndef check_ucd(ucd, config=None, pos=None):\n \"\"\"\n Warns or raises a\n `~astropy.io.votable.exceptions.VOTableSpecError` if *ucd* is not\n a valid `unified content descriptor`_ string as defined by the\n VOTABLE standard.\n\n Parameters\n ----------\n ucd : str\n A UCD string.\n\n config, pos : optional\n Information about the source of the value\n \"\"\"\n if config is None:\n config = {}\n if config.get('version_1_1_or_later'):\n try:\n ucd_mod.parse_ucd(\n ucd,\n check_controlled_vocabulary=config.get(\n 'version_1_2_or_later', False),\n has_colon=config.get('version_1_2_or_later', False))\n except ValueError as e:\n # This weird construction is for Python 3 compatibility\n if config.get('verify', 'ignore') == 'exception':\n vo_raise(W06, (ucd, str(e)), config, pos)\n elif config.get('verify', 'ignore') == 'warn':\n vo_warn(W06, (ucd, str(e)), config, pos)\n return False\n else:\n return False\n return True\n\n\n######################################################################\n# PROPERTY MIXINS\nclass _IDProperty:\n @property\n def ID(self):\n \"\"\"\n The XML ID_ of the element. May be `None` or a string\n conforming to XML ID_ syntax.\n \"\"\"\n return self._ID\n\n @ID.setter\n def ID(self, ID):\n xmlutil.check_id(ID, 'ID', self._config, self._pos)\n self._ID = ID\n\n @ID.deleter\n def ID(self):\n self._ID = None\n\n\nclass _NameProperty:\n @property\n def name(self):\n \"\"\"An optional name for the element.\"\"\"\n return self._name\n\n @name.setter\n def name(self, name):\n xmlutil.check_token(name, 'name', self._config, self._pos)\n self._name = name\n\n @name.deleter\n def name(self):\n self._name = None\n\n\nclass _XtypeProperty:\n @property\n def xtype(self):\n \"\"\"Extended data type information.\"\"\"\n return self._xtype\n\n @xtype.setter\n def xtype(self, xtype):\n if xtype is not None and not self._config.get('version_1_2_or_later'):\n warn_or_raise(\n W28, W28, ('xtype', self._element_name, '1.2'),\n self._config, self._pos)\n check_string(xtype, 'xtype', self._config, self._pos)\n self._xtype = xtype\n\n @xtype.deleter\n def xtype(self):\n self._xtype = None\n\n\nclass _UtypeProperty:\n _utype_in_v1_2 = False\n\n @property\n def utype(self):\n \"\"\"The usage-specific or `unique type`_ of the element.\"\"\"\n return self._utype\n\n @utype.setter\n def utype(self, utype):\n if (self._utype_in_v1_2 and\n utype is not None and\n not self._config.get('version_1_2_or_later')):\n warn_or_raise(\n W28, W28, ('utype', self._element_name, '1.2'),\n self._config, self._pos)\n check_string(utype, 'utype', self._config, self._pos)\n self._utype = utype\n\n @utype.deleter\n def utype(self):\n self._utype = None\n\n\nclass _UcdProperty:\n _ucd_in_v1_2 = False\n\n @property\n def ucd(self):\n \"\"\"The `unified content descriptor`_ for the element.\"\"\"\n return self._ucd\n\n @ucd.setter\n def ucd(self, ucd):\n if ucd is not None and ucd.strip() == '':\n ucd = None\n if ucd is not None:\n if (self._ucd_in_v1_2 and\n not self._config.get('version_1_2_or_later')):\n warn_or_raise(\n W28, W28, ('ucd', self._element_name, '1.2'),\n self._config, self._pos)\n check_ucd(ucd, self._config, self._pos)\n self._ucd = ucd\n\n @ucd.deleter\n def ucd(self):\n self._ucd = None\n\n\nclass _DescriptionProperty:\n @property\n def description(self):\n \"\"\"\n An optional string describing the element. 
Corresponds to the\n DESCRIPTION_ element.\n \"\"\"\n return self._description\n\n @description.setter\n def description(self, description):\n self._description = description\n\n @description.deleter\n def description(self):\n self._description = None\n\n\n######################################################################\n# ELEMENT CLASSES\nclass Element:\n \"\"\"\n A base class for all classes that represent XML elements in the\n VOTABLE file.\n \"\"\"\n _element_name = ''\n _attr_list = []\n\n def _add_unknown_tag(self, iterator, tag, data, config, pos):\n warn_or_raise(W10, W10, tag, config, pos)\n\n def _ignore_add(self, iterator, tag, data, config, pos):\n warn_unknown_attrs(tag, data.keys(), config, pos)\n\n def _add_definitions(self, iterator, tag, data, config, pos):\n if config.get('version_1_1_or_later'):\n warn_or_raise(W22, W22, (), config, pos)\n warn_unknown_attrs(tag, data.keys(), config, pos)\n\n def parse(self, iterator, config):\n \"\"\"\n For internal use. Parse the XML content of the children of the\n element.\n\n Parameters\n ----------\n iterator : xml iterator\n An iterator over XML elements as returned by\n `~astropy.utils.xml.iterparser.get_xml_iterator`.\n\n config : dict\n The configuration dictionary that affects how certain\n elements are read.\n\n Returns\n -------\n self : Element\n Returns self as a convenience.\n \"\"\"\n raise NotImplementedError()\n\n def to_xml(self, w, **kwargs):\n \"\"\"\n For internal use. Output the element to XML.\n\n Parameters\n ----------\n w : astropy.utils.xml.writer.XMLWriter object\n An XML writer to write to.\n\n kwargs : dict\n Any configuration parameters to control the output.\n \"\"\"\n raise NotImplementedError()\n\n\nclass SimpleElement(Element):\n \"\"\"\n A base class for simple elements, such as FIELD, PARAM and INFO\n that don't require any special parsing or outputting machinery.\n \"\"\"\n\n def __init__(self):\n Element.__init__(self)\n\n def __repr__(self):\n buff = io.StringIO()\n SimpleElement.to_xml(self, XMLWriter(buff))\n return buff.getvalue().strip()\n\n def parse(self, iterator, config):\n for start, tag, data, pos in iterator:\n if start and tag != self._element_name:\n self._add_unknown_tag(iterator, tag, data, config, pos)\n elif tag == self._element_name:\n break\n\n return self\n\n def to_xml(self, w, **kwargs):\n w.element(self._element_name,\n attrib=w.object_attrs(self, self._attr_list))\n\n\nclass SimpleElementWithContent(SimpleElement):\n \"\"\"\n A base class for simple elements, such as FIELD, PARAM and INFO\n that don't require any special parsing or outputting machinery.\n \"\"\"\n\n def __init__(self):\n SimpleElement.__init__(self)\n\n self._content = None\n\n def parse(self, iterator, config):\n for start, tag, data, pos in iterator:\n if start and tag != self._element_name:\n self._add_unknown_tag(iterator, tag, data, config, pos)\n elif tag == self._element_name:\n if data:\n self.content = data\n break\n\n return self\n\n def to_xml(self, w, **kwargs):\n w.element(self._element_name, self._content,\n attrib=w.object_attrs(self, self._attr_list))\n\n @property\n def content(self):\n \"\"\"The content of the element.\"\"\"\n return self._content\n\n @content.setter\n def content(self, content):\n check_string(content, 'content', self._config, self._pos)\n self._content = content\n\n @content.deleter\n def content(self):\n self._content = None\n\n\nclass Link(SimpleElement, _IDProperty):\n \"\"\"\n LINK_ elements: used to reference external documents and servers through a URI.\n\n 
The keyword arguments correspond to setting members of the same\n name, documented below.\n \"\"\"\n _attr_list = ['ID', 'content_role', 'content_type', 'title', 'value',\n 'href', 'action']\n _element_name = 'LINK'\n\n def __init__(self, ID=None, title=None, value=None, href=None, action=None,\n id=None, config=None, pos=None, **kwargs):\n if config is None:\n config = {}\n self._config = config\n self._pos = pos\n\n SimpleElement.__init__(self)\n\n content_role = kwargs.get('content-role') or kwargs.get('content_role')\n content_type = kwargs.get('content-type') or kwargs.get('content_type')\n\n if 'gref' in kwargs:\n warn_or_raise(W11, W11, (), config, pos)\n\n self.ID = resolve_id(ID, id, config, pos)\n self.content_role = content_role\n self.content_type = content_type\n self.title = title\n self.value = value\n self.href = href\n self.action = action\n\n warn_unknown_attrs(\n 'LINK', kwargs.keys(), config, pos,\n ['content-role', 'content_role', 'content-type', 'content_type',\n 'gref'])\n\n @property\n def content_role(self):\n \"\"\"\n Defines the MIME role of the referenced object. Must be one of:\n\n None, 'query', 'hints', 'doc', 'location' or 'type'\n \"\"\"\n return self._content_role\n\n @content_role.setter\n def content_role(self, content_role):\n if ((content_role == 'type' and\n not self._config['version_1_3_or_later']) or\n content_role not in\n (None, 'query', 'hints', 'doc', 'location')):\n vo_warn(W45, (content_role,), self._config, self._pos)\n self._content_role = content_role\n\n @content_role.deleter\n def content_role(self):\n self._content_role = None\n\n @property\n def content_type(self):\n \"\"\"Defines the MIME content type of the referenced object.\"\"\"\n return self._content_type\n\n @content_type.setter\n def content_type(self, content_type):\n xmlutil.check_mime_content_type(content_type, self._config, self._pos)\n self._content_type = content_type\n\n @content_type.deleter\n def content_type(self):\n self._content_type = None\n\n @property\n def href(self):\n \"\"\"\n A URI to an arbitrary protocol. 
The vo package only supports\n http and anonymous ftp.\n \"\"\"\n return self._href\n\n @href.setter\n def href(self, href):\n xmlutil.check_anyuri(href, self._config, self._pos)\n self._href = href\n\n @href.deleter\n def href(self):\n self._href = None\n\n def to_table_column(self, column):\n meta = {}\n for key in self._attr_list:\n val = getattr(self, key, None)\n if val is not None:\n meta[key] = val\n\n column.meta.setdefault('links', [])\n column.meta['links'].append(meta)\n\n @classmethod\n def from_table_column(cls, d):\n return cls(**d)\n\n\nclass Info(SimpleElementWithContent, _IDProperty, _XtypeProperty,\n _UtypeProperty):\n \"\"\"\n INFO_ elements: arbitrary key-value pairs for extensions to the standard.\n\n The keyword arguments correspond to setting members of the same\n name, documented below.\n \"\"\"\n _element_name = 'INFO'\n _attr_list_11 = ['ID', 'name', 'value']\n _attr_list_12 = _attr_list_11 + ['xtype', 'ref', 'unit', 'ucd', 'utype']\n _utype_in_v1_2 = True\n\n def __init__(self, ID=None, name=None, value=None, id=None, xtype=None,\n ref=None, unit=None, ucd=None, utype=None,\n config=None, pos=None, **extra):\n if config is None:\n config = {}\n self._config = config\n self._pos = pos\n\n SimpleElementWithContent.__init__(self)\n\n self.ID = (resolve_id(ID, id, config, pos) or\n xmlutil.fix_id(name, config, pos))\n self.name = name\n self.value = value\n self.xtype = xtype\n self.ref = ref\n self.unit = unit\n self.ucd = ucd\n self.utype = utype\n\n if config.get('version_1_2_or_later'):\n self._attr_list = self._attr_list_12\n else:\n self._attr_list = self._attr_list_11\n if xtype is not None:\n warn_unknown_attrs('INFO', ['xtype'], config, pos)\n if ref is not None:\n warn_unknown_attrs('INFO', ['ref'], config, pos)\n if unit is not None:\n warn_unknown_attrs('INFO', ['unit'], config, pos)\n if ucd is not None:\n warn_unknown_attrs('INFO', ['ucd'], config, pos)\n if utype is not None:\n warn_unknown_attrs('INFO', ['utype'], config, pos)\n\n warn_unknown_attrs('INFO', extra.keys(), config, pos)\n\n @property\n def name(self):\n \"\"\"[*required*] The key of the key-value pair.\"\"\"\n return self._name\n\n @name.setter\n def name(self, name):\n if name is None:\n warn_or_raise(W35, W35, ('name'), self._config, self._pos)\n xmlutil.check_token(name, 'name', self._config, self._pos)\n self._name = name\n\n @property\n def value(self):\n \"\"\"\n [*required*] The value of the key-value pair. 
(Always stored\n as a string or unicode string).\n \"\"\"\n return self._value\n\n @value.setter\n def value(self, value):\n if value is None:\n warn_or_raise(W35, W35, ('value'), self._config, self._pos)\n check_string(value, 'value', self._config, self._pos)\n self._value = value\n\n @property\n def content(self):\n \"\"\"The content inside the INFO element.\"\"\"\n return self._content\n\n @content.setter\n def content(self, content):\n check_string(content, 'content', self._config, self._pos)\n self._content = content\n\n @content.deleter\n def content(self):\n self._content = None\n\n @property\n def ref(self):\n \"\"\"\n Refer to another INFO_ element by ID_, defined previously in\n the document.\n \"\"\"\n return self._ref\n\n @ref.setter\n def ref(self, ref):\n if ref is not None and not self._config.get('version_1_2_or_later'):\n warn_or_raise(W28, W28, ('ref', 'INFO', '1.2'),\n self._config, self._pos)\n xmlutil.check_id(ref, 'ref', self._config, self._pos)\n # TODO: actually apply the reference\n # if ref is not None:\n # try:\n # other = self._votable.get_values_by_id(ref, before=self)\n # except KeyError:\n # vo_raise(\n # \"VALUES ref='%s', which has not already been defined.\" %\n # self.ref, self._config, self._pos, KeyError)\n # self.null = other.null\n # self.type = other.type\n # self.min = other.min\n # self.min_inclusive = other.min_inclusive\n # self.max = other.max\n # self.max_inclusive = other.max_inclusive\n # self._options[:] = other.options\n self._ref = ref\n\n @ref.deleter\n def ref(self):\n self._ref = None\n\n @property\n def unit(self):\n \"\"\"A string specifying the units_ for the INFO_.\"\"\"\n return self._unit\n\n @unit.setter\n def unit(self, unit):\n if unit is None:\n self._unit = None\n return\n\n from astropy import units as u\n\n if not self._config.get('version_1_2_or_later'):\n warn_or_raise(W28, W28, ('unit', 'INFO', '1.2'),\n self._config, self._pos)\n\n # First, parse the unit in the default way, so that we can\n # still emit a warning if the unit is not to spec.\n default_format = _get_default_unit_format(self._config)\n unit_obj = u.Unit(\n unit, format=default_format, parse_strict='silent')\n if isinstance(unit_obj, u.UnrecognizedUnit):\n warn_or_raise(W50, W50, (unit,),\n self._config, self._pos)\n\n format = _get_unit_format(self._config)\n if format != default_format:\n unit_obj = u.Unit(\n unit, format=format, parse_strict='silent')\n\n self._unit = unit_obj\n\n @unit.deleter\n def unit(self):\n self._unit = None\n\n def to_xml(self, w, **kwargs):\n attrib = w.object_attrs(self, self._attr_list)\n if 'unit' in attrib:\n attrib['unit'] = self.unit.to_string('cds')\n w.element(self._element_name, self._content,\n attrib=attrib)\n\n\nclass Values(Element, _IDProperty):\n \"\"\"\n VALUES_ element: used within FIELD_ and PARAM_ elements to define the domain of values.\n\n The keyword arguments correspond to setting members of the same\n name, documented below.\n \"\"\"\n\n def __init__(self, votable, field, ID=None, null=None, ref=None,\n type=\"legal\", id=None, config=None, pos=None, **extras):\n if config is None:\n config = {}\n self._config = config\n self._pos = pos\n\n Element.__init__(self)\n\n self._votable = votable\n self._field = field\n self.ID = resolve_id(ID, id, config, pos)\n self.null = null\n self._ref = ref\n self.type = type\n\n self.min = None\n self.max = None\n self.min_inclusive = True\n self.max_inclusive = True\n self._options = []\n\n warn_unknown_attrs('VALUES', extras.keys(), config, pos)\n\n def 
__repr__(self):\n buff = io.StringIO()\n self.to_xml(XMLWriter(buff))\n return buff.getvalue().strip()\n\n @property\n def null(self):\n \"\"\"\n For integral datatypes, *null* is used to define the value\n used for missing values.\n \"\"\"\n return self._null\n\n @null.setter\n def null(self, null):\n if null is not None and isinstance(null, str):\n try:\n null_val = self._field.converter.parse_scalar(\n null, self._config, self._pos)[0]\n except Exception:\n warn_or_raise(W36, W36, null, self._config, self._pos)\n null_val = self._field.converter.parse_scalar(\n '0', self._config, self._pos)[0]\n else:\n null_val = null\n self._null = null_val\n\n @null.deleter\n def null(self):\n self._null = None\n\n @property\n def type(self):\n \"\"\"\n [*required*] Defines the applicability of the domain defined\n by this VALUES_ element. Must be one of the following\n strings:\n\n - 'legal': The domain of this column applies in general to\n this datatype. (default)\n\n - 'actual': The domain of this column applies only to the\n data enclosed in the parent table.\n \"\"\"\n return self._type\n\n @type.setter\n def type(self, type):\n if type not in ('legal', 'actual'):\n vo_raise(E08, type, self._config, self._pos)\n self._type = type\n\n @property\n def ref(self):\n \"\"\"\n Refer to another VALUES_ element by ID_, defined previously in\n the document, for MIN/MAX/OPTION information.\n \"\"\"\n return self._ref\n\n @ref.setter\n def ref(self, ref):\n xmlutil.check_id(ref, 'ref', self._config, self._pos)\n if ref is not None:\n try:\n other = self._votable.get_values_by_id(ref, before=self)\n except KeyError:\n warn_or_raise(W43, W43, ('VALUES', self.ref), self._config,\n self._pos)\n ref = None\n else:\n self.null = other.null\n self.type = other.type\n self.min = other.min\n self.min_inclusive = other.min_inclusive\n self.max = other.max\n self.max_inclusive = other.max_inclusive\n self._options[:] = other.options\n self._ref = ref\n\n @ref.deleter\n def ref(self):\n self._ref = None\n\n @property\n def min(self):\n \"\"\"\n The minimum value of the domain. See :attr:`min_inclusive`.\n \"\"\"\n return self._min\n\n @min.setter\n def min(self, min):\n if hasattr(self._field, 'converter') and min is not None:\n self._min = self._field.converter.parse(min)[0]\n else:\n self._min = min\n\n @min.deleter\n def min(self):\n self._min = None\n\n @property\n def min_inclusive(self):\n \"\"\"When `True`, the domain includes the minimum value.\"\"\"\n return self._min_inclusive\n\n @min_inclusive.setter\n def min_inclusive(self, inclusive):\n if inclusive == 'yes':\n self._min_inclusive = True\n elif inclusive == 'no':\n self._min_inclusive = False\n else:\n self._min_inclusive = bool(inclusive)\n\n @min_inclusive.deleter\n def min_inclusive(self):\n self._min_inclusive = True\n\n @property\n def max(self):\n \"\"\"\n The maximum value of the domain. 
See :attr:`max_inclusive`.\n \"\"\"\n return self._max\n\n @max.setter\n def max(self, max):\n if hasattr(self._field, 'converter') and max is not None:\n self._max = self._field.converter.parse(max)[0]\n else:\n self._max = max\n\n @max.deleter\n def max(self):\n self._max = None\n\n @property\n def max_inclusive(self):\n \"\"\"When `True`, the domain includes the maximum value.\"\"\"\n return self._max_inclusive\n\n @max_inclusive.setter\n def max_inclusive(self, inclusive):\n if inclusive == 'yes':\n self._max_inclusive = True\n elif inclusive == 'no':\n self._max_inclusive = False\n else:\n self._max_inclusive = bool(inclusive)\n\n @max_inclusive.deleter\n def max_inclusive(self):\n self._max_inclusive = True\n\n @property\n def options(self):\n \"\"\"\n A list of string key-value tuples defining other OPTION\n elements for the domain. All options are ignored -- they are\n stored for round-tripping purposes only.\n \"\"\"\n return self._options\n\n def parse(self, iterator, config):\n if self.ref is not None:\n for start, tag, data, pos in iterator:\n if start:\n warn_or_raise(W44, W44, tag, config, pos)\n else:\n if tag != 'VALUES':\n warn_or_raise(W44, W44, tag, config, pos)\n break\n else:\n for start, tag, data, pos in iterator:\n if start:\n if tag == 'MIN':\n if 'value' not in data:\n vo_raise(E09, 'MIN', config, pos)\n self.min = data['value']\n self.min_inclusive = data.get('inclusive', 'yes')\n warn_unknown_attrs(\n 'MIN', data.keys(), config, pos,\n ['value', 'inclusive'])\n elif tag == 'MAX':\n if 'value' not in data:\n vo_raise(E09, 'MAX', config, pos)\n self.max = data['value']\n self.max_inclusive = data.get('inclusive', 'yes')\n warn_unknown_attrs(\n 'MAX', data.keys(), config, pos,\n ['value', 'inclusive'])\n elif tag == 'OPTION':\n if 'value' not in data:\n vo_raise(E09, 'OPTION', config, pos)\n xmlutil.check_token(\n data.get('name'), 'name', config, pos)\n self.options.append(\n (data.get('name'), data.get('value')))\n warn_unknown_attrs(\n 'OPTION', data.keys(), config, pos,\n ['value', 'name'])\n elif tag == 'VALUES':\n break\n\n return self\n\n def is_defaults(self):\n \"\"\"\n Are the settings on this ``VALUE`` element all the same as the\n XML defaults?\n \"\"\"\n # If there's nothing meaningful or non-default to write,\n # don't write anything.\n return (self.ref is None and self.null is None and self.ID is None and\n self.max is None and self.min is None and self.options == [])\n\n def to_xml(self, w, **kwargs):\n def yes_no(value):\n if value:\n return 'yes'\n return 'no'\n\n if self.is_defaults():\n return\n\n if self.ref is not None:\n w.element('VALUES', attrib=w.object_attrs(self, ['ref']))\n else:\n with w.tag('VALUES',\n attrib=w.object_attrs(\n self, ['ID', 'null', 'ref'])):\n if self.min is not None:\n w.element(\n 'MIN',\n value=self._field.converter.output(self.min, False),\n inclusive=yes_no(self.min_inclusive))\n if self.max is not None:\n w.element(\n 'MAX',\n value=self._field.converter.output(self.max, False),\n inclusive=yes_no(self.max_inclusive))\n for name, value in self.options:\n w.element(\n 'OPTION',\n name=name,\n value=value)\n\n def to_table_column(self, column):\n # Have the ref filled in here\n meta = {}\n for key in ['ID', 'null']:\n val = getattr(self, key, None)\n if val is not None:\n meta[key] = val\n if self.min is not None:\n meta['min'] = {\n 'value': self.min,\n 'inclusive': self.min_inclusive}\n if self.max is not None:\n meta['max'] = {\n 'value': self.max,\n 'inclusive': self.max_inclusive}\n if len(self.options):\n 
meta['options'] = dict(self.options)\n\n column.meta['values'] = meta\n\n def from_table_column(self, column):\n if column.info.meta is None or 'values' not in column.info.meta:\n return\n\n meta = column.info.meta['values']\n for key in ['ID', 'null']:\n val = meta.get(key, None)\n if val is not None:\n setattr(self, key, val)\n if 'min' in meta:\n self.min = meta['min']['value']\n self.min_inclusive = meta['min']['inclusive']\n if 'max' in meta:\n self.max = meta['max']['value']\n self.max_inclusive = meta['max']['inclusive']\n if 'options' in meta:\n self._options = list(meta['options'].items())\n\n\nclass Field(SimpleElement, _IDProperty, _NameProperty, _XtypeProperty,\n _UtypeProperty, _UcdProperty):\n \"\"\"\n FIELD_ element: describes the datatype of a particular column of data.\n\n The keyword arguments correspond to setting members of the same\n name, documented below.\n\n If *ID* is provided, it is used for the column name in the\n resulting recarray of the table. If no *ID* is provided, *name*\n is used instead. If neither is provided, an exception will be\n raised.\n \"\"\"\n _attr_list_11 = ['ID', 'name', 'datatype', 'arraysize', 'ucd',\n 'unit', 'width', 'precision', 'utype', 'ref']\n _attr_list_12 = _attr_list_11 + ['xtype']\n _element_name = 'FIELD'\n\n def __init__(self, votable, ID=None, name=None, datatype=None,\n arraysize=None, ucd=None, unit=None, width=None,\n precision=None, utype=None, ref=None, type=None, id=None,\n xtype=None,\n config=None, pos=None, **extra):\n if config is None:\n if hasattr(votable, '_get_version_checks'):\n config = votable._get_version_checks()\n else:\n config = {}\n self._config = config\n self._pos = pos\n\n SimpleElement.__init__(self)\n\n if config.get('version_1_2_or_later'):\n self._attr_list = self._attr_list_12\n else:\n self._attr_list = self._attr_list_11\n if xtype is not None:\n warn_unknown_attrs(self._element_name, ['xtype'], config, pos)\n\n # TODO: REMOVE ME ----------------------------------------\n # This is a terrible hack to support Simple Image Access\n # Protocol results from archive.noao.edu. It creates a field\n # for the coordinate projection type of type \"double\", which\n # actually contains character data. We have to hack the field\n # to store character data, or we can't read it in. 
A warning\n # will be raised when this happens.\n if (config.get('verify', 'ignore') != 'exception' and name == 'cprojection' and\n ID == 'cprojection' and ucd == 'VOX:WCS_CoordProjection' and\n datatype == 'double'):\n datatype = 'char'\n arraysize = '3'\n vo_warn(W40, (), config, pos)\n # ----------------------------------------\n\n self.description = None\n self._votable = votable\n\n self.ID = (resolve_id(ID, id, config, pos) or\n xmlutil.fix_id(name, config, pos))\n self.name = name\n if name is None:\n if (self._element_name == 'PARAM' and\n not config.get('version_1_1_or_later')):\n pass\n else:\n warn_or_raise(W15, W15, self._element_name, config, pos)\n self.name = self.ID\n\n if self._ID is None and name is None:\n vo_raise(W12, self._element_name, config, pos)\n\n datatype_mapping = {\n 'string': 'char',\n 'unicodeString': 'unicodeChar',\n 'int16': 'short',\n 'int32': 'int',\n 'int64': 'long',\n 'float32': 'float',\n 'float64': 'double',\n # The following appear in some Vizier tables\n 'unsignedInt': 'long',\n 'unsignedShort': 'int'\n }\n\n datatype_mapping.update(config.get('datatype_mapping', {}))\n\n if datatype in datatype_mapping:\n warn_or_raise(W13, W13, (datatype, datatype_mapping[datatype]),\n config, pos)\n datatype = datatype_mapping[datatype]\n\n self.ref = ref\n self.datatype = datatype\n self.arraysize = arraysize\n self.ucd = ucd\n self.unit = unit\n self.width = width\n self.precision = precision\n self.utype = utype\n self.type = type\n self._links = HomogeneousList(Link)\n self.title = self.name\n self.values = Values(self._votable, self)\n self.xtype = xtype\n\n self._setup(config, pos)\n\n warn_unknown_attrs(self._element_name, extra.keys(), config, pos)\n\n @classmethod\n def uniqify_names(cls, fields):\n \"\"\"\n Make sure that all names and titles in a list of fields are\n unique, by appending numbers if necessary.\n \"\"\"\n unique = {}\n for field in fields:\n i = 2\n new_id = field.ID\n while new_id in unique:\n new_id = field.ID + f\"_{i:d}\"\n i += 1\n if new_id != field.ID:\n vo_warn(W32, (field.ID, new_id), field._config, field._pos)\n field.ID = new_id\n unique[new_id] = field.ID\n\n for field in fields:\n i = 2\n if field.name is None:\n new_name = field.ID\n implicit = True\n else:\n new_name = field.name\n implicit = False\n if new_name != field.ID:\n while new_name in unique:\n new_name = field.name + f\" {i:d}\"\n i += 1\n\n if (not implicit and\n new_name != field.name):\n vo_warn(W33, (field.name, new_name), field._config, field._pos)\n field._unique_name = new_name\n unique[new_name] = field.name\n\n def _setup(self, config, pos):\n if self.values._ref is not None:\n self.values.ref = self.values._ref\n self.converter = converters.get_converter(self, config, pos)\n\n @property\n def datatype(self):\n \"\"\"\n [*required*] The datatype of the column. 
Valid values (as\n defined by the spec) are:\n\n 'boolean', 'bit', 'unsignedByte', 'short', 'int', 'long',\n 'char', 'unicodeChar', 'float', 'double', 'floatComplex', or\n 'doubleComplex'\n\n Many VOTABLE files in the wild use 'string' instead of 'char',\n so that is also a valid option, though 'string' will always be\n converted to 'char' when writing the file back out.\n \"\"\"\n return self._datatype\n\n @datatype.setter\n def datatype(self, datatype):\n if datatype is None:\n if self._config.get('version_1_1_or_later'):\n warn_or_raise(E10, E10, self._element_name, self._config,\n self._pos)\n datatype = 'char'\n if datatype not in converters.converter_mapping:\n vo_raise(E06, (datatype, self.ID), self._config, self._pos)\n self._datatype = datatype\n\n @property\n def precision(self):\n \"\"\"\n Along with :attr:`width`, defines the `numerical accuracy`_\n associated with the data. These values are used to limit the\n precision when writing floating point values back to the XML\n file. Otherwise, it is purely informational -- the Numpy\n recarray containing the data itself does not use this\n information.\n \"\"\"\n return self._precision\n\n @precision.setter\n def precision(self, precision):\n if precision is not None and not re.match(r\"^[FE]?[0-9]+$\", precision):\n vo_raise(E11, precision, self._config, self._pos)\n self._precision = precision\n\n @precision.deleter\n def precision(self):\n self._precision = None\n\n @property\n def width(self):\n \"\"\"\n Along with :attr:`precision`, defines the `numerical\n accuracy`_ associated with the data. These values are used to\n limit the precision when writing floating point values back to\n the XML file. Otherwise, it is purely informational -- the\n Numpy recarray containing the data itself does not use this\n information.\n \"\"\"\n return self._width\n\n @width.setter\n def width(self, width):\n if width is not None:\n width = int(width)\n if width <= 0:\n vo_raise(E12, width, self._config, self._pos)\n self._width = width\n\n @width.deleter\n def width(self):\n self._width = None\n\n # ref on FIELD and PARAM behave differently than elsewhere -- here\n # they're just informational, such as to refer to a coordinate\n # system.\n @property\n def ref(self):\n \"\"\"\n On FIELD_ elements, ref is used only for informational\n purposes, for example to refer to a COOSYS_ or TIMESYS_ element.\n \"\"\"\n return self._ref\n\n @ref.setter\n def ref(self, ref):\n xmlutil.check_id(ref, 'ref', self._config, self._pos)\n self._ref = ref\n\n @ref.deleter\n def ref(self):\n self._ref = None\n\n @property\n def unit(self):\n \"\"\"A string specifying the units_ for the FIELD_.\"\"\"\n return self._unit\n\n @unit.setter\n def unit(self, unit):\n if unit is None:\n self._unit = None\n return\n\n from astropy import units as u\n\n # First, parse the unit in the default way, so that we can\n # still emit a warning if the unit is not to spec.\n default_format = _get_default_unit_format(self._config)\n unit_obj = u.Unit(\n unit, format=default_format, parse_strict='silent')\n if isinstance(unit_obj, u.UnrecognizedUnit):\n warn_or_raise(W50, W50, (unit,),\n self._config, self._pos)\n\n format = _get_unit_format(self._config)\n if format != default_format:\n unit_obj = u.Unit(\n unit, format=format, parse_strict='silent')\n\n self._unit = unit_obj\n\n @unit.deleter\n def unit(self):\n self._unit = None\n\n @property\n def arraysize(self):\n \"\"\"\n Specifies the size of the multidimensional array if this\n FIELD_ contains more than a single value.\n\n See 
`multidimensional arrays`_.\n \"\"\"\n return self._arraysize\n\n @arraysize.setter\n def arraysize(self, arraysize):\n if (arraysize is not None and\n not re.match(r\"^([0-9]+x)*[0-9]*[*]?(s\\W)?$\", arraysize)):\n vo_raise(E13, arraysize, self._config, self._pos)\n self._arraysize = arraysize\n\n @arraysize.deleter\n def arraysize(self):\n self._arraysize = None\n\n @property\n def type(self):\n \"\"\"\n The type attribute on FIELD_ elements is reserved for future\n extensions.\n \"\"\"\n return self._type\n\n @type.setter\n def type(self, type):\n self._type = type\n\n @type.deleter\n def type(self):\n self._type = None\n\n @property\n def values(self):\n \"\"\"\n A :class:`Values` instance (or `None`) defining the domain\n of the column.\n \"\"\"\n return self._values\n\n @values.setter\n def values(self, values):\n assert values is None or isinstance(values, Values)\n self._values = values\n\n @values.deleter\n def values(self):\n self._values = None\n\n @property\n def links(self):\n \"\"\"\n A list of :class:`Link` instances used to reference more\n details about the meaning of the FIELD_. This is purely\n informational and is not used by the `astropy.io.votable`\n package.\n \"\"\"\n return self._links\n\n def parse(self, iterator, config):\n for start, tag, data, pos in iterator:\n if start:\n if tag == 'VALUES':\n self.values.__init__(\n self._votable, self, config=config, pos=pos, **data)\n self.values.parse(iterator, config)\n elif tag == 'LINK':\n link = Link(config=config, pos=pos, **data)\n self.links.append(link)\n link.parse(iterator, config)\n elif tag == 'DESCRIPTION':\n warn_unknown_attrs(\n 'DESCRIPTION', data.keys(), config, pos)\n elif tag != self._element_name:\n self._add_unknown_tag(iterator, tag, data, config, pos)\n else:\n if tag == 'DESCRIPTION':\n if self.description is not None:\n warn_or_raise(\n W17, W17, self._element_name, config, pos)\n self.description = data or None\n elif tag == self._element_name:\n break\n\n if self.description is not None:\n self.title = \" \".join(x.strip() for x in\n self.description.splitlines())\n else:\n self.title = self.name\n\n self._setup(config, pos)\n\n return self\n\n def to_xml(self, w, **kwargs):\n attrib = w.object_attrs(self, self._attr_list)\n if 'unit' in attrib:\n attrib['unit'] = self.unit.to_string('cds')\n with w.tag(self._element_name, attrib=attrib):\n if self.description is not None:\n w.element('DESCRIPTION', self.description, wrap=True)\n if not self.values.is_defaults():\n self.values.to_xml(w, **kwargs)\n for link in self.links:\n link.to_xml(w, **kwargs)\n\n def to_table_column(self, column):\n \"\"\"\n Sets the attributes of a given `astropy.table.Column` instance\n to match the information in this `Field`.\n \"\"\"\n for key in ['ucd', 'width', 'precision', 'utype', 'xtype']:\n val = getattr(self, key, None)\n if val is not None:\n column.meta[key] = val\n if not self.values.is_defaults():\n self.values.to_table_column(column)\n for link in self.links:\n link.to_table_column(column)\n if self.description is not None:\n column.description = self.description\n if self.unit is not None:\n # TODO: Use units framework when it's available\n column.unit = self.unit\n if (isinstance(self.converter, converters.FloatingPoint) and\n self.converter.output_format != '{!r:>}'):\n column.format = self.converter.output_format\n elif isinstance(self.converter, converters.Char):\n column.info.meta['_votable_string_dtype'] = 'char'\n elif isinstance(self.converter, converters.UnicodeChar):\n 
column.info.meta['_votable_string_dtype'] = 'unicodeChar'\n\n @classmethod\n def from_table_column(cls, votable, column):\n \"\"\"\n Restores a `Field` instance from a given\n `astropy.table.Column` instance.\n \"\"\"\n kwargs = {}\n meta = column.info.meta\n if meta:\n for key in ['ucd', 'width', 'precision', 'utype', 'xtype']:\n val = meta.get(key, None)\n if val is not None:\n kwargs[key] = val\n # TODO: Use the unit framework when available\n if column.info.unit is not None:\n kwargs['unit'] = column.info.unit\n kwargs['name'] = column.info.name\n result = converters.table_column_to_votable_datatype(column)\n kwargs.update(result)\n\n field = cls(votable, **kwargs)\n\n if column.info.description is not None:\n field.description = column.info.description\n field.values.from_table_column(column)\n if meta and 'links' in meta:\n for link in meta['links']:\n field.links.append(Link.from_table_column(link))\n\n # TODO: Parse format into precision and width\n return field\n\n\nclass Param(Field):\n \"\"\"\n PARAM_ element: constant-valued columns in the data.\n\n :class:`Param` objects are a subclass of :class:`Field`, and have\n all of its methods and members. Additionally, it defines :attr:`value`.\n \"\"\"\n _attr_list_11 = Field._attr_list_11 + ['value']\n _attr_list_12 = Field._attr_list_12 + ['value']\n _element_name = 'PARAM'\n\n def __init__(self, votable, ID=None, name=None, value=None, datatype=None,\n arraysize=None, ucd=None, unit=None, width=None,\n precision=None, utype=None, type=None, id=None, config=None,\n pos=None, **extra):\n self._value = value\n Field.__init__(self, votable, ID=ID, name=name, datatype=datatype,\n arraysize=arraysize, ucd=ucd, unit=unit,\n precision=precision, utype=utype, type=type,\n id=id, config=config, pos=pos, **extra)\n\n @property\n def value(self):\n \"\"\"\n [*required*] The constant value of the parameter. Its type is\n determined by the :attr:`~Field.datatype` member.\n \"\"\"\n return self._value\n\n @value.setter\n def value(self, value):\n if value is None:\n value = \"\"\n if isinstance(value, str):\n self._value = self.converter.parse(\n value, self._config, self._pos)[0]\n else:\n self._value = value\n\n def _setup(self, config, pos):\n Field._setup(self, config, pos)\n self.value = self._value\n\n def to_xml(self, w, **kwargs):\n tmp_value = self._value\n self._value = self.converter.output(tmp_value, False)\n # We must always have a value\n if self._value is None:\n self._value = \"\"\n Field.to_xml(self, w, **kwargs)\n self._value = tmp_value\n\n\nclass CooSys(SimpleElement):\n \"\"\"\n COOSYS_ element: defines a coordinate system.\n\n The keyword arguments correspond to setting members of the same\n name, documented below.\n \"\"\"\n _attr_list = ['ID', 'equinox', 'epoch', 'system']\n _element_name = 'COOSYS'\n\n def __init__(self, ID=None, equinox=None, epoch=None, system=None, id=None,\n config=None, pos=None, **extra):\n if config is None:\n config = {}\n self._config = config\n self._pos = pos\n\n # COOSYS was deprecated in 1.2 but then re-instated in 1.3\n if (config.get('version_1_2_or_later') and\n not config.get('version_1_3_or_later')):\n warn_or_raise(W27, W27, (), config, pos)\n\n SimpleElement.__init__(self)\n\n self.ID = resolve_id(ID, id, config, pos)\n self.equinox = equinox\n self.epoch = epoch\n self.system = system\n\n warn_unknown_attrs('COOSYS', extra.keys(), config, pos)\n\n @property\n def ID(self):\n \"\"\"\n [*required*] The XML ID of the COOSYS_ element, used for\n cross-referencing. 
May be `None` or a string conforming to\n XML ID_ syntax.\n \"\"\"\n return self._ID\n\n @ID.setter\n def ID(self, ID):\n if self._config.get('version_1_1_or_later'):\n if ID is None:\n vo_raise(E15, (), self._config, self._pos)\n xmlutil.check_id(ID, 'ID', self._config, self._pos)\n self._ID = ID\n\n @property\n def system(self):\n \"\"\"\n Specifies the type of coordinate system. Valid choices are:\n\n 'eq_FK4', 'eq_FK5', 'ICRS', 'ecl_FK4', 'ecl_FK5', 'galactic',\n 'supergalactic', 'xy', 'barycentric', or 'geo_app'\n \"\"\"\n return self._system\n\n @system.setter\n def system(self, system):\n if system not in ('eq_FK4', 'eq_FK5', 'ICRS', 'ecl_FK4', 'ecl_FK5',\n 'galactic', 'supergalactic', 'xy', 'barycentric',\n 'geo_app'):\n warn_or_raise(E16, E16, system, self._config, self._pos)\n self._system = system\n\n @system.deleter\n def system(self):\n self._system = None\n\n @property\n def equinox(self):\n \"\"\"\n A parameter required to fix the equatorial or ecliptic systems\n (as e.g. \"J2000\" as the default \"eq_FK5\" or \"B1950\" as the\n default \"eq_FK4\").\n \"\"\"\n return self._equinox\n\n @equinox.setter\n def equinox(self, equinox):\n check_astroyear(equinox, 'equinox', self._config, self._pos)\n self._equinox = equinox\n\n @equinox.deleter\n def equinox(self):\n self._equinox = None\n\n @property\n def epoch(self):\n \"\"\"\n Specifies the epoch of the positions. It must be a string\n specifying an astronomical year.\n \"\"\"\n return self._epoch\n\n @epoch.setter\n def epoch(self, epoch):\n check_astroyear(epoch, 'epoch', self._config, self._pos)\n self._epoch = epoch\n\n @epoch.deleter\n def epoch(self):\n self._epoch = None\n\n\nclass TimeSys(SimpleElement):\n \"\"\"\n TIMESYS_ element: defines a time system.\n\n The keyword arguments correspond to setting members of the same\n name, documented below.\n \"\"\"\n _attr_list = ['ID', 'timeorigin', 'timescale', 'refposition']\n _element_name = 'TIMESYS'\n\n def __init__(self, ID=None, timeorigin=None, timescale=None, refposition=None, id=None,\n config=None, pos=None, **extra):\n if config is None:\n config = {}\n self._config = config\n self._pos = pos\n\n # TIMESYS is supported starting in version 1.4\n if not config['version_1_4_or_later']:\n warn_or_raise(\n W54, W54, config['version'], config, pos)\n\n SimpleElement.__init__(self)\n\n self.ID = resolve_id(ID, id, config, pos)\n self.timeorigin = timeorigin\n self.timescale = timescale\n self.refposition = refposition\n\n warn_unknown_attrs('TIMESYS', extra.keys(), config, pos,\n ['ID', 'timeorigin', 'timescale', 'refposition'])\n\n @property\n def ID(self):\n \"\"\"\n [*required*] The XML ID of the TIMESYS_ element, used for\n cross-referencing. Must be a string conforming to\n XML ID_ syntax.\n \"\"\"\n return self._ID\n\n @ID.setter\n def ID(self, ID):\n if ID is None:\n vo_raise(E22, (), self._config, self._pos)\n xmlutil.check_id(ID, 'ID', self._config, self._pos)\n self._ID = ID\n\n @property\n def timeorigin(self):\n \"\"\"\n Specifies the time origin of the time coordinate,\n given as a Julian Date for the the time scale and\n reference point defined. It is usually given as a\n floating point literal; for convenience, the magic\n strings \"MJD-origin\" (standing for 2400000.5) and\n \"JD-origin\" (standing for 0) are also allowed.\n\n The timeorigin attribute MUST be given unless the\n time’s representation contains a year of a calendar\n era, in which case it MUST NOT be present. 
In VOTables,\n these representations currently are Gregorian calendar\n years with xtype=\"timestamp\", or years in the Julian\n or Besselian calendar when a column has yr, a, or Ba as\n its unit and no time origin is given.\n \"\"\"\n return self._timeorigin\n\n @timeorigin.setter\n def timeorigin(self, timeorigin):\n if (timeorigin is not None and\n timeorigin != 'MJD-origin' and timeorigin != 'JD-origin'):\n try:\n timeorigin = float(timeorigin)\n except ValueError:\n warn_or_raise(E23, E23, timeorigin, self._config, self._pos)\n self._timeorigin = timeorigin\n\n @timeorigin.deleter\n def timeorigin(self):\n self._timeorigin = None\n\n @property\n def timescale(self):\n \"\"\"\n [*required*] String specifying the time scale used. Values\n should be taken from the IVOA timescale vocabulary (documented\n at http://www.ivoa.net/rdf/timescale).\n \"\"\"\n return self._timescale\n\n @timescale.setter\n def timescale(self, timescale):\n self._timescale = timescale\n\n @timescale.deleter\n def timescale(self):\n self._timescale = None\n\n @property\n def refposition(self):\n \"\"\"\n [*required*] String specifying the reference position. Values\n should be taken from the IVOA refposition vocabulary (documented\n at http://www.ivoa.net/rdf/refposition).\n \"\"\"\n return self._refposition\n\n @refposition.setter\n def refposition(self, refposition):\n self._refposition = refposition\n\n @refposition.deleter\n def refposition(self):\n self._refposition = None\n\n\nclass FieldRef(SimpleElement, _UtypeProperty, _UcdProperty):\n \"\"\"\n FIELDref_ element: used inside of GROUP_ elements to refer to remote FIELD_ elements.\n \"\"\"\n _attr_list_11 = ['ref']\n _attr_list_12 = _attr_list_11 + ['ucd', 'utype']\n _element_name = \"FIELDref\"\n _utype_in_v1_2 = True\n _ucd_in_v1_2 = True\n\n def __init__(self, table, ref, ucd=None, utype=None, config=None, pos=None,\n **extra):\n \"\"\"\n *table* is the :class:`Table` object that this :class:`FieldRef`\n is a member of.\n\n *ref* is the ID to reference a :class:`Field` object defined\n elsewhere.\n \"\"\"\n if config is None:\n config = {}\n self._config = config\n self._pos = pos\n\n SimpleElement.__init__(self)\n self._table = table\n self.ref = ref\n self.ucd = ucd\n self.utype = utype\n\n if config.get('version_1_2_or_later'):\n self._attr_list = self._attr_list_12\n else:\n self._attr_list = self._attr_list_11\n if ucd is not None:\n warn_unknown_attrs(self._element_name, ['ucd'], config, pos)\n if utype is not None:\n warn_unknown_attrs(self._element_name, ['utype'], config, pos)\n\n @property\n def ref(self):\n \"\"\"The ID_ of the FIELD_ that this FIELDref_ references.\"\"\"\n return self._ref\n\n @ref.setter\n def ref(self, ref):\n xmlutil.check_id(ref, 'ref', self._config, self._pos)\n self._ref = ref\n\n @ref.deleter\n def ref(self):\n self._ref = None\n\n def get_ref(self):\n \"\"\"\n Lookup the :class:`Field` instance that this :class:`FieldRef`\n references.\n \"\"\"\n for field in self._table._votable.iter_fields_and_params():\n if isinstance(field, Field) and field.ID == self.ref:\n return field\n vo_raise(\n f\"No field named '{self.ref}'\",\n self._config, self._pos, KeyError)\n\n\nclass ParamRef(SimpleElement, _UtypeProperty, _UcdProperty):\n \"\"\"\n PARAMref_ element: used inside of GROUP_ elements to refer to remote PARAM_ elements.\n\n The keyword arguments correspond to setting members of the same\n name, documented below.\n\n It contains the following publicly-accessible members:\n\n *ref*: An XML ID referring to a <PARAM> 
element.\n \"\"\"\n _attr_list_11 = ['ref']\n _attr_list_12 = _attr_list_11 + ['ucd', 'utype']\n _element_name = \"PARAMref\"\n _utype_in_v1_2 = True\n _ucd_in_v1_2 = True\n\n def __init__(self, table, ref, ucd=None, utype=None, config=None, pos=None):\n if config is None:\n config = {}\n\n self._config = config\n self._pos = pos\n\n Element.__init__(self)\n self._table = table\n self.ref = ref\n self.ucd = ucd\n self.utype = utype\n\n if config.get('version_1_2_or_later'):\n self._attr_list = self._attr_list_12\n else:\n self._attr_list = self._attr_list_11\n if ucd is not None:\n warn_unknown_attrs(self._element_name, ['ucd'], config, pos)\n if utype is not None:\n warn_unknown_attrs(self._element_name, ['utype'], config, pos)\n\n @property\n def ref(self):\n \"\"\"The ID_ of the PARAM_ that this PARAMref_ references.\"\"\"\n return self._ref\n\n @ref.setter\n def ref(self, ref):\n xmlutil.check_id(ref, 'ref', self._config, self._pos)\n self._ref = ref\n\n @ref.deleter\n def ref(self):\n self._ref = None\n\n def get_ref(self):\n \"\"\"\n Lookup the :class:`Param` instance that this :class:``PARAMref``\n references.\n \"\"\"\n for param in self._table._votable.iter_fields_and_params():\n if isinstance(param, Param) and param.ID == self.ref:\n return param\n vo_raise(\n f\"No params named '{self.ref}'\",\n self._config, self._pos, KeyError)\n\n\nclass Group(Element, _IDProperty, _NameProperty, _UtypeProperty,\n _UcdProperty, _DescriptionProperty):\n \"\"\"\n GROUP_ element: groups FIELD_ and PARAM_ elements.\n\n This information is currently ignored by the vo package---that is\n the columns in the recarray are always flat---but the grouping\n information is stored so that it can be written out again to the\n XML file.\n\n The keyword arguments correspond to setting members of the same\n name, documented below.\n \"\"\"\n\n def __init__(self, table, ID=None, name=None, ref=None, ucd=None,\n utype=None, id=None, config=None, pos=None, **extra):\n if config is None:\n config = {}\n self._config = config\n self._pos = pos\n\n Element.__init__(self)\n self._table = table\n\n self.ID = (resolve_id(ID, id, config, pos)\n or xmlutil.fix_id(name, config, pos))\n self.name = name\n self.ref = ref\n self.ucd = ucd\n self.utype = utype\n self.description = None\n\n self._entries = HomogeneousList(\n (FieldRef, ParamRef, Group, Param))\n\n warn_unknown_attrs('GROUP', extra.keys(), config, pos)\n\n def __repr__(self):\n return f'<GROUP>... {len(self._entries)} entries ...</GROUP>'\n\n @property\n def ref(self):\n \"\"\"\n Currently ignored, as it's not clear from the spec how this is\n meant to work.\n \"\"\"\n return self._ref\n\n @ref.setter\n def ref(self, ref):\n xmlutil.check_id(ref, 'ref', self._config, self._pos)\n self._ref = ref\n\n @ref.deleter\n def ref(self):\n self._ref = None\n\n @property\n def entries(self):\n \"\"\"\n [read-only] A list of members of the GROUP_. 
This list may\n only contain objects of type :class:`Param`, :class:`Group`,\n :class:`ParamRef` and :class:`FieldRef`.\n \"\"\"\n return self._entries\n\n def _add_fieldref(self, iterator, tag, data, config, pos):\n fieldref = FieldRef(self._table, config=config, pos=pos, **data)\n self.entries.append(fieldref)\n\n def _add_paramref(self, iterator, tag, data, config, pos):\n paramref = ParamRef(self._table, config=config, pos=pos, **data)\n self.entries.append(paramref)\n\n def _add_param(self, iterator, tag, data, config, pos):\n if isinstance(self._table, VOTableFile):\n votable = self._table\n else:\n votable = self._table._votable\n param = Param(votable, config=config, pos=pos, **data)\n self.entries.append(param)\n param.parse(iterator, config)\n\n def _add_group(self, iterator, tag, data, config, pos):\n group = Group(self._table, config=config, pos=pos, **data)\n self.entries.append(group)\n group.parse(iterator, config)\n\n def parse(self, iterator, config):\n tag_mapping = {\n 'FIELDref': self._add_fieldref,\n 'PARAMref': self._add_paramref,\n 'PARAM': self._add_param,\n 'GROUP': self._add_group,\n 'DESCRIPTION': self._ignore_add}\n\n for start, tag, data, pos in iterator:\n if start:\n tag_mapping.get(tag, self._add_unknown_tag)(\n iterator, tag, data, config, pos)\n else:\n if tag == 'DESCRIPTION':\n if self.description is not None:\n warn_or_raise(W17, W17, 'GROUP', config, pos)\n self.description = data or None\n elif tag == 'GROUP':\n break\n return self\n\n def to_xml(self, w, **kwargs):\n with w.tag(\n 'GROUP',\n attrib=w.object_attrs(\n self, ['ID', 'name', 'ref', 'ucd', 'utype'])):\n if self.description is not None:\n w.element(\"DESCRIPTION\", self.description, wrap=True)\n for entry in self.entries:\n entry.to_xml(w, **kwargs)\n\n def iter_fields_and_params(self):\n \"\"\"\n Recursively iterate over all :class:`Param` elements in this\n :class:`Group`.\n \"\"\"\n for entry in self.entries:\n if isinstance(entry, Param):\n yield entry\n elif isinstance(entry, Group):\n for field in entry.iter_fields_and_params():\n yield field\n\n def iter_groups(self):\n \"\"\"\n Recursively iterate over all sub-:class:`Group` instances in\n this :class:`Group`.\n \"\"\"\n for entry in self.entries:\n if isinstance(entry, Group):\n yield entry\n for group in entry.iter_groups():\n yield group\n\n\nclass Table(Element, _IDProperty, _NameProperty, _UcdProperty,\n _DescriptionProperty):\n \"\"\"\n TABLE_ element: optionally contains data.\n\n It contains the following publicly-accessible and mutable\n attribute:\n\n *array*: A Numpy masked array of the data itself, where each\n row is a row of votable data, and columns are named and typed\n based on the <FIELD> elements of the table. 
The mask is\n parallel to the data array, except for variable-length fields.\n For those fields, the numpy array's column type is \"object\"\n (``\"O\"``), and another masked array is stored there.\n\n If the Table contains no data, (for example, its enclosing\n :class:`Resource` has :attr:`~Resource.type` == 'meta') *array*\n will have zero-length.\n\n The keyword arguments correspond to setting members of the same\n name, documented below.\n \"\"\"\n\n def __init__(self, votable, ID=None, name=None, ref=None, ucd=None,\n utype=None, nrows=None, id=None, config=None, pos=None,\n **extra):\n if config is None:\n config = {}\n self._config = config\n self._pos = pos\n self._empty = False\n\n Element.__init__(self)\n self._votable = votable\n\n self.ID = (resolve_id(ID, id, config, pos)\n or xmlutil.fix_id(name, config, pos))\n self.name = name\n xmlutil.check_id(ref, 'ref', config, pos)\n self._ref = ref\n self.ucd = ucd\n self.utype = utype\n if nrows is not None:\n nrows = int(nrows)\n if nrows < 0:\n raise ValueError(\"'nrows' cannot be negative.\")\n self._nrows = nrows\n self.description = None\n self.format = 'tabledata'\n\n self._fields = HomogeneousList(Field)\n self._params = HomogeneousList(Param)\n self._groups = HomogeneousList(Group)\n self._links = HomogeneousList(Link)\n self._infos = HomogeneousList(Info)\n\n self.array = ma.array([])\n\n warn_unknown_attrs('TABLE', extra.keys(), config, pos)\n\n def __repr__(self):\n return repr(self.to_table())\n\n def __bytes__(self):\n return bytes(self.to_table())\n\n def __str__(self):\n return str(self.to_table())\n\n @property\n def ref(self):\n return self._ref\n\n @ref.setter\n def ref(self, ref):\n \"\"\"\n Refer to another TABLE, previously defined, by the *ref* ID_\n for all metadata (FIELD_, PARAM_ etc.) information.\n \"\"\"\n # When the ref changes, we want to verify that it will work\n # by actually going and looking for the referenced table.\n # If found, set a bunch of properties in this table based\n # on the other one.\n xmlutil.check_id(ref, 'ref', self._config, self._pos)\n if ref is not None:\n try:\n table = self._votable.get_table_by_id(ref, before=self)\n except KeyError:\n warn_or_raise(\n W43, W43, ('TABLE', self.ref), self._config, self._pos)\n ref = None\n else:\n self._fields = table.fields\n self._params = table.params\n self._groups = table.groups\n self._links = table.links\n else:\n del self._fields[:]\n del self._params[:]\n del self._groups[:]\n del self._links[:]\n self._ref = ref\n\n @ref.deleter\n def ref(self):\n self._ref = None\n\n @property\n def format(self):\n \"\"\"\n [*required*] The serialization format of the table. Must be\n one of:\n\n 'tabledata' (TABLEDATA_), 'binary' (BINARY_), 'binary2' (BINARY2_)\n 'fits' (FITS_).\n\n Note that the 'fits' format, since it requires an external\n file, can not be written out. 
Any file read in with 'fits'\n format will be read out, by default, in 'tabledata' format.\n\n See :ref:`votable-serialization`.\n \"\"\"\n return self._format\n\n @format.setter\n def format(self, format):\n format = format.lower()\n if format == 'fits':\n vo_raise(\"fits format can not be written out, only read.\",\n self._config, self._pos, NotImplementedError)\n if format == 'binary2':\n if not self._config['version_1_3_or_later']:\n vo_raise(\n \"binary2 only supported in votable 1.3 or later\",\n self._config, self._pos)\n elif format not in ('tabledata', 'binary'):\n vo_raise(f\"Invalid format '{format}'\",\n self._config, self._pos)\n self._format = format\n\n @property\n def nrows(self):\n \"\"\"\n [*immutable*] The number of rows in the table, as specified in\n the XML file.\n \"\"\"\n return self._nrows\n\n @property\n def fields(self):\n \"\"\"\n A list of :class:`Field` objects describing the types of each\n of the data columns.\n \"\"\"\n return self._fields\n\n @property\n def params(self):\n \"\"\"\n A list of parameters (constant-valued columns) for the\n table. Must contain only :class:`Param` objects.\n \"\"\"\n return self._params\n\n @property\n def groups(self):\n \"\"\"\n A list of :class:`Group` objects describing how the columns\n and parameters are grouped. Currently this information is\n only kept around for round-tripping and informational\n purposes.\n \"\"\"\n return self._groups\n\n @property\n def links(self):\n \"\"\"\n A list of :class:`Link` objects (pointers to other documents\n or servers through a URI) for the table.\n \"\"\"\n return self._links\n\n @property\n def infos(self):\n \"\"\"\n A list of :class:`Info` objects for the table. Allows for\n post-operational diagnostics.\n \"\"\"\n return self._infos\n\n def is_empty(self):\n \"\"\"\n Returns True if this table doesn't contain any real data\n because it was skipped over by the parser (through use of the\n ``table_number`` kwarg).\n \"\"\"\n return self._empty\n\n def create_arrays(self, nrows=0, config=None):\n \"\"\"\n Create a new array to hold the data based on the current set\n of fields, and store them in the *array* and member variable.\n Any data in the existing array will be lost.\n\n *nrows*, if provided, is the number of rows to allocate.\n \"\"\"\n if nrows is None:\n nrows = 0\n\n fields = self.fields\n\n if len(fields) == 0:\n array = np.recarray((nrows,), dtype='O')\n mask = np.zeros((nrows,), dtype='b')\n else:\n # for field in fields: field._setup(config)\n Field.uniqify_names(fields)\n\n dtype = []\n for x in fields:\n if x._unique_name == x.ID:\n id = x.ID\n else:\n id = (x._unique_name, x.ID)\n dtype.append((id, x.converter.format))\n\n array = np.recarray((nrows,), dtype=np.dtype(dtype))\n descr_mask = []\n for d in array.dtype.descr:\n new_type = (d[1][1] == 'O' and 'O') or 'bool'\n if len(d) == 2:\n descr_mask.append((d[0], new_type))\n elif len(d) == 3:\n descr_mask.append((d[0], new_type, d[2]))\n mask = np.zeros((nrows,), dtype=descr_mask)\n\n self.array = ma.array(array, mask=mask)\n\n def _resize_strategy(self, size):\n \"\"\"\n Return a new (larger) size based on size, used for\n reallocating an array when it fills up. 
This is in its own\n function so the resizing strategy can be easily replaced.\n \"\"\"\n # Once we go beyond 0, make a big step -- after that use a\n # factor of 1.5 to help keep memory usage compact\n if size == 0:\n return 512\n return int(np.ceil(size * RESIZE_AMOUNT))\n\n def _add_field(self, iterator, tag, data, config, pos):\n field = Field(self._votable, config=config, pos=pos, **data)\n self.fields.append(field)\n field.parse(iterator, config)\n\n def _add_param(self, iterator, tag, data, config, pos):\n param = Param(self._votable, config=config, pos=pos, **data)\n self.params.append(param)\n param.parse(iterator, config)\n\n def _add_group(self, iterator, tag, data, config, pos):\n group = Group(self, config=config, pos=pos, **data)\n self.groups.append(group)\n group.parse(iterator, config)\n\n def _add_link(self, iterator, tag, data, config, pos):\n link = Link(config=config, pos=pos, **data)\n self.links.append(link)\n link.parse(iterator, config)\n\n def _add_info(self, iterator, tag, data, config, pos):\n if not config.get('version_1_2_or_later'):\n warn_or_raise(W26, W26, ('INFO', 'TABLE', '1.2'), config, pos)\n info = Info(config=config, pos=pos, **data)\n self.infos.append(info)\n info.parse(iterator, config)\n\n def parse(self, iterator, config):\n columns = config.get('columns')\n\n # If we've requested to read in only a specific table, skip\n # all others\n table_number = config.get('table_number')\n current_table_number = config.get('_current_table_number')\n skip_table = False\n if current_table_number is not None:\n config['_current_table_number'] += 1\n if (table_number is not None and\n table_number != current_table_number):\n skip_table = True\n self._empty = True\n\n table_id = config.get('table_id')\n if table_id is not None:\n if table_id != self.ID:\n skip_table = True\n self._empty = True\n\n if self.ref is not None:\n # This table doesn't have its own datatype descriptors, it\n # just references those from another table.\n\n # This is to call the property setter to go and get the\n # referenced information\n self.ref = self.ref\n\n for start, tag, data, pos in iterator:\n if start:\n if tag == 'DATA':\n warn_unknown_attrs(\n 'DATA', data.keys(), config, pos)\n break\n else:\n if tag == 'TABLE':\n return self\n elif tag == 'DESCRIPTION':\n if self.description is not None:\n warn_or_raise(W17, W17, 'RESOURCE', config, pos)\n self.description = data or None\n else:\n tag_mapping = {\n 'FIELD': self._add_field,\n 'PARAM': self._add_param,\n 'GROUP': self._add_group,\n 'LINK': self._add_link,\n 'INFO': self._add_info,\n 'DESCRIPTION': self._ignore_add}\n\n for start, tag, data, pos in iterator:\n if start:\n if tag == 'DATA':\n if len(self.fields) == 0:\n warn_or_raise(E25, E25, None, config, pos)\n warn_unknown_attrs(\n 'DATA', data.keys(), config, pos)\n break\n\n tag_mapping.get(tag, self._add_unknown_tag)(\n iterator, tag, data, config, pos)\n else:\n if tag == 'DESCRIPTION':\n if self.description is not None:\n warn_or_raise(W17, W17, 'RESOURCE', config, pos)\n self.description = data or None\n elif tag == 'TABLE':\n # For error checking purposes\n Field.uniqify_names(self.fields)\n # We still need to create arrays, even if the file\n # contains no DATA section\n self.create_arrays(nrows=0, config=config)\n return self\n\n self.create_arrays(nrows=self._nrows, config=config)\n fields = self.fields\n names = [x.ID for x in fields]\n # Deal with a subset of the columns, if requested.\n if not columns:\n colnumbers = list(range(len(fields)))\n else:\n if 
isinstance(columns, str):\n columns = [columns]\n columns = np.asarray(columns)\n if issubclass(columns.dtype.type, np.integer):\n if np.any(columns < 0) or np.any(columns > len(fields)):\n raise ValueError(\n \"Some specified column numbers out of range\")\n colnumbers = columns\n elif issubclass(columns.dtype.type, np.character):\n try:\n colnumbers = [names.index(x) for x in columns]\n except ValueError:\n raise ValueError(\n f\"Columns '{columns}' not found in fields list\")\n else:\n raise TypeError(\"Invalid columns list\")\n\n if (not skip_table) and (len(fields) > 0):\n for start, tag, data, pos in iterator:\n if start:\n if tag == 'TABLEDATA':\n warn_unknown_attrs(\n 'TABLEDATA', data.keys(), config, pos)\n self.array = self._parse_tabledata(\n iterator, colnumbers, fields, config)\n break\n elif tag == 'BINARY':\n warn_unknown_attrs(\n 'BINARY', data.keys(), config, pos)\n self.array = self._parse_binary(\n 1, iterator, colnumbers, fields, config, pos)\n break\n elif tag == 'BINARY2':\n if not config['version_1_3_or_later']:\n warn_or_raise(\n W52, W52, config['version'], config, pos)\n self.array = self._parse_binary(\n 2, iterator, colnumbers, fields, config, pos)\n break\n elif tag == 'FITS':\n warn_unknown_attrs(\n 'FITS', data.keys(), config, pos, ['extnum'])\n try:\n extnum = int(data.get('extnum', 0))\n if extnum < 0:\n raise ValueError(\"'extnum' cannot be negative.\")\n except ValueError:\n vo_raise(E17, (), config, pos)\n self.array = self._parse_fits(\n iterator, extnum, config)\n break\n else:\n warn_or_raise(W37, W37, tag, config, pos)\n break\n\n for start, tag, data, pos in iterator:\n if not start and tag == 'DATA':\n break\n\n for start, tag, data, pos in iterator:\n if start and tag == 'INFO':\n if not config.get('version_1_2_or_later'):\n warn_or_raise(\n W26, W26, ('INFO', 'TABLE', '1.2'), config, pos)\n info = Info(config=config, pos=pos, **data)\n self.infos.append(info)\n info.parse(iterator, config)\n elif not start and tag == 'TABLE':\n break\n\n return self\n\n def _parse_tabledata(self, iterator, colnumbers, fields, config):\n # Since we don't know the number of rows up front, we'll\n # reallocate the record array to make room as we go. This\n # prevents the need to scan through the XML twice. 
The\n # allocation is by factors of 1.5.\n invalid = config.get('invalid', 'exception')\n\n # Need to have only one reference so that we can resize the\n # array\n array = self.array\n del self.array\n\n parsers = [field.converter.parse for field in fields]\n binparsers = [field.converter.binparse for field in fields]\n\n numrows = 0\n alloc_rows = len(array)\n colnumbers_bits = [i in colnumbers for i in range(len(fields))]\n row_default = [x.converter.default for x in fields]\n mask_default = [True] * len(fields)\n array_chunk = []\n mask_chunk = []\n chunk_size = config.get('chunk_size', DEFAULT_CHUNK_SIZE)\n for start, tag, data, pos in iterator:\n if tag == 'TR':\n # Now parse one row\n row = row_default[:]\n row_mask = mask_default[:]\n i = 0\n for start, tag, data, pos in iterator:\n if start:\n binary = (data.get('encoding', None) == 'base64')\n warn_unknown_attrs(\n tag, data.keys(), config, pos, ['encoding'])\n else:\n if tag == 'TD':\n if i >= len(fields):\n vo_raise(E20, len(fields), config, pos)\n\n if colnumbers_bits[i]:\n try:\n if binary:\n rawdata = base64.b64decode(\n data.encode('ascii'))\n buf = io.BytesIO(rawdata)\n buf.seek(0)\n try:\n value, mask_value = binparsers[i](\n buf.read)\n except Exception as e:\n vo_reraise(\n e, config, pos,\n \"(in row {:d}, col '{}')\".format(\n len(array_chunk),\n fields[i].ID))\n else:\n try:\n value, mask_value = parsers[i](\n data, config, pos)\n except Exception as e:\n vo_reraise(\n e, config, pos,\n \"(in row {:d}, col '{}')\".format(\n len(array_chunk),\n fields[i].ID))\n except Exception as e:\n if invalid == 'exception':\n vo_reraise(e, config, pos)\n else:\n row[i] = value\n row_mask[i] = mask_value\n elif tag == 'TR':\n break\n else:\n self._add_unknown_tag(\n iterator, tag, data, config, pos)\n i += 1\n\n if i < len(fields):\n vo_raise(E21, (i, len(fields)), config, pos)\n\n array_chunk.append(tuple(row))\n mask_chunk.append(tuple(row_mask))\n\n if len(array_chunk) == chunk_size:\n while numrows + chunk_size > alloc_rows:\n alloc_rows = self._resize_strategy(alloc_rows)\n if alloc_rows != len(array):\n array = _resize(array, alloc_rows)\n array[numrows:numrows + chunk_size] = array_chunk\n array.mask[numrows:numrows + chunk_size] = mask_chunk\n numrows += chunk_size\n array_chunk = []\n mask_chunk = []\n\n elif not start and tag == 'TABLEDATA':\n break\n\n # Now, resize the array to the exact number of rows we need and\n # put the last chunk values in there.\n alloc_rows = numrows + len(array_chunk)\n\n array = _resize(array, alloc_rows)\n array[numrows:] = array_chunk\n if alloc_rows != 0:\n array.mask[numrows:] = mask_chunk\n numrows += len(array_chunk)\n\n if (self.nrows is not None and\n self.nrows >= 0 and\n self.nrows != numrows):\n warn_or_raise(W18, W18, (self.nrows, numrows), config, pos)\n self._nrows = numrows\n\n return array\n\n def _get_binary_data_stream(self, iterator, config):\n have_local_stream = False\n for start, tag, data, pos in iterator:\n if tag == 'STREAM':\n if start:\n warn_unknown_attrs(\n 'STREAM', data.keys(), config, pos,\n ['type', 'href', 'actuate', 'encoding', 'expires',\n 'rights'])\n if 'href' not in data:\n have_local_stream = True\n if data.get('encoding', None) != 'base64':\n warn_or_raise(\n W38, W38, data.get('encoding', None),\n config, pos)\n else:\n href = data['href']\n xmlutil.check_anyuri(href, config, pos)\n encoding = data.get('encoding', None)\n else:\n buffer = data\n break\n\n if have_local_stream:\n buffer = base64.b64decode(buffer.encode('ascii'))\n string_io = 
io.BytesIO(buffer)\n string_io.seek(0)\n read = string_io.read\n else:\n if not href.startswith(('http', 'ftp', 'file')):\n vo_raise(\n \"The vo package only supports remote data through http, \" +\n \"ftp or file\",\n self._config, self._pos, NotImplementedError)\n fd = urllib.request.urlopen(href)\n if encoding is not None:\n if encoding == 'gzip':\n fd = gzip.GzipFile(href, 'rb', fileobj=fd)\n elif encoding == 'base64':\n fd = codecs.EncodedFile(fd, 'base64')\n else:\n vo_raise(\n f\"Unknown encoding type '{encoding}'\",\n self._config, self._pos, NotImplementedError)\n read = fd.read\n\n def careful_read(length):\n result = read(length)\n if len(result) != length:\n raise EOFError\n return result\n\n return careful_read\n\n def _parse_binary(self, mode, iterator, colnumbers, fields, config, pos):\n fields = self.fields\n\n careful_read = self._get_binary_data_stream(iterator, config)\n\n # Need to have only one reference so that we can resize the\n # array\n array = self.array\n del self.array\n\n binparsers = [field.converter.binparse for field in fields]\n\n numrows = 0\n alloc_rows = len(array)\n while True:\n # Resize result arrays if necessary\n if numrows >= alloc_rows:\n alloc_rows = self._resize_strategy(alloc_rows)\n array = _resize(array, alloc_rows)\n\n row_data = []\n row_mask_data = []\n\n try:\n if mode == 2:\n mask_bits = careful_read(int((len(fields) + 7) / 8))\n row_mask_data = list(converters.bitarray_to_bool(\n mask_bits, len(fields)))\n\n # Ignore the mask for string columns (see issue 8995)\n for i, f in enumerate(fields):\n if row_mask_data[i] and (f.datatype == 'char' or f.datatype == 'unicodeChar'):\n row_mask_data[i] = False\n\n for i, binparse in enumerate(binparsers):\n try:\n value, value_mask = binparse(careful_read)\n except EOFError:\n raise\n except Exception as e:\n vo_reraise(\n e, config, pos, \"(in row {:d}, col '{}')\".format(\n numrows, fields[i].ID))\n row_data.append(value)\n if mode == 1:\n row_mask_data.append(value_mask)\n else:\n row_mask_data[i] = row_mask_data[i] or value_mask\n except EOFError:\n break\n\n row = [x.converter.default for x in fields]\n row_mask = [False] * len(fields)\n for i in colnumbers:\n row[i] = row_data[i]\n row_mask[i] = row_mask_data[i]\n\n array[numrows] = tuple(row)\n array.mask[numrows] = tuple(row_mask)\n numrows += 1\n\n array = _resize(array, numrows)\n\n return array\n\n def _parse_fits(self, iterator, extnum, config):\n for start, tag, data, pos in iterator:\n if tag == 'STREAM':\n if start:\n warn_unknown_attrs(\n 'STREAM', data.keys(), config, pos,\n ['type', 'href', 'actuate', 'encoding', 'expires',\n 'rights'])\n href = data['href']\n encoding = data.get('encoding', None)\n else:\n break\n\n if not href.startswith(('http', 'ftp', 'file')):\n vo_raise(\n \"The vo package only supports remote data through http, \"\n \"ftp or file\",\n self._config, self._pos, NotImplementedError)\n\n fd = urllib.request.urlopen(href)\n if encoding is not None:\n if encoding == 'gzip':\n fd = gzip.GzipFile(href, 'r', fileobj=fd)\n elif encoding == 'base64':\n fd = codecs.EncodedFile(fd, 'base64')\n else:\n vo_raise(\n f\"Unknown encoding type '{encoding}'\",\n self._config, self._pos, NotImplementedError)\n\n hdulist = fits.open(fd)\n\n array = hdulist[int(extnum)].data\n if array.dtype != self.array.dtype:\n warn_or_raise(W19, W19, (), self._config, self._pos)\n\n return array\n\n def to_xml(self, w, **kwargs):\n specified_format = kwargs.get('tabledata_format')\n if specified_format is not None:\n format = 
specified_format\n else:\n format = self.format\n if format == 'fits':\n format = 'tabledata'\n\n with w.tag(\n 'TABLE',\n attrib=w.object_attrs(\n self,\n ('ID', 'name', 'ref', 'ucd', 'utype', 'nrows'))):\n\n if self.description is not None:\n w.element(\"DESCRIPTION\", self.description, wrap=True)\n\n for element_set in (self.fields, self.params):\n for element in element_set:\n element._setup({}, None)\n\n if self.ref is None:\n for element_set in (self.fields, self.params, self.groups,\n self.links):\n for element in element_set:\n element.to_xml(w, **kwargs)\n elif kwargs['version_1_2_or_later']:\n index = list(self._votable.iter_tables()).index(self)\n group = Group(self, ID=f\"_g{index}\")\n group.to_xml(w, **kwargs)\n\n if len(self.array):\n with w.tag('DATA'):\n if format == 'tabledata':\n self._write_tabledata(w, **kwargs)\n elif format == 'binary':\n self._write_binary(1, w, **kwargs)\n elif format == 'binary2':\n self._write_binary(2, w, **kwargs)\n\n if kwargs['version_1_2_or_later']:\n for element in self._infos:\n element.to_xml(w, **kwargs)\n\n def _write_tabledata(self, w, **kwargs):\n fields = self.fields\n array = self.array\n\n with w.tag('TABLEDATA'):\n w._flush()\n if (_has_c_tabledata_writer and\n not kwargs.get('_debug_python_based_parser')):\n supports_empty_values = [\n field.converter.supports_empty_values(kwargs)\n for field in fields]\n fields = [field.converter.output for field in fields]\n indent = len(w._tags) - 1\n tablewriter.write_tabledata(\n w.write, array.data, array.mask, fields,\n supports_empty_values, indent, 1 << 8)\n else:\n write = w.write\n indent_spaces = w.get_indentation_spaces()\n tr_start = indent_spaces + \"<TR>\\n\"\n tr_end = indent_spaces + \"</TR>\\n\"\n td = indent_spaces + \" <TD>{}</TD>\\n\"\n td_empty = indent_spaces + \" <TD/>\\n\"\n fields = [(i, field.converter.output,\n field.converter.supports_empty_values(kwargs))\n for i, field in enumerate(fields)]\n for row in range(len(array)):\n write(tr_start)\n array_row = array.data[row]\n mask_row = array.mask[row]\n for i, output, supports_empty_values in fields:\n data = array_row[i]\n masked = mask_row[i]\n if supports_empty_values and np.all(masked):\n write(td_empty)\n else:\n try:\n val = output(data, masked)\n except Exception as e:\n vo_reraise(\n e,\n additional=\"(in row {:d}, col '{}')\".format(\n row, self.fields[i].ID))\n if len(val):\n write(td.format(val))\n else:\n write(td_empty)\n write(tr_end)\n\n def _write_binary(self, mode, w, **kwargs):\n fields = self.fields\n array = self.array\n if mode == 1:\n tag_name = 'BINARY'\n else:\n tag_name = 'BINARY2'\n\n with w.tag(tag_name):\n with w.tag('STREAM', encoding='base64'):\n fields_basic = [(i, field.converter.binoutput)\n for (i, field) in enumerate(fields)]\n\n data = io.BytesIO()\n for row in range(len(array)):\n array_row = array.data[row]\n array_mask = array.mask[row]\n\n if mode == 2:\n flattened = np.array([np.all(x) for x in array_mask])\n data.write(converters.bool_to_bitarray(flattened))\n\n for i, converter in fields_basic:\n try:\n chunk = converter(array_row[i], array_mask[i])\n assert type(chunk) == bytes\n except Exception as e:\n vo_reraise(\n e, additional=f\"(in row {row:d}, col '{fields[i].ID}')\")\n data.write(chunk)\n\n w._flush()\n w.write(base64.b64encode(data.getvalue()).decode('ascii'))\n\n def to_table(self, use_names_over_ids=False):\n \"\"\"\n Convert this VO Table to an `astropy.table.Table` instance.\n\n Parameters\n ----------\n use_names_over_ids : bool, optional\n When `True` use the 
``name`` attributes of columns as the\n names of columns in the `astropy.table.Table` instance.\n Since names are not guaranteed to be unique, this may cause\n some columns to be renamed by appending numbers to the end.\n Otherwise (default), use the ID attributes as the column\n names.\n\n .. warning::\n Variable-length array fields may not be restored\n identically when round-tripping through the\n `astropy.table.Table` instance.\n \"\"\"\n from astropy.table import Table\n\n meta = {}\n for key in ['ID', 'name', 'ref', 'ucd', 'utype', 'description']:\n val = getattr(self, key, None)\n if val is not None:\n meta[key] = val\n\n if use_names_over_ids:\n names = [field.name for field in self.fields]\n unique_names = []\n for i, name in enumerate(names):\n new_name = name\n i = 2\n while new_name in unique_names:\n new_name = f'{name}{i}'\n i += 1\n unique_names.append(new_name)\n names = unique_names\n else:\n names = [field.ID for field in self.fields]\n\n table = Table(self.array, names=names, meta=meta)\n\n for name, field in zip(names, self.fields):\n column = table[name]\n field.to_table_column(column)\n\n return table\n\n @classmethod\n def from_table(cls, votable, table):\n \"\"\"\n Create a `Table` instance from a given `astropy.table.Table`\n instance.\n \"\"\"\n kwargs = {}\n for key in ['ID', 'name', 'ref', 'ucd', 'utype']:\n val = table.meta.get(key)\n if val is not None:\n kwargs[key] = val\n new_table = cls(votable, **kwargs)\n if 'description' in table.meta:\n new_table.description = table.meta['description']\n\n for colname in table.colnames:\n column = table[colname]\n new_table.fields.append(Field.from_table_column(votable, column))\n\n if table.mask is None:\n new_table.array = ma.array(np.asarray(table))\n else:\n new_table.array = ma.array(np.asarray(table),\n mask=np.asarray(table.mask))\n\n return new_table\n\n def iter_fields_and_params(self):\n \"\"\"\n Recursively iterate over all FIELD and PARAM elements in the\n TABLE.\n \"\"\"\n for param in self.params:\n yield param\n for field in self.fields:\n yield field\n for group in self.groups:\n for field in group.iter_fields_and_params():\n yield field\n\n get_field_by_id = _lookup_by_attr_factory(\n 'ID', True, 'iter_fields_and_params', 'FIELD or PARAM',\n \"\"\"\n Looks up a FIELD or PARAM element by the given ID.\n \"\"\")\n\n get_field_by_id_or_name = _lookup_by_id_or_name_factory(\n 'iter_fields_and_params', 'FIELD or PARAM',\n \"\"\"\n Looks up a FIELD or PARAM element by the given ID or name.\n \"\"\")\n\n get_fields_by_utype = _lookup_by_attr_factory(\n 'utype', False, 'iter_fields_and_params', 'FIELD or PARAM',\n \"\"\"\n Looks up a FIELD or PARAM element by the given utype and\n returns an iterator emitting all matches.\n \"\"\")\n\n def iter_groups(self):\n \"\"\"\n Recursively iterate over all GROUP elements in the TABLE.\n \"\"\"\n for group in self.groups:\n yield group\n for g in group.iter_groups():\n yield g\n\n get_group_by_id = _lookup_by_attr_factory(\n 'ID', True, 'iter_groups', 'GROUP',\n \"\"\"\n Looks up a GROUP element by the given ID. 
Used by the group's\n \"ref\" attribute\n \"\"\")\n\n get_groups_by_utype = _lookup_by_attr_factory(\n 'utype', False, 'iter_groups', 'GROUP',\n \"\"\"\n Looks up a GROUP element by the given utype and returns an\n iterator emitting all matches.\n \"\"\")\n\n def iter_info(self):\n for info in self.infos:\n yield info\n\n\nclass Resource(Element, _IDProperty, _NameProperty, _UtypeProperty,\n _DescriptionProperty):\n \"\"\"\n RESOURCE_ element: Groups TABLE_ and RESOURCE_ elements.\n\n The keyword arguments correspond to setting members of the same\n name, documented below.\n \"\"\"\n\n def __init__(self, name=None, ID=None, utype=None, type='results',\n id=None, config=None, pos=None, **kwargs):\n if config is None:\n config = {}\n self._config = config\n self._pos = pos\n\n Element.__init__(self)\n self.name = name\n self.ID = resolve_id(ID, id, config, pos)\n self.utype = utype\n self.type = type\n self._extra_attributes = kwargs\n self.description = None\n\n self._coordinate_systems = HomogeneousList(CooSys)\n self._time_systems = HomogeneousList(TimeSys)\n self._groups = HomogeneousList(Group)\n self._params = HomogeneousList(Param)\n self._infos = HomogeneousList(Info)\n self._links = HomogeneousList(Link)\n self._tables = HomogeneousList(Table)\n self._resources = HomogeneousList(Resource)\n\n warn_unknown_attrs('RESOURCE', kwargs.keys(), config, pos)\n\n def __repr__(self):\n buff = io.StringIO()\n w = XMLWriter(buff)\n w.element(\n self._element_name,\n attrib=w.object_attrs(self, self._attr_list))\n return buff.getvalue().strip()\n\n @property\n def type(self):\n \"\"\"\n [*required*] The type of the resource. Must be either:\n\n - 'results': This resource contains actual result values\n (default)\n\n - 'meta': This resource contains only datatype descriptions\n (FIELD_ elements), but no actual data.\n \"\"\"\n return self._type\n\n @type.setter\n def type(self, type):\n if type not in ('results', 'meta'):\n vo_raise(E18, type, self._config, self._pos)\n self._type = type\n\n @property\n def extra_attributes(self):\n \"\"\"\n A dictionary of string keys to string values containing any\n extra attributes of the RESOURCE_ element that are not defined\n in the specification. (The specification explicitly allows\n for extra attributes here, but nowhere else.)\n \"\"\"\n return self._extra_attributes\n\n @property\n def coordinate_systems(self):\n \"\"\"\n A list of coordinate system definitions (COOSYS_ elements) for\n the RESOURCE_. Must contain only `CooSys` objects.\n \"\"\"\n return self._coordinate_systems\n\n @property\n def time_systems(self):\n \"\"\"\n A list of time system definitions (TIMESYS_ elements) for\n the RESOURCE_. Must contain only `TimeSys` objects.\n \"\"\"\n return self._time_systems\n\n @property\n def infos(self):\n \"\"\"\n A list of informational parameters (key-value pairs) for the\n resource. Must only contain `Info` objects.\n \"\"\"\n return self._infos\n\n @property\n def groups(self):\n \"\"\"\n A list of groups\n \"\"\"\n return self._groups\n\n @property\n def params(self):\n \"\"\"\n A list of parameters (constant-valued columns) for the\n resource. Must contain only `Param` objects.\n \"\"\"\n return self._params\n\n @property\n def links(self):\n \"\"\"\n A list of links (pointers to other documents or servers\n through a URI) for the resource. Must contain only `Link`\n objects.\n \"\"\"\n return self._links\n\n @property\n def tables(self):\n \"\"\"\n A list of tables in the resource. 
Must contain only\n `Table` objects.\n \"\"\"\n return self._tables\n\n @property\n def resources(self):\n \"\"\"\n A list of nested resources inside this resource. Must contain\n only `Resource` objects.\n \"\"\"\n return self._resources\n\n def _add_table(self, iterator, tag, data, config, pos):\n table = Table(self._votable, config=config, pos=pos, **data)\n self.tables.append(table)\n table.parse(iterator, config)\n\n def _add_info(self, iterator, tag, data, config, pos):\n info = Info(config=config, pos=pos, **data)\n self.infos.append(info)\n info.parse(iterator, config)\n\n def _add_group(self, iterator, tag, data, config, pos):\n group = Group(self, config=config, pos=pos, **data)\n self.groups.append(group)\n group.parse(iterator, config)\n\n def _add_param(self, iterator, tag, data, config, pos):\n param = Param(self._votable, config=config, pos=pos, **data)\n self.params.append(param)\n param.parse(iterator, config)\n\n def _add_coosys(self, iterator, tag, data, config, pos):\n coosys = CooSys(config=config, pos=pos, **data)\n self.coordinate_systems.append(coosys)\n coosys.parse(iterator, config)\n\n def _add_timesys(self, iterator, tag, data, config, pos):\n timesys = TimeSys(config=config, pos=pos, **data)\n self.time_systems.append(timesys)\n timesys.parse(iterator, config)\n\n def _add_resource(self, iterator, tag, data, config, pos):\n resource = Resource(config=config, pos=pos, **data)\n self.resources.append(resource)\n resource.parse(self._votable, iterator, config)\n\n def _add_link(self, iterator, tag, data, config, pos):\n link = Link(config=config, pos=pos, **data)\n self.links.append(link)\n link.parse(iterator, config)\n\n def parse(self, votable, iterator, config):\n self._votable = votable\n\n tag_mapping = {\n 'TABLE': self._add_table,\n 'INFO': self._add_info,\n 'PARAM': self._add_param,\n 'GROUP': self._add_group,\n 'COOSYS': self._add_coosys,\n 'TIMESYS': self._add_timesys,\n 'RESOURCE': self._add_resource,\n 'LINK': self._add_link,\n 'DESCRIPTION': self._ignore_add\n }\n\n for start, tag, data, pos in iterator:\n if start:\n tag_mapping.get(tag, self._add_unknown_tag)(\n iterator, tag, data, config, pos)\n elif tag == 'DESCRIPTION':\n if self.description is not None:\n warn_or_raise(W17, W17, 'RESOURCE', config, pos)\n self.description = data or None\n elif tag == 'RESOURCE':\n break\n\n del self._votable\n\n return self\n\n def to_xml(self, w, **kwargs):\n attrs = w.object_attrs(self, ('ID', 'type', 'utype'))\n attrs.update(self.extra_attributes)\n with w.tag('RESOURCE', attrib=attrs):\n if self.description is not None:\n w.element(\"DESCRIPTION\", self.description, wrap=True)\n for element_set in (self.coordinate_systems, self.time_systems,\n self.params, self.infos, self.links,\n self.tables, self.resources):\n for element in element_set:\n element.to_xml(w, **kwargs)\n\n def iter_tables(self):\n \"\"\"\n Recursively iterates over all tables in the resource and\n nested resources.\n \"\"\"\n for table in self.tables:\n yield table\n for resource in self.resources:\n for table in resource.iter_tables():\n yield table\n\n def iter_fields_and_params(self):\n \"\"\"\n Recursively iterates over all FIELD_ and PARAM_ elements in\n the resource, its tables and nested resources.\n \"\"\"\n for param in self.params:\n yield param\n for table in self.tables:\n for param in table.iter_fields_and_params():\n yield param\n for resource in self.resources:\n for param in resource.iter_fields_and_params():\n yield param\n\n def iter_coosys(self):\n \"\"\"\n Recursively 
iterates over all the COOSYS_ elements in the\n resource and nested resources.\n \"\"\"\n for coosys in self.coordinate_systems:\n yield coosys\n for resource in self.resources:\n for coosys in resource.iter_coosys():\n yield coosys\n\n def iter_timesys(self):\n \"\"\"\n Recursively iterates over all the TIMESYS_ elements in the\n resource and nested resources.\n \"\"\"\n for timesys in self.time_systems:\n yield timesys\n for resource in self.resources:\n for timesys in resource.iter_timesys():\n yield timesys\n\n def iter_info(self):\n \"\"\"\n Recursively iterates over all the INFO_ elements in the\n resource and nested resources.\n \"\"\"\n for info in self.infos:\n yield info\n for table in self.tables:\n for info in table.iter_info():\n yield info\n for resource in self.resources:\n for info in resource.iter_info():\n yield info\n\n\nclass VOTableFile(Element, _IDProperty, _DescriptionProperty):\n \"\"\"\n VOTABLE_ element: represents an entire file.\n\n The keyword arguments correspond to setting members of the same\n name, documented below.\n\n *version* is settable at construction time only, since conformance\n tests for building the rest of the structure depend on it.\n \"\"\"\n\n def __init__(self, ID=None, id=None, config=None, pos=None, version=\"1.4\"):\n if config is None:\n config = {}\n self._config = config\n self._pos = pos\n\n Element.__init__(self)\n self.ID = resolve_id(ID, id, config, pos)\n self.description = None\n\n self._coordinate_systems = HomogeneousList(CooSys)\n self._time_systems = HomogeneousList(TimeSys)\n self._params = HomogeneousList(Param)\n self._infos = HomogeneousList(Info)\n self._resources = HomogeneousList(Resource)\n self._groups = HomogeneousList(Group)\n\n version = str(version)\n if version not in (\"1.0\", \"1.1\", \"1.2\", \"1.3\", \"1.4\"):\n raise ValueError(\"'version' should be one of '1.0', '1.1', \"\n \"'1.2', '1.3', or '1.4'\")\n\n self._version = version\n\n def __repr__(self):\n n_tables = len(list(self.iter_tables()))\n return f'<VOTABLE>... {n_tables} tables ...</VOTABLE>'\n\n @property\n def version(self):\n \"\"\"\n The version of the VOTable specification that the file uses.\n \"\"\"\n return self._version\n\n @version.setter\n def version(self, version):\n version = str(version)\n if version not in ('1.1', '1.2', '1.3', '1.4'):\n raise ValueError(\n \"astropy.io.votable only supports VOTable versions \"\n \"1.1, 1.2, 1.3, and 1.4\")\n self._version = version\n\n @property\n def coordinate_systems(self):\n \"\"\"\n A list of coordinate system descriptions for the file. Must\n contain only `CooSys` objects.\n \"\"\"\n return self._coordinate_systems\n\n @property\n def time_systems(self):\n \"\"\"\n A list of time system descriptions for the file. Must\n contain only `TimeSys` objects.\n \"\"\"\n return self._time_systems\n\n @property\n def params(self):\n \"\"\"\n A list of parameters (constant-valued columns) that apply to\n the entire file. Must contain only `Param` objects.\n \"\"\"\n return self._params\n\n @property\n def infos(self):\n \"\"\"\n A list of informational parameters (key-value pairs) for the\n entire file. Must only contain `Info` objects.\n \"\"\"\n return self._infos\n\n @property\n def resources(self):\n \"\"\"\n A list of resources, in the order they appear in the file.\n Must only contain `Resource` objects.\n \"\"\"\n return self._resources\n\n @property\n def groups(self):\n \"\"\"\n A list of groups, in the order they appear in the file. 
Only\n supported as a child of the VOTABLE element in VOTable 1.2 or\n later.\n \"\"\"\n return self._groups\n\n def _add_param(self, iterator, tag, data, config, pos):\n param = Param(self, config=config, pos=pos, **data)\n self.params.append(param)\n param.parse(iterator, config)\n\n def _add_resource(self, iterator, tag, data, config, pos):\n resource = Resource(config=config, pos=pos, **data)\n self.resources.append(resource)\n resource.parse(self, iterator, config)\n\n def _add_coosys(self, iterator, tag, data, config, pos):\n coosys = CooSys(config=config, pos=pos, **data)\n self.coordinate_systems.append(coosys)\n coosys.parse(iterator, config)\n\n def _add_timesys(self, iterator, tag, data, config, pos):\n timesys = TimeSys(config=config, pos=pos, **data)\n self.time_systems.append(timesys)\n timesys.parse(iterator, config)\n\n def _add_info(self, iterator, tag, data, config, pos):\n info = Info(config=config, pos=pos, **data)\n self.infos.append(info)\n info.parse(iterator, config)\n\n def _add_group(self, iterator, tag, data, config, pos):\n if not config.get('version_1_2_or_later'):\n warn_or_raise(W26, W26, ('GROUP', 'VOTABLE', '1.2'), config, pos)\n group = Group(self, config=config, pos=pos, **data)\n self.groups.append(group)\n group.parse(iterator, config)\n\n def _get_version_checks(self):\n config = {}\n config['version_1_1_or_later'] = \\\n util.version_compare(self.version, '1.1') >= 0\n config['version_1_2_or_later'] = \\\n util.version_compare(self.version, '1.2') >= 0\n config['version_1_3_or_later'] = \\\n util.version_compare(self.version, '1.3') >= 0\n config['version_1_4_or_later'] = \\\n util.version_compare(self.version, '1.4') >= 0\n return config\n\n def parse(self, iterator, config):\n config['_current_table_number'] = 0\n\n for start, tag, data, pos in iterator:\n if start:\n if tag == 'xml':\n pass\n elif tag == 'VOTABLE':\n if 'version' not in data:\n warn_or_raise(W20, W20, self.version, config, pos)\n config['version'] = self.version\n else:\n config['version'] = self._version = data['version']\n if config['version'].lower().startswith('v'):\n warn_or_raise(\n W29, W29, config['version'], config, pos)\n self._version = config['version'] = \\\n config['version'][1:]\n if config['version'] not in ('1.1', '1.2', '1.3', '1.4'):\n vo_warn(W21, config['version'], config, pos)\n\n if 'xmlns' in data:\n # Starting with VOTable 1.3, namespace URIs stop\n # incrementing with minor version changes. 
See\n # this IVOA note for more info:\n # http://www.ivoa.net/documents/Notes/XMLVers/20180529/\n #\n # If this policy is in place for major version 2,\n # then this logic will need tweaking.\n if config['version'] in ('1.3', '1.4'):\n ns_version = '1.3'\n else:\n ns_version = config['version']\n correct_ns = f'http://www.ivoa.net/xml/VOTable/v{ns_version}'\n if data['xmlns'] != correct_ns:\n vo_warn(\n W41, (correct_ns, data['xmlns']), config, pos)\n else:\n vo_warn(W42, (), config, pos)\n\n break\n else:\n vo_raise(E19, (), config, pos)\n config.update(self._get_version_checks())\n\n tag_mapping = {\n 'PARAM': self._add_param,\n 'RESOURCE': self._add_resource,\n 'COOSYS': self._add_coosys,\n 'TIMESYS': self._add_timesys,\n 'INFO': self._add_info,\n 'DEFINITIONS': self._add_definitions,\n 'DESCRIPTION': self._ignore_add,\n 'GROUP': self._add_group}\n\n for start, tag, data, pos in iterator:\n if start:\n tag_mapping.get(tag, self._add_unknown_tag)(\n iterator, tag, data, config, pos)\n elif tag == 'DESCRIPTION':\n if self.description is not None:\n warn_or_raise(W17, W17, 'VOTABLE', config, pos)\n self.description = data or None\n\n if not len(self.resources) and config['version_1_2_or_later']:\n warn_or_raise(W53, W53, (), config, pos)\n\n return self\n\n def to_xml(self, fd, compressed=False, tabledata_format=None,\n _debug_python_based_parser=False, _astropy_version=None):\n \"\"\"\n Write to an XML file.\n\n Parameters\n ----------\n fd : str path or writable file-like object\n Where to write the file.\n\n compressed : bool, optional\n When `True`, write to a gzip-compressed file. (Default:\n `False`)\n\n tabledata_format : str, optional\n Override the format of the table(s) data to write. Must\n be one of ``tabledata`` (text representation), ``binary`` or\n ``binary2``. By default, use the format that was specified\n in each `Table` object as it was created or read in. 
See\n :ref:`votable-serialization`.\n \"\"\"\n if tabledata_format is not None:\n if tabledata_format.lower() not in (\n 'tabledata', 'binary', 'binary2'):\n raise ValueError(f\"Unknown format type '{format}'\")\n\n kwargs = {\n 'version': self.version,\n 'tabledata_format':\n tabledata_format,\n '_debug_python_based_parser': _debug_python_based_parser,\n '_group_number': 1}\n kwargs.update(self._get_version_checks())\n\n with util.convert_to_writable_filelike(\n fd, compressed=compressed) as fd:\n w = XMLWriter(fd)\n version = self.version\n if _astropy_version is None:\n lib_version = astropy_version\n else:\n lib_version = _astropy_version\n\n xml_header = \"\"\"\n<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<!-- Produced with astropy.io.votable version {lib_version}\n http://www.astropy.org/ -->\\n\"\"\"\n w.write(xml_header.lstrip().format(**locals()))\n\n with w.tag('VOTABLE',\n {'version': version,\n 'xmlns:xsi':\n \"http://www.w3.org/2001/XMLSchema-instance\",\n 'xsi:noNamespaceSchemaLocation':\n f\"http://www.ivoa.net/xml/VOTable/v{version}\",\n 'xmlns':\n f\"http://www.ivoa.net/xml/VOTable/v{version}\"}):\n if self.description is not None:\n w.element(\"DESCRIPTION\", self.description, wrap=True)\n element_sets = [self.coordinate_systems, self.time_systems,\n self.params, self.infos, self.resources]\n if kwargs['version_1_2_or_later']:\n element_sets[0] = self.groups\n for element_set in element_sets:\n for element in element_set:\n element.to_xml(w, **kwargs)\n\n def iter_tables(self):\n \"\"\"\n Iterates over all tables in the VOTable file in a \"flat\" way,\n ignoring the nesting of resources etc.\n \"\"\"\n for resource in self.resources:\n for table in resource.iter_tables():\n yield table\n\n def get_first_table(self):\n \"\"\"\n Often, you know there is only one table in the file, and\n that's all you need. This method returns that first table.\n \"\"\"\n for table in self.iter_tables():\n if not table.is_empty():\n return table\n raise IndexError(\"No table found in VOTABLE file.\")\n\n get_table_by_id = _lookup_by_attr_factory(\n 'ID', True, 'iter_tables', 'TABLE',\n \"\"\"\n Looks up a TABLE_ element by the given ID. Used by the table\n \"ref\" attribute.\n \"\"\")\n\n get_tables_by_utype = _lookup_by_attr_factory(\n 'utype', False, 'iter_tables', 'TABLE',\n \"\"\"\n Looks up a TABLE_ element by the given utype, and returns an\n iterator emitting all matches.\n \"\"\")\n\n def get_table_by_index(self, idx):\n \"\"\"\n Get a table by its ordinal position in the file.\n \"\"\"\n for i, table in enumerate(self.iter_tables()):\n if i == idx:\n return table\n raise IndexError(\n f\"No table at index {idx:d} found in VOTABLE file.\")\n\n def iter_fields_and_params(self):\n \"\"\"\n Recursively iterate over all FIELD_ and PARAM_ elements in the\n VOTABLE_ file.\n \"\"\"\n for resource in self.resources:\n for field in resource.iter_fields_and_params():\n yield field\n\n get_field_by_id = _lookup_by_attr_factory(\n 'ID', True, 'iter_fields_and_params', 'FIELD',\n \"\"\"\n Looks up a FIELD_ element by the given ID_. 
Used by the field's\n \"ref\" attribute.\n \"\"\")\n\n get_fields_by_utype = _lookup_by_attr_factory(\n 'utype', False, 'iter_fields_and_params', 'FIELD',\n \"\"\"\n Looks up a FIELD_ element by the given utype and returns an\n iterator emitting all matches.\n \"\"\")\n\n get_field_by_id_or_name = _lookup_by_id_or_name_factory(\n 'iter_fields_and_params', 'FIELD',\n \"\"\"\n Looks up a FIELD_ element by the given ID_ or name.\n \"\"\")\n\n def iter_values(self):\n \"\"\"\n Recursively iterate over all VALUES_ elements in the VOTABLE_\n file.\n \"\"\"\n for field in self.iter_fields_and_params():\n yield field.values\n\n get_values_by_id = _lookup_by_attr_factory(\n 'ID', True, 'iter_values', 'VALUES',\n \"\"\"\n Looks up a VALUES_ element by the given ID. Used by the values\n \"ref\" attribute.\n \"\"\")\n\n def iter_groups(self):\n \"\"\"\n Recursively iterate over all GROUP_ elements in the VOTABLE_\n file.\n \"\"\"\n for table in self.iter_tables():\n for group in table.iter_groups():\n yield group\n\n get_group_by_id = _lookup_by_attr_factory(\n 'ID', True, 'iter_groups', 'GROUP',\n \"\"\"\n Looks up a GROUP_ element by the given ID. Used by the group's\n \"ref\" attribute\n \"\"\")\n\n get_groups_by_utype = _lookup_by_attr_factory(\n 'utype', False, 'iter_groups', 'GROUP',\n \"\"\"\n Looks up a GROUP_ element by the given utype and returns an\n iterator emitting all matches.\n \"\"\")\n\n def iter_coosys(self):\n \"\"\"\n Recursively iterate over all COOSYS_ elements in the VOTABLE_\n file.\n \"\"\"\n for coosys in self.coordinate_systems:\n yield coosys\n for resource in self.resources:\n for coosys in resource.iter_coosys():\n yield coosys\n\n get_coosys_by_id = _lookup_by_attr_factory(\n 'ID', True, 'iter_coosys', 'COOSYS',\n \"\"\"Looks up a COOSYS_ element by the given ID.\"\"\")\n\n def iter_timesys(self):\n \"\"\"\n Recursively iterate over all TIMESYS_ elements in the VOTABLE_\n file.\n \"\"\"\n for timesys in self.time_systems:\n yield timesys\n for resource in self.resources:\n for timesys in resource.iter_timesys():\n yield timesys\n\n get_timesys_by_id = _lookup_by_attr_factory(\n 'ID', True, 'iter_timesys', 'TIMESYS',\n \"\"\"Looks up a TIMESYS_ element by the given ID.\"\"\")\n\n def iter_info(self):\n \"\"\"\n Recursively iterate over all INFO_ elements in the VOTABLE_\n file.\n \"\"\"\n for info in self.infos:\n yield info\n for resource in self.resources:\n for info in resource.iter_info():\n yield info\n\n get_info_by_id = _lookup_by_attr_factory(\n 'ID', True, 'iter_info', 'INFO',\n \"\"\"Looks up a INFO element by the given ID.\"\"\")\n\n def set_all_tables_format(self, format):\n \"\"\"\n Set the output storage format of all tables in the file.\n \"\"\"\n for table in self.iter_tables():\n table.format = format\n\n @classmethod\n def from_table(cls, table, table_id=None):\n \"\"\"\n Create a `VOTableFile` instance from a given\n `astropy.table.Table` instance.\n\n Parameters\n ----------\n table_id : str, optional\n Set the given ID attribute on the returned Table instance.\n \"\"\"\n votable_file = cls()\n resource = Resource()\n votable = Table.from_table(votable_file, table)\n if table_id is not None:\n votable.ID = table_id\n resource.tables.append(votable)\n votable_file.resources.append(resource)\n return votable_file\n"
] | [
[
"numpy.ceil",
"numpy.zeros",
"numpy.dtype",
"numpy.any",
"numpy.asarray",
"numpy.ma.array",
"numpy.recarray",
"numpy.all",
"numpy.ma.zeros"
]
] |
tsaodingtw/pttanal | [
"c1d786c04e2d6f1ce02f6886748b4dbec1f37676"
] | [
"tf.py"
] | [
"import tensorflow as tf\nimport numpy as np\nfrom feature import Feature\nimport sqlite3\nimport pickle\n\ndb = sqlite3.connect('ptt.db')\ncur = db.execute('SELECT * FROM ARTICLES LIMIT 1000')\n\n\n# Create 100 phony x, y data points in NumPy, y = x * 0.1 + 0.3\nf = Feature()\npost_data = []\npush_data = []\nboo_data = []\nfor i in cur:\n post_data.append(f.features_for_tensorflow(i[5]))\n push_data.append(i[7])\n boo_data.append(i[8])\n\ny_data = np.array(push_data) - np.array(boo_data)\n\nx_data = np.array(post_data)\nx = tf.placeholder(tf.float32, shape=(300000, 1))\n# Try to find values for W and b that compute y_data = W * x_data + b\n# (We know that W should be 0.1 and b 0.3, but Tensorflow will\n# figure that out for us.)\nW = tf.Variable(tf.random_uniform([300000, 1], -1.0, 1.0), name=\"Weigh\")\nb = tf.Variable(tf.zeros([1]), name=\"Bias\")\ny = tf.add(tf.matmul(W, x, transpose_a=True), b)\n\n# Minimize the mean squared errors.\nloss = tf.reduce_mean(tf.square(y - y_data))\noptimizer = tf.train.GradientDescentOptimizer(0.0000005)\ntrain = optimizer.minimize(loss)\n\n# Before starting, initialize the variables. We will 'run' this first.\ninit = tf.initialize_all_variables()\n\n# Launch the graph.\nsaver = tf.train.Saver([W, b])\nsess = tf.Session()\nsess.run(init)\n\n# Fit the line.\nfor step in range(20001):\n for data in x_data:\n sess.run(train, feed_dict={x: data})\n if step % 20 == 0:\n print(step, sess.run(W), sess.run(b))\n if step % 1000 == 0:\n # Append the step number to the checkpoint name:\n saver.save(sess, 'my-model', global_step=step)\n\n"
] | [
[
"tensorflow.initialize_all_variables",
"tensorflow.placeholder",
"tensorflow.zeros",
"tensorflow.matmul",
"tensorflow.random_uniform",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.train.Saver",
"tensorflow.Session",
"numpy.array",
"tensorflow.square"
]
] |
thorwhalen/ut | [
"353a4629c35a2cca76ef91a4d5209afe766433b4"
] | [
"sound/dacc/mg.py"
] | [
"__author__ = 'thor'\n\nimport os\nimport pandas as pd\nfrom pymongo import MongoClient\nfrom pymongo.cursor import Cursor\n\n\nfrom ut.sound import util as sutil\nfrom ut.daf.manip import reorder_columns_as\nfrom ut.sound.util import Sound\nfrom ut.pstr.trans import str_to_utf8_or_bust\n\n\nclass MgDacc(object):\n def __init__(self, db, collection, root_folder, path_field='_id', mg_client_kwargs={}):\n self.mgc = MongoClient(**mg_client_kwargs)[db][collection]\n self.root_folder = root_folder\n self.path_field = path_field\n\n def filepath_of(self, path):\n return str_to_utf8_or_bust(os.path.join(self.root_folder, path))\n\n def get_wf_and_sr(self, path, **kwargs):\n return sutil.wf_and_sr_from_filepath(self.filepath_of(path), **kwargs)\n\n def get_sound(self, path_or_doc, **kwargs):\n if not isinstance(path_or_doc, str):\n path_or_doc = path_or_doc.copy()\n file_path = path_or_doc.pop(self.path_field)\n kwargs = dict(kwargs, **path_or_doc)\n path_or_doc = file_path\n name = kwargs.pop('name', os.path.splitext(os.path.basename(path_or_doc))[0])\n try:\n wf, sr = self.get_wf_and_sr(path_or_doc, **kwargs)\n except TypeError:\n kwargs.pop('channels')\n kwargs.pop('frames')\n wf, sr = self.get_wf_and_sr(path_or_doc, **kwargs)\n return Sound(wf=wf, sr=sr, name=name)\n\n def get_sound_iterator(self, find_args={}, find_kwargs={}):\n \"\"\"\n Util to flip through sounds.\n You can do, for example:\n sound_iterator = self.get_sound_iterator\n and then run the following several times:\n sound = sound_iterator.next(); sound.display_sound()\n \"\"\"\n if not find_args and not find_kwargs:\n cursor = self.mgc.find()\n else:\n cursor = self.mgc.find(*find_args, **find_kwargs)\n return map(lambda x: self.get_sound(path_or_doc=x[self.path_field]), cursor)\n\n\nclass SegmentDacc(MgDacc):\n def __init__(self, db, collection, root_folder, path_field='_id', mg_client_kwargs={},\n segment_field='segments', feat_field='fv', tag_field='tags', kv_tag_field='kv_tags'):\n super(SegmentDacc, self).__init__(db, collection, root_folder, path_field, mg_client_kwargs)\n self.segment_field = segment_field\n self.feat_field = feat_field\n self.tag_field = tag_field\n self.kv_tag_field = kv_tag_field\n\n def get_data_with_tags(self, *args, **kwargs):\n if len(args) > 0 and isinstance(args[0], Cursor):\n c = args[0]\n else:\n c = self.mgc.find(*args, **kwargs)\n d = list()\n for ci in c:\n for seg in ci['segments']:\n dd = {'path': ci[self.path_field], 'tags': ci[self.tag_field]}\n dd.update(seg['fv'])\n dd.update({'offset_s': seg['offset_s'], 'duration': seg['duration']})\n d += [dd]\n d = reorder_columns_as(pd.DataFrame(d), ['path', 'tags', 'offset_s', 'duration'])\n return d\n\n def get_data_with_kv_tags(self, *args, **kwargs):\n if 'kv_tag_keys' in list(kwargs.keys()):\n kv_tag_keys = kwargs.get('kv_tag_keys')\n kwargs.pop('kv_tag_keys')\n else:\n kv_tag_keys = ['move_direction', 'vehicle_type']\n\n if len(args) > 0 and isinstance(args[0], Cursor):\n c = args[0]\n else:\n c = self.mgc.find(*args, **kwargs)\n d = list()\n for ci in c:\n for seg in ci[self.segment_field]:\n dd = {'path': ci[self.path_field]}\n for tag_key in kv_tag_keys:\n dd.update({tag_key: ci[self.kv_tag_field].get(tag_key, None)})\n dd.update(seg['fv'])\n dd.update({'offset_s': seg['offset_s'], 'duration': seg['duration']})\n d += [dd]\n d = reorder_columns_as(pd.DataFrame(d), ['path'] + kv_tag_keys + ['offset_s', 'duration'])\n return d\n\n # def get_sound(self, *args, **kwargs):\n # # if len(args) > 0:\n # # kwargs['path_or_doc'] = args[0]\n # 
return super(SegmentDacc, self).get_sound(path_or_doc=, **kwargs)\n\n # return super(SegmentDacc, self).get_sound(args[0], **kwargs)\n # return super(SegmentDacc, self).get_sound(path_or_doc=kwargs['path'],\n # offset_s=kwargs['offset_s'],\n # duration=kwargs['duration'])\n\n def get_segment_iterator(self, only_segments=True, fields=None, *args, **kwargs):\n cursor = self.mgc.find(*args, **kwargs)\n\n def segment_iterator():\n for d in cursor:\n segments = d.pop(self.segment_field)\n if segments is not None:\n for dd in segments:\n if not only_segments:\n dd = dict(d, **dd)\n if fields is None:\n yield dd\n else:\n yield {k: v for k, v in dd.items() if k in fields}\n\n return segment_iterator()\n\n def get_sound_iterator(self, *args, **kwargs):\n \"\"\"\n Util to flip through sounds.\n You can do, for example:\n sound_iterator = self.get_sound_iterator\n and then run the following several times:\n sound = sound_iterator.next(); sound.display_sound()\n \"\"\"\n\n cursor = self.mgc.find(*args, **kwargs)\n return map(self.get_sound, cursor)\n"
] | [
[
"pandas.DataFrame"
]
] |
MaksHess/napari | [
"64a144607342c02177fc62fa83a3442ace0a98e7"
] | [
"napari/utils/_dtype.py"
] | [
"from typing import Tuple, Union\n\nimport numpy as np\n\n_np_uints = {\n 8: np.uint8,\n 16: np.uint16,\n 32: np.uint32,\n 64: np.uint64,\n}\n\n_np_ints = {\n 8: np.int8,\n 16: np.int16,\n 32: np.int32,\n 64: np.int64,\n}\n\n_np_floats = {\n 16: np.float16,\n 32: np.float32,\n 64: np.float64,\n}\n\n_np_complex = {\n 64: np.complex64,\n 128: np.complex128,\n}\n\n_np_kinds = {\n 'uint': _np_uints,\n 'int': _np_ints,\n 'float': _np_floats,\n 'complex': _np_complex,\n}\n\n\ndef _normalize_str_by_bit_depth(dtype_str, kind):\n if not any(str.isdigit(c) for c in dtype_str): # Python 'int' or 'float'\n return np.dtype(kind).type\n bit_dict = _np_kinds[kind]\n if '128' in dtype_str:\n return bit_dict[128]\n if '8' in dtype_str:\n return bit_dict[8]\n if '16' in dtype_str:\n return bit_dict[16]\n if '32' in dtype_str:\n return bit_dict[32]\n if '64' in dtype_str:\n return bit_dict[64]\n\n\ndef normalize_dtype(dtype_spec):\n \"\"\"Return a proper NumPy type given ~any duck array dtype.\n\n Parameters\n ----------\n dtype_spec : numpy dtype, numpy type, torch dtype, tensorstore dtype, etc\n A type that can be interpreted as a NumPy numeric data type, e.g.\n 'uint32', np.uint8, torch.float32, etc.\n\n Returns\n -------\n dtype : numpy.dtype\n The corresponding dtype.\n\n Notes\n -----\n half-precision floats are not supported.\n \"\"\"\n dtype_str = str(dtype_spec)\n if 'uint' in dtype_str:\n return _normalize_str_by_bit_depth(dtype_str, 'uint')\n if 'int' in dtype_str:\n return _normalize_str_by_bit_depth(dtype_str, 'int')\n if 'float' in dtype_str:\n return _normalize_str_by_bit_depth(dtype_str, 'float')\n if 'complex' in dtype_str:\n return _normalize_str_by_bit_depth(dtype_str, 'complex')\n if 'bool' in dtype_str:\n return np.bool_\n # If we don't find one of the named dtypes, return the dtype_spec\n # unchanged. This allows NumPy big endian types to work. See\n # https://github.com/napari/napari/issues/3421\n else:\n return dtype_spec\n\n\ndef get_dtype_limits(dtype_spec) -> Tuple[float, float]:\n \"\"\"Return machine limits for numeric types.\n\n Parameters\n ----------\n dtype_spec : numpy dtype, numpy type, torch dtype, tensorstore dtype, etc\n A type that can be interpreted as a NumPy numeric data type, e.g.\n 'uint32', np.uint8, torch.float32, etc.\n\n Returns\n -------\n limits : tuple\n The smallest/largest numbers expressible by the type.\n \"\"\"\n dtype = normalize_dtype(dtype_spec)\n info: Union[np.iinfo, np.finfo]\n if np.issubdtype(dtype, np.integer):\n info = np.iinfo(dtype)\n elif dtype and np.issubdtype(dtype, np.floating):\n info = np.finfo(dtype)\n else:\n raise TypeError(f'Unrecognized or non-numeric dtype: {dtype_spec}')\n return info.min, info.max\n"
] | [
[
"numpy.dtype",
"numpy.iinfo",
"numpy.issubdtype",
"numpy.finfo"
]
] |
romenr/bachelorthesis | [
"1f4325d5f10274597efb81194b6869768cc38659"
] | [
"controller/model.py"
] | [
"#!/usr/bin/env python\n\nimport numpy as np\nfrom parameters import *\nfrom snn import TargetFollowingSNN, ObstacleAvoidanceSNN, nest_simulate\n\n\nclass Model:\n\n\tdef __init__(self):\n\t\tself.snn_tf = TargetFollowingSNN()\n\t\tself.snn_oa = ObstacleAvoidanceSNN()\n\t\tself.turn_pre = 0.0\n\t\tself.angle_pre = 0.0\n\t\tself.weights_tf = []\n\t\tself.weights_oa = []\n\n\tdef reset(self):\n\t\tself.turn_pre = 0.0\n\t\tself.angle_pre = 0.0\n\n\tdef simulate(self, state):\n\t\tself.snn_tf.set_input(state)\n\t\tself.snn_oa.set_input(state)\n\n\t\t# Simulate both networks\n\t\tnest_simulate()\n\n\t\toutput, self.weights_tf = self.snn_tf.get_results()\n\t\toutput_p, self.weights_oa = self.snn_oa.get_results()\n\n\t\tangle = self.get_turning_angle(output)\n\t\tangle_oa = self.get_obstacle_avoidance_angle(output_p)\n\n\t\tif np.any(state[\"prox\"][1:] > 0.25) and not (\n\t\t\t\t\t\tabs(angle) > abs(angle_oa) and np.sign(angle) == np.sign(angle_oa)):\n\t\t\tangle = angle_oa\n\t\treturn angle\n\n\tdef get_turning_angle(self, snn_output):\n\t\t# Snake turning model\n\t\tm_l = snn_output[left_neuron]\n\t\tm_r = snn_output[right_neuron]\n\t\tangle = a_max * (m_l - m_r)\n\t\tc = math.sqrt((m_l**2 + m_r**2)/2.0)\n\t\tself.turn_pre = c * angle + (1 - c) * self.turn_pre\n\t\treturn self.turn_pre\n\n\tdef get_obstacle_avoidance_angle(self, snn_output):\n\t\tm_l = snn_output[left_neuron]\n\t\tm_r = snn_output[right_neuron]\n\t\tangle = a_avoidance_max * (m_l - m_r)\n\t\treturn angle\n\n\tdef get_turning_radius(self, n_l, n_r):\n\t\t# Snake turning model\n\t\tm_l = n_l/n_max\n\t\tm_r = n_r/n_max\n\t\ta = m_l - m_r\n\t\tc = math.sqrt((m_l**2 + m_r**2)/2.0)\n\t\tself.turn_pre = c*0.5*a + (1-c)*self.turn_pre\n\t\tif abs(self.turn_pre) < 0.001:\n\t\t\tradius = 0\n\t\telse:\n\t\t\tradius = r_min/self.turn_pre\n\t\treturn radius\n"
] | [
[
"numpy.any",
"numpy.sign"
]
] |
rraymondhp/dybm | [
"3d618874a2f8838eaeca17ce40649a3789e9f140"
] | [
"src/pydybm/arraymath/dycupy/random.py"
] | [
"\"\"\"``cupy``-based implementation of the random module\n\"\"\"\n\n__author__ = \"Taro Sekiyama\"\n__copyright__ = \"(C) Copyright IBM Corp. 2016\"\n\n\nimport numpy.random as r\nimport cupy as cp\n\n\ndef _to_gpu(a):\n arr = cp.empty_like(a)\n arr.set(a)\n return arr\n\n\nclass RandomState:\n def __init__(self, seed):\n self._random = r.RandomState(seed)\n\n def uniform(self, low=0.0, high=1.0, size=None):\n return _to_gpu(self._random.uniform(low=low, high=high, size=size))\n\n def normal(self, loc=0.0, scale=1.0, size=None):\n return _to_gpu(self._random.normal(loc=loc, scale=scale, size=size))\n\n def get_state(self):\n return self._random.get_state()\n\n def set_state(self, *args):\n return self._random.set_state(*args)\n\n def rand(self, *args):\n return _to_gpu(self._random.rand(*args))\n\n\nseed = r.seed\n\n\ndef normal(loc=0.0, scale=1.0, size=None):\n return _to_gpu(r.normal(loc=loc, scale=scale, size=size))\n\n\ndef uniform(low=0.0, high=1.0, size=None):\n return _to_gpu(r.uniform(low=low, high=high, size=size))\n\n\ndef rand(*args):\n return _to_gpu(r.rand(*args))\n\n\ndef randn(*args):\n return _to_gpu(r.randn(*args))\n\n\ndef random(size=None):\n return _to_gpu(r.random(size=size))\n"
] | [
[
"numpy.random.uniform",
"numpy.random.randn",
"numpy.random.RandomState",
"numpy.random.random",
"numpy.random.rand",
"numpy.random.normal"
]
] |
mosvlad/tumor_mask_rcnn | [
"16d6b20431553e6e1cf1594686a1f503171d5f8d"
] | [
"inference_2.py"
] | [
"import os\nimport cv2\nimport sys\nimport random\nimport math\nimport re\nimport time\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport skimage\nimport glob\n\nROOT_DIR = os.getcwd()\n\nsys.path.append(ROOT_DIR)\nfrom Mask_RCNN.mrcnn import utils\nfrom Mask_RCNN.mrcnn import visualize\nfrom Mask_RCNN.mrcnn.visualize import display_images\nimport Mask_RCNN.mrcnn.model as modellib\nfrom Mask_RCNN.mrcnn.model import log\n\nfrom train import TrainConfig\nfrom train import TumorDataset\n\n\nMODEL_DIR = os.path.join(ROOT_DIR, \"logs\")\nprint(os.getcwd())\ncustom_WEIGHTS_PATH = \"Mask_RCNN/logs/tumor_detect20211207T1827/mask_rcnn_tumor_detect_0100.h5\"\n\n\nclass InferenceConfig(TrainConfig):\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n\n\ndef get_ax(rows=1, cols=1, size=7):\n _, ax = plt.subplots(rows, cols, figsize=(size * cols, size * rows))\n return ax\n\ninference_config = InferenceConfig()\n\nDATASET_DIR = './brain-tumor-segmentation/brain_tumor_data/'\ndataset_val = TumorDataset()\ndataset_val.load_brain_tumor_images(DATASET_DIR, 'val')\ndataset_val.prepare()\n\nwith tf.device(\"/cpu:0\"):\n model = modellib.MaskRCNN(mode=\"inference\", model_dir=MODEL_DIR,\n config=inference_config)\n\nprint(\"Loading weights \", custom_WEIGHTS_PATH)\nmodel.load_weights(custom_WEIGHTS_PATH, by_name=True)\n\nfrom importlib import reload\nreload(visualize)\n\nimage_id = 3\nimage, image_meta, gt_class_id, gt_bbox, gt_mask =\\\n modellib.load_image_gt(dataset_val, inference_config, image_id, use_mini_mask=False)\ninfo = dataset_val.image_info[image_id]\nprint(\"image ID: {}.{} ({}) {}\".format(info[\"source\"], info[\"id\"], image_id,\n dataset_val.image_reference(image_id)))\n\n# Run object detection\nresults = model.detect([image], verbose=1)\nr = results[0]\nprint(r)\n\nvisualize.display_differences(\n image,\n gt_bbox, gt_class_id, gt_mask,\n r['rois'], r['class_ids'], r['scores'], r['masks'],\n class_names=['tumor'], title=\"\", ax=get_ax(),\n show_mask=True, show_box=True)\nplt.show()"
] | [
[
"tensorflow.device",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
]
] |
huxian123/mask_detecting | [
"a9564d595edaff9317378fbe682cad4400760bff"
] | [
"yolo.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nClass definition of YOLO_v3 style detection model on image and video\n\"\"\"\n\nimport colorsys\nimport os\nfrom timeit import default_timer as timer\n\nimport numpy as np\nfrom keras import backend as K\nfrom keras.models import load_model\nfrom keras.layers import Input\nfrom PIL import Image, ImageFont, ImageDraw\n\nfrom yolo3.model import yolo_eval, yolo_body, tiny_yolo_body\nfrom yolo3.utils import letterbox_image\nimport os\nfrom keras.utils import multi_gpu_model\n\nclass YOLO(object):\n _defaults = {\n \"model_path\": 'model_data/logs/trained_weights_final.h5',\n \"anchors_path\": 'model_data/yolo_anchors.txt',\n \"classes_path\": 'model_data/my_class.txt',\n \"score\" : 0.3,\n \"iou\" : 0.45,\n \"model_image_size\" : (416, 416),\n \"gpu_num\" : 1,\n }\n\n @classmethod\n def get_defaults(cls, n):\n if n in cls._defaults:\n return cls._defaults[n]\n else:\n return \"Unrecognized attribute name '\" + n + \"'\"\n\n def __init__(self, **kwargs):\n self.__dict__.update(self._defaults) # set up default values\n self.__dict__.update(kwargs) # and update with user overrides\n self.class_names = self._get_class()\n self.anchors = self._get_anchors()\n self.sess = K.get_session()\n self.boxes, self.scores, self.classes = self.generate()\n\n def _get_class(self):\n classes_path = os.path.expanduser(self.classes_path)\n with open(classes_path) as f:\n class_names = f.readlines()\n class_names = [c.strip() for c in class_names]\n return class_names\n\n def _get_anchors(self):\n anchors_path = os.path.expanduser(self.anchors_path)\n with open(anchors_path) as f:\n anchors = f.readline()\n anchors = [float(x) for x in anchors.split(',')]\n return np.array(anchors).reshape(-1, 2)\n\n def generate(self):\n model_path = os.path.expanduser(self.model_path)\n assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'\n\n # Load model, or construct model and load weights.\n num_anchors = len(self.anchors)\n num_classes = len(self.class_names)\n is_tiny_version = num_anchors==6 # default setting\n try:\n self.yolo_model = load_model(model_path, compile=False)\n except:\n self.yolo_model = tiny_yolo_body(Input(shape=(None,None,3)), num_anchors//2, num_classes) \\\n if is_tiny_version else yolo_body(Input(shape=(None,None,3)), num_anchors//3, num_classes)\n self.yolo_model.load_weights(self.model_path) # make sure model, anchors and classes match\n else:\n assert self.yolo_model.layers[-1].output_shape[-1] == \\\n num_anchors/len(self.yolo_model.output) * (num_classes + 5), \\\n 'Mismatch between model and given anchor and class sizes'\n\n print('{} model, anchors, and classes loaded.'.format(model_path))\n\n # Generate colors for drawing bounding boxes.\n hsv_tuples = [(x / len(self.class_names), 1., 1.)\n for x in range(len(self.class_names))]\n self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))\n self.colors = list(\n map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),\n self.colors))\n np.random.seed(10101) # Fixed seed for consistent colors across runs.\n np.random.shuffle(self.colors) # Shuffle colors to decorrelate adjacent classes.\n np.random.seed(None) # Reset seed to default.\n\n # Generate output tensor targets for filtered bounding boxes.\n self.input_image_shape = K.placeholder(shape=(2, ))\n if self.gpu_num>=2:\n self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)\n boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,\n len(self.class_names), 
self.input_image_shape,\n score_threshold=self.score, iou_threshold=self.iou)\n return boxes, scores, classes\n\n def detect_image(self, image):\n start = timer()\n\n if self.model_image_size != (None, None):\n assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'\n assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'\n boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))\n else:\n new_image_size = (image.width - (image.width % 32),\n image.height - (image.height % 32))\n boxed_image = letterbox_image(image, new_image_size)\n image_data = np.array(boxed_image, dtype='float32')\n\n print(image_data.shape)\n image_data /= 255.\n image_data = np.expand_dims(image_data, 0) # Add batch dimension.\n\n out_boxes, out_scores, out_classes = self.sess.run(\n [self.boxes, self.scores, self.classes],\n feed_dict={\n self.yolo_model.input: image_data,\n self.input_image_shape: [image.size[1], image.size[0]],\n K.learning_phase(): 0\n })\n\n print('Found {} boxes for {}'.format(len(out_boxes), 'img'))\n print(out_classes)\n print(self.class_names)\n\n font = ImageFont.truetype(font='font/simsun.ttc',\n size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))\n\n thickness = (image.size[0] + image.size[1]) // 300\n\n for i, c in reversed(list(enumerate(out_classes))):\n predicted_class = self.class_names[c]\n box = out_boxes[i]\n score = out_scores[i]\n\n label = '{} {:.2f}'.format(predicted_class, score)\n draw = ImageDraw.Draw(image)\n label_size = draw.textsize(label, font)\n\n top, left, bottom, right = box\n top = max(0, np.floor(top + 0.5).astype('int32'))\n left = max(0, np.floor(left + 0.5).astype('int32'))\n bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))\n right = min(image.size[0], np.floor(right + 0.5).astype('int32'))\n print(label, (left, top), (right, bottom))\n\n if top - label_size[1] >= 0:\n text_origin = np.array([left, top - label_size[1]])\n else:\n text_origin = np.array([left, top + 1])\n\n # My kingdom for a good redistributable image drawing library.\n for i in range(thickness):\n draw.rectangle(\n [left + i, top + i, right - i, bottom - i],\n outline=self.colors[c])\n draw.rectangle(\n [tuple(text_origin), tuple(text_origin + label_size)],\n fill=self.colors[c])\n draw.text(text_origin, label, fill=(0, 0, 0), font=font)\n del draw\n\n end = timer()\n print(end - start)\n return image\n\n def close_session(self):\n self.sess.close()\n\ndef detect_video(yolo, video_path, output_path=\"\"):\n import cv2\n vid = cv2.VideoCapture(video_path)\n if not vid.isOpened():\n raise IOError(\"Couldn't open webcam or video\")\n video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))\n video_fps = vid.get(cv2.CAP_PROP_FPS)\n video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),\n int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))\n isOutput = True if output_path != \"\" else False\n if isOutput:\n print(\"!!! 
TYPE:\", type(output_path), type(video_FourCC), type(video_fps), type(video_size))\n out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)\n accum_time = 0\n curr_fps = 0\n fps = \"FPS: ??\"\n prev_time = timer()\n while True:\n return_value, frame = vid.read()\n image = Image.fromarray(frame)\n image = yolo.detect_image(image)\n result = np.asarray(image)\n curr_time = timer()\n exec_time = curr_time - prev_time\n prev_time = curr_time\n accum_time = accum_time + exec_time\n curr_fps = curr_fps + 1\n if accum_time > 1:\n accum_time = accum_time - 1\n fps = \"FPS: \" + str(curr_fps)\n curr_fps = 0\n cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=0.50, color=(255, 0, 0), thickness=2)\n cv2.namedWindow(\"result\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"result\", result)\n if isOutput:\n out.write(result)\n if cv2.waitKey(50) & 0xFF == ord('q'):\n break\n yolo.close_session()\n\nif __name__ == '__main__':\n yolo = YOLO()\n data_path = 'test_data/b025.jpg'\n video_path = 'test_data/Aha.mp4'\n detect_video(yolo, video_path)\n try:\n image = Image.open(data_path)\n except:\n print(\"Open error! Try again\")\n else:\n r_image = yolo.detect_image(image)\n r_image.show()\n yolo.close_session()\n"
] | [
[
"numpy.random.shuffle",
"numpy.random.seed",
"numpy.asarray",
"numpy.floor",
"numpy.expand_dims",
"numpy.array"
]
] |
ParhamYZ/MusicSourceSeparation | [
"26a42fbebdf50d2ae2ef674ef64f4c88cbe7e8e3"
] | [
"tests/test_transforms.py"
] | [
"import pytest\r\nimport numpy as np\r\nimport torch\r\nfrom openunmix import transforms\r\n\r\n\r\[email protected](params=[4096, 44100])\r\ndef nb_timesteps(request):\r\n return int(request.param)\r\n\r\n\r\[email protected](params=[1, 2])\r\ndef nb_channels(request):\r\n return request.param\r\n\r\n\r\[email protected](params=[1, 2])\r\ndef nb_samples(request):\r\n return request.param\r\n\r\n\r\[email protected](params=[1024, 2048, 4096])\r\ndef nfft(request):\r\n return int(request.param)\r\n\r\n\r\[email protected](params=[2, 4])\r\ndef hop(request, nfft):\r\n return nfft // request.param\r\n\r\n\r\[email protected](params=[\"torch\", \"asteroid\"])\r\ndef method(request):\r\n return request.param\r\n\r\n\r\[email protected]\r\ndef audio(request, nb_samples, nb_channels, nb_timesteps):\r\n return torch.rand((nb_samples, nb_channels, nb_timesteps))\r\n\r\n\r\ndef test_stft(audio, nfft, hop, method):\r\n # we should only test for center=True as\r\n # False doesn't pass COLA\r\n # https://github.com/pytorch/audio/issues/500\r\n stft, istft = transforms.make_filterbanks(n_fft=nfft, n_hop=hop, center=True, method=method)\r\n\r\n X = stft(audio)\r\n X = X.detach()\r\n out = istft(X, length=audio.shape[-1])\r\n assert np.sqrt(np.mean((audio.detach().numpy() - out.detach().numpy()) ** 2)) < 1e-6\r\n"
] | [
[
"torch.rand"
]
] |
CsabaWirnhardt/cbm | [
"1822addd72881057af34ac6a7c2a1f02ea511225"
] | [
"scripts/extraction/postgisC6Extract.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# This file is part of CbM (https://github.com/ec-jrc/cbm).\n# Author : Guido Lemoine\n# Credits : GTCAP Team\n# Copyright : 2021 European Commission, Joint Research Centre\n# License : 3-Clause BSD\n# Version : \n\nimport time\nimport sys\nimport os\nimport io\nimport json\n\nimport psycopg2\nimport psycopg2.extras\nimport rasterio\nimport pandas as pd\nfrom rasterstats import zonal_stats\nfrom datetime import datetime\n\nimport download_with_boto3 as dwb\n\nstart = time.time()\n\n# Rev 1.1. configuration parsing from json\nwith open('s3_config.json', 'r') as f:\n s3config = json.load(f)\ns3config = s3config['s3']\n\nwith open('db_config_c6.json', 'r') as f:\n dbconfig = json.load(f)\ndbconfig = dbconfig['database']\n\n# Input data base is postgis\nconnString = \"host={} dbname={} user={} port={} password={}\".format(\n dbconfig['connection']['host'], dbconfig['connection']['dbname'],\n dbconfig['connection']['dbuser'], dbconfig['connection']['port'],\n dbconfig['connection']['dbpasswd'])\n\ninconn = psycopg2.connect(connString)\nif not inconn:\n print(\"No in connection established\")\n sys.exit(1)\n\nincurs = inconn.cursor()\n\nsrid = -1\n\nsridSql = \"select srid from geometry_columns where f_table_name = '{}';\"\n\ntry:\n incurs.execute(sridSql.format(dbconfig['tables']['parcel_table']))\n result = incurs.fetchone()\n if not result:\n print(\"{} does not exist or is not a spatial table\")\n else:\n srid = result[0]\nexcept (Exception, psycopg2.DatabaseError) as error:\n print(error)\n inconn.close()\n sys.exit(1)\n\nprint(\"Parcel srid = \", srid)\n\n# Get the first image records that is not yet processed\nimagesql = \"\"\"\n SELECT id, reference, obstime from dias_catalogue, {}\n WHERE footprint && wkb_geometry and {} = '{}'\n And obstime between '{}' and '{}'\n And status ='ingested'\n And card='c6' order by obstime asc limit 1\"\"\"\n\nupdateSql = \"\"\"update dias_catalogue set status='{}' where id = {} and status = '{}'\"\"\"\n\ntry:\n incurs.execute(imagesql.format(\n dbconfig['tables']['aoi_table'], dbconfig['args']['aoi_field'],\n dbconfig['args']['name'], dbconfig['args']['startdate'],\n dbconfig['args']['enddate']))\n result = incurs.fetchone()\n if not result:\n print(\"No images with status 'ingested' found\")\n inconn.close()\n sys.exit(1)\n else:\n oid = result[0]\n reference = result[1]\n obstime = result[2]\n # Fails if this record is changed in the meantime\n incurs.execute(updateSql.format('inprogress', oid, 'ingested'))\n inconn.commit()\nexcept (Exception, psycopg2.DatabaseError) as error:\n print(error)\n inconn.close()\n sys.exit(1)\n\n# Count parcels inside this image footprint\nparcelcountsql = \"\"\"\n SELECT count(es.ogc_fid)\n FROM {} es, dias_catalogue dias, {} aoi\n WHERE es.wkb_geometry && st_transform(dias.footprint, {})\n And es.wkb_geometry && st_transform(st_buffer(aoi.wkb_geometry::geography, 1000)::geometry, {})\n And st_area(es.wkb_geometry) > 3000.0\n And aoi.{} = '{}' And dias.id = {}\n -- and es.ogc_fid not in (select distinct pid from {} where obsid = {})\n \"\"\"\n\nincurs.execute(parcelcountsql.format(\n dbconfig['tables']['parcel_table'],\n dbconfig['tables']['aoi_table'], srid, srid,\n dbconfig['args']['aoi_field'], dbconfig['args']['name'],\n oid, dbconfig['tables']['results_table'], oid))\n\nnrecs = incurs.fetchone()\n\n# If no parcels inside, we can stop\nif nrecs[0] == 0:\n print(\"No parcels inside image bounds\")\n incurs.execute(updateSql.format('No_parcels', oid, 'inprogress'))\n 
inconn.commit()\n incurs.close()\n inconn.close()\n sys.exit(1)\n\n# Copy input data from S3 to local disk\n# CREODIAS\ns3path = \"Sentinel-1/SAR/CARD-COH6/{}/{}/{}.tif\".format(\n datetime.strftime(obstime, '%Y/%m/%d'), reference, reference)\n\n# SOBLOO\n# s3path = \"{}/SLC/{}/\".format(reference.split('_')[0], reference)\n\nflist = dwb.listFileFromS3(s3path)\n\nif not flist:\n print(\"Resource {} not available in S3 storage (FATAL)\".format(s3path))\n incurs.execute(updateSql.format('C6_nopath', oid, 'inprogress'))\n inconn.commit()\n incurs.close()\n inconn.close()\n sys.exit(1)\n\ns3path = flist[0]\n\nfpath = 'data/{}'.format(s3path.split('/')[-1])\n\noutsrid = -1\n\nif dwb.getFileFromS3(s3path, fpath) == 0:\n print(\"Resource {} not available in S3 storage (FATAL)\".format(s3path))\n incurs.execute(updateSql.format('No S3 C6 img', oid, 'inprogress'))\n inconn.commit()\n incurs.close()\n inconn.close()\n sys.exit(1)\nelse:\n # Only if the header file is present can we open the image to\n # check its projection\n with rasterio.open(fpath) as src:\n outsrid = src.crs.to_epsg()\n\nprint('Out SRID: ', outsrid)\n\n# Open a connection to save results\noutconn = psycopg2.connect(connString)\nif not outconn:\n print(\"No out connection established\")\n incurs.execute(updateSql.format('no_out_conn', oid, 'inprogress'))\n inconn.commit()\n incurs.close()\n inconn.close()\n sys.exit(1)\n\n# Get the parcel polygon in this image' footprint\n\nincurs.close()\n# Open a named cursor\nincurs = inconn.cursor(name='fetch_image_coverage',\n cursor_factory=psycopg2.extras.DictCursor)\n\nparcelsql = \"\"\"\n SELECT es.ogc_fid, ST_AsGeoJSON(st_transform(es.wkb_geometry, {}))::json\n FROM {} es, dias_catalogue dias, {} aoi\n WHERE es.wkb_geometry && st_transform(dias.footprint, {})\n And es.wkb_geometry && st_transform(st_buffer(aoi.wkb_geometry::geography,\n 1000)::geometry, {})\n And st_area(es.wkb_geometry) > 3000.0\n And aoi.{} = '{}'\n And dias.id = {}\n -- and es.ogc_fid not in (select distinct pid from {} where obsid = {})\n \"\"\"\n\nincurs.execute(parcelsql.format(\n outsrid, dbconfig['tables']['parcel_table'],\n dbconfig['tables']['aoi_table'], srid, srid,\n dbconfig['args']['aoi_field'], dbconfig['args']['name'],\n oid, dbconfig['tables']['results_table'], oid))\n\nsqlload = time.time() - start\nprint(\"Images loaded and {} features selected from database in {} seconds\".format(\n nrecs[0], sqlload))\n\nnrows = {}\nnrows['VV'] = 0\nnrows['VH'] = 0\n\naffine = {}\narray = {}\n\nbands = ['VV', 'VH']\n\nwith rasterio.open(fpath) as src:\n for b in bands:\n affine[b] = src.transform\n array[b] = src.read(bands.index(b) + 1)\n\n\nwhile True: # nrows['VV'] < 2:\n rowset = incurs.fetchmany(size=2000)\n\n if not rowset:\n break\n\n features = {\"type\": \"FeatureCollection\",\n \"features\": [{\"type\": \"feature\", \"geometry\": f[1],\n \"properties\": {\"pid\": int(f[0])}} for f in rowset]}\n\n for b in bands:\n\n zs = zonal_stats(features, array[b], affine=affine[b], stats=[\n \"count\", \"mean\", \"std\", \"min\", \"max\",\n \"percentile_25\", \"percentile_50\", \"percentile_75\"], prefix=\"\",\n nodata=0, geojson_out=True)\n\n df = pd.DataFrame(zs)\n\n df = pd.DataFrame.from_dict(df.properties.to_dict(), orient='index')\n\n df['obsid'] = oid\n df['band'] = b\n\n df.rename(index=str, columns={\n \"percentile_25\": \"p25\", \"percentile_50\": \"p50\",\n \"percentile_75\": \"p75\"}, inplace=True)\n\n nrows[b] = nrows[b] + len(df)\n # df is the dataframe\n if len(df) > 0:\n df.dropna(inplace=True)\n if 
len(df.values) > 0:\n df_columns = list(df)\n s_buf = io.StringIO()\n df.to_csv(s_buf, header=False, index=False, sep=',')\n s_buf.seek(0)\n outcurs = outconn.cursor()\n # print(tuple(df_columns))\n try:\n #psycopg2.extras.execute_batch(outcurs, insert_stmt, df.values)\n outcurs.copy_from(\n s_buf, dbconfig['tables']['results_table'],\n columns=tuple(df_columns), sep=',')\n outconn.commit()\n except psycopg2.IntegrityError as e:\n print(\"insert statement {} contains duplicate index\".format(\n insert_stmt))\n # except Error as e:\n # print(e)\n finally:\n outcurs.close()\n else:\n print(\"No valid data in block {}\".format(nrows[b]))\n\noutconn.close()\n\nincurs.close()\n\nincurs = inconn.cursor()\n\ntry:\n incurs.execute(updateSql.format('extracted', oid, 'inprogress'))\n inconn.commit()\nexcept (Exception, psycopg2.DatabaseError) as error:\n print(error)\n inconn.close()\n if outconn:\n outconn.close()\n\nincurs.close()\ninconn.close()\n\nif os.path.exists(fpath):\n os.remove(fpath)\n\nprint(\"Total time required for {} features and {} bands: {} seconds\".format(\n nrows['VV'], len(bands), time.time() - start))\n"
] | [
[
"pandas.DataFrame"
]
] |
sabuj7177/CovidProject | [
"b4b7bcfa5ace165520507f489dc74da7b695e2f0"
] | [
"coronet/main2_worker_gradient_quantization.py"
] | [
"import time\n\nfrom numpy.random import seed\nseed(8) #1\n\nimport tensorflow\ntensorflow.random.set_seed(7)\n# tensorflow.random.set_random_seed(7)\n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\nimport os\n\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.models import Model ,load_model\nfrom tensorflow.keras.layers import Flatten, Dense, Dropout\nfrom tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2, preprocess_input\nfrom keras.applications.vgg16 import preprocess_input\nfrom keras.applications.vgg16 import decode_predictions\nfrom keras.applications.vgg16 import VGG16\nfrom tensorflow.keras.optimizers import Adam, RMSprop\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nimport numpy as np\nimport tensorflow as tf\nimport pickle\nimport socket\n\nfrom tensorflow.python.keras import models\nfrom tensorflow.python.keras import layers\n\nfrom tensorflow.keras import optimizers\n\nfrom os import listdir\nimport util\n\nBASE_PATH = 'coronet_org_data/four_classes'\n# data_list = listdir('/content/covid-19/four_classes/train')\ndata_list = listdir(BASE_PATH + '/train2')\n\n# Delete some classes that may interfere\n\nprint(len(data_list))\n\nstart_time = time.time()\n\nDATASET_PATH = BASE_PATH + '/train2'\nVALIDATION_PATH = BASE_PATH + '/val'\ntest_dir = BASE_PATH + '/test'\nIMAGE_SIZE = (150, 150)\nNUM_CLASSES = len(data_list)\nBATCH_SIZE = 10 # try reducing batch size or freeze more layers if your GPU runs out of memory\nNUM_EPOCHS = 10\nLEARNING_RATE = 0.0001\nTCP_IP = '127.0.0.1'\nport = 17001\n\n\ndef safe_recv(size, server_socket):\n data = bytearray()\n while 1:\n try:\n temp = server_socket.recv(size - len(data))\n data.extend(temp)\n recv_size = len(data)\n if recv_size >= size:\n break\n except:\n print(\"Error\")\n data = bytes(data)\n return data\n\n\n# Train datagen here is a preprocessor\ntrain_datagen = ImageDataGenerator(rescale=1. 
/ 255,\n rotation_range=50,\n featurewise_center=True,\n featurewise_std_normalization=True,\n width_shift_range=0.2,\n height_shift_range=0.2,\n shear_range=0.25,\n zoom_range=0.1,\n zca_whitening=True,\n channel_shift_range=20,\n horizontal_flip=True,\n vertical_flip=True,\n validation_split=0.2,\n fill_mode='constant')\n\n# For multiclass use categorical n for binary use binary\ntrain_batches = train_datagen.flow_from_directory(DATASET_PATH,\n target_size=IMAGE_SIZE,\n shuffle=True,\n batch_size=BATCH_SIZE,\n seed=42,\n class_mode=\"categorical\"\n # For multiclass use categorical n for binary use binary\n )\n\nvalid_batches = train_datagen.flow_from_directory(VALIDATION_PATH,\n target_size=IMAGE_SIZE,\n shuffle=True,\n batch_size=BATCH_SIZE,\n seed=42,\n class_mode=\"categorical\"\n # For multiclass use categorical n for binary use binary\n\n )\n\nfrom tensorflow.keras.applications import Xception\n\nconv_base = Xception(weights='imagenet',\n include_top=False,\n input_shape=(150, 150, 3))\n\nconv_base.trainable = True\n\nmodel = models.Sequential()\nmodel.add(conv_base)\n\nmodel.add(layers.Flatten())\nmodel.add(layers.Dropout(0.5))\nmodel.add(layers.Dense(256, activation='relu'))\nmodel.add(layers.Dense(4, activation='softmax'))\n\n# model.compile(loss='categorical_crossentropy', # for multiclass use categorical_crossentropy\n#\n# optimizer=optimizers.Adam(lr=LEARNING_RATE),\n# metrics=['acc'])\n\nprint(\"Batch len\")\nprint(len(train_batches))\nprint(len(valid_batches))\n\naccuracy = tf.keras.metrics.CategoricalAccuracy()\naccuracy_val = tf.keras.metrics.CategoricalAccuracy()\nloss_fn = tf.keras.losses.CategoricalCrossentropy()\noptimizer = tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE)\n\n# STEP_SIZE_TRAIN=train_batches.n//train_batches.batch_size\n# STEP_SIZE_VALID=valid_batches.n//valid_batches.batch_size\nSTEP_SIZE_TRAIN = len(train_batches) - 1\nSTEP_SIZE_VALID = len(valid_batches) - 1\nprint(\"Step size len\")\nprint(STEP_SIZE_TRAIN)\nprint(STEP_SIZE_VALID)\n\n# result=model.fit_generator(train_batches,\n# steps_per_epoch =STEP_SIZE_TRAIN,\n# validation_data = valid_batches,\n# validation_steps = STEP_SIZE_VALID,\n# epochs= NUM_EPOCHS,\n# )\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect((TCP_IP, port))\n\ntotal_gradient_size = 0\ntotal_weight_size = 0\ntotal_training_computation = 0\ntotal_training_communication = 0\n\nfor epoch in range(NUM_EPOCHS):\n print(\"###############################################\")\n # Iterate over the batches of a dataset.\n for step, (x, y) in enumerate(train_batches):\n train_comp = time.time()\n with tf.GradientTape() as tape:\n logits = model(x)\n # Compute the loss value for this batch.\n loss_value = loss_fn(y, logits)\n\n # Update the state of the `accuracy` metric.\n accuracy.update_state(y, logits)\n\n # Update the weights of the model to minimize the loss value.\n gradients = tape.gradient(loss_value, model.trainable_weights)\n mean_scaler, quantized_gradient, grad_shape = util.get_ternarized_gradients(gradients)\n # print(grad_shape)\n\n # only_grads_val = pickle.dumps(gradients, pickle.HIGHEST_PROTOCOL)\n # gradients_size = len(only_grads_val)\n # total_gradient_size += gradients_size\n # gradients_size = pickle.dumps(gradients_size, pickle.HIGHEST_PROTOCOL)\n # total_training_computation += time.time() - train_comp\n # train_comm = time.time()\n # # print(\"Size of gradients size; {}\", len(gradients_size))\n # s.sendall(gradients_size)\n # s.sendall(only_grads_val)\n\n mean_scaler = 
pickle.dumps(mean_scaler, pickle.HIGHEST_PROTOCOL)\n mean_scaler_size = len(mean_scaler)\n total_gradient_size += mean_scaler_size\n mean_scaler_size = pickle.dumps(mean_scaler_size, pickle.HIGHEST_PROTOCOL)\n\n quantized_gradient = pickle.dumps(quantized_gradient, pickle.HIGHEST_PROTOCOL)\n quantized_gradient_size = len(quantized_gradient)\n total_gradient_size += quantized_gradient_size\n quantized_gradient_size = pickle.dumps(quantized_gradient_size, pickle.HIGHEST_PROTOCOL)\n total_training_computation += time.time() - train_comp\n train_comm = time.time()\n\n s.sendall(quantized_gradient_size)\n s.sendall(quantized_gradient)\n\n s.sendall(mean_scaler_size)\n s.sendall(mean_scaler)\n\n\n recv_size = safe_recv(17, s)\n recv_size = pickle.loads(recv_size)\n total_weight_size += recv_size\n recv_data = safe_recv(recv_size, s)\n total_training_communication += time.time() - train_comm\n train_comp = time.time()\n weight_list = pickle.loads(recv_data)\n\n\n\n # print(gradients)\n # optimizer.apply_gradients(zip(gradients, model.trainable_weights))\n \n # weight_list = []\n # for w in model.trainable_weights:\n # weight_list.append(w.numpy())\n i =0\n for w in model.trainable_weights:\n w.assign(weight_list[i])\n i += 1\n\n # Logging the current accuracy value so far.\n if step % 20 == 0:\n print(\"Epoch:\", epoch, \"Step:\", step, \"Loss value:\", loss_value.numpy())\n print(\"Total running accuracy so far: %.3f\" % accuracy.result())\n total_training_computation += time.time() - train_comp\n if step >= STEP_SIZE_TRAIN:\n break\n\n # Reset the metric's state at the end of an epoch\n accuracy.reset_states()\n\n\n total_val_loss = 0\n for step, (x, y) in enumerate(valid_batches):\n with tf.GradientTape() as tape:\n logits = model(x)\n # Compute the loss value for this batch.\n loss_value = loss_fn(y, logits)\n\n # Update the state of the `accuracy` metric.\n accuracy_val.update_state(y, logits)\n total_val_loss += loss_value.numpy()\n if step >= STEP_SIZE_VALID:\n break\n\n # Logging the current accuracy value so far.\n print(\"Validation Loss value:\", total_val_loss/STEP_SIZE_VALID)\n print(\"Total validation accuracy so far: %.3f\" % (accuracy_val.result()))\n # Reset the metric's state at the end of an epoch\n accuracy_val.reset_states()\n\nelapsed_time = time.time()-start_time\nprint('Total time in {:.0f}m {:.0f}s'.format(\n elapsed_time // 60, elapsed_time % 60))\nprint('Training computation time in {:.0f}m {:.0f}s'.format(\n total_training_computation // 60, total_training_computation % 60))\nprint('Training communication time in {:.0f}m {:.0f}s'.format(\n total_training_communication // 60, total_training_communication % 60))\nprint(\"Total gradient size: \"+str(total_gradient_size))\nprint(\"Total weight size: \"+str(total_weight_size))\n\nimport matplotlib.pyplot as plt\n\n\ndef plot_acc_loss(result, epochs):\n acc = result.history['acc']\n loss = result.history['loss']\n val_acc = result.history['val_acc']\n val_loss = result.history['val_loss']\n plt.figure(figsize=(15, 5))\n plt.subplot(121)\n plt.plot(range(1, epochs), acc[1:], label='Train_acc')\n plt.plot(range(1, epochs), val_acc[1:], label='Val_acc')\n plt.title('Accuracy over ' + str(epochs) + ' Epochs', size=15)\n plt.legend()\n plt.grid(True)\n plt.subplot(122)\n plt.plot(range(1, epochs), loss[1:], label='Train_loss')\n plt.plot(range(1, epochs), val_loss[1:], label='Val_loss')\n plt.title('Loss over ' + str(epochs) + ' Epochs', size=15)\n plt.legend()\n plt.grid(True)\n plt.show()\n\n\n# plot_acc_loss(result, 80)\n\n# 
%%\n\n# Save the trained model and copy to drive\n\nmodel.save('4-class-Covid19-Mod-Xception.h5')\n# !cp /content/\"4-class-Covid19-Mod-Xception.h5\" /content/drive/\"My Drive\"/\"Colab Notebooks\"\n\n\n# %% md\n\n# ** Evaluate\n# using\n# evaluate\n# Generator **\n\n# %%\n\n# Create evaluate data generator from test set\n# Dont forget shuffle false\n\ntest_datagen = ImageDataGenerator(rescale=1. / 255)\n# test_dir = '/content/COVID-19 Radiography Database'\neval_generator = test_datagen.flow_from_directory(test_dir, target_size=IMAGE_SIZE, batch_size=1,\n shuffle=False, seed=42, class_mode=\"categorical\")\neval_generator.reset()\n\n# %%\n\n# Evalute the trained model on evaluate generator\neval_generator.reset()\nx = model.evaluate_generator(eval_generator,\n steps=np.ceil(len(eval_generator)),\n use_multiprocessing=False,\n verbose=1,\n workers=1,\n )\n\nprint('Test loss:', x[0])\nprint('Test accuracy:', x[1])\n\n# Poor test accuracy due to the small dataset size\n\n# %% md\n\n# ** Create\n# DataGen\n# on\n# single\n# folder /\n#\n#\n# class and predict ! **\n\n# %%\n\n\n# IMAGE_SIZE = (150, 150)\n# test_datagen = ImageDataGenerator(rescale=1. / 255)\n# test_dir = 'data/COVID-19 Radiography Database'\n# pred_generator = test_datagen.flow_from_directory(\n# test_dir, target_size=IMAGE_SIZE,\n# batch_size=1,\n# shuffle=False,\n#\n# seed=42,\n#\n# class_mode=\"categorical\")\n# pred_generator.reset()\n#\n# count = [0, 0, 0, 0]\n#\n# files = pred_generator.filenames\n#\n# for i in range(len(files)):\n# x, y = pred_generator.next()\n# img = x\n# predict = model.predict(img)\n#\n# p = np.argmax(predict, axis=-1)\n# print(str(p[0]) + \" \" + files[pred_generator.batch_index - 1])\n# # print(predict)\n# # p=model.predict_classes(img)\n# count[p[0]] += 1\n#\n# # print(str(p[0])+\" \"+files[i])\n# print(count)\n#\n# # %% md\n#\n# ### **`Predict Results using predict generator and evaluate the accuracy and Confusion matrix `**\n#\n# # %%\n#\n# from sklearn.metrics import confusion_matrix\n# from sklearn.metrics import plot_confusion_matrix\n# from sklearn.metrics import classification_report\n#\n# filenames = eval_generator.filenames\n# nb_samples = len(filenames)\n# eval_generator.reset()\n# predict = model.predict_generator(eval_generator, steps=np.ceil(len(eval_generator)))\n# pp = predict\n# predict = np.argmax(predict, axis=-1)\n# classes = eval_generator.classes[eval_generator.index_array]\n# acc = sum(predict == classes) / len(predict)\n# names = [\"covid\", \"normal\", \"pneumonia_bac\", \"pneumonia_vir\"]\n# # print(confusion_matrix(classes,predict))\n#\n# font = {\n# 'family': 'Times New Roman',\n# 'size': 12\n# }\n# plt.rc('font', **font)\n# cm = confusion_matrix(classes, predict)\n# print(cm)\n# print(classification_report(classes, predict))\n# plt.imshow(cm, cmap=plt.cm.Blues)\n# plt.xlabel('Predicted labels \\nAccuracy: {:0.2f}'.format(acc * 100))\n# plt.ylabel(\"True labels\")\n# plt.xticks(classes, [])\n# plt.yticks(classes, [])\n# plt.title('Confusion matrix ')\n# plt.colorbar()\n# plt.show()\n\n# %% md\n\n# ** Test\n# Single\n# image **\n\n# %%\n\n# import cv2\n# from skimage import transform\n#\n# img_r = cv2.imread('/content/test/x.jpg')\n#\n# img1 = np.array(img_r).astype('float32') / 255\n# img2 = transform.resize(img1, (150, 150, 3))\n#\n# img = np.expand_dims(img2, axis=0)\n#\n# r = model.predict(img)\n#\n# names = dict((v, k) for k, v in labels.items())\n# index = np.argmax(r)\n# name = names.get(index, \"Unknown\")\n#\n# p = round(r.max() * 100, 3) # to find maximum 
score\n#\n# scores = r\n# print(scores)\n#\n# font = {\n# 'family': 'Times New Roman',\n# 'size': 9,\n#\n# }\n# plt.rc('font', **font)\n#\n# # plt.title(name +\" (\"+ str(p)+\")\")\n# plt.title(names[0] + \" \" + str(round(scores[0][0] * 100, 1)) + \"%\" + \"\\n\" + names[1] + \" \" + str(\n# round(scores[0][1] * 100, 1)) + \"%\" + \"\\n\" + names[2] + \" \" + str(round(scores[0][2] * 100, 1)) + \"%\" + \"\\n\" +\n# names[3] + \" \" + str(round(scores[0][3] * 100, 1)) + \"%\")\n#\n# plt.imshow(img2)\n#\n# # %% md\n#\n# # ** Test\n# # Whole\n# # Folder **\n#\n# # %%\n#\n# import cv2\n# from skimage import transform\n#\n# count = [0, 0, 0, 0]\n# folder_name = \"/content/drive/My Drive/Datasets/covid-19/covidnew/covid\"\n# files = os.listdir(folder_name)\n# for i in range(len(files)):\n# img_r = cv2.imread(folder_name + \"/\" + files[i])\n#\n# img = np.array(img_r).astype('float32') / 255\n#\n# img = transform.resize(img, (150, 150, 3))\n# img = np.expand_dims(img, axis=0)\n#\n# predict = model.predict(img)\n# p = np.argmax(predict, axis=-1)\n# # p=model.predict_classes(img)\n# count[p[0]] += 1\n# print(str(p[0]) + \" \" + files[i])\n#\n# print()\n#\n# print(count)"
] | [
[
"tensorflow.keras.optimizers.Adam",
"tensorflow.python.keras.layers.Dense",
"matplotlib.pyplot.legend",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"tensorflow.python.keras.models.Sequential",
"matplotlib.pyplot.figure",
"tensorflow.keras.metrics.CategoricalAccuracy",
"matplotlib.pyplot.grid",
"numpy.random.seed",
"tensorflow.keras.losses.CategoricalCrossentropy",
"matplotlib.pyplot.subplot",
"tensorflow.python.keras.layers.Flatten",
"matplotlib.pyplot.show",
"tensorflow.GradientTape",
"tensorflow.python.keras.layers.Dropout",
"tensorflow.random.set_seed",
"tensorflow.keras.applications.Xception"
]
] |
mumumu99/latent-pose-reenactment | [
"bfe8175f9cf3d67d46c21194bb5b6f898ef3ea53"
] | [
"embedders/FAb-Net/Datasets/generate_large_voxceleb.py"
] | [
"\nimport os\n\nids = [d for d in os.listdir(VOX_CELEB_LOCATION) if d[0:2] == 'id']\n\n\ntrain = ids[0:int(0.7*len(ids))]\nval = ids[int(0.7*len(ids)):int(0.8*len(ids))]\ntest = ids[int(0.8*len(ids)):]\n\nimport numpy as np\nnp.save('./large_voxceleb/train.npy', np.array(train))\nnp.save('./large_voxceleb/test.npy', np.array(test))\nnp.save('./large_voxceleb/val.npy', np.array(val))\n\nprint(np.array(val).shape)\nprint(val[0])\n"
] | [
[
"numpy.array"
]
] |
aaryapatel007/Hippocampal-Volume-Quantification-in-Alzheimer-Progression | [
"5c9eff98572c1d2647a742d285805d9e328ab14f"
] | [
"train model/src/inference/UNetInferenceAgent.py"
] | [
"\"\"\"\nContains class that runs inferencing\n\"\"\"\nimport torch\nimport numpy as np\n\nfrom networks.RecursiveUNet import UNet\n\nfrom utils.utils import med_reshape\n\nclass UNetInferenceAgent:\n \"\"\"\n Stores model and parameters and some methods to handle inferencing\n \"\"\"\n def __init__(self, parameter_file_path='', model=None, device=\"cpu\", patch_size=64):\n\n self.model = model\n self.patch_size = patch_size\n self.device = device\n\n if model is None:\n self.model = UNet(num_classes=3)\n\n if parameter_file_path:\n self.model.load_state_dict(torch.load(parameter_file_path, map_location=self.device))\n\n self.model.to(device)\n\n def single_volume_inference_unpadded(self, volume):\n \"\"\"\n Runs inference on a single volume of arbitrary patch size,\n padding it to the conformant size first\n\n Arguments:\n volume {Numpy array} -- 3D array representing the volume\n\n Returns:\n 3D NumPy array with prediction mask\n \"\"\"\n \n raise NotImplementedError\n\n def single_volume_inference(self, volume):\n \"\"\"\n Runs inference on a single volume of conformant patch size\n\n Arguments:\n volume {Numpy array} -- 3D array representing the volume\n\n Returns:\n 3D NumPy array with prediction mask\n \"\"\"\n self.model.eval()\n\n # Assuming volume is a numpy array of shape [X,Y,Z] and we need to slice X axis\n\n # TASK: Write code that will create mask for each slice across the X (0th) dimension. After \n # that, put all slices into a 3D Numpy array. You can verify if your method is \n # correct by running it on one of the volumes in your training set and comparing \n # with the label in 3D Slicer.\n \n slc_tensor = torch.from_numpy(volume).type(torch.cuda.FloatTensor).unsqueeze(1).to(self.device)\n prediction = self.model(slc_tensor)\n masks = torch.argmax(prediction, dim = 1).cpu().detach().numpy().astype(int)\n \n return masks\n"
] | [
[
"torch.from_numpy",
"torch.load",
"torch.argmax"
]
] |
cv-hci-project/PyTorch-VAE | [
"e9b9d122eb52f76e096942b300a8db97a123be13"
] | [
"models/base.py"
] | [
"from abc import abstractmethod\n\nimport matplotlib.pyplot as plt\nimport pytorch_lightning as pl\nimport torch\nimport torchvision.utils as vutils\nfrom torch import optim\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\nfrom torchvision.datasets import CelebA\n\nfrom datasets.concrete_cracks import ConcreteCracksDataset\nfrom datasets.sdnet2018 import SDNet2018\nfrom models.types_ import *\n\n\nclass BaseVAE(pl.LightningModule):\n\n def __init__(self, params: dict) -> None:\n super().__init__()\n\n self.params = params\n self.curr_device = None\n\n try:\n num_workers = params[\"dataloader_workers\"]\n except KeyError:\n num_workers = 1\n\n self.additional_dataloader_args = {'num_workers': num_workers, 'pin_memory': True}\n\n def encode(self, input: Tensor) -> List[Tensor]:\n raise NotImplementedError\n\n def decode(self, input: Tensor) -> Any:\n raise NotImplementedError\n\n def sample(self, batch_size: int, current_device: int, **kwargs) -> Tensor:\n raise RuntimeWarning()\n\n def generate(self, x: Tensor, **kwargs) -> Tensor:\n raise NotImplementedError\n\n @abstractmethod\n def forward(self, *inputs: Tensor) -> Tensor:\n pass\n\n @abstractmethod\n def loss_function(self, *inputs: Any, **kwargs) -> Tensor:\n pass\n\n def training_step(self, batch, batch_idx, optimizer_idx=0):\n real_img, labels = batch\n self.curr_device = real_img.device\n\n results = self.forward(real_img, labels=labels)\n train_loss = self.loss_function(*results,\n M_N=self.params['batch_size'] / self.num_train_imgs,\n optimizer_idx=optimizer_idx,\n batch_idx=batch_idx)\n\n # TODO this is deprecated\n self.logger.experiment.log({key: val.item() for key, val in train_loss.items()})\n\n return train_loss\n\n def validation_step(self, batch, batch_idx, optimizer_idx=0):\n real_img, labels = batch\n self.curr_device = real_img.device\n\n results = self.forward(real_img, labels=labels)\n val_loss = self.loss_function(*results,\n M_N=self.params['batch_size'] / self.num_val_imgs,\n optimizer_idx=optimizer_idx,\n batch_idx=batch_idx)\n\n return val_loss\n\n def validation_epoch_end(self, outputs):\n avg_loss = torch.stack([x['loss'] for x in outputs]).mean()\n tensorboard_logs = {'avg_val_loss': avg_loss}\n\n if self.current_epoch % 5 == 0 or self.current_epoch == (self.trainer.max_epochs - 1):\n self.sample_images()\n\n return {'val_loss': avg_loss, 'log': tensorboard_logs}\n\n def sample_images(self, save=True, display=False):\n # Get sample reconstruction image\n test_input, test_label = next(iter(self.sample_dataloader))\n test_input = test_input.to(self.curr_device)\n test_label = test_label.to(self.curr_device)\n recons = self.generate(test_input, labels=test_label)\n\n if save:\n vutils.save_image(recons.data,\n f\"{self.logger.save_dir}{self.logger.name}/version_{self.logger.version}/\"\n f\"recons_{self.logger.name}_{self.current_epoch}.png\",\n normalize=True,\n nrow=12)\n\n if display:\n plt.imshow(vutils.make_grid(recons.data, normalize=True, nrow=12).permute(2, 1, 0).numpy())\n plt.title(\"Reconstructed images\")\n plt.show()\n\n # vutils.save_image(test_input.data,\n # f\"{self.logger.save_dir}{self.logger.name}/version_{self.logger.version}/\"\n # f\"real_img_{self.logger.name}_{self.current_epoch}.png\",\n # normalize=True,\n # nrow=12)\n\n try:\n samples = self.sample(144,\n self.curr_device,\n labels=test_label)\n if save:\n vutils.save_image(samples.cpu().data,\n f\"{self.logger.save_dir}{self.logger.name}/version_{self.logger.version}/\"\n 
f\"{self.logger.name}_{self.current_epoch}.png\",\n normalize=True,\n nrow=12)\n if display:\n plt.imshow(vutils.make_grid(samples.data, normalize=True, nrow=12).permute(2, 1, 0).numpy())\n plt.title(\"Sampled images\")\n plt.show()\n except:\n pass\n\n del test_input, recons # , samples\n\n def configure_optimizers(self):\n\n optims = []\n scheds = []\n\n optimizer = optim.Adam(self.parameters(),\n lr=self.params['LR'],\n weight_decay=self.params['weight_decay'])\n optims.append(optimizer)\n # Check if more than 1 optimizer is required (Used for adversarial training)\n try:\n if self.params['LR_2'] is not None:\n optimizer2 = optim.Adam(self.params['submodel'].parameters(),\n lr=self.params['LR_2'])\n optims.append(optimizer2)\n except:\n pass\n\n try:\n if self.params['scheduler_gamma'] is not None:\n scheduler = optim.lr_scheduler.ExponentialLR(optims[0],\n gamma=self.params['scheduler_gamma'])\n scheds.append(scheduler)\n\n # Check if another scheduler is required for the second optimizer\n try:\n if self.params['scheduler_gamma_2'] is not None:\n scheduler2 = optim.lr_scheduler.ExponentialLR(optims[1],\n gamma=self.params['scheduler_gamma_2'])\n scheds.append(scheduler2)\n except:\n pass\n return optims, scheds\n except:\n return optims\n\n def train_dataloader(self):\n transform = self.data_transforms()\n\n if self.params['dataset'] == 'celeba':\n dataset = CelebA(root=self.params['data_path'],\n split=\"train\",\n transform=transform,\n download=False)\n elif self.params['dataset'] == \"concrete-cracks\":\n dataset = ConcreteCracksDataset(root_dir=self.params['data_path'],\n split=\"train\",\n abnormal_data=False,\n transform=transform)\n elif self.params['dataset'] == \"SDNET2018\":\n dataset = SDNet2018(root_dir=self.params['data_path'],\n split=\"train\",\n abnormal_data=False,\n transform=transform)\n else:\n raise ValueError('Undefined dataset type')\n\n self.num_train_imgs = len(dataset)\n\n return DataLoader(dataset,\n batch_size=self.params['batch_size'],\n shuffle=True,\n drop_last=True,\n **self.additional_dataloader_args)\n\n def val_dataloader(self):\n transform = self.data_transforms()\n\n if self.params['dataset'] == 'celeba':\n dataset = CelebA(root=self.params['data_path'],\n split=\"test\",\n transform=transform,\n download=False)\n elif self.params['dataset'] == 'concrete-cracks':\n dataset = ConcreteCracksDataset(root_dir=self.params['data_path'],\n split=\"val\",\n abnormal_data=False,\n transform=transform)\n elif self.params['dataset'] == 'SDNET2018':\n dataset = SDNet2018(root_dir=self.params['data_path'],\n split=\"val\",\n abnormal_data=False,\n transform=transform)\n else:\n raise ValueError('Undefined dataset type')\n\n self.sample_dataloader = DataLoader(dataset,\n batch_size=self.params['batch_size'],\n shuffle=True,\n drop_last=True,\n **self.additional_dataloader_args)\n\n self.num_val_imgs = len(self.sample_dataloader)\n\n return self.sample_dataloader\n\n def data_transforms(self):\n\n SetRange = transforms.Lambda(lambda X: 2 * X - 1.)\n SetScale = transforms.Lambda(lambda X: X / X.sum(0).expand_as(X))\n\n if self.params['dataset'] == 'celeba':\n transform = transforms.Compose([transforms.RandomHorizontalFlip(),\n transforms.CenterCrop(148),\n transforms.Resize((self.params['img_size'], self.params['img_size'])),\n transforms.ToTensor(),\n SetRange])\n elif self.params['dataset'] == 'concrete-cracks':\n transform = transforms.Compose([transforms.Resize((self.params['img_size'], self.params['img_size'])),\n transforms.ToTensor(),\n SetRange])\n elif 
self.params['dataset'] == 'SDNET2018':\n transform = transforms.Compose([transforms.Resize((self.params['img_size'], self.params['img_size'])),\n transforms.ToTensor(),\n SetRange])\n else:\n raise ValueError('Undefined dataset type')\n return transform\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.stack",
"torch.optim.lr_scheduler.ExponentialLR",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
]
] |
hwsamuel/modin | [
"4d0a3155b31104ac8083b223bd71ff3e541ecd92"
] | [
"modin/pandas/datetimes.py"
] | [
"# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\nimport pandas\n\nfrom .dataframe import DataFrame\nfrom .series import Series\n\n\ndef to_datetime(\n arg,\n errors=\"raise\",\n dayfirst=False,\n yearfirst=False,\n utc=None,\n format=None,\n exact=True,\n unit=None,\n infer_datetime_format=False,\n origin=\"unix\",\n cache=True,\n):\n \"\"\"\n Convert argument to datetime.\n\n Parameters\n ----------\n arg : int, float, str, datetime, list, tuple, 1-d array, Series, DataFrame/dict-like\n The object to convert to a datetime.\n errors : {'ignore', 'raise', 'coerce'}, default 'raise'\n - If 'raise', then invalid parsing will raise an exception.\n - If 'coerce', then invalid parsing will be set as NaT.\n - If 'ignore', then invalid parsing will return the input.\n dayfirst : bool, default False\n Specify a date parse order if `arg` is str or its list-likes.\n If True, parses dates with the day first, eg 10/11/12 is parsed as\n 2012-11-10.\n Warning: dayfirst=True is not strict, but will prefer to parse\n with day first (this is a known bug, based on dateutil behavior).\n yearfirst : bool, default False\n Specify a date parse order if `arg` is str or its list-likes.\n\n - If True parses dates with the year first, eg 10/11/12 is parsed as\n 2010-11-12.\n - If both dayfirst and yearfirst are True, yearfirst is preceded (same\n as dateutil).\n\n Warning: yearfirst=True is not strict, but will prefer to parse\n with year first (this is a known bug, based on dateutil behavior).\n utc : bool, default None\n Return UTC DatetimeIndex if True (converting any tz-aware\n datetime.datetime objects as well).\n format : str, default None\n The strftime to parse time, eg \"%d/%m/%Y\", note that \"%f\" will parse\n all the way up to nanoseconds.\n See strftime documentation for more information on choices:\n https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior.\n exact : bool, True by default\n Behaves as:\n - If True, require an exact format match.\n - If False, allow the format to match anywhere in the target string.\n\n unit : str, default 'ns'\n The unit of the arg (D,s,ms,us,ns) denote the unit, which is an\n integer or float number. This will be based off the origin.\n Example, with unit='ms' and origin='unix' (the default), this\n would calculate the number of milliseconds to the unix epoch start.\n infer_datetime_format : bool, default False\n If True and no `format` is given, attempt to infer the format of the\n datetime strings based on the first non-NaN element,\n and if it can be inferred, switch to a faster method of parsing them.\n In some cases this can increase the parsing speed by ~5-10x.\n origin : scalar, default 'unix'\n Define the reference date. 
The numeric values would be parsed as number\n of units (defined by `unit`) since this reference date.\n\n - If 'unix' (or POSIX) time; origin is set to 1970-01-01.\n - If 'julian', unit must be 'D', and origin is set to beginning of\n Julian Calendar. Julian day number 0 is assigned to the day starting\n at noon on January 1, 4713 BC.\n - If Timestamp convertible, origin is set to Timestamp identified by\n origin.\n cache : bool, default True\n If True, use a cache of unique, converted dates to apply the datetime\n conversion. May produce significant speed-up when parsing duplicate\n date strings, especially ones with timezone offsets. The cache is only\n used when there are at least 50 values. The presence of out-of-bounds\n values will render the cache unusable and may slow down parsing.\n\n Returns\n -------\n datetime\n If parsing succeeded.\n Return type depends on input:\n\n - list-like: DatetimeIndex\n - Series: Series of datetime64 dtype\n - scalar: Timestamp\n\n In case when it is not possible to return designated types (e.g. when\n any element of input is before Timestamp.min or after Timestamp.max)\n return will have datetime.datetime type (or corresponding\n array/Series).\n \"\"\"\n if not isinstance(arg, (DataFrame, Series)):\n return pandas.to_datetime(\n arg,\n errors=errors,\n dayfirst=dayfirst,\n yearfirst=yearfirst,\n utc=utc,\n format=format,\n exact=exact,\n unit=unit,\n infer_datetime_format=infer_datetime_format,\n origin=origin,\n cache=cache,\n )\n return arg._to_datetime(\n errors=errors,\n dayfirst=dayfirst,\n yearfirst=yearfirst,\n utc=utc,\n format=format,\n exact=exact,\n unit=unit,\n infer_datetime_format=infer_datetime_format,\n origin=origin,\n cache=cache,\n )\n"
] | [
[
"pandas.to_datetime"
]
] |
souradip93/GCDT | [
"5991044307f59598ea224b64f1f3b915fa00ebcc"
] | [
"thumt/bin/trainer.py"
] | [
"#!/usr/bin/env python\n# coding=utf-8\n# Copyright 2018 The THUMT Authors\n\nimport argparse\nimport os\nimport numpy as np\nimport tensorflow as tf\nimport thumt.data.dataset as dataset\nimport thumt.data.record as record\nimport thumt.data.vocab as vocabulary\nimport thumt.models as models\nimport thumt.utils.hooks as hooks\nimport thumt.utils.utils as utils\nimport thumt.utils.parallel as parallel\nimport thumt.utils.search as search\n\n\ndef parse_args(args=None):\n parser = argparse.ArgumentParser(\n description=\"Training neural machine translation models\",\n usage=\"trainer.py [<args>] [-h | --help]\"\n )\n\n # input files\n parser.add_argument(\"--input\", type=str, nargs=2,\n help=\"Path of source and target corpus\")\n parser.add_argument(\"--glove_emb_path\", type=str, default=None,\n help=\"Path of glove embeddings\")\n parser.add_argument(\"--bert_emb_path\", type=str, default=None,\n help=\"Path of bert embeddings\")\n parser.add_argument(\"--record\", type=str,\n help=\"Path to tf.Record data\")\n parser.add_argument(\"--output\", type=str, default=\"train\",\n help=\"Path to saved models\")\n parser.add_argument(\"--vocabulary\", type=str, nargs=3,\n help=\"Path of source and target vocabulary\")\n parser.add_argument(\"--validation\", type=str,\n help=\"Path of validation file\")\n parser.add_argument(\"--references\", type=str, nargs=\"+\",\n help=\"Path of reference files\")\n\n # model and configuration\n parser.add_argument(\"--model\", type=str, required=True,\n help=\"Name of the model\")\n parser.add_argument(\"--parameters\", type=str, default=\"\",\n help=\"Additional hyper parameters\")\n\n return parser.parse_args(args)\n\n\ndef default_parameters():\n params = tf.contrib.training.HParams(\n input=[\"\", \"\"],\n output=\"\",\n record=\"\",\n model=\"rnnsearch\",\n vocab=[\"\", \"\"],\n # Default training hyper parameters\n num_threads=6,\n batch_size=128,\n max_length=256,\n length_multiplier=1,\n mantissa_bits=2,\n warmup_steps=50,\n train_steps=100000,\n buffer_size=10000,\n constant_batch_size=False,\n device_list=[0],\n update_cycle=1,\n initializer=\"xavier\",\n initializer_gain=0.08,\n adam_beta1=0.9,\n adam_beta2=0.999,\n adam_epsilon=1e-6,\n r0=2.0,\n s=1000, \n e=4000,\n clip_grad_norm=5.0,\n learning_rate=1.0,\n learning_rate_decay=\"rnnplus_warmup_decay\",\n learning_rate_boundaries=[0],\n learning_rate_values=[0.0],\n keep_checkpoint_max=100,\n keep_top_checkpoint_max=5,\n gpu_memory_fraction=1,\n # Validation\n eval_steps=100000,\n eval_secs=0,\n eval_batch_size=64,\n top_beams=1,\n beam_size=4,\n decode_alpha=0.6,\n decode_length=0,\n decode_constant=5.0,\n decode_normalize=False,\n validation=\"\",\n references=[\"\"],\n save_checkpoint_secs=0,\n save_checkpoint_steps=1000,\n )\n\n return params\n\n\ndef import_params(model_dir, model_name, params):\n model_dir = os.path.abspath(model_dir)\n p_name = os.path.join(model_dir, \"params.json\")\n m_name = os.path.join(model_dir, model_name + \".json\")\n\n if not tf.gfile.Exists(p_name) or not tf.gfile.Exists(m_name):\n return params\n\n with tf.gfile.Open(p_name) as fd:\n tf.logging.info(\"Restoring hyper parameters from %s\" % p_name)\n json_str = fd.readline()\n params.parse_json(json_str)\n\n with tf.gfile.Open(m_name) as fd:\n tf.logging.info(\"Restoring model parameters from %s\" % m_name)\n json_str = fd.readline()\n params.parse_json(json_str)\n\n return params\n\n\ndef export_params(output_dir, name, params):\n if not tf.gfile.Exists(output_dir):\n tf.gfile.MkDir(output_dir)\n\n # Save 
params as params.json\n filename = os.path.join(output_dir, name)\n with tf.gfile.Open(filename, \"w\") as fd:\n fd.write(params.to_json())\n\n\ndef collect_params(all_params, params):\n collected = tf.contrib.training.HParams()\n\n for k in params.values().keys():\n collected.add_hparam(k, getattr(all_params, k))\n\n return collected\n\n\ndef merge_parameters(params1, params2):\n\n params = tf.contrib.training.HParams()\n\n for (k, v) in params1.values().items():\n params.add_hparam(k, v)\n\n params_dict = list(params.values()) ## key value pair\n\n for (k, v) in params2.values().items():\n if k in params_dict:\n # Override\n setattr(params, k, v)\n else:\n params.add_hparam(k, v)\n\n return params\n\n\ndef override_parameters(params, args):\n params.model = args.model\n params.input = args.input or params.input\n params.glove_emb_path = args.glove_emb_path \n params.bert_emb_path = args.bert_emb_path\n params.output = args.output or params.output\n params.record = args.record or params.record\n params.vocab = args.vocabulary or params.vocab\n params.validation = args.validation or params.validation\n params.references = args.references or params.references\n params.parse(args.parameters)\n\n params.vocabulary = {\n \"source\": vocabulary.load_vocabulary(params.vocab[0]),\n \"target\": vocabulary.load_vocabulary(params.vocab[1]),\n \"char\" : vocabulary.load_vocabulary(params.vocab[2])\n }\n params.vocabulary[\"source\"] = vocabulary.process_vocabulary(\n params.vocabulary[\"source\"], params\n )\n params.vocabulary[\"target\"] = vocabulary.process_vocabulary(\n params.vocabulary[\"target\"], params\n )\n params.vocabulary[\"char\"] = vocabulary.process_vocabulary(\n params.vocabulary[\"char\"], params\n )\n\n control_symbols = [params.pad, params.bos, params.eos, params.unk]\n\n params.mapping = {\n \"source\": vocabulary.get_control_mapping(\n params.vocabulary[\"source\"],\n control_symbols\n ),\n \"target\": vocabulary.get_control_mapping(\n params.vocabulary[\"target\"],\n control_symbols\n ),\n \"char\": vocabulary.get_control_mapping(\n params.vocabulary[\"char\"],\n control_symbols\n )\n }\n\n return params\n\n\ndef get_initializer(params):\n if params.initializer == \"xavier\":\n return tf.contrib.layers.xavier_initializer()\n elif params.initializer == \"uniform\":\n max_val = params.initializer_gain\n return tf.random_uniform_initializer(-max_val, max_val)\n elif params.initializer == \"normal\":\n return tf.random_normal_initializer(0.0, params.initializer_gain)\n elif params.initializer == \"normal_unit_scaling\":\n return tf.variance_scaling_initializer(params.initializer_gain,\n mode=\"fan_avg\",\n distribution=\"normal\")\n elif params.initializer == \"uniform_unit_scaling\":\n return tf.variance_scaling_initializer(params.initializer_gain,\n mode=\"fan_avg\",\n distribution=\"uniform\")\n else:\n raise ValueError(\"Unrecognized initializer: %s\" % params.initializer)\n\n\ndef get_learning_rate_decay(learning_rate, global_step, params):\n if params.learning_rate_decay == \"noam\":\n step = tf.to_float(global_step)\n warmup_steps = tf.to_float(params.warmup_steps)\n multiplier = params.hidden_size ** -0.5\n decay = multiplier * tf.minimum((step + 1) * (warmup_steps ** -1.5),\n (step + 1) ** -0.5)\n\n return learning_rate * decay\n elif params.learning_rate_decay == \"new_warmup_rsqrt_decay\":\n step = tf.to_float(global_step)\n warmup_steps = tf.to_float(params.warmup_steps)\n multiplier = params.hidden_size ** -0.5\n decay = params.r0 * multiplier * tf.minimum((step + 1) * 
(warmup_steps ** -1.0) * (warmup_steps ** -0.5),\n (step + 1) ** -0.5)\n\n return learning_rate * decay\n elif params.learning_rate_decay == \"rnnplus_warmup_decay\":\n step = tf.to_float(global_step)\n n = float(len(params.device_list))\n warmup_steps = tf.to_float(params.warmup_steps)\n decay = tf.minimum(1 + step * (n - 1) / (n * warmup_steps), tf.minimum(n, n * ((2*n) ** ((params.s - n * step) / (params.e - params.s)))))\n\n return tf.maximum(learning_rate * decay, 5e-6)\n elif params.learning_rate_decay == \"piecewise_constant\":\n return tf.train.piecewise_constant(tf.to_int32(global_step),\n params.learning_rate_boundaries,\n params.learning_rate_values)\n elif params.learning_rate_decay == \"none\":\n return learning_rate\n else:\n raise ValueError(\"Unknown learning_rate_decay\")\n\n\ndef session_config(params):\n optimizer_options = tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L1,\n do_function_inlining=True)\n graph_options = tf.GraphOptions(optimizer_options=optimizer_options)\n config = tf.ConfigProto(allow_soft_placement=True,\n graph_options=graph_options)\n if params.device_list:\n device_str = \",\".join([str(i) for i in params.device_list])\n config.gpu_options.visible_device_list = device_str\n config.gpu_options.per_process_gpu_memory_fraction = params.gpu_memory_fraction\n config.gpu_options.allow_growth = True\n return config\n\n\ndef decode_target_ids(inputs, params):\n decoded = []\n vocab = params.vocabulary[\"target\"]\n\n for item in inputs:\n syms = []\n for idx in item:\n sym = vocab[idx]\n\n if sym == params.eos:\n break\n\n if sym == params.pad:\n break\n\n syms.append(sym)\n decoded.append(syms)\n\n return decoded\n\n\ndef main(args):\n tf.logging.set_verbosity(tf.logging.INFO)\n model_cls = models.get_model(args.model)\n params = default_parameters()\n # Import and override parameters\n # Priorities (low -> high):\n # default -> saved -> command\n params = merge_parameters(params, model_cls.get_parameters())\n params = import_params(args.output, args.model, params)\n override_parameters(params, args)\n\n # Export all parameters and model specific parameters\n export_params(params.output, \"params.json\", params)\n export_params(\n params.output,\n \"%s.json\" % args.model,\n collect_params(params, model_cls.get_parameters())\n )\n\n # Build Graph\n with tf.Graph().as_default():\n if not params.record:\n # Build input queue\n if params.use_bert and params.bert_emb_path:\n features = dataset.get_training_input_with_bert(params.input + [params.bert_emb_path], params)\n else:\n features = dataset.get_training_input(params.input, params)\n else:\n features = record.get_input_features( # ??? 
\n os.path.join(params.record, \"*train*\"), \"train\", params\n )\n\n # Build model\n initializer = get_initializer(params)\n model = model_cls(params)\n\n # Multi-GPU setting\n sharded_losses = parallel.parallel_model(\n model.get_training_func(initializer),\n features,\n params.device_list\n )\n loss = tf.add_n(sharded_losses) / len(sharded_losses)\n\n # Create global step\n global_step = tf.train.get_or_create_global_step()\n\n # Print parameters\n all_weights = {v.name: v for v in tf.trainable_variables()}\n total_size = 0\n\n for v_name in sorted(list(all_weights)):\n v = all_weights[v_name]\n tf.logging.info(\"%s\\tshape %s\", v.name[:-2].ljust(80),\n str(v.shape).ljust(20))\n v_size = np.prod(np.array(v.shape.as_list())).tolist() # mutiple all dimension size\n total_size += v_size\n tf.logging.info(\"Total trainable variables size: %d\", total_size)\n\n learning_rate = get_learning_rate_decay(params.learning_rate,\n global_step, params)\n learning_rate = tf.convert_to_tensor(learning_rate, dtype=tf.float32)\n tf.summary.scalar(\"learning_rate\", learning_rate)\n\n # Create optimizer\n opt = tf.train.AdamOptimizer(learning_rate,\n beta1=params.adam_beta1,\n beta2=params.adam_beta2,\n epsilon=params.adam_epsilon)\n\n if params.update_cycle == 1:\n train_op = tf.contrib.layers.optimize_loss(\n name=\"training\",\n loss=loss,\n global_step=global_step,\n learning_rate=learning_rate,\n clip_gradients=params.clip_grad_norm or None,\n optimizer=opt,\n colocate_gradients_with_ops=True\n )\n zero_op = tf.no_op(\"zero_op\")\n collect_op = tf.no_op(\"collect_op\")\n else:\n grads_and_vars = opt.compute_gradients(\n loss, colocate_gradients_with_ops=True)\n gradients = [item[0] for item in grads_and_vars]\n variables = [item[1] for item in grads_and_vars]\n variables = utils.replicate_variables(variables)\n zero_op = utils.zero_variables(variables)\n collect_op = utils.collect_gradients(gradients, variables)\n\n scale = 1.0 / params.update_cycle\n gradients, variables = utils.scale_gradients(grads_and_vars, scale)\n\n # Gradient clipping avoid greadient explosion!!\n if isinstance(params.clip_grad_norm or None, float):\n gradients, _ = tf.clip_by_global_norm(gradients,\n params.clip_grad_norm)\n\n # Update variables\n grads_and_vars = list(zip(gradients, variables))\n with tf.control_dependencies([collect_op]):\n train_op = opt.apply_gradients(grads_and_vars, global_step)\n\n # Validation\n '''\n if params.validation and params.references[0]:\n files = [params.validation] + list(params.references)\n eval_inputs = files\n eval_input_fn = dataset.get_evaluation_input\n else:\n print(\"Don't evaluate\")\n eval_input_fn = None\n '''\n # Add hooks\n train_hooks = [\n tf.train.StopAtStepHook(last_step=params.train_steps),\n tf.train.NanTensorHook(loss), # Monitors the loss tensor and stops training if loss is NaN\n tf.train.LoggingTensorHook(\n {\n \"step\": global_step,\n \"loss\": loss,\n \"chars\": tf.shape(features[\"chars\"]),\n \"source\": tf.shape(features[\"source\"]),\n #\"bert\": tf.shape(features[\"bert\"]),\n \"lr\": learning_rate\n },\n every_n_iter=1\n ),\n tf.train.CheckpointSaverHook(\n checkpoint_dir=params.output,\n save_secs=params.save_checkpoint_secs or None,\n save_steps=params.save_checkpoint_steps or None,\n saver=tf.train.Saver(\n max_to_keep=params.keep_checkpoint_max,\n sharded=False\n )\n )\n ]\n\n config = session_config(params)\n '''\n if not eval_input_fn is None:\n train_hooks.append(\n hooks.EvaluationHook(\n lambda f: search.create_inference_graph(\n 
model.get_evaluation_func(), f, params\n ),\n lambda: eval_input_fn(eval_inputs, params),\n lambda x: decode_target_ids(x, params),\n params.output,\n config,\n params.keep_top_checkpoint_max,\n eval_secs=params.eval_secs,\n eval_steps=params.eval_steps\n )\n )\n '''\n\n with tf.train.MonitoredTrainingSession(\n checkpoint_dir=params.output, hooks=train_hooks,\n save_checkpoint_secs=None, config=config) as sess:\n while not sess.should_stop():\n utils.session_run(sess, zero_op)\n for i in range(1, params.update_cycle):\n utils.session_run(sess, collect_op)\n sess.run(train_op)\n\n\nif __name__ == \"__main__\":\n main(parse_args())\n"
] | [
[
"tensorflow.summary.scalar",
"tensorflow.variance_scaling_initializer",
"tensorflow.no_op",
"tensorflow.logging.set_verbosity",
"tensorflow.convert_to_tensor",
"tensorflow.train.StopAtStepHook",
"tensorflow.gfile.MkDir",
"tensorflow.minimum",
"tensorflow.clip_by_global_norm",
"tensorflow.train.MonitoredTrainingSession",
"tensorflow.Graph",
"tensorflow.contrib.layers.optimize_loss",
"tensorflow.random_normal_initializer",
"tensorflow.train.get_or_create_global_step",
"tensorflow.random_uniform_initializer",
"tensorflow.contrib.training.HParams",
"tensorflow.GraphOptions",
"tensorflow.train.NanTensorHook",
"tensorflow.add_n",
"tensorflow.shape",
"tensorflow.to_float",
"tensorflow.train.Saver",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.ConfigProto",
"tensorflow.control_dependencies",
"tensorflow.logging.info",
"tensorflow.train.AdamOptimizer",
"tensorflow.trainable_variables",
"tensorflow.OptimizerOptions",
"tensorflow.to_int32",
"tensorflow.gfile.Exists",
"tensorflow.gfile.Open",
"tensorflow.maximum"
]
] |
craft-ai/craft-ai-client-python | [
"3d8b3d9a49c0c70964deaeb9645130dd54f9a0b3"
] | [
"tests/test_pandas_get_generator_operations.py"
] | [
"import unittest\n\nfrom craft_ai.pandas import CRAFTAI_PANDAS_ENABLED\n\nif CRAFTAI_PANDAS_ENABLED:\n import copy\n import pandas as pd\n\n import craft_ai.pandas\n\n from .data import pandas_valid_data\n from .utils import generate_entity_id\n from . import settings\n\n AGENT_ID_1_BASE = \"test_1_df_pd\"\n AGENT_ID_2_BASE = \"test_2_df_pd\"\n GENERATOR_ID_BASE = \"test_pandas_gen_df_pd\"\n\n SIMPLE_AGENT_CONFIGURATION = pandas_valid_data.SIMPLE_AGENT_CONFIGURATION\n SIMPLE_AGENT_DATA = pandas_valid_data.SIMPLE_AGENT_DATA\n COMPLEX_AGENT_CONFIGURATION = pandas_valid_data.COMPLEX_AGENT_CONFIGURATION\n COMPLEX_AGENT_CONFIGURATION_2 = pandas_valid_data.COMPLEX_AGENT_CONFIGURATION_2\n COMPLEX_AGENT_DATA = pandas_valid_data.COMPLEX_AGENT_DATA\n COMPLEX_AGENT_DATA_2 = pandas_valid_data.COMPLEX_AGENT_DATA_2\n VALID_GENERATOR_CONFIGURATION = pandas_valid_data.VALID_GENERATOR_CONFIGURATION\n VALID_COMPLEX_GENERATOR_CONFIGURATION = (\n pandas_valid_data.VALID_COMPLEX_GENERATOR_CONFIGURATION\n )\n VALID_TIMESTAMP = pandas_valid_data.VALID_TIMESTAMP\n VALID_LAST_TIMESTAMP = pandas_valid_data.VALID_LAST_TIMESTAMP\n\n CLIENT = craft_ai.pandas.Client(settings.CRAFT_CFG)\n\n\[email protected](CRAFTAI_PANDAS_ENABLED is False, \"pandas is not enabled\")\nclass TestPandasSimpleGeneratorWithOpperations(unittest.TestCase):\n def setUp(self):\n self.agent_1_id = generate_entity_id(AGENT_ID_1_BASE + \"GeneratorWithOp\")\n self.generator_id = generate_entity_id(GENERATOR_ID_BASE + \"GeneratorWithOp\")\n\n CLIENT.delete_agent(self.agent_1_id)\n CLIENT.delete_generator(self.generator_id)\n CLIENT.create_agent(SIMPLE_AGENT_CONFIGURATION, self.agent_1_id)\n\n CLIENT.add_agent_operations(self.agent_1_id, SIMPLE_AGENT_DATA)\n\n generator_configuration = copy.deepcopy(VALID_GENERATOR_CONFIGURATION)\n generator_configuration[\"filter\"] = [self.agent_1_id]\n CLIENT.create_generator(generator_configuration, self.generator_id)\n\n def tearDown(self):\n CLIENT.delete_agent(self.agent_1_id)\n CLIENT.delete_generator(self.generator_id)\n\n def test_simple_pd_get_generator_operations(self):\n df = CLIENT.get_generator_operations(self.generator_id, None, None)\n\n self.assertIsInstance(df, pd.DataFrame)\n self.assertEqual(len(df), 300)\n self.assertEqual(len(df.dtypes), 7)\n self.assertEqual(\n df.timestamp.min(),\n pd.Timestamp(\"2019-12-31 23:00:00+0000\", tz=\"UTC\").value / 1e9,\n )\n self.assertEqual(\n df.timestamp.max(),\n pd.Timestamp(\"2020-01-01 03:59:00+0000\", tz=\"UTC\").value / 1e9,\n )\n\n def test_get_generator_operations_with_pdtimestamp(self):\n\n ops_df = CLIENT.get_generator_operations(\n self.generator_id,\n pd.Timestamp(VALID_TIMESTAMP, unit=\"s\", tz=\"UTC\"),\n pd.Timestamp(VALID_LAST_TIMESTAMP, unit=\"s\", tz=\"UTC\"),\n )\n\n ground_truth_ops_df = CLIENT.get_generator_operations(\n self.generator_id, VALID_TIMESTAMP, VALID_LAST_TIMESTAMP,\n )\n\n self.assertIsInstance(ops_df, pd.DataFrame)\n self.assertFalse(ops_df.empty)\n self.assertNotEqual(ops_df.get(\"agent_id\").any(), None)\n self.assertNotEqual(ops_df.columns.any(), None)\n self.assertTrue(ops_df.equals(ground_truth_ops_df))\n\n\[email protected](CRAFTAI_PANDAS_ENABLED is False, \"pandas is not enabled\")\nclass TestPandasComplexGeneratorWithOpperations(unittest.TestCase):\n def setUp(self):\n self.agent_1_id = generate_entity_id(AGENT_ID_1_BASE + \"GeneratorWithOp\")\n self.agent_2_id = generate_entity_id(AGENT_ID_2_BASE + \"GeneratorWithOp\")\n self.generator_id = generate_entity_id(GENERATOR_ID_BASE + \"GeneratorWithOp\")\n\n 
CLIENT.delete_agent(self.agent_1_id)\n CLIENT.delete_agent(self.agent_2_id)\n CLIENT.delete_generator(self.generator_id)\n CLIENT.create_agent(COMPLEX_AGENT_CONFIGURATION, self.agent_1_id)\n CLIENT.create_agent(COMPLEX_AGENT_CONFIGURATION_2, self.agent_2_id)\n CLIENT.add_agent_operations(self.agent_1_id, COMPLEX_AGENT_DATA)\n CLIENT.add_agent_operations(self.agent_2_id, COMPLEX_AGENT_DATA_2)\n generator_configuration = copy.deepcopy(VALID_COMPLEX_GENERATOR_CONFIGURATION)\n generator_configuration[\"filter\"] = [self.agent_1_id, self.agent_2_id]\n\n CLIENT.create_generator(generator_configuration, self.generator_id)\n\n def tearDown(self):\n CLIENT.delete_agent(self.agent_1_id)\n CLIENT.delete_agent(self.agent_2_id)\n CLIENT.delete_generator(self.generator_id)\n\n def test_complex_pd_get_generator_operations(self):\n df = CLIENT.get_generator_operations(self.generator_id, None, None)\n\n self.assertIsInstance(df, pd.DataFrame)\n self.assertEqual(len(df), 20)\n self.assertEqual(len(df.dtypes), 5)\n self.assertEqual(\n df.timestamp.min(),\n pd.Timestamp(\"2019-12-31 23:00:00+0000\", tz=\"UTC\").value / 1e9,\n )\n self.assertEqual(\n df.timestamp.max(),\n pd.Timestamp(\"2020-01-09 23:00:00+0000\", tz=\"UTC\").value / 1e9,\n )\n"
] | [
[
"pandas.Timestamp"
]
] |
grybd/oneflow | [
"82237ad096a10527591660c09b61444c42917e69"
] | [
"python/oneflow/test/modules/test_view.py"
] | [
"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport unittest\nfrom collections import OrderedDict\n\nimport numpy as np\nfrom test_util import GenArgList\n\nimport oneflow as flow\nimport oneflow.unittest\n\n\ndef _test_view(test_case, device):\n x = np.array(\n [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]\n ).astype(np.float32)\n input = flow.tensor(\n x, dtype=flow.float32, device=flow.device(device), requires_grad=True\n )\n of_out = input.view(2, 2, 2, -1)\n of_shape = of_out.numpy().shape\n np_shape = (2, 2, 2, 2)\n test_case.assertTrue(np.array_equal(of_shape, np_shape))\n of_out = of_out.sum()\n of_out.backward()\n np_grad = np.array(\n [\n [1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0],\n ]\n )\n test_case.assertTrue(np.allclose(np_grad, input.grad.numpy(), 0.0001, 0.0001))\n\n\ndef _test_view_flow_size(test_case, device):\n x = np.array(\n [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]\n ).astype(np.float32)\n input = flow.tensor(\n x, dtype=flow.float32, device=flow.device(device), requires_grad=True\n )\n shape = flow.Size([2, 2, 2, -1])\n of_out = input.view(shape)\n np_shape = (2, 2, 2, 2)\n test_case.assertTrue(np.array_equal(of_out.numpy().shape, np_shape))\n of_out = of_out.sum()\n of_out.backward()\n np_grad = np.array(\n [\n [1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0],\n ]\n )\n test_case.assertTrue(np.allclose(np_grad, input.grad.numpy(), 0.0001, 0.0001))\n\n\[email protected]_unless_1n1d()\nclass TestView(flow.unittest.TestCase):\n def test_view(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"test_fun\"] = [\n _test_view,\n _test_view_flow_size,\n ]\n arg_dict[\"device\"] = [\"cpu\", \"cuda\"]\n for arg in GenArgList(arg_dict):\n arg[0](test_case, *arg[1:])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"numpy.array",
"numpy.array_equal"
]
] |
papanisaicharan/Scalable-energy-efficient-scheme-on-mobile-nodes | [
"43f8d9c82b603c33803ee5d07f056eaeabdaf530"
] | [
"SEES_CODE/hybrid_placement.py"
] | [
"\"\"\" \r\n\r\nThis is a hybrid placement algorithm(offline mode)\r\n\"\"\"\r\n\r\nfrom sympy.solvers import solve\r\nfrom sympy import Symbol\r\n#this is testing plot\r\nfrom matplotlib import pyplot as plt\r\nfrom matplotlib.ticker import MultipleLocator\r\nimport numpy as np\r\nimport math\r\nimport random\r\nimport Node as nd\r\nimport EH_relay as relay_nd\r\nimport LBS as L_B_S\r\nimport Zone as zone\r\nimport Network as nw\r\n\r\ndef getpoints(startx,starty,endx,endy,id):\r\n rangeX = (startx,endx)\r\n rangeY = (starty,endy)\r\n randPoints = []\r\n excluded = set()\r\n i = 0\r\n while i<D:\r\n x = random.randrange(*rangeX)\r\n y = random.randrange(*rangeY)\r\n if (x,y) in excluded: continue\r\n randPoints.append((x,y))\r\n i += 1\r\n excluded.update((x, y))\r\n \r\n secure_random = random.SystemRandom()\r\n nodeswithenergy = []#(x,y),energy,id\r\n for j in range(len(nodesinlevel)):\r\n for i in range(nodesinlevel[j]):\r\n list1 = []\r\n list1.append(secure_random.choice(randPoints))\r\n randPoints.remove(list1[0])\r\n list1.append(Et[j])\r\n list1.append(id)\r\n list1.append(j+1)\r\n id = id+1\r\n nodeswithenergy.append(list1)\r\n return(nodeswithenergy)\r\n\r\ndef get_rand_uniform_points_LBS(x1,x2,y1,y2):\r\n final_list = []\r\n t1 = []\r\n t2 = []\r\n t1.append(random.randint(x1,x2))\r\n t2.append(random.randint(y1,y2))\r\n for i in range(no_lbs_on_each_side-1):\r\n p1 = random.randint(x1,x2)\r\n p2 = random.randint(y1,y2)\r\n while check_validation(t1,t2,p1,p2):\r\n p1 = random.randint(x1,x2)\r\n p2 = random.randint(y1,y2)\r\n t1.append(p1)\r\n t2.append(p2)\r\n final_list.append(t1)\r\n final_list.append(t2)\r\n return final_list\r\n\r\ndef euclidean_distance(x1,y1,x2,y2):\r\n distance = math.sqrt(math.pow(x2 - x1, 2) + math.pow(y2 - y1, 2) ) \r\n return distance\r\n\r\ndef check_validation(t1,t2,p1,p2):\r\n all_distances = []\r\n for i in range(len(t1)):\r\n all_distances.append(euclidean_distance(p1,p2,t1[i],t2[i]))\r\n for i in all_distances:\r\n if i < distance_btw_lbs:\r\n return True\r\n return False\r\n\r\n#taking inputs,parameters setting\r\nN = 2000#int(input(\"Enter N( the total number of HN nodes) : \"))\r\nFs = 20#int(input(\"Enter Fs() : \"))\r\nL = 200#int(input(\"Enter L(length of area) : \"))\r\nB = 12#int(input(\"Enter B(LBS) : \"))\r\nn = 2#int(input(\"Enter n(number of heterogeneity level) : \"))\r\n\r\n#taking constants\r\nalpha = 2#float(input(\"Enter alpha : \"))#singh has taken this as 0.5\r\n#beta = input(\"Enter beta\")\r\ngamma = 0.4#float(input(\"Enter gamma : \"))#intial gamma will be given\r\ntheta = 0.025#float(input(\"Enter theta : \"))#should validate a equation\r\n\r\nEinti = 0.5#float(input(\"Enter E1(initial energy) : \"))\r\n\r\n#we need to validate theta and gamma values and find beta constant\r\nif((gamma / (2*(n-1)) ) > theta):\r\n\tprint(\"validated theta and gamma values\")\r\n\t# finding gamma values\r\n\tgammavalues = [gamma]\r\n\t#getting gamma values,this is done according to singh model\r\n\tfor i in range(1,n+1):\r\n\t #gammai = gammai-1 - 2*theta\r\n\t gammavalues.append(round(gammavalues[i-1] - 2*theta,3))\r\n\t \r\n\tprint(\"gamma-values : \",gammavalues)\r\n\t#number of nodes in each level\r\n\tbeta = gammavalues[len(gammavalues)-1]\r\n\tlastgamma = beta\r\n\tgammavalues[:] = gammavalues[:len(gammavalues)-1]\r\n\r\n\tprint(\"gamma-values : \",gammavalues)\r\n\t#https://docs.sympy.org/latest/modules/solvers/solvers.html#systems-of-polynomial-equations\r\n\tbeta = Symbol('x',positive=True)\r\n\tf =1\r\n\tfor i in 
range(n-1,-1,-1):\r\n\t f = f*(beta-gammavalues[i])\r\n\t f= f+1\r\n\r\n\tf = f-2\r\n\tprint(\"equation used for solving beta: \",f)\r\n\tbeta = solve(f, beta)\r\n\tprint(\"positive beta value: \",beta)\r\n\tNt = [] #indicate cardinality of n categories\r\n\tEt = [] #indicate Energy of n categories\r\n\tEnergySinghTotal = []\r\n\r\n\tfor i in range(1,n+1):\r\n\t # we compute Nit and Eit and append them to Nt and Et,formulea are shown below\r\n\t #(Einti * (1 + ((i − 1) * alpha)))\r\n\t et = i-1\r\n\t et = et*alpha\r\n\t et = et+1\r\n\t et = et*Einti\r\n\t Et.append(et)\r\n\t #Nit = N × (beta − gamma1) × (beta − gamma2) × (beta − gamma3)×⋯× (beta − gammai)\r\n\t nt = N\r\n\t for j in range(0,i):\r\n\t nt = nt *(beta[0] - gammavalues[j])\r\n\t Nt.append(nt)\r\n\r\n\t# print(\"enregy of each HN type : \",Et)\r\n\t# print(\"total number of HN nodes in each level : \",Nt)\r\n\r\n\tNt1 = [round(Nt[i],1) for i in range(len(Nt))]\r\n\tfor i in range(len(Nt1)):\r\n\t if (Nt1[i]-int(Nt1[i])) >= 0.5:\r\n\t Nt1[i] = math.ceil(Nt1[i])\r\n\t else:\r\n\t Nt1[i] = math.floor(Nt1[i])\r\n\t \r\n\t# print(\"rounded total number of HN nodes in each level : \",Nt1)\r\n\ttotal_energy_of_all_nodes = 0.0\r\n\tNt[:] = Nt1[:]\r\n\tfor i in range(len(Nt1)):\r\n\t print(\"Nodes in level - \",i+1,\" = \",Nt[i] ,\" , Energy = \",Et[i])\r\n\t total_energy_of_all_nodes +=Nt[i]*Et[i]\r\n\t# print(\"rounded total number of HN nodes in each level : \",Nt)\r\n\t#checking\r\n\tsum1 = 0\r\n\tfor i in Nt:\r\n\t sum1=sum1+i\r\n\tif N == sum1:\r\n\t print(\"sum of nodes in all the level is equal to N \")\r\n\telse:\r\n\t print(\"sum of nodes in all the level is not equal to N,Something went wrong \")\r\n\t#for sake of solving\r\n\t# print(Nt)\r\n\tfor i in range(len(Nt)):\r\n\t Nt[i] = float(Nt[i]/100)\r\n\t #print(Nt[i])\r\n\t if (Nt[i] - int(Nt[i])) >= 0.5:\r\n\t Nt[i] = math.ceil(Nt[i])*100\r\n\t else:\r\n\t Nt[i] = math.floor(Nt[i])*100\r\n\t \r\n\tprint(\"rounded total number of HN nodes in each level : \",Nt)\r\n\tZ = math.pow(math.ceil(math.sqrt(N)/math.sqrt(Fs)),2)\r\n\tprint(\"number of zones : \",Z)#number of zones\r\n\tNz = []\r\n\tfor z in range(1,int(Z)+1):\r\n\t if z==1:\r\n\t Nz.append(math.ceil(N/Z))\r\n\t else:\r\n\t k = 0\r\n\t for i in Nz:\r\n\t k = k + i\r\n\t \r\n\t Nz.append(math.ceil((N - k)/(Z - z + 1)))\r\n\t \r\n\tprint(\"number of nodes in each zones : \",Nz)\r\n\r\n\trsmax = []#node sensing\r\n\trcmax = []#communication ranges\r\n\r\n\tfor i in range(0,int(Z)):\r\n\t rsmax.append( (L/math.sqrt(int(Z) )) * math.sqrt(2) )\r\n\t rcmax.append( (L/math.sqrt(int(Z)))*math.sqrt(2)*2)\r\n\r\n\t# print(\"sensing range\",rsmax,\" communication range \",rcmax)\r\n\tR = int(math.pow( math.sqrt(int(Z))+1 , 2 ))\r\n\tprint(\"number of relay nodes : \",R)\r\n\tD = int(L/math.sqrt(Z))\r\n\tprint(\"length of working area : \",D)\r\n\r\n\tnodesinlevel = []\r\n\tfor i in range(len(Nt)):\r\n\t nodesinlevel.append(int(Nt[i]/int(Z)))\r\n\r\n\tzones_objects = [] # all nodes objects\r\n\tobject_of_zones = [] # each element in this represent a zone object\r\n\tfor i in range(0,int(math.sqrt(int(Z)))):\r\n\t\tfor j in range(0,int(math.sqrt(int(Z)))):\r\n\t\t\tnp = []\r\n\t\t\tfor p in getpoints(j*20,i*20,(j+1)*20,(i+1)*20,j*20+1+200*i):\r\n\t\t\t\tnp.append(nd.Node(p[0][0],p[0][1],p[1],p[2],p[3],i*10+j+1))\r\n\t\t\tzones_objects.extend(np)\r\n\t\t\tobject_of_zones.append(zone.Zone(i*10+j+1,np))\r\n\t\r\n\r\n\t# for i in zones_objects:\r\n\t# \tprint(i.getlocation(),i.get_node_id(),i.get_e_initial())\r\n\t#for each corner place 
a EH node\r\n\tEH = []\r\n\tcount = 1\r\n\tfor i in range(0,L+1,20):\r\n\t\tfor j in range(0,L+1,20):\r\n\t\t if count <= R: \r\n\t\t \tEH.append(relay_nd.EH_relay(i,j,count))\r\n\t\t \tcount+=1\r\n\r\n\tEHx = []\r\n\tEHy = []\r\n\tfor j in EH:\r\n\t EHx.append(j.getlocation()[0])\r\n\t EHy.append(j.getlocation()[1])\r\n\t# print(EHx,EHy)\r\n\r\n\tx = [[] for i in range(len(Et))]\r\n\ty = [[] for i in range(len(Et))]\r\n\r\n\tfor i in zones_objects:\r\n\t for k in range(len(Et)):\r\n\t if Et[k] == i.get_e_initial():\r\n\t x[k].append(i.getlocation()[0])\r\n\t y[k].append(i.getlocation()[1])\r\n\r\n\t# we will definately start plotting form (0,0), so\r\n\t# we can place base stations from 20 to 50 distance from working area uniformly so as to cover all the nodes\r\n\tdist_lbs = int(input(\"Enter the distance of LBS from working area : \"))\r\n\tperimneter_lbs = (L+2*dist_lbs)*4\r\n\tdistance_btw_lbs = int(perimneter_lbs/B)\r\n\r\n\tleft = [(-dist_lbs, i) for i in range(-dist_lbs, L+2*dist_lbs+1,distance_btw_lbs )]\r\n\ttop = [(i,L+dist_lbs ) for i in range(-dist_lbs+distance_btw_lbs,L+2*dist_lbs+1, distance_btw_lbs)]\r\n\tright = [(L+dist_lbs, i) for i in range(L+dist_lbs-distance_btw_lbs, -dist_lbs-1, -distance_btw_lbs)]\r\n\tbottom = [(i, -dist_lbs) for i in range(L+dist_lbs-distance_btw_lbs, -dist_lbs, -distance_btw_lbs)]\r\n\tidx = left+top+right+bottom\r\n\t#local base station objects\r\n\tlocal_bs = []\r\n\tcount = 1\r\n\tfor i in idx:\r\n\t\tlocal_bs.append(L_B_S.LBS(i[0],i[1],count))\r\n\t\tcount+=1\r\n\r\n\t# network formation\r\n\tnetwork = nw.Network(object_of_zones,EH,local_bs)\r\n\r\n\t#print(x,y)\r\n\tfig = plt.figure()#defining size\r\n\tfig.set_size_inches(100,100)\r\n\tax1 = fig.add_subplot(1,1,1)#adding a plot to figure\r\n\r\n\tspacing = D # This can be your user specified spacing. 
\r\n\tminorLocator = MultipleLocator(spacing)\r\n\t# jet = plt.get_cmap('jet')\r\n\t# colors = iter(jet(np.linspace(0,1,10)))\r\n\tcolors = ['g','r','c','m','y','k','b']\r\n\tfor i in range(0,len(x),1):\r\n\t ax1.plot(x[i],y[i], 'o',color = colors[i])\r\n\r\n\tHN_ids = []\r\n\tfor i in zones_objects:\r\n\t\tHN_ids.append(i.get_node_id())\r\n\t# print(HN_ids)\r\n\r\n\tfor i, txt in enumerate(HN_ids):\r\n\t\tax1.annotate(txt, (zones_objects[i].getlocation()[0],zones_objects[i].getlocation()[1]))\r\n\r\n\tax1.plot(EHx,EHy, 'D',color = colors[len(colors)-1],markersize=12)\r\n\r\n\tEH_ids = []\r\n\tfor i in EH:\r\n\t\tEH_ids.append(i.get_node_id())\r\n\t# print(EH_ids)\r\n\r\n\tfor i, txt in enumerate(EH_ids):\r\n\t\tax1.annotate(txt, (EH[i].getlocation()[0],EH[i].getlocation()[1]))\r\n\r\n\tx = [-50, -50, -20, -20]\r\n\ty = [-50, L+50, L+50, -50]\r\n\tax1.fill(x,y,'y')\r\n\tx = [ -20,L+50,L+50 ,-20]\r\n\ty = [ L+50,L+50,L+20 ,L+20]\r\n\tax1.fill(x,y,'y')\r\n\tx = [ L+50,L+50,L+20 ,L+20]\r\n\ty = [ L+50,-50 ,-50,L+50]\r\n\tax1.fill(x,y,'y')\r\n\tx = [ -20,-20,L+20 ,L+20]\r\n\ty = [ -50,-20 ,-20,-50]\r\n\tax1.fill(x,y,'y')\r\n\tfor i in local_bs:\r\n\t\t# print(i.get_node_id())\r\n\t\tax1.plot(i.getlocation()[0],i.getlocation()[1], 'D',color = colors[1],markersize=20)\r\n\r\n\tLB_ids = []\r\n\tfor i in local_bs:\r\n\t\tLB_ids.append(i.get_node_id())\r\n\t# print(LB_ids)\r\n\r\n\tfor i, txt in enumerate(LB_ids):\r\n\t\tax1.annotate(txt, (local_bs[i].getlocation()[0],local_bs[i].getlocation()[1]))\r\n\r\n\r\n\t# Set minor tick locations.\r\n\tax1.yaxis.set_minor_locator(minorLocator)\r\n\tax1.xaxis.set_minor_locator(minorLocator)\r\n\r\n\tplt.axis([-60, L+60, -60, L+60])#defining axix x and y\r\n\t# Set grid to use minor tick locations. \r\n\r\n\tax1.grid(which = 'minor')#only major works fine\r\n\r\n\tplt.show()\r\n\t\r\nelse:\r\n print(\"error in input values\")"
] | [
[
"matplotlib.pyplot.figure",
"matplotlib.ticker.MultipleLocator",
"matplotlib.pyplot.show",
"matplotlib.pyplot.axis"
]
] |
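The deployment script in the row above encodes two level-wise formulas in its comments: `Eit = Einti * (1 + (i-1) * alpha)` and `Nit = N * (beta - gamma1) * ... * (beta - gammai)`. A minimal sketch recomputing both is given below; every numeric value (`N`, `Einti`, `alpha`, `beta`, `gammavalues`) is assumed for illustration, whereas the real script first solves for `beta` symbolically with sympy and rounds the resulting node counts afterwards.

```python
# Minimal sketch of the per-level formulas used in the script above.
# All numeric values below are assumed; the real script solves for beta
# with sympy.solve and then rounds Nt to whole node counts.
N = 100                         # total number of heterogeneous nodes (assumed)
Einti = 0.5                     # initial energy of a level-1 node (assumed)
alpha = 1.0                     # energy multiplier between levels (assumed)
beta = 1.5                      # would normally come from sympy.solve (assumed)
gammavalues = [0.5, 0.8, 0.9]   # assumed heterogeneity fractions
n = len(gammavalues)

Et, Nt = [], []
for i in range(1, n + 1):
    # E_i = Einti * (1 + (i - 1) * alpha)
    Et.append(Einti * (1 + (i - 1) * alpha))
    # N_i = N * (beta - gamma_1) * ... * (beta - gamma_i)
    nt = N
    for j in range(i):
        nt *= beta - gammavalues[j]
    Nt.append(nt)

print("energies per level:", Et)
print("node counts per level (before rounding):", Nt)
```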
jthhk/Binance-volatility-trading-bot | [
"d815716d2161c5d07cea0506049d73450bd5ef5b"
] | [
"strategies/vyacheslav_signalbuy_VolScan.py"
] | [
"# VolScan is a Binance Volatility Bot(BVT Bot)\n# compatible module that generates crypto buying signals based upon negative price change & volatility.\n# It does this in two different ways,\n# the main one being by calculating the aggregate price change within a user defined period,\n# the second way being by use of the Coefficient Of Variation(CV),\n# which is a statistical measure of the dispersion of data points in a data series around the mean,\n# and is used in certain markets to ascertain the volatility of products:\n# https://www.investopedia.com/terms/c/coefficientofvariation.asp.\n#\n# VolScan provides the option to use either signals generating method individually,\n# or combined within user defined settings.\n# Volscan will provide all the buying signals required for your bot,\n# so other external signal generating modules should be disabled.\n#\n# The way that VolScan works is that it collects all the cryto coin/token data for all USDT coin\n# pairings that appear on Binance into user defined \"scanning periods\" which are varying numbers of minutes in length,\n# each period then being split into the number of individual scans that make up the period.\n# Example. you decide you want your scanning period to be 3 minutes in duration,\n# and within that period you want all coins scanned every 30 seconds,\n# so in total VolScan will carry out 2 scans per minute for 3 minutes in total = 6 price check scans,\n# it then checks the variables between the current price & the previous price all the way back through the total number\n# of scans, coming up with an aggregate change in price % for the whole scanning period.\n# It then removes all coins that have positive changes in price %,\n# and creates a list of all the coins that had a negative change in price, the list is in sequential order,\n# the highest negative price change at the top, the lowest negative price change at the bottom.\n#\n# The Coefficient of Variation method works along similar lines,\n# but concentrates purely on standard deviation in price ranges,\n# the mean or average price which then is calculated into the final CV score for the scanning period....\n# the higher the CV score, the higher the volatility of the coins/tokens.\n# The CV rated coins are then created into a tickers list in exactly\n# the same way as the previously described negative change in price coins.\n#\n# Whichever way you choose to have your tickers lists created,\n# they will then be dynamically updated at the end of every scanning period with a completely new lists\n# of the latest high volatilty coin results.\n#\n# The VolScan module is easy to format with most processes done automatically for you,\n# below are the user defined settings you will need to create to get started using the module:\n\n\nimport os\nimport numpy as np\nfrom time import sleep\nfrom datetime import datetime\n\nfrom binance.client import Client\n\nfrom helpers.parameters import parse_args, load_config\n# Load creds modules\nfrom helpers.handle_creds import (\n load_correct_creds\n)\n\nargs = parse_args()\nDEFAULT_CONFIG_FILE = 'config.yml'\nDEFAULT_CREDS_FILE = 'creds.yml'\n\nconfig_file = args.config if args.config else DEFAULT_CONFIG_FILE\ncreds_file = args.creds if args.creds else DEFAULT_CREDS_FILE\nparsed_creds = load_config(creds_file)\nparsed_config = load_config(config_file)\n\n# Load trading vars\nPAIR_WITH = parsed_config['trading_options']['PAIR_WITH']\nEX_PAIRS = parsed_config['trading_options']['FIATS']\n\n# Load creds for correct environment\naccess_key, 
secret_key = load_correct_creds(parsed_creds)\nclient = Client(access_key, secret_key)\n\n\n# SCANNING_PERIOD - by default, we check the price difference for each coin on Binance for the last 3 minutes,\n# you can change this value for different results.\n# This also determines how often each iteration of the code is executed.\nSCANNING_PERIOD = 3 # minutes\n\n# TIME_SLEEP - how many seconds do you want between each price scan.\n# By default, every 12 seconds the price change will be recorded during SCANNING_PERIOD (3min)\n# After which the calculation is performed. The price change is also calculated every 12 seconds.\nTIME_SLEEP = 30 # seconds\n\n# If True, an updated list of coins will be generated from the site - http://edgesforledges.com/watchlists/binance.\n# If False, then the list you create in TICKERS_LIST = 'tickers.txt' will be used.\nCREATE_TICKER_LIST = False\n\n# NUMBER_COINS_IN_LIST - Limit the number of coins that can be added to the dynamic list of volatile coins. For example,\n# if NUMBER_COINS_IN_LIST = 20,\n# then each period only 20 sorted coins will be added to the list (Above the lowest values with a minus sign).\nNUMBER_COINS_IN_LIST = 20\n\n# CV_INDEX - Coefficient of Variation. Only those coins with a COV greater than the specified value will be displayed.\nCoV_INDEX = 0.0\n\n# CREATE_LIST_BY_COV_AND_PRICE_CHANGE is a filter for creating dynamic lists of the most volatile coins.\n# If COV_FILTER = True, lists of volatile coins will take into account the CoV parameter.\n# For example,\n# if CoV_INDEX = 0.5, then only coins with CoV above 0.5 and price change less than 0 will be added to list.\n# If False will be used only Price Change.\nCREATE_LIST_BY_COV_AND_PRICE_CHANGE = False\n\n# CREATE_LIST_BY_ONLY_COV - If True - A dynamic list of volatile coins will be created only based on the CoV parameter.\n# For example: If CoV_INDEX = 0.3 then the list will include coins with CoV_INDEX greater than 0.3 and the list will be\n# sorted\n# (At the top there will be coins with the highest CoV)\n# If False The list will be created only based on the Price Change.\nCREATE_LIST_BY_ONLY_COV = False\n\n# When creating a ticker list from the source site:\n# http://edgesforledges.com you can use the parameter (all or innovation-zone).\n# ticker_type = 'innovation-zone'\nticker_type = 'all'\nif CREATE_TICKER_LIST:\n TICKERS_LIST = 'tickers_all_USDT.txt'\nelse:\n TICKERS_LIST = 'tickers_all.txt'\n\n# BTC_FILTER - This feature is still in development.\n# Objective: Check the change in the price of bitcoin over the scanning period and,\n# based upon the results, either halt the bot from buying, or allow it to continue.\n# make further purchases of coins.\n# For example, if Bitcoin price change = 1.0 and coin price change is negative (-0.8), we give a buy signal....\n# BTC_FILTER = False\n\n\nSIGNAL_NAME = 'vyacheslav_signalbuy_VolScan'\nSIGNAL_FILE_BUY = 'signals/' + SIGNAL_NAME + '.buy'\n\nclass txcolors:\n BUY = '\\033[92m'\n WARNING = '\\033[93m'\n SELL_LOSS = '\\033[91m'\n SELL_PROFIT = '\\033[32m'\n DIM = '\\033[2m\\033[35m'\n DEFAULT = '\\033[39m'\n YELLOW = '\\033[33m'\n TURQUOISE = '\\033[36m'\n UNDERLINE = '\\033[4m'\n END = '\\033[0m'\n ITALICS = '\\033[3m'\n\n\n# get_price() function, takes 1 parameter (Binance client).\n# And it returns a dictionary of coins,\n# with the given keys ('symbol'(str), 'price'(float), 'time', 'price_list'(list), 'change_price'(float), 'cov'(float)).\ndef get_price(client_api):\n initial_price = {}\n tickers = [line.strip() for line in 
open(TICKERS_LIST)]\n prices = client_api.get_all_tickers()\n\n for coin in prices:\n for item in tickers:\n if item + PAIR_WITH == coin['symbol'] and all(item + PAIR_WITH not in coin['symbol'] for item in EX_PAIRS):\n initial_price[coin['symbol']] = {'symbol': coin['symbol'],\n 'price': coin['price'],\n 'time': datetime.now(),\n 'price_list': [],\n 'change_price': 0.0,\n 'cov': 0.0}\n return initial_price\n\n\n# Function с_о_v(), takes 1 parameter (List of coin prices for the period 'price_list': []).\n# And it returns the Coefficient of Variation (float) of the coin.\ndef c_o_v(price_list):\n if price_list:\n a = np.array(price_list, float)\n cov = round((a.std() / a.mean()) * 100, 2)\n return cov\n return 0.0\n\n\n# Percentage_price_change() function, takes 1 parameter (List of coin prices for the period 'price_list': []).\n# And it returns the percentage of price change.\ndef percentage_price_change(price_list):\n if price_list:\n return round(sum([100 * (b - a) / a for a, b in zip(price_list[::1], price_list[1::1])]), 4)\n\n\n# sort_list_coins() function, takes 2 parameters (List of coins and sorting type).\n# Based on the sorting type, sorts the coins in the list by their 'change_price' or 'cov'.\n# And it returns a sorted list.\ndef sort_list_coins(list_coins, sort_type='change_price'):\n if sort_type == 'cov':\n sort_list = sorted(list_coins, key=lambda x: x[f'{sort_type}'], reverse=True)\n else:\n sort_list = sorted(list_coins, key=lambda x: x[f'{sort_type}'])\n return sort_list\n\n\n# do_work () function, takes 1 parameter (Binance client). This is the main function of the module.\n# Which, in an endless cycle, searches for coins with a negative indicator of price change,\n# sorts them and gives buy signals.\ndef do_work():\n # Initializing coins for data storage.\n init_price = get_price(client)\n list_volatility = []\n count = 0\n\n while True:\n print(f'{txcolors.YELLOW}{SIGNAL_NAME} launched with a period of {SCANNING_PERIOD} minutes.')\n print(f\"{txcolors.YELLOW}Number of coins to scan - {len(init_price)}\")\n # We reset the data every period.\n if count == (SCANNING_PERIOD * 60) / TIME_SLEEP:\n init_price = get_price(client)\n list_volatility = []\n count = 0\n\n # Start a cycle to collect prices for each coin within a period.\n while count < (SCANNING_PERIOD * 60) / TIME_SLEEP:\n count += 1\n print(f'{txcolors.YELLOW}{SIGNAL_NAME} Round {count} complete. 
Next scan in {TIME_SLEEP} seconds.')\n try:\n # Requesting the latest coin prices\n last_price = get_price(client)\n\n for coin in last_price:\n # if len(init_price[coin]['price_list']) == (SCANNING_PERIOD * 60) / TIME_SLEEP:\n # del init_price[coin]['price_list'][0]\n init_price[coin]['price_list'].append(float(last_price[coin]['price']))\n\n if len(init_price[coin]['price_list']) == (SCANNING_PERIOD * 60) / TIME_SLEEP:\n coin_price_list = init_price[coin]['price_list']\n percent_change_price = percentage_price_change(coin_price_list)\n cov = c_o_v(coin_price_list)\n\n if CREATE_LIST_BY_COV_AND_PRICE_CHANGE:\n condition = percent_change_price < 0 and cov >= CoV_INDEX\n\n elif CREATE_LIST_BY_ONLY_COV:\n condition = cov >= CoV_INDEX\n\n else:\n condition = percent_change_price < 0\n\n if condition:\n if init_price[coin] not in list_volatility:\n init_price[coin]['time'] = datetime.now()\n init_price[coin]['change_price'] = percent_change_price\n init_price[coin]['cov'] = cov\n\n list_volatility.append(init_price[coin])\n\n if not list_volatility:\n print(f'{txcolors.YELLOW}Stand by for next update ...')\n else:\n if os.path.exists(SIGNAL_FILE_BUY):\n os.remove(SIGNAL_FILE_BUY)\n\n if CREATE_LIST_BY_ONLY_COV:\n sort_t = 'cov'\n else:\n sort_t = 'change_price'\n sort_list_vol_coin = sort_list_coins(list_volatility, sort_type=sort_t)\n\n for item in sort_list_vol_coin[:NUMBER_COINS_IN_LIST]:\n print(f'{txcolors.YELLOW}{SIGNAL_NAME}: detected a signal on{txcolors.END} '\n f'{txcolors.YELLOW}{item[\"symbol\"]}{txcolors.END}'\n )\n with open(SIGNAL_FILE_BUY, 'a+') as f:\n f.write(item[\"symbol\"] + '\\n')\n\n sleep(TIME_SLEEP)\n except Exception as e:\n print(f'{SIGNAL_NAME}: Exception do_work() 1: {e}')\n continue\n except KeyboardInterrupt as ki:\n continue"
] | [
[
"numpy.array"
]
] |
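Two statistics drive the buy signals in `vyacheslav_signalbuy_VolScan.py`: the Coefficient of Variation (`100 * std / mean`) and the aggregate percentage price change over consecutive scans. A minimal sketch on an assumed toy price series:

```python
import numpy as np

# Toy price series for one coin over a scanning period (values assumed).
price_list = [100.0, 99.5, 99.8, 98.9, 99.1, 98.5]

# Coefficient of Variation, as in c_o_v(): 100 * std / mean.
prices = np.array(price_list, float)
cov = round((prices.std() / prices.mean()) * 100, 2)

# Aggregate percentage change, as in percentage_price_change():
# the sum of consecutive scan-to-scan percentage moves.
change = round(sum(100 * (p1 - p0) / p0
                   for p0, p1 in zip(price_list[:-1], price_list[1:])), 4)

print(f"CoV = {cov}, aggregate price change = {change}%")
# A negative aggregate change (optionally combined with CoV >= CoV_INDEX)
# is what lands a coin on the buy-signal list.
```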
MSLars/allennlp | [
"2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475"
] | [
"tests/modules/seq2seq_encoders/gated_cnn_encoder_test.py"
] | [
"import torch\n\nfrom allennlp.common.testing import AllenNlpTestCase\nfrom allennlp.modules.seq2seq_encoders.gated_cnn_encoder import GatedCnnEncoder\n\n\nclass TestGatedCnnEncoder(AllenNlpTestCase):\n def test_gated_cnn_encoder(self):\n cnn_encoder = GatedCnnEncoder(\n input_dim=32,\n layers=[[[4, 32]], [[1, 16], [5, 16], [1, 32]], [[1, 64], [5, 64], [1, 32]]],\n )\n\n token_embeddings = torch.rand(5, 10, 32)\n mask = torch.ones(5, 10).bool()\n mask[0, 7:] = False\n mask[1, 5:] = False\n\n output = cnn_encoder(token_embeddings, mask)\n assert list(output.size()) == [5, 10, 64]\n\n def test_gated_cnn_encoder_dilations(self):\n cnn_encoder = GatedCnnEncoder(\n input_dim=32, layers=[[[2, 32, 1]], [[2, 32, 2]], [[2, 32, 4]], [[2, 32, 8]]]\n )\n\n token_embeddings = torch.rand(5, 10, 32)\n mask = torch.ones(5, 10).bool()\n mask[0, 7:] = False\n mask[1, 5:] = False\n\n output = cnn_encoder(token_embeddings, mask)\n assert list(output.size()) == [5, 10, 64]\n\n def test_gated_cnn_encoder_layers(self):\n cnn_encoder = GatedCnnEncoder(\n input_dim=32,\n layers=[[[4, 32]], [[1, 16], [5, 16], [1, 32]], [[1, 64], [5, 64], [1, 32]]],\n return_all_layers=True,\n )\n\n token_embeddings = torch.rand(5, 10, 32)\n mask = torch.ones(5, 10).bool()\n mask[0, 7:] = False\n mask[1, 5:] = False\n\n output = cnn_encoder(token_embeddings, mask)\n assert len(output) == 3\n concat_layers = torch.cat([layer.unsqueeze(1) for layer in output], dim=1)\n assert list(concat_layers.size()) == [5, 3, 10, 64]\n"
] | [
[
"torch.ones",
"torch.rand"
]
] |
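The encoder tests above build the padding mask by hand (`mask[0, 7:] = False`). A generic way to derive the same boolean mask from sequence lengths is sketched below; this helper is only an illustration, not an AllenNLP API.

```python
import torch

def lengths_to_mask(lengths, max_len):
    # Generic helper (not an AllenNLP API): True where a position is
    # inside the sequence, False where it is padding.
    positions = torch.arange(max_len).unsqueeze(0)          # (1, max_len)
    return positions < torch.tensor(lengths).unsqueeze(1)   # (batch, max_len)

# Reproduces the masks built by hand in the tests above:
# a batch of 5 sequences, max length 10, first two are shorter.
mask = lengths_to_mask([7, 5, 10, 10, 10], max_len=10)
assert mask.shape == (5, 10)
assert not mask[0, 7:].any() and not mask[1, 5:].any()
```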
Guangrui-best/ML_from_scratch | [
"afbdfb7046544bd5639a2b73fa4fe711b4c86593"
] | [
"mlfromscratch/lda.py"
] | [
"import numpy as np\n\nclass LDA:\n\n def __init__(self, n_components):\n self.n_components = n_components\n self.linear_discriminants = None\n\n def fit(self, X, y):\n n_features = X.shape[1]\n class_labels = np.unique(y)\n\n # Within class scatter matrix:\n # SW = sum((X_c - mean_X_c)^2 )\n\n # Between class scatter:\n # SB = sum( n_c * (mean_X_c - mean_overall)^2 )\n\n mean_overall = np.mean(X, axis=0)\n SW = np.zeros((n_features, n_features))\n SB = np.zeros((n_features, n_features))\n for c in class_labels:\n X_c = X[y == c]\n mean_c = np.mean(X_c, axis=0)\n # (4, n_c) * (n_c, 4) = (4,4) -> transpose\n SW += (X_c - mean_c).T.dot((X_c - mean_c))\n\n # (4, 1) * (1, 4) = (4,4) -> reshape\n n_c = X_c.shape[0]\n mean_diff = (mean_c - mean_overall).reshape(n_features, 1)\n SB += n_c * (mean_diff).dot(mean_diff.T)\n\n # Determine SW^-1 * SB\n A = np.linalg.inv(SW).dot(SB)\n # Get eigenvalues and eigenvectors of SW^-1 * SB\n eigenvalues, eigenvectors = np.linalg.eig(A)\n # -> eigenvector v = [:,i] column vector, transpose for easier calculations\n # sort eigenvalues high to low\n eigenvectors = eigenvectors.T\n idxs = np.argsort(abs(eigenvalues))[::-1]\n eigenvalues = eigenvalues[idxs]\n eigenvectors = eigenvectors[idxs]\n # store first n eigenvectors\n self.linear_discriminants = eigenvectors[0:self.n_components]\n\n def transform(self, X):\n # project data\n return np.dot(X, self.linear_discriminants.T)\n"
] | [
[
"numpy.zeros",
"numpy.linalg.inv",
"numpy.linalg.eig",
"numpy.dot",
"numpy.unique",
"numpy.mean"
]
] |
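The scatter-matrix construction in `lda.py` can be checked end to end on a tiny two-class dataset. The sketch below mirrors the `fit` method with assumed toy data and recovers the first discriminant direction directly:

```python
import numpy as np

# Tiny two-class toy dataset (values assumed) to illustrate the
# within-class (SW) and between-class (SB) scatter matrices above.
X = np.array([[1.0, 2.0], [1.2, 1.9], [0.8, 2.1],
              [4.0, 5.0], [4.2, 5.1], [3.9, 4.8]])
y = np.array([0, 0, 0, 1, 1, 1])

n_features = X.shape[1]
mean_overall = X.mean(axis=0)
SW = np.zeros((n_features, n_features))
SB = np.zeros((n_features, n_features))
for c in np.unique(y):
    X_c = X[y == c]
    mean_c = X_c.mean(axis=0)
    SW += (X_c - mean_c).T @ (X_c - mean_c)
    diff = (mean_c - mean_overall).reshape(-1, 1)
    SB += X_c.shape[0] * diff @ diff.T

# Leading eigenvector of SW^-1 SB gives the first discriminant direction.
eigvals, eigvecs = np.linalg.eig(np.linalg.inv(SW) @ SB)
w = np.real(eigvecs[:, np.argmax(np.abs(eigvals))])
print("first linear discriminant:", w)
# Equivalent (up to sign) to LDA(n_components=1).transform(X) above.
X_projected = X @ w
```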
elcronos/adversarial_genattack | [
"b86b07a938a6aef54a41891fc2de3feeaa8e03aa"
] | [
"main.py"
] | [
"\"\"\"\nAuthor: Moustafa Alzantot ([email protected])\n\n\"\"\"\nimport time\nimport os\nimport sys\nimport random\nimport numpy as np\n\nimport tensorflow as tf \nfrom setup_inception import ImageNet, InceptionModel\n\nimport utils\nfrom genattack_tf2 import GenAttack2\n\nflags = tf.app.flags\nflags.DEFINE_string('input_dir', '', 'Path for input images.')\nflags.DEFINE_string('output_dir', 'output', 'Path to save results.')\nflags.DEFINE_integer('test_size', 1, 'Number of test images.')\nflags.DEFINE_bool('verbose', True, 'Print logs.')\nflags.DEFINE_integer('test_example', default=None, help='Test only one image')\n\nflags.DEFINE_float('mutation_rate', default=0.005, help='Mutation rate')\nflags.DEFINE_float('eps', default=0.10, help='maximum L_inf distance threshold')\nflags.DEFINE_float('alpha', default=0.20, help='Step size')\nflags.DEFINE_integer('pop_size', default=6, help='Population size')\nflags.DEFINE_integer('max_steps', default=10000, help='Maximum number of iterations')\nflags.DEFINE_integer('resize_dim', None, 'Reduced dimension for dimensionality reduction')\nflags.DEFINE_bool('adaptive', True, 'Turns on the dynamic scaling of mutation prameters')\nflags.DEFINE_string('model', 'inception', 'model name')\nflags.DEFINE_integer('target', None, 'target class. if not provided will be random')\nFLAGS = flags.FLAGS\n\nif __name__ == '__main__':\n\n # random.seed(FLAGS.seed)\n # tf.set_random_seed(FLAGS.seed)\n # np.random.seed(FLAGS.seed)\n\n dataset = ImageNet(FLAGS.input_dir)\n inputs, targets, reals, paths = utils.generate_data(dataset, FLAGS.test_size)\n \n with tf.Session() as sess:\n model = InceptionModel(sess, use_log=True)\n test_in = tf.placeholder(tf.float32, (1,299,299,3), 'x')\n test_pred = tf.argmax(model.predict(test_in), axis=1)\n \n \n attack = GenAttack2(model=model,\n pop_size=FLAGS.pop_size,\n mutation_rate = FLAGS.mutation_rate,\n eps=FLAGS.eps,\n max_steps=FLAGS.max_steps,\n alpha=FLAGS.alpha,\n resize_dim=FLAGS.resize_dim,\n adaptive=FLAGS.adaptive)\n num_valid_images = len(inputs)\n total_count = 0 # Total number of images attempted\n success_count = 0\n logger = utils.ResultLogger(FLAGS.output_dir, FLAGS.flag_values_dict())\n for ii in range(num_valid_images):\n if (FLAGS.test_example and FLAGS.test_example != ii):\n continue\n input_img = inputs[ii]\n input_img_path = paths[ii]\n if FLAGS.target:\n target_label = FLAGS.target + 1\n else:\n target_label = np.argmax(targets[ii])\n real_label = reals[ii]\n orig_pred = sess.run(test_pred, feed_dict={test_in: [input_img]})[0]\n if FLAGS.verbose:\n print('Real = {}, Predicted = {}, Target = {}'.format(\n real_label, orig_pred, target_label))\n if orig_pred != real_label:\n if FLAGS.verbose:\n print('\\t Skipping incorrectly classified image.')\n continue\n total_count += 1\n start_time = time.time()\n result = attack.attack(sess, input_img, target_label)\n end_time = time.time()\n attack_time = (end_time-start_time)\n if result is not None:\n adv_img, query_count, margin_log = result\n final_pred = sess.run(test_pred, feed_dict={test_in: [adv_img]})[0]\n if (final_pred == target_label):\n success_count += 1\n print('--- SUCCEEEED ----')\n logger.add_result(ii, input_img, adv_img, real_label,\n target_label, query_count, attack_time, margin_log)\n else:\n print('Attack failed')\n logger.close(num_attempts=total_count)\n print('Number of success = {} / {}.'.format(success_count, total_count))\n"
] | [
[
"tensorflow.placeholder",
"numpy.argmax",
"tensorflow.Session"
]
] |
sanowar-raihan/nerf-meta | [
"dbb97431b613acb3dfdc7075344c6e1fd1b6cf51"
] | [
"shapenet_train.py"
] | [
"import argparse\nimport json\nimport copy\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nfrom datasets.shapenet import build_shapenet\nfrom models.nerf import build_nerf\nfrom models.rendering import get_rays_shapenet, sample_points, volume_render\n\n\ndef inner_loop(model, optim, imgs, poses, hwf, bound, num_samples, raybatch_size, inner_steps):\n \"\"\"\n train the inner model for a specified number of iterations\n \"\"\"\n pixels = imgs.reshape(-1, 3)\n\n rays_o, rays_d = get_rays_shapenet(hwf, poses)\n rays_o, rays_d = rays_o.reshape(-1, 3), rays_d.reshape(-1, 3)\n\n num_rays = rays_d.shape[0]\n for step in range(inner_steps):\n indices = torch.randint(num_rays, size=[raybatch_size])\n raybatch_o, raybatch_d = rays_o[indices], rays_d[indices]\n pixelbatch = pixels[indices] \n t_vals, xyz = sample_points(raybatch_o, raybatch_d, bound[0], bound[1],\n num_samples, perturb=True)\n \n optim.zero_grad()\n rgbs, sigmas = model(xyz)\n colors = volume_render(rgbs, sigmas, t_vals, white_bkgd=True)\n loss = F.mse_loss(colors, pixelbatch)\n loss.backward()\n optim.step()\n\n\ndef train_meta(args, meta_model, meta_optim, data_loader, device):\n \"\"\"\n train the meta_model for one epoch using reptile meta learning\n https://arxiv.org/abs/1803.02999\n \"\"\"\n for imgs, poses, hwf, bound in data_loader:\n imgs, poses, hwf, bound = imgs.to(device), poses.to(device), hwf.to(device), bound.to(device)\n imgs, poses, hwf, bound = imgs.squeeze(), poses.squeeze(), hwf.squeeze(), bound.squeeze()\n\n meta_optim.zero_grad()\n\n inner_model = copy.deepcopy(meta_model)\n inner_optim = torch.optim.SGD(inner_model.parameters(), args.inner_lr)\n\n inner_loop(inner_model, inner_optim, imgs, poses,\n hwf, bound, args.num_samples,\n args.train_batchsize, args.inner_steps)\n \n with torch.no_grad():\n for meta_param, inner_param in zip(meta_model.parameters(), inner_model.parameters()):\n meta_param.grad = meta_param - inner_param\n \n meta_optim.step()\n\n\ndef report_result(model, imgs, poses, hwf, bound, num_samples, raybatch_size):\n \"\"\"\n report view-synthesis result on heldout views\n \"\"\"\n ray_origins, ray_directions = get_rays_shapenet(hwf, poses)\n\n view_psnrs = []\n for img, rays_o, rays_d in zip(imgs, ray_origins, ray_directions):\n rays_o, rays_d = rays_o.reshape(-1, 3), rays_d.reshape(-1, 3)\n t_vals, xyz = sample_points(rays_o, rays_d, bound[0], bound[1],\n num_samples, perturb=False)\n \n synth = []\n num_rays = rays_d.shape[0]\n with torch.no_grad():\n for i in range(0, num_rays, raybatch_size):\n rgbs_batch, sigmas_batch = model(xyz[i:i+raybatch_size])\n color_batch = volume_render(rgbs_batch, sigmas_batch, \n t_vals[i:i+raybatch_size],\n white_bkgd=True)\n synth.append(color_batch)\n synth = torch.cat(synth, dim=0).reshape_as(img)\n error = F.mse_loss(img, synth)\n psnr = -10*torch.log10(error)\n view_psnrs.append(psnr)\n \n scene_psnr = torch.stack(view_psnrs).mean()\n return scene_psnr\n\n\ndef val_meta(args, model, val_loader, device):\n \"\"\"\n validate the meta trained model for few-shot view synthesis\n \"\"\"\n meta_trained_state = model.state_dict()\n val_model = copy.deepcopy(model)\n \n val_psnrs = []\n for imgs, poses, hwf, bound in val_loader:\n imgs, poses, hwf, bound = imgs.to(device), poses.to(device), hwf.to(device), bound.to(device)\n imgs, poses, hwf, bound = imgs.squeeze(), poses.squeeze(), hwf.squeeze(), bound.squeeze()\n\n tto_imgs, test_imgs = torch.split(imgs, [args.tto_views, args.test_views], dim=0)\n tto_poses, test_poses = 
torch.split(poses, [args.tto_views, args.test_views], dim=0)\n\n val_model.load_state_dict(meta_trained_state)\n val_optim = torch.optim.SGD(val_model.parameters(), args.tto_lr)\n\n inner_loop(val_model, val_optim, tto_imgs, tto_poses, hwf,\n bound, args.num_samples, args.tto_batchsize, args.tto_steps)\n \n scene_psnr = report_result(val_model, test_imgs, test_poses, hwf, bound, \n args.num_samples, args.test_batchsize)\n val_psnrs.append(scene_psnr)\n\n val_psnr = torch.stack(val_psnrs).mean()\n return val_psnr\n\n\ndef main():\n parser = argparse.ArgumentParser(description='shapenet few-shot view synthesis')\n parser.add_argument('--config', type=str, required=True,\n help='config file for the shape class (cars, chairs or lamps)')\n args = parser.parse_args()\n\n with open(args.config) as config:\n info = json.load(config)\n for key, value in info.items():\n args.__dict__[key] = value\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n train_set = build_shapenet(image_set=\"train\", dataset_root=args.dataset_root,\n splits_path=args.splits_path, num_views=args.train_views)\n train_loader = DataLoader(train_set, batch_size=1, shuffle=True)\n\n val_set = build_shapenet(image_set=\"val\", dataset_root=args.dataset_root,\n splits_path=args.splits_path,\n num_views=args.tto_views+args.test_views)\n val_loader = DataLoader(val_set, batch_size=1, shuffle=False)\n\n meta_model = build_nerf(args)\n meta_model.to(device)\n\n meta_optim = torch.optim.Adam(meta_model.parameters(), lr=args.meta_lr)\n\n for epoch in range(1, args.meta_epochs+1):\n train_meta(args, meta_model, meta_optim, train_loader, device)\n val_psnr = val_meta(args, meta_model, val_loader, device)\n print(f\"Epoch: {epoch}, val psnr: {val_psnr:0.3f}\")\n\n torch.save({\n 'epoch': epoch,\n 'meta_model_state_dict': meta_model.state_dict(),\n 'meta_optim_state_dict': meta_optim.state_dict(),\n }, f'meta_epoch{epoch}.pth')\n\n\nif __name__ == '__main__':\n main()"
] | [
[
"torch.utils.data.DataLoader",
"torch.nn.functional.mse_loss",
"torch.stack",
"torch.randint",
"torch.split",
"torch.no_grad",
"torch.cuda.is_available",
"torch.log10",
"torch.cat"
]
] |
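The `train_meta` loop above is Reptile: copy the meta model, adapt the copy with inner SGD, then use `meta_param - inner_param` as the meta-gradient. Below is a self-contained sketch of the same update on an assumed toy regression task (the real inner loop renders NeRF rays instead of fitting a linear model):

```python
import copy
import torch

# Reptile sketch on a toy regression problem. The model and data are
# assumed for illustration only.
meta_model = torch.nn.Linear(4, 1)
meta_optim = torch.optim.Adam(meta_model.parameters(), lr=1e-3)

for task in range(10):                        # one "task" per iteration
    x = torch.randn(64, 4)                    # assumed task data
    y = x.sum(dim=1, keepdim=True)

    inner_model = copy.deepcopy(meta_model)
    inner_optim = torch.optim.SGD(inner_model.parameters(), lr=1e-2)
    for _ in range(5):                        # inner adaptation steps
        inner_optim.zero_grad()
        loss = torch.nn.functional.mse_loss(inner_model(x), y)
        loss.backward()
        inner_optim.step()

    # Reptile outer update: use (meta - adapted) as the meta-gradient,
    # exactly as in train_meta() above.
    meta_optim.zero_grad()
    with torch.no_grad():
        for meta_p, inner_p in zip(meta_model.parameters(),
                                   inner_model.parameters()):
            meta_p.grad = meta_p - inner_p
    meta_optim.step()
```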
cxxixi/Online-opinions-on-weibo | [
"ae4586b8b42d166c9a2386319891a04d390585fb"
] | [
"plot/event1/comment_concat.py"
] | [
"import pandas as pd\n\ncsv1 = pd.read_csv('D:\\my_documents\\competition\\government\\Report\\event1\\\\500-593.csv',header=None)\ncsv2 = pd.read_csv('D:\\my_documents\\competition\\government\\Report\\event1\\\\comment594.csv',header=None)\ncsv3 = pd.read_csv('D:\\my_documents\\competition\\government\\Report\\event1\\\\comment855.csv',header=None)\ncsv4 = pd.read_csv('D:\\my_documents\\competition\\government\\Report\\event1\\\\comment993.csv',header=None)\ncsv5 = pd.read_csv('D:\\my_documents\\competition\\government\\Report\\event1\\\\comment1123.csv',header=None)\ncsv6 = pd.read_csv('D:\\my_documents\\competition\\government\\Report\\event1\\\\comment1137.csv',header=None)\ncsv7 = pd.read_csv('D:\\my_documents\\competition\\government\\Report\\event1\\\\comment1.csv',header=None)\ncsv8 = pd.read_csv('D:\\my_documents\\competition\\government\\Report\\event1\\\\comment350.csv',header=None)\ncsv9 = pd.read_csv('D:\\my_documents\\competition\\government\\Report\\event1\\\\addition.csv',header=None)\ncsv10 = pd.read_csv('D:\\my_documents\\competition\\government\\Report\\event1\\\\addition_1.csv',header=None)\n\n\n\ncomment_data = pd.concat([csv1,csv2,csv3,csv4,csv5,csv6,csv7,csv8,csv9,csv10],axis=0)\ncomment_data.head()\ncomment_data.shape\ncomment_data.columns = ['tweetid', 'comment_id', 'created_at', 'text', 'like_counts', 'reply_id', 'reply_text', 'user_id',\\\n 'profile_url', 'screen_name', 'verified', 'verified_type']\n\ncomment_data = comment_data.drop_duplicates()\ncomment_data.groupby(by=['tweetid']).size().sort_values(ascending=False)\n\n\ncomment_data.to_csv('D:\\my_documents\\competition\\government\\Report\\event1\\\\comment_data.csv')\n\n\n\n\n\n\n\n"
] | [
[
"pandas.read_csv",
"pandas.concat"
]
] |
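`comment_concat.py` boils down to concatenating several headerless CSV dumps, naming the columns, and dropping duplicate rows. The same pattern is shown below on small in-memory frames (columns trimmed for brevity, values assumed):

```python
import pandas as pd

# Same concatenate-then-deduplicate pattern as the script above,
# demonstrated on in-memory frames instead of the CSV dumps.
part1 = pd.DataFrame([[1, 'a', 'hello'], [2, 'b', 'world']],
                     columns=['tweetid', 'comment_id', 'text'])
part2 = pd.DataFrame([[2, 'b', 'world'], [3, 'c', 'again']],
                     columns=['tweetid', 'comment_id', 'text'])

comment_data = pd.concat([part1, part2], axis=0)
comment_data = comment_data.drop_duplicates()
print(comment_data.groupby('tweetid').size().sort_values(ascending=False))
```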
donalrinho/zfit | [
"9769ef7d56a6be9a5d438e47b80ea5a8f772bc24"
] | [
"examples/custom_pdf_advanced.py"
] | [
"# Copyright (c) 2021 zfit\n\nimport tensorflow as tf\n\nimport zfit\nfrom zfit import z\n\n\nclass CustomPDF2D(zfit.pdf.BasePDF):\n \"\"\"My custom, 2 dimensional pdf. The axes are: Energy, Momentum.\n\n \"\"\"\n\n def __init__(self, param1, param2, param3, obs, name=\"CustomPDF\", ):\n # we can now do complicated stuff here if needed\n # only thing: we have to specify explicitly here what is which parameter\n params = {'super_param': param1, # we can change/compose etc parameters\n 'param2': param2, 'param3': param3}\n super().__init__(obs, params, name=name)\n\n def _unnormalized_pdf(self, x):\n energy, momentum = x.unstack_x()\n param1 = self.params['super_param']\n param2 = self.params['param2']\n param3 = self.params['param3']\n\n # just a fantasy function\n probs = param1 * tf.cos(energy ** 2) + tf.math.log(param2 * momentum ** 2) + param3\n return probs\n\n\n# add an analytic integral\n\n# define the integral function\ndef integral_full(limits, norm_range, params, model):\n lower, upper = limits.rect_limits # for a more detailed guide, see the space.py example\n param1 = params['super_param']\n param2 = params['param2']\n param3 = params['param3']\n\n lower = z.convert_to_tensor(lower)\n upper = z.convert_to_tensor(upper)\n\n # calculate the integral here, dummy integral, wrong!\n integral = param1 * param2 * param3 + z.reduce_sum([lower, upper])\n return integral\n\n\n# define the space over which it is defined. Here, we use the axes\nlower_full = (-10, zfit.Space.ANY_LOWER)\nupper_full = (10, zfit.Space.ANY_UPPER)\nintegral_full_limits = zfit.Space(axes=(0, 1),\n limits=(lower_full, upper_full))\n\nCustomPDF2D.register_analytic_integral(func=integral_full,\n limits=integral_full_limits)\n\n\n# define the partial integral function\ndef integral_axis1(x, limits, norm_range, params, model):\n data_0 = x.unstack_x() # data from axis 0\n\n param1 = params['super_param']\n param2 = params['param2']\n param3 = params['param3']\n\n lower, upper = limits.limit1d # for a more detailed guide, see the space.py example\n lower = z.convert_to_tensor(lower) # the limits are now 1-D, for axis 1\n upper = z.convert_to_tensor(upper)\n\n # calculate the integral here, dummy integral\n integral = data_0 ** 2 * param1 * param2 * param3 + z.reduce_sum([lower, upper])\n # notice that the returned shape will be in the same as data_0, e.g. the number of events given in x\n return integral\n\n\n# define the space over which it is defined. Here, we use the axes\nlower_axis1 = ((zfit.Space.ANY_LOWER,),)\nupper_axis1 = ((zfit.Space.ANY_UPPER,),)\nintegral_axis1_limits = zfit.Space(axes=(1,), # axes one corresponds to the second obs, here obs2\n limits=(lower_axis1, upper_axis1))\n\nCustomPDF2D.register_analytic_integral(func=integral_axis1,\n limits=integral_axis1_limits)\n\nif __name__ == '__main__':\n import numpy as np\n\n obs = zfit.Space('obs1', (-10, 10)) * zfit.Space('obs2', (-3, 5))\n pdf = CustomPDF2D(1, 2, 3, obs=obs)\n sample = pdf.sample(n=1000)\n pdf.pdf([[2., 2.5], [5.4, 3.2]])\n x_part = zfit.Data.from_numpy(array=np.array([2.1, 2.2, 3.2]), obs='obs1')\n\n # integrate over obs2 with limits 1, 2 for the `x_part`. This will use the analytic integral above\n pdf.partial_integrate(x=x_part, limits=zfit.Space('obs2', (1, 2)))\n # we can explicitly call the analytic integral. Without registering it (e.g. comment the line with the `register`\n # and run again), it will raise an error\n pdf.partial_analytic_integrate(x=x_part, limits=zfit.Space('obs2', (1, 2)))\n"
] | [
[
"numpy.array",
"tensorflow.cos",
"tensorflow.math.log"
]
] |
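The partial analytic integral registered above integrates over `obs2` while `obs1` stays per-event data. The idea can be illustrated numerically without zfit; the density below is a stand-in, not `CustomPDF2D`:

```python
import numpy as np

# Numerical stand-in for a partial integral: integrate a toy 2-D density
# over the second observable (y in [1, 2]) while the first observable
# stays per-event data, mirroring pdf.partial_integrate(x=x_part, ...).
def toy_density(x, y):
    return np.cos(x) ** 2 + 0.1 * y ** 2   # assumed, not CustomPDF2D

x_part = np.array([2.1, 2.2, 3.2])      # per-event values of obs1
y_grid = np.linspace(1.0, 2.0, 201)     # integration grid over obs2
dy = y_grid[1] - y_grid[0]

partial = []
for x in x_part:
    vals = toy_density(x, y_grid)
    partial.append(np.sum((vals[:-1] + vals[1:]) / 2) * dy)  # trapezoid rule
print(np.array(partial))                # one integral per event
```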
jiaojiao1234/RISE | [
"fd85aa6e475534a74faab5c4644c63dc0c01d236"
] | [
"Jupyter/UDO_Free/RP/test_start.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 8 22:09:47 2021\n\n@author: Apple\n\"\"\"\ndef start():\n import numpy as np\n import scipy.io as sio\n import sklearn.ensemble\n from sklearn import svm\n from sklearn.model_selection import StratifiedKFold\n from sklearn.metrics import confusion_matrix\n from sklearn import preprocessing\n import joblib\n from sklearn import neighbors\n from sklearn.model_selection import StratifiedShuffleSplit\n from sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n from sklearn.tree import DecisionTreeClassifier\n import random\n from sklearn.linear_model import LogisticRegression \n from sklearn.ensemble import GradientBoostingClassifier\n from sklearn.ensemble import AdaBoostClassifier\n from sklearn.naive_bayes import GaussianNB\n from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\n from sklearn.ensemble import VotingClassifier\n from nonconformist.nc import MarginErrFunc\n import warnings\n warnings.filterwarnings(\"ignore\", message=\"Numerical issues were encountered \")\n import sys\n sys.path.insert(0,'/root/RISE-Version2/')\n from Statistical_vector.statistical_vector import train_statistical_vector, test_statistical_vector_param, non_condition_p\n \n \n min_max_scaler = sklearn.preprocessing.MinMaxScaler(feature_range=(-1,1))\n myclassifier = [svm.SVC(probability = True, break_ties=True, decision_function_shape='ovr', random_state=0),\n sklearn.ensemble.RandomForestClassifier(n_estimators=100,random_state=0),\n DecisionTreeClassifier(random_state=0),neighbors.KNeighborsClassifier(n_neighbors=5, weights='uniform',\n algorithm='auto', leaf_size=30,\n p=2, metric='minkowski',\n metric_params=None, n_jobs=1),\n LogisticRegression(random_state=0),GradientBoostingClassifier(n_estimators=100,random_state=0),\n LinearDiscriminantAnalysis(), AdaBoostClassifier(),\n GaussianNB(),QuadraticDiscriminantAnalysis()] \n \n \n times = ['1'] ##test set\n train_name = ['2','3','4'] ##train set\n filepath = r'/root/RISE-Version2/Jupyter/UDO_Free/RP/data/' \n filename = ['zjq_']\n class_index = 3\n class_num = 5 \n \n \n ##load test data\n #print('\\n---------------test data is ' + times[0] + ' scenario-------------\\n')\n data = sio.loadmat(filepath + filename[0] + times[0] + '.mat')\n xx2 = data['alldata']\n yy2 = data['alllabel']\n yy2 = yy2.flatten()\n test_x = xx2\n test_y = yy2\n \n ##load train data\n #print('\\n-------training data is ' + str(train_name) + ' scenario----------\\n')\n xx1 = np.empty(shape=[0, xx2.shape[1]])\n yy1 = np.empty(shape=[1, 0],dtype=int) \n yy1 = yy1.flatten() \n for ii in train_name:\n data = sio.loadmat(filepath + filename[0] + ii+ '.mat')\n x1 = data['alldata']\n y1 = data['alllabel']\n y1 = y1.flatten()\n x1 = min_max_scaler.fit_transform(x1)\n xx1 = np.append(xx1, x1, axis=0)\n yy1 = np.append(yy1, y1, axis=0)\n yy1 = yy1.flatten()\n \n index = [t for t in range(xx1.shape[0])] \n random.shuffle(index)\n x_train11 = xx1[index]\n x_train1 = x_train11\n y_train1 = yy1[index]\n #y_train1 = y_train1 - 1 \n \n \n ############################ Without RISE ###############################\n print('\\n-------- The performance of the underlying model without RISE --------\\n')\n x_test1 = min_max_scaler.fit_transform(test_x)\n y_test1 = test_y\n #y_test1 = y_test1 - 1\n clf_dif = myclassifier[class_index]\n clf_dif.fit(x_train1,y_train1)\n acc_dif = clf_dif.score(x_test1,y_test1)\n print('The accuracy without RISE: ',acc_dif)\n y_true_dif, y_pred_dif = y_test1,clf_dif.predict(x_test1)\n 
test_confusion_matrix = confusion_matrix(y_true_dif, y_pred_dif)\n print('Confusion matrix without RISE: \\n',test_confusion_matrix)\n \n return x_train1, y_train1, x_test1, y_test1, myclassifier, y_true_dif, y_pred_dif,class_num,class_index"
] | [
[
"scipy.io.loadmat",
"sklearn.svm.SVC",
"numpy.empty",
"sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis",
"sklearn.tree.DecisionTreeClassifier",
"numpy.append",
"sklearn.metrics.confusion_matrix",
"sklearn.ensemble.AdaBoostClassifier",
"sklearn.linear_model.LogisticRegression",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.discriminant_analysis.LinearDiscriminantAnalysis",
"sklearn.ensemble.GradientBoostingClassifier",
"sklearn.naive_bayes.GaussianNB"
]
] |
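`start()` above follows a standard pattern: min-max scale to [-1, 1], shuffle, fit the classifier at `class_index = 3` (KNN), and report accuracy plus a confusion matrix. The sketch below reproduces that pattern on synthetic data (everything here is assumed; the original loads `.mat` recordings):

```python
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix
from sklearn.datasets import make_classification

# Same preprocessing / evaluation pattern as start(), on synthetic data.
X, y = make_classification(n_samples=400, n_features=20, n_informative=8,
                           n_classes=5, n_clusters_per_class=1,
                           random_state=0)
X_train, X_test, y_train, y_test = X[:300], X[300:], y[:300], y[300:]

scaler = MinMaxScaler(feature_range=(-1, 1))
X_train = scaler.fit_transform(X_train)
X_test = scaler.fit_transform(X_test)   # note: the script also re-fits on test data

clf = KNeighborsClassifier(n_neighbors=5)   # class_index = 3 in myclassifier
clf.fit(X_train, y_train)
print("accuracy without RISE:", clf.score(X_test, y_test))
print(confusion_matrix(y_test, clf.predict(X_test)))
```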
DREAMS-lab/mask_rcnn_pytorch | [
"1fcc8489758aa4673c29a32a2413f7f86742414f"
] | [
"rock_c3.py"
] | [
"\"\"\"\nrock.py\nZhiang Chen, Feb 2020\ndata class for mask rcnn\n\"\"\"\n\nimport os\nimport numpy as np\nimport torch\nfrom PIL import Image\nimport pickle\nimport matplotlib.pyplot as plt\n\n\"\"\"\n./datasets/\n Rock/\n data/\n 0_8.npy\n 0_9.npy\n 1_4.npy\n ...\n\"\"\"\n\nclass Dataset(object):\n def __init__(self, data_path, transforms=None, input_channel=6, include_name=True):\n self.data_path = data_path\n self.transforms = transforms\n self.data_files = [f for f in os.listdir(data_path) if f.endswith(\".npy\")]\n self.input_channel = input_channel\n self.include_name = include_name\n\n def __getitem__(self, idx):\n data_path = os.path.join(self.data_path, self.data_files[idx])\n\n data = np.load(data_path)\n\n if self.input_channel == 6:\n image = data[:, :, :self.input_channel]\n elif self.input_channel == 3:\n image = data[:, :, :3]\n elif self.input_channel == 4:\n rgb = data[:, :, :3]\n dem = data[:, :, 3:]\n d = dem[:,:,0]*0.33 + dem[:,:,1]*0.33 + dem[:,:,2]*0.33\n image = np.append(rgb, np.expand_dims(d, axis=2), axis=2)\n\n if data.shape[2] == 6:\n masks = np.ones_like(image[:, :, :3]) * 255\n else:\n masks = data[:, :, 6:]\n num_objs = masks.shape[2]\n \"\"\"\n for i in reversed(range(num_objs)):\n mask = masks[:, :, i]\n if mask.max() < 250:\n masks = np.delete(masks, i, axis=2)\n num_objs = masks.shape[2]\n \"\"\"\n # 0 encoding non-damaged is supposed to be 1 for training.\n # In training, 0 is of background\n obj_ids = np.ones(num_objs)\n\n masks = masks >= 250 # convert to binary masks\n\n boxes = []\n\n for i in range(num_objs):\n pos = np.where(masks[:, :, i])\n xmin = np.min(pos[1])\n xmax = np.max(pos[1])\n ymin = np.min(pos[0])\n ymax = np.max(pos[0])\n boxes.append([xmin, ymin, xmax, ymax])\n\n # convert everything into a torch.Tensor\n boxes = torch.as_tensor(boxes, dtype=torch.float32)\n # labels = torch.ones((num_objs,), dtype=torch.int64)\n labels = torch.as_tensor(obj_ids, dtype=torch.int64)\n masks = torch.as_tensor(masks, dtype=torch.uint8)\n masks = masks.permute((2, 0, 1))\n\n image_id = torch.tensor([idx])\n area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])\n # suppose all instances are not crowd\n iscrowd = torch.zeros((num_objs,), dtype=torch.int64)\n\n target = {}\n target[\"boxes\"] = boxes\n target[\"labels\"] = labels\n target[\"masks\"] = masks\n target[\"image_id\"] = image_id\n target[\"area\"] = area\n target[\"iscrowd\"] = iscrowd\n if self.include_name:\n target[\"image_name\"] = data_path\n\n if self.transforms is not None:\n image, target = self.transforms(image, target)\n\n return image, target\n\n def __len__(self):\n return len(self.data_files)\n\n def show(self, idx):\n image, target = self.__getitem__(idx)\n rgb = image[:, :, :3].astype(np.uint8)\n rgb = Image.fromarray(rgb)\n rgb.show()\n masks = target[\"masks\"]\n masks = masks.permute((1, 2, 0))\n masks = masks.numpy()\n masks = masks.max(axis=2) * 255\n masks = Image.fromarray(masks)\n masks.show()\n\n def imageStat(self):\n images = np.empty((0, 6), float)\n for data_file in self.data_files:\n if len(data_file.split('_'))==2:\n data_path = os.path.join(self.data_path, data_file)\n data = np.load(data_path)\n print(data.shape)\n image = data[:, :, :6].astype(float).reshape(-1, 6)/255.0\n images = np.append(images, image, axis=0)\n return np.mean(images, axis=0).tolist(), np.std(images, axis=0).tolist(), \\\n np.max(images, axis=0).tolist(), np.min(images, axis=0).tolist()\n\n\n def imageStat2(self):\n images = np.empty((0, 3), float)\n import random\n 
random.shuffle(self.data_files)\n for data_file in self.data_files[:40]:\n if True:\n data_path = os.path.join(self.data_path, data_file)\n data = np.load(data_path)\n image = data[:, :, :3].astype(float).reshape(-1, 3)/255.0\n images = np.append(images, image, axis=0)\n return np.mean(images, axis=0).tolist(), np.std(images, axis=0).tolist(), \\\n np.max(images, axis=0).tolist(), np.min(images, axis=0).tolist()\n\n\nif __name__ == \"__main__\":\n #ds = Dataset(\"./datasets/Rock/data/\")\n ds = Dataset(\"./datasets/hypolith_sample_set_throop/npy\",input_channel=3)\n # image_mean, image_std, image_max, image_min = ds.imageStat()\n\n\n id = 29\n image, target = ds[id]\n print(target['image_name'])\n ds.show(id)\n\n id = 28\n image, target = ds[id]\n print(target['image_name'])\n ds.show(id)\n print(ds.imageStat2())\n"
] | [
[
"numpy.load",
"numpy.ones",
"numpy.empty",
"numpy.append",
"torch.as_tensor",
"torch.tensor",
"numpy.ones_like",
"numpy.max",
"numpy.expand_dims",
"numpy.min",
"torch.zeros",
"numpy.std",
"numpy.where",
"numpy.mean"
]
] |
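`Dataset.__getitem__` in `rock_c3.py` derives bounding boxes from binary instance masks with `np.where`/`min`/`max` before converting everything to tensors. The same step is shown below on two tiny synthetic masks (values assumed):

```python
import numpy as np
import torch

# The box-from-mask step used in Dataset.__getitem__ above, applied to
# two small synthetic instance masks.
masks = np.zeros((8, 8, 2), dtype=np.uint8)
masks[1:4, 2:6, 0] = 255      # instance 0
masks[5:8, 0:3, 1] = 255      # instance 1
binary = masks >= 250         # convert to binary masks

boxes = []
for i in range(binary.shape[2]):
    pos = np.where(binary[:, :, i])
    xmin, xmax = np.min(pos[1]), np.max(pos[1])
    ymin, ymax = np.min(pos[0]), np.max(pos[0])
    boxes.append([xmin, ymin, xmax, ymax])

boxes = torch.as_tensor(boxes, dtype=torch.float32)
area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
print(boxes)   # tensor([[2., 1., 5., 3.], [0., 5., 2., 7.]])
print(area)    # tensor([6., 4.])
```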
shantanusharma/keras | [
"662f6c5bb82b54d90ec8e863ac7a44c3b8c1b938"
] | [
"keras/engine/sequential.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=protected-access\n\"\"\"Home of the `Sequential` model.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\n\nimport copy\nimport warnings\nfrom keras import layers as layer_module\nfrom keras.engine import base_layer\nfrom keras.engine import functional\nfrom keras.engine import input_layer\nfrom keras.engine import training_utils\nfrom keras.saving.saved_model import model_serialization\nfrom keras.utils import generic_utils\nfrom keras.utils import layer_utils\nfrom keras.utils import tf_inspect\nfrom keras.utils import tf_utils\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training.tracking import base as trackable\nfrom tensorflow.python.util.tf_export import keras_export\n\n\nSINGLE_LAYER_OUTPUT_ERROR_MSG = ('All layers in a Sequential model should have '\n 'a single output tensor. For multi-output '\n 'layers, use the functional API.')\n\n\n@keras_export('keras.Sequential', 'keras.models.Sequential')\nclass Sequential(functional.Functional):\n \"\"\"`Sequential` groups a linear stack of layers into a `tf.keras.Model`.\n\n `Sequential` provides training and inference features on this model.\n\n Examples:\n\n >>> # Optionally, the first layer can receive an `input_shape` argument:\n >>> model = tf.keras.Sequential()\n >>> model.add(tf.keras.layers.Dense(8, input_shape=(16,)))\n >>> # Afterwards, we do automatic shape inference:\n >>> model.add(tf.keras.layers.Dense(4))\n\n >>> # This is identical to the following:\n >>> model = tf.keras.Sequential()\n >>> model.add(tf.keras.Input(shape=(16,)))\n >>> model.add(tf.keras.layers.Dense(8))\n\n >>> # Note that you can also omit the `input_shape` argument.\n >>> # In that case the model doesn't have any weights until the first call\n >>> # to a training/evaluation method (since it isn't yet built):\n >>> model = tf.keras.Sequential()\n >>> model.add(tf.keras.layers.Dense(8))\n >>> model.add(tf.keras.layers.Dense(4))\n >>> # model.weights not created yet\n\n >>> # Whereas if you specify the input shape, the model gets built\n >>> # continuously as you are adding layers:\n >>> model = tf.keras.Sequential()\n >>> model.add(tf.keras.layers.Dense(8, input_shape=(16,)))\n >>> model.add(tf.keras.layers.Dense(4))\n >>> len(model.weights)\n 4\n\n >>> # When using the delayed-build pattern (no input shape specified), you can\n >>> # choose to manually build your model by calling\n >>> # `build(batch_input_shape)`:\n >>> model = tf.keras.Sequential()\n >>> model.add(tf.keras.layers.Dense(8))\n >>> model.add(tf.keras.layers.Dense(4))\n >>> model.build((None, 16))\n >>> len(model.weights)\n 4\n\n ```python\n # Note that when using the delayed-build pattern (no input shape specified),\n # the model gets 
built the first time you call `fit`, `eval`, or `predict`,\n # or the first time you call the model on some input data.\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.Dense(8))\n model.add(tf.keras.layers.Dense(1))\n model.compile(optimizer='sgd', loss='mse')\n # This builds the model for the first time:\n model.fit(x, y, batch_size=32, epochs=10)\n ```\n \"\"\"\n\n @trackable.no_automatic_dependency_tracking\n def __init__(self, layers=None, name=None):\n \"\"\"Creates a `Sequential` model instance.\n\n Args:\n layers: Optional list of layers to add to the model.\n name: Optional name for the model.\n \"\"\"\n # Skip the init in FunctionalModel since model doesn't have input/output yet\n super(functional.Functional, self).__init__( # pylint: disable=bad-super-call\n name=name, autocast=False)\n base_layer.keras_api_gauge.get_cell('Sequential').set(True)\n self.supports_masking = True\n self._compute_output_and_mask_jointly = True\n self._auto_track_sub_layers = False\n self._inferred_input_shape = None\n self._has_explicit_input_shape = False\n self._input_dtype = None\n self._layer_call_argspecs = {}\n self._created_nodes = set()\n # Flag that indicate whether the sequential network topology has been\n # created. It is false when there isn't any layer, or the layers doesn't\n # have input shape.\n self._graph_initialized = False\n\n # Unfortunately some Sequential models using custom layers or FeatureColumn\n # layers have multiple inputs. This is fundamentally incompatible with\n # most of the Sequential API, and we have to disable a number of features\n # for such models.\n self._use_legacy_deferred_behavior = False\n\n # Add to the model any layers passed to the constructor.\n if layers:\n if not isinstance(layers, (list, tuple)):\n layers = [layers]\n for layer in layers:\n self.add(layer)\n\n @property\n def layers(self):\n # Historically, `sequential.layers` only returns layers that were added\n # via `add`, and omits the auto-generated `InputLayer` that comes at the\n # bottom of the stack.\n # `Trackable` manages the `_layers` attributes and does filtering\n # over it.\n layers = super(Sequential, self).layers\n if layers and isinstance(layers[0], input_layer.InputLayer):\n return layers[1:]\n return layers[:]\n\n @trackable.no_automatic_dependency_tracking\n def add(self, layer):\n \"\"\"Adds a layer instance on top of the layer stack.\n\n Args:\n layer: layer instance.\n\n Raises:\n TypeError: If `layer` is not a layer instance.\n ValueError: In case the `layer` argument does not\n know its input shape.\n ValueError: In case the `layer` argument has\n multiple output tensors, or is already connected\n somewhere else (forbidden in `Sequential` models).\n \"\"\"\n # If we are passed a Keras tensor created by keras.Input(), we can extract\n # the input layer from its keras history and use that without any loss of\n # generality.\n if hasattr(layer, '_keras_history'):\n origin_layer = layer._keras_history[0]\n if isinstance(origin_layer, input_layer.InputLayer):\n layer = origin_layer\n logging.warning(\n 'Please add `keras.layers.InputLayer` instead of `keras.Input` to '\n 'Sequential model. `keras.Input` is intended to be used by '\n 'Functional model.')\n\n if isinstance(layer, tf.Module):\n if not isinstance(layer, base_layer.Layer):\n layer = functional.ModuleWrapper(layer)\n else:\n raise TypeError('The added layer must be '\n 'an instance of class Layer. 
'\n 'Found: ' + str(layer))\n\n tf_utils.assert_no_legacy_layers([layer])\n if not self._is_layer_name_unique(layer):\n raise ValueError('All layers added to a Sequential model '\n 'should have unique names. Name \"%s\" is already the name'\n ' of a layer in this model. Update the `name` argument '\n 'to pass a unique name.' % (layer.name,))\n\n self.built = False\n set_inputs = False\n self._maybe_create_attribute('_self_tracked_trackables', [])\n if not self._self_tracked_trackables:\n if isinstance(layer, input_layer.InputLayer):\n # Case where the user passes an Input or InputLayer layer via `add`.\n set_inputs = True\n else:\n batch_shape, dtype = training_utils.get_input_shape_and_dtype(layer)\n if batch_shape:\n # Instantiate an input layer.\n x = input_layer.Input(\n batch_shape=batch_shape, dtype=dtype, name=layer.name + '_input')\n # This will build the current layer\n # and create the node connecting the current layer\n # to the input layer we just created.\n layer(x)\n set_inputs = True\n\n if set_inputs:\n outputs = tf.nest.flatten(layer._inbound_nodes[-1].outputs)\n if len(outputs) != 1:\n raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)\n self.outputs = outputs\n self.inputs = layer_utils.get_source_inputs(self.outputs[0])\n self.built = True\n self._has_explicit_input_shape = True\n\n elif self.outputs:\n # If the model is being built continuously on top of an input layer:\n # refresh its output.\n output_tensor = layer(self.outputs[0])\n if len(tf.nest.flatten(output_tensor)) != 1:\n raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)\n self.outputs = [output_tensor]\n self.built = True\n\n if set_inputs or self._graph_initialized:\n self._init_graph_network(self.inputs, self.outputs)\n self._graph_initialized = True\n else:\n self._self_tracked_trackables.append(layer)\n self._handle_deferred_layer_dependencies([layer])\n\n self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(layer.call)\n\n @trackable.no_automatic_dependency_tracking\n def pop(self):\n \"\"\"Removes the last layer in the model.\n\n Raises:\n TypeError: if there are no layers in the model.\n \"\"\"\n if not self.layers:\n raise TypeError('There are no layers in the model.')\n\n layer = self._self_tracked_trackables.pop()\n self._layer_call_argspecs.pop(layer)\n if not self.layers:\n self.outputs = None\n self.inputs = None\n self.built = False\n self._inferred_input_shape = None\n self._has_explicit_input_shape = False\n self._graph_initialized = False\n elif self._graph_initialized:\n self.layers[-1]._outbound_nodes = []\n self.outputs = [self.layers[-1].output]\n self._init_graph_network(self.inputs, self.outputs)\n self.built = True\n\n @trackable.no_automatic_dependency_tracking\n def _build_graph_network_for_inferred_shape(self,\n input_shape,\n input_dtype=None):\n if input_shape is None or not self.layers:\n return\n if not tf.__internal__.tf2.enabled() or not tf.compat.v1.executing_eagerly_outside_functions():\n # This behavior is disabled in V1 or when eager execution is disabled.\n return\n if (not self._has_explicit_input_shape and\n not self._use_legacy_deferred_behavior):\n # Determine whether the input shape is novel, i.e. 
whether the model\n # should be rebuilt.\n input_shape = tuple(input_shape)\n if self._inferred_input_shape is None:\n new_shape = input_shape\n else:\n new_shape = relax_input_shape(self._inferred_input_shape, input_shape)\n if (new_shape is not None and new_shape != self._inferred_input_shape):\n # A novel shape has been received: we need to rebuild the model.\n # In case we are inside a graph function, we step out of it.\n with tf.init_scope():\n inputs = input_layer.Input(\n batch_shape=new_shape,\n dtype=input_dtype,\n name=self.layers[0].name + '_input')\n layer_input = inputs\n created_nodes = set()\n for layer in self.layers:\n # Clear nodes previously created via this method. This prevents\n # node accumulation and ensures that e.g. `layer.output` is\n # always connected to `model.inputs`\n # (this is important e.g. for the feature extraction use case).\n # We don't just do `layer._inbound_nodes = []` in order\n # not to break shared layers added to Sequential models (which is\n # technically illegal as per the `add()` docstring,\n # but wasn't previously disabled).\n clear_previously_created_nodes(layer, self._created_nodes)\n try:\n # Create Functional API connection by calling the current layer\n layer_output = layer(layer_input)\n except: # pylint:disable=bare-except\n # Functional API calls may fail for a number of reasons:\n # 1) The layer may be buggy. In this case it will be easier for\n # the user to debug if we fail on the first call on concrete data,\n # instead of our own call on a symbolic input.\n # 2) The layer is dynamic (graph-incompatible) and hasn't\n # overridden `compute_output_shape`. In this case, it is\n # impossible to build a graph network.\n # 3) The layer is otherwise incompatible with the Functional API\n # (e.g. this is the case for some probabilistic layers that rely\n # on hacks and that do not return tensors).\n # In all these cases, we should avoid creating a graph network\n # (or we simply can't).\n self._use_legacy_deferred_behavior = True\n return\n if len(tf.nest.flatten(layer_output)) != 1:\n raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)\n # Keep track of nodes just created above\n track_nodes_created_by_last_call(layer, created_nodes)\n layer_input = layer_output\n outputs = layer_output\n self._created_nodes = created_nodes\n try:\n # Initialize a graph Network. This call will never fail for\n # a stack of valid Keras layers.\n # However some users have layers that are fundamentally incompatible\n # with the Functional API, which do not return tensors. 
In this\n # case, we fall back to the legacy deferred behavior.\n # TODO(fchollet): consider raising here, as we should not be\n # supporting such layers.\n self._init_graph_network(inputs, outputs)\n self._graph_initialized = True\n except: # pylint:disable=bare-except\n self._use_legacy_deferred_behavior = True\n self._inferred_input_shape = new_shape\n\n @generic_utils.default\n def build(self, input_shape=None):\n if self._graph_initialized:\n self._init_graph_network(self.inputs, self.outputs)\n else:\n if input_shape is None:\n raise ValueError('You must provide an `input_shape` argument.')\n self._build_graph_network_for_inferred_shape(input_shape)\n if not self.built:\n input_shape = tuple(input_shape)\n self._build_input_shape = input_shape\n super(Sequential, self).build(input_shape)\n self.built = True\n\n def call(self, inputs, training=None, mask=None): # pylint: disable=redefined-outer-name\n # If applicable, update the static input shape of the model.\n if not self._has_explicit_input_shape:\n if not tf.is_tensor(inputs) and not isinstance(\n inputs, tf.Tensor):\n # This is a Sequential with mutiple inputs. This is technically an\n # invalid use case of Sequential, but we tolerate it for backwards\n # compatibility.\n self._use_legacy_deferred_behavior = True\n self._build_input_shape = tf.nest.map_structure(_get_shape_tuple, inputs)\n if tf.__internal__.tf2.enabled():\n logging.warning('Layers in a Sequential model should only have a '\n 'single input tensor, but we receive a %s input: %s'\n '\\nConsider rewriting this model with the Functional '\n 'API.' % (type(inputs), inputs))\n else:\n self._build_graph_network_for_inferred_shape(inputs.shape, inputs.dtype)\n\n if self._graph_initialized:\n if not self.built:\n self._init_graph_network(self.inputs, self.outputs)\n return super(Sequential, self).call(inputs, training=training, mask=mask)\n\n outputs = inputs # handle the corner case where self.layers is empty\n for layer in self.layers:\n # During each iteration, `inputs` are the inputs to `layer`, and `outputs`\n # are the outputs of `layer` applied to `inputs`. 
At the end of each\n # iteration `inputs` is set to `outputs` to prepare for the next layer.\n kwargs = {}\n argspec = self._layer_call_argspecs[layer].args\n if 'mask' in argspec:\n kwargs['mask'] = mask\n if 'training' in argspec:\n kwargs['training'] = training\n\n outputs = layer(inputs, **kwargs)\n\n if len(tf.nest.flatten(outputs)) != 1:\n raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)\n # `outputs` will be the inputs to the next layer.\n inputs = outputs\n mask = getattr(outputs, '_keras_mask', None)\n return outputs\n\n def compute_output_shape(self, input_shape):\n shape = input_shape\n for layer in self.layers:\n shape = layer.compute_output_shape(shape)\n return shape\n\n def compute_mask(self, inputs, mask):\n # TODO(omalleyt): b/123540974 This function is not really safe to call\n # by itself because it will duplicate any updates and losses in graph\n # mode by `call`ing the Layers again.\n outputs = self.call(inputs, mask=mask)\n return getattr(outputs, '_keras_mask', None)\n\n def predict_proba(self, x, batch_size=32, verbose=0):\n \"\"\"Generates class probability predictions for the input samples.\n\n The input samples are processed batch by batch.\n\n Args:\n x: input data, as a Numpy array or list of Numpy arrays\n (if the model has multiple inputs).\n batch_size: integer.\n verbose: verbosity mode, 0 or 1.\n\n Returns:\n A Numpy array of probability predictions.\n \"\"\"\n warnings.warn('`model.predict_proba()` is deprecated and '\n 'will be removed after 2021-01-01. '\n 'Please use `model.predict()` instead.')\n preds = self.predict(x, batch_size, verbose)\n if preds.min() < 0. or preds.max() > 1.:\n logging.warning('Network returning invalid probability values. '\n 'The last layer might not normalize predictions '\n 'into probabilities '\n '(like softmax or sigmoid would).')\n return preds\n\n def predict_classes(self, x, batch_size=32, verbose=0):\n \"\"\"Generate class predictions for the input samples.\n\n The input samples are processed batch by batch.\n\n Args:\n x: input data, as a Numpy array or list of Numpy arrays\n (if the model has multiple inputs).\n batch_size: integer.\n verbose: verbosity mode, 0 or 1.\n\n Returns:\n A numpy array of class predictions.\n \"\"\"\n warnings.warn('`model.predict_classes()` is deprecated and '\n 'will be removed after 2021-01-01. '\n 'Please use instead:'\n '* `np.argmax(model.predict(x), axis=-1)`, '\n ' if your model does multi-class classification '\n ' (e.g. if it uses a `softmax` last-layer activation).'\n '* `(model.predict(x) > 0.5).astype(\"int32\")`, '\n ' if your model does binary classification '\n ' (e.g. if it uses a `sigmoid` last-layer activation).')\n proba = self.predict(x, batch_size=batch_size, verbose=verbose)\n if proba.shape[-1] > 1:\n return proba.argmax(axis=-1)\n else:\n return (proba > 0.5).astype('int32')\n\n def get_config(self):\n layer_configs = []\n for layer in super(Sequential, self).layers:\n # `super().layers` include the InputLayer if available (it is filtered out\n # of `self.layers`). 
Note that `self._self_tracked_trackables` is managed\n # by the tracking infrastructure and should not be used.\n layer_configs.append(generic_utils.serialize_keras_object(layer))\n config = {\n 'name': self.name,\n 'layers': copy.deepcopy(layer_configs)\n }\n if not self._is_graph_network and self._build_input_shape is not None:\n config['build_input_shape'] = self._build_input_shape\n return config\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n if 'name' in config:\n name = config['name']\n build_input_shape = config.get('build_input_shape')\n layer_configs = config['layers']\n else:\n name = None\n build_input_shape = None\n layer_configs = config\n model = cls(name=name)\n for layer_config in layer_configs:\n layer = layer_module.deserialize(layer_config,\n custom_objects=custom_objects)\n model.add(layer)\n if (not model.inputs and build_input_shape and\n isinstance(build_input_shape, (tuple, list))):\n model.build(build_input_shape)\n return model\n\n @property\n def input_spec(self):\n if hasattr(self, '_manual_input_spec'):\n return self._manual_input_spec\n if self.layers and hasattr(self.layers[0], 'input_spec'):\n return self.layers[0].input_spec\n return None\n\n @input_spec.setter\n def input_spec(self, value):\n self._manual_input_spec = value\n\n @property\n def _trackable_saved_model_saver(self):\n return model_serialization.SequentialSavedModelSaver(self)\n\n def _is_layer_name_unique(self, layer):\n for ref_layer in self.layers:\n if layer.name == ref_layer.name and ref_layer is not layer:\n return False\n return True\n\n def _assert_weights_created(self):\n if self._graph_initialized:\n return\n # When the graph has not been initialized, use the Model's implementation to\n # to check if the weights has been created.\n super(functional.Functional, self)._assert_weights_created() # pylint: disable=bad-super-call\n\n\ndef _get_shape_tuple(t):\n if hasattr(t, 'shape'):\n shape = t.shape\n if isinstance(shape, tuple):\n return shape\n if shape.rank is not None:\n return tuple(shape.as_list())\n return None\n return None\n\n\ndef relax_input_shape(shape_1, shape_2):\n if shape_1 is None or shape_2 is None:\n return None\n if len(shape_1) != len(shape_2):\n return None\n return tuple(None if d1 != d2 else d1 for d1, d2 in zip(shape_1, shape_2))\n\n\ndef clear_previously_created_nodes(layer, created_nodes):\n \"\"\"Remove nodes from `created_nodes` from the layer's inbound_nodes.\"\"\"\n for node in layer._inbound_nodes:\n prev_layers = node.inbound_layers\n for prev_layer in tf.nest.flatten(prev_layers):\n prev_layer._outbound_nodes = [\n n for n in prev_layer._outbound_nodes\n if n not in created_nodes]\n layer._inbound_nodes = [\n n for n in layer._inbound_nodes if n not in created_nodes]\n\n\ndef track_nodes_created_by_last_call(layer, created_nodes):\n \"\"\"Adds to `created_nodes` the nodes created by the last call to `layer`.\"\"\"\n if not layer._inbound_nodes:\n return\n created_nodes.add(layer._inbound_nodes[-1])\n prev_layers = layer._inbound_nodes[-1].inbound_layers\n for prev_layer in tf.nest.flatten(prev_layers):\n if prev_layer._outbound_nodes:\n created_nodes.add(prev_layer._outbound_nodes[-1])\n"
] | [
[
"tensorflow.compat.v2.is_tensor",
"tensorflow.compat.v2.compat.v1.executing_eagerly_outside_functions",
"tensorflow.compat.v2.__internal__.tf2.enabled",
"tensorflow.compat.v2.nest.flatten",
"tensorflow.python.util.tf_export.keras_export",
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.compat.v2.nest.map_structure",
"tensorflow.compat.v2.init_scope"
]
] |
mehdijj/ppd | [
"9e7626a47886d9d2016873d722f97a930e96540d"
] | [
"utils.py"
] | [
"import numpy as np\r\nimport Augmentor\r\nimport os\r\n\r\n\r\ndef _permute_index(l, seed):\r\n \"\"\"\r\n Creates a permutation of np.array([0, ..., l-1]) and its inverse\r\n :param l: length of the array to permute\r\n :param seed: permutation seed\r\n :return: (s, s_inverse) where s is permutation of np.array([0, ..., l-1]) and s_inverse is its inverse\r\n \"\"\"\r\n st0 = np.random.get_state()\r\n s = np.arange(l)\r\n np.random.seed(seed)\r\n np.random.shuffle(s)\r\n s_inverse = np.argsort(s)\r\n np.random.set_state(st0)\r\n return s, s_inverse\r\n\r\n\r\ndef permute(data, seed):\r\n \"\"\"\r\n Permutes images in the data with given seed for each channel.\r\n :param data: numpy array with shape (nb_images, img_rows, img_cols, nb_channels)\r\n :param seed: permutation seed. If seed=None returns data without permutation\r\n :return: numpy array with shape (nb_images, img_rows, img_cols, nb_channels) of permuted images\r\n \"\"\"\r\n \"\"\"\r\n Permutes images in the data with given seed. If seed=None, returns data without permutation.\r\n Assumes data has shape (nb_images, img_rows, img_cols, nb_channels)\r\n \"\"\"\r\n nb_images, img_rows, img_cols, nb_channels = data.shape\r\n if seed is None:\r\n return data\r\n l = img_rows * img_cols # length of the permutation array\r\n s, _ = _permute_index(l, seed)\r\n output = np.zeros(data.shape)\r\n for ch in range(nb_channels):\r\n output[:, :, :, ch] = data[:, :, :, ch].reshape(-1, l)[:, s].reshape(-1, img_rows, img_cols)\r\n return output\r\n\r\n\r\ndef ipermute(data, seed):\r\n \"\"\"\r\n inverse of permute\r\n :param data: numpy array with shape (nb_images, img_rows, img_cols, nb_channels)\r\n :param seed: permutation seed. If seed=None returns data without permutation\r\n :return: numpy array with shape (nb_images, img_rows, img_cols, nb_channels) of inverse permuted images\r\n \"\"\"\r\n nb_images, img_rows, img_cols, nb_channels = data.shape\r\n if seed is None:\r\n return data\r\n l = img_rows * img_cols # length of the permutation array\r\n _, s_inverse = _permute_index(l, seed)\r\n output = np.zeros(data.shape)\r\n for ch in range(nb_channels):\r\n output[:, :, :, ch] = data[:, :, :, ch].reshape(-1, l)[:, s_inverse].reshape(-1, img_rows, img_cols)\r\n return output\r\n\r\n\r\ndef fourier(data):\r\n \"\"\"\r\n converts each channel of images in the data to its 2-dimensional discrete Fourier transform.\r\n :param data: numpy array with shape (nb_images, img_rows, img_cols, nb_channels)\r\n :return: numpy array with shape (nb_images, img_rows, img_cols, 2*nb_channels)\r\n The first half of output channels are magnitude information, the second half are phase info in range (-pi, pi)\r\n \"\"\"\r\n channels = data.shape[-1]\r\n output_shape = list(data.shape)\r\n output_shape[-1] = channels*2\r\n data_f = np.zeros(output_shape)\r\n for i in range(data.shape[0]):\r\n for ch in range(channels):\r\n f = np.fft.fft2(data[i, :, :, ch])\r\n fshift = np.fft.fftshift(f)\r\n magnitude = np.abs(fshift)\r\n phase = np.angle(fshift)\r\n data_f[i, :, :, ch] = magnitude\r\n data_f[i, :, :, ch + channels] = phase\r\n return data_f\r\n\r\n\r\ndef pol2cart(r, theta):\r\n \"\"\"\r\n Convert polar representation of a complex number to cartesian representation\r\n :param r: scalar or numpy array denoting magnitude component of the complex number\r\n :param theta: scalar or numpy array denoting phase of the complex number in radians.\r\n \"\"\"\r\n return r * np.exp(1j * theta)\r\n\r\n\r\ndef ifourier(data_f):\r\n \"\"\"\r\n inverse of fourier function\r\n 
:param data_f: numpy array with shape (nb_images, img_rows, img_cols, 2*nb_channels)\r\n The first half of output channels are magnitude information, the second half are phase info in range (-pi, pi)\r\n :return: numpy array with shape (nb_images, img_rows, img_cols, nb_channels) denoting data in pixel domain.\r\n \"\"\"\r\n channels = int(data_f.shape[-1]/2)\r\n output_shape = list(data_f.shape)\r\n output_shape[-1] = channels\r\n data = np.zeros(output_shape, dtype='complex') # The dtype is now changed to 'complex' not to lose any information.\r\n for i in range(data_f.shape[0]):\r\n for ch in range(channels):\r\n fshift = pol2cart(data_f[i, :, :, ch], data_f[i, :, :, ch + channels])\r\n f = np.fft.ifftshift(fshift)\r\n data[i, :, :, ch] = np.fft.ifft2(f)\r\n return data\r\n\r\n\r\ndef phase2pixel(phase):\r\n \"\"\"\r\n reconstruct pixel domain from phase by adding unity magnitude.\r\n :param phase: numpy array with shape (nb_images, img_rows, img_cols, nb_channels) containing phase component\r\n of two dimensional discrete Fourier transform.\r\n :return: numpy array with same shape as phase denoting pixel reconstruction from phase only\r\n while setting magnitude=1\r\n \"\"\"\r\n magnitude = np.ones(phase.shape)\r\n data_f = np.concatenate((magnitude, phase), axis=3)\r\n return ifourier(data_f)\r\n\r\n\r\ndef pixel2phase(data):\r\n \"\"\"\r\n converts each channel of images in the data to phase component of its 2-dimensional discrete Fourier transform.\r\n :param data: numpy array with shape (nb_images, img_rows, img_cols, nb_channels)\r\n :return: numpy array with same shape as data\r\n \"\"\"\r\n channels = data.shape[-1]\r\n return fourier(data)[:, :, :, channels:]\r\n\r\n\r\ndef augment(path_to_training_data, nb_samples):\r\n if os.path.exists(os.path.join(path_to_training_data, 'output')):\r\n print('Augmented data is already saved to {0}'.format(os.path.join(path_to_training_data, 'output')))\r\n return\r\n p = Augmentor.Pipeline(path_to_training_data)\r\n\r\n # augmentation pipeline\r\n p.rotate(probability=0.5, max_left_rotation=10, max_right_rotation=10)\r\n p.zoom(probability=.5, min_factor=0.8, max_factor=1.2)\r\n p.random_distortion(probability=.5, grid_width=6, grid_height=6, magnitude=1)\r\n\r\n print(p.status())\r\n print('{0} samples generated and saved to {1}'.format(nb_samples, os.path.join(path_to_training_data, 'output')))\r\n p.sample(nb_samples)\r\n\r\n\r\ndef load_images_from_folder(folder):\r\n \"\"\"\r\n loads png images and labels from folder. The folder must contain subfolders of images for different labels.\r\n For example, it should contain subfolders 0, 1, 2, ... where each subfolder contains images of the\r\n corresponding label. 
Note that the first time this function is called, it saves images and labels as npy\r\n files in the path of folder for later reference.\r\n :param folder: string of path to the folder.\r\n :return: a tuple (images, labels) of numpy arrays\r\n \"\"\"\r\n images = []\r\n labels = []\r\n if 'images.npy' in os.listdir(folder) and 'labels.npy' in os.listdir(folder):\r\n images = np.load(os.path.join(folder, 'images.npy'))\r\n labels = np.load(os.path.join(folder, 'labels.npy'))\r\n else:\r\n from PIL import Image\r\n for subfolder in os.listdir(folder):\r\n if subfolder.isdigit():\r\n for filename in os.listdir(os.path.join(folder, subfolder)):\r\n img = Image.open(os.path.join(folder, subfolder, filename))\r\n img_arr = np.array(img, dtype='uint8')\r\n images.append(img_arr)\r\n labels.append(int(subfolder))\r\n perm = np.random.permutation(len(labels))\r\n images = np.array(images)[perm]\r\n labels = np.array(labels)[perm]\r\n np.save(os.path.join(folder, 'images'), images)\r\n np.save(os.path.join(folder, 'labels'), labels)\r\n return images, labels\r\n\r\n\r\ndef log_attack(attack_name, adv_x, perturbation_strength, attack_params):\r\n \"\"\"\r\n saves adv_x with name perturbation_strength in folder with attack_name\r\n :param attack_name: string name of attack\r\n :param adv_x: numpy array of adversarial images\r\n :param perturbation_strength: scalar showing perturbation strength of the adversarial images.\r\n used for filename of adv_x\r\n :param attack_params: dictionary of parameters of the attack\r\n \"\"\"\r\n directory = os.path.join('Attack Logs', attack_name)\r\n if not os.path.exists(directory):\r\n os.makedirs(directory)\r\n\r\n import json\r\n with open(os.path.join(directory, 'params' + str(perturbation_strength) + '.txt'), 'w') as file:\r\n file.write(json.dumps(attack_params)) # use `json.loads` to do the reverse\r\n np.save(os.path.join(directory, str(perturbation_strength)), adv_x)\r\n\r\n\r\ndef _read_attack(attack_name, perturbation_strength):\r\n \"\"\"\r\n loads adv_x with perturbation_strength from folder with attack_name\r\n :param attack_name: string of attack name used for folder to save\r\n :param perturbation_strength: a float or string of attack file\r\n \"\"\"\r\n filename = os.path.join('Attack Logs', attack_name, str(perturbation_strength) + '.npy')\r\n return np.load(filename)\r\n\r\n\r\ndef measure_perturbation(x, adv_x, order):\r\n \"\"\"\r\n average perturbation between x and adv_x. 
Note that each image is converted to\r\n a vector of size (img_rows*img_cols*nb_channels) and then norm is calculated.\r\n :param x: numpy array with shape (nb_images, img_rows, img_cols, nb_channels)\r\n :param adv_x: numpy array with same shape as x\r\n :param order: order of the norm (mimics numpy) possible values are np.inf, 1 or 2\r\n :return: a scalar denoting perturbation between x and adv_x averaged over images.\r\n \"\"\"\r\n nb_images, _, _, _ = x.shape\r\n dev = (x-adv_x).reshape(nb_images, -1)\r\n dev_norms = np.linalg.norm(dev, order, axis=1)\r\n return np.mean(dev_norms)\r\n\r\n\r\ndef random_perturb(x, perturbation_strength, order):\r\n \"\"\"\r\n randomly perturbes pixels of x with perturbation_strength such that\r\n measure_perturbation(x, random_perturb(x, perturbation_strength, order), order) = perturbation_strength.\r\n For order=np.inf each pixel is perturbed with either -perturbation_strenth or perturbation_strength.\r\n For order = 1 and order = 2, images of the pixel are perturbed with a uniform random noise with mean zero.\r\n :param x: numpy array with shape (nb_images, img_rows, img_cols, nb_channels)\r\n :param perturbation_strength: a scalar that is strength of noise.\r\n :param order: order of the norm (mimics numpy) possible values are np.inf, 1 or 2\r\n :return: numpy array with same shape as x denoting random perturbation of pixels of x with perturbation_strength\r\n \"\"\"\r\n nb_images, img_rows, img_cols, nb_channels = x.shape\r\n if order == np.inf:\r\n dev = (np.random.randint(0, 2, size=nb_images*img_rows*img_cols*nb_channels) * 2 * perturbation_strength - perturbation_strength)\r\n elif order == 1:\r\n tmp = np.random.rand(nb_images, img_rows*img_cols*nb_channels) - 0.5\r\n coef = perturbation_strength / np.sum(np.abs(tmp), axis=1)\r\n dev = tmp * np.expand_dims(coef, axis=1)\r\n elif order == 2:\r\n tmp = np.random.rand(nb_images, img_rows*img_cols*nb_channels) - 0.5\r\n coef = perturbation_strength / np.linalg.norm(tmp, 2, axis=1)\r\n dev = tmp * np.expand_dims(coef, axis=1)\r\n else:\r\n raise(ValueError('order should be np.inf, 1 or 2'))\r\n return x + dev.reshape(x.shape)\r\n\r\n\r\ndef read_attack(attack_name):\r\n \"\"\"\r\n reads a dictionary whose keys are perturbation strength and values are numpy array of adversarial test images\r\n :param attack_name: string of attack name (the folder containing adversarial images)\r\n :return: a dictionary with (key, value) as (scalar of perturbation strength, numpy array of adversarial images)\r\n \"\"\"\r\n directory = os.path.join('Attack Logs', attack_name)\r\n out = dict()\r\n for filename in os.listdir(directory):\r\n if filename.endswith('.npy'):\r\n path_to_file = os.path.join(directory, filename)\r\n out[np.float(os.path.splitext(filename)[0])] = np.load(path_to_file)\r\n return out\r\n\r\n\r\ndef log_plot_data(attack_name, header, arr):\r\n \"\"\"\r\n concatenates numpy arrays in arr and saves them as 'plot_data.csv'.\r\n :param attack_name: string of attack name (the folder in which data is to be logged)\r\n :param header: list of strings denoting header name for element of arr\r\n :param arr: list of numpy arrays to be logged. 
For example: [strength, adv_acc, ...]\r\n \"\"\"\r\n import pandas as pd\r\n directory = os.path.join('Attack Logs', attack_name)\r\n tmp = np.concatenate(tuple([np.array(a).reshape(-1, 1) for a in arr]), axis=1)\r\n df = pd.DataFrame(tmp, columns=header)\r\n df.to_csv(os.path.join(directory, 'plot_data'), index=False)\r\n\r\n\r\ndef load_plot_data(attack_name):\r\n \"\"\"\r\n reads data saved with log_plot_data\r\n :param attack_name: string of attack name (the folder to read from)\r\n :return: a pandas dataFrame containing plot data.\r\n \"\"\"\r\n import pandas as pd\r\n path = os.path.join('Attack Logs', attack_name, 'plot_data')\r\n df = pd.read_csv(path)\r\n return df\r\n\r\n\r\ndef mnist_denoise(data):\r\n \"\"\"\r\n denoise MNIST data by making background black.\r\n :param data: numpy array of shape (nb_images, img_rows, img_cols, nb_channels)\r\n :return: numpy array of denoised data with the same shape as input\r\n \"\"\"\r\n threshold = .45\r\n data[data < threshold] = 0\r\n return data\r\n"
] | [
[
"numpy.ones",
"numpy.fft.fftshift",
"numpy.random.seed",
"numpy.argsort",
"numpy.abs",
"numpy.expand_dims",
"numpy.random.rand",
"numpy.mean",
"numpy.random.set_state",
"numpy.load",
"numpy.zeros",
"numpy.random.get_state",
"pandas.read_csv",
"numpy.fft.fft2",
"numpy.arange",
"numpy.linalg.norm",
"numpy.random.shuffle",
"numpy.fft.ifft2",
"pandas.DataFrame",
"numpy.exp",
"numpy.fft.ifftshift",
"numpy.angle",
"numpy.array",
"numpy.concatenate",
"numpy.random.randint"
]
] |
HERA-Team/vis_cpu | [
"32de2318efd6690e4a8c6c64aad5eae6f429b10a"
] | [
"tests/test_plot.py"
] | [
"\"\"\"Compare vis_cpu with pyuvsim visibilities.\"\"\"\nimport numpy as np\nfrom pyuvsim.analyticbeam import AnalyticBeam\n\nfrom vis_cpu import conversions, plot\n\nnsource = 10\n\n\ndef test_source_az_za_beam():\n \"\"\"Test function that calculates the Az and ZA positions of sources.\"\"\"\n # Observation latitude and LST\n hera_lat = -30.7215\n lst = 0.78\n\n # Add random sources\n ra = np.random.uniform(low=0.0, high=360.0, size=nsource - 1)\n dec = -30.72 + np.random.random(nsource - 1) * 10.0\n ra = np.deg2rad(ra)\n dec = np.deg2rad(dec)\n\n # Point source coordinate transform, from equatorial to Cartesian\n crd_eq = conversions.point_source_crd_eq(ra, dec)\n\n # Beam model\n beam = AnalyticBeam(type=\"gaussian\", diameter=14.0)\n\n # Calculate source locations and positions\n az, za, beamval = plot._source_az_za_beam(\n lst, crd_eq, beam, ref_freq=100.0e6, latitude=np.deg2rad(hera_lat)\n )\n assert np.all(np.isfinite(az))\n assert np.all(np.isfinite(za))\n # (Values of beamval should be NaN below the horizon)\n\n\ndef test_animate_source_map():\n \"\"\"Test function that animates source positions vs LST.\"\"\"\n # Observation latitude and LSTs\n hera_lat = -30.7215\n lsts = np.linspace(0.0, 2.0 * np.pi, 5)\n\n # Add random sources\n ra = np.random.uniform(low=0.0, high=360.0, size=nsource - 1)\n dec = -30.72 + np.random.random(nsource - 1) * 10.0\n ra = np.deg2rad(ra)\n dec = np.deg2rad(dec)\n\n # Beam model\n beam = AnalyticBeam(type=\"gaussian\", diameter=14.0)\n\n # Generate animation\n anim = plot.animate_source_map(\n ra,\n dec,\n lsts,\n beam,\n interval=200,\n ref_freq=100.0e6,\n latitude=np.deg2rad(hera_lat),\n )\n assert anim is not None\n"
] | [
[
"numpy.random.uniform",
"numpy.random.random",
"numpy.linspace",
"numpy.isfinite",
"numpy.deg2rad"
]
] |
LBJ-Wade/bilby | [
"b1e02f1dfae03d4939cae9c95eff300c22919689",
"b1e02f1dfae03d4939cae9c95eff300c22919689"
] | [
"bilby/gw/source.py",
"examples/gw_examples/injection_examples/create_your_own_source_model.py"
] | [
"import numpy as np\n\nfrom ..core import utils\nfrom ..core.utils import logger\nfrom .conversion import bilby_to_lalsimulation_spins\nfrom .utils import (lalsim_GetApproximantFromString,\n lalsim_SimInspiralFD,\n lalsim_SimInspiralChooseFDWaveform,\n lalsim_SimInspiralWaveformParamsInsertTidalLambda1,\n lalsim_SimInspiralWaveformParamsInsertTidalLambda2,\n lalsim_SimInspiralChooseFDWaveformSequence)\n\n\ndef lal_binary_black_hole(\n frequency_array, mass_1, mass_2, luminosity_distance, a_1, tilt_1,\n phi_12, a_2, tilt_2, phi_jl, theta_jn, phase, **kwargs):\n \"\"\" A Binary Black Hole waveform model using lalsimulation\n\n Parameters\n ==========\n frequency_array: array_like\n The frequencies at which we want to calculate the strain\n mass_1: float\n The mass of the heavier object in solar masses\n mass_2: float\n The mass of the lighter object in solar masses\n luminosity_distance: float\n The luminosity distance in megaparsec\n a_1: float\n Dimensionless primary spin magnitude\n tilt_1: float\n Primary tilt angle\n phi_12: float\n Azimuthal angle between the two component spins\n a_2: float\n Dimensionless secondary spin magnitude\n tilt_2: float\n Secondary tilt angle\n phi_jl: float\n Azimuthal angle between the total binary angular momentum and the\n orbital angular momentum\n theta_jn: float\n Angle between the total binary angular momentum and the line of sight\n phase: float\n The phase at coalescence\n kwargs: dict\n Optional keyword arguments\n Supported arguments:\n\n - waveform_approximant\n - reference_frequency\n - minimum_frequency\n - maximum_frequency\n - catch_waveform_errors\n - pn_spin_order\n - pn_tidal_order\n - pn_phase_order\n - pn_amplitude_order\n - mode_array:\n Activate a specific mode array and evaluate the model using those\n modes only. e.g. waveform_arguments =\n dict(waveform_approximant='IMRPhenomHM', mode_array=[[2,2],[2,-2])\n returns the 22 and 2-2 modes only of IMRPhenomHM. You can only\n specify modes that are included in that particular model. e.g.\n waveform_arguments = dict(waveform_approximant='IMRPhenomHM',\n mode_array=[[2,2],[2,-2],[5,5],[5,-5]]) is not allowed because the\n 55 modes are not included in this model. Be aware that some models\n only take positive modes and return the positive and the negative\n mode together, while others need to call both. 
e.g.\n waveform_arguments = dict(waveform_approximant='IMRPhenomHM',\n mode_array=[[2,2],[4,-4]]) returns the 22 and 2-2 of IMRPhenomHM.\n However, waveform_arguments =\n dict(waveform_approximant='IMRPhenomXHM', mode_array=[[2,2],[4,-4]])\n returns the 22 and 4-4 of IMRPhenomXHM.\n\n Returns\n =======\n dict: A dictionary with the plus and cross polarisation strain modes\n \"\"\"\n waveform_kwargs = dict(\n waveform_approximant='IMRPhenomPv2', reference_frequency=50.0,\n minimum_frequency=20.0, maximum_frequency=frequency_array[-1],\n catch_waveform_errors=False, pn_spin_order=-1, pn_tidal_order=-1,\n pn_phase_order=-1, pn_amplitude_order=0)\n waveform_kwargs.update(kwargs)\n return _base_lal_cbc_fd_waveform(\n frequency_array=frequency_array, mass_1=mass_1, mass_2=mass_2,\n luminosity_distance=luminosity_distance, theta_jn=theta_jn, phase=phase,\n a_1=a_1, a_2=a_2, tilt_1=tilt_1, tilt_2=tilt_2, phi_12=phi_12,\n phi_jl=phi_jl, **waveform_kwargs)\n\n\ndef lal_binary_neutron_star(\n frequency_array, mass_1, mass_2, luminosity_distance, a_1, tilt_1,\n phi_12, a_2, tilt_2, phi_jl, theta_jn, phase, lambda_1, lambda_2,\n **kwargs):\n \"\"\" A Binary Neutron Star waveform model using lalsimulation\n\n Parameters\n ==========\n frequency_array: array_like\n The frequencies at which we want to calculate the strain\n mass_1: float\n The mass of the heavier object in solar masses\n mass_2: float\n The mass of the lighter object in solar masses\n luminosity_distance: float\n The luminosity distance in megaparsec\n a_1: float\n Dimensionless primary spin magnitude\n tilt_1: float\n Primary tilt angle\n phi_12: float\n Azimuthal angle between the two component spins\n a_2: float\n Dimensionless secondary spin magnitude\n tilt_2: float\n Secondary tilt angle\n phi_jl: float\n Azimuthal angle between the total binary angular momentum and the\n orbital angular momentum\n theta_jn: float\n Orbital inclination\n phase: float\n The phase at coalescence\n lambda_1: float\n Dimensionless tidal deformability of mass_1\n lambda_2: float\n Dimensionless tidal deformability of mass_2\n kwargs: dict\n Optional keyword arguments\n Supported arguments:\n\n - waveform_approximant\n - reference_frequency\n - minimum_frequency\n - maximum_frequency\n - catch_waveform_errors\n - pn_spin_order\n - pn_tidal_order\n - pn_phase_order\n - pn_amplitude_order\n - mode_array:\n Activate a specific mode array and evaluate the model using those\n modes only. e.g. waveform_arguments =\n dict(waveform_approximant='IMRPhenomHM', mode_array=[[2,2],[2,-2])\n returns the 22 and 2-2 modes only of IMRPhenomHM. You can only\n specify modes that are included in that particular model. e.g.\n waveform_arguments = dict(waveform_approximant='IMRPhenomHM',\n mode_array=[[2,2],[2,-2],[5,5],[5,-5]]) is not allowed because the\n 55 modes are not included in this model. Be aware that some models\n only take positive modes and return the positive and the negative\n mode together, while others need to call both. 
e.g.\n waveform_arguments = dict(waveform_approximant='IMRPhenomHM',\n mode_array=[[2,2],[4,-4]]) returns the 22 and 2-2 of IMRPhenomHM.\n However, waveform_arguments =\n dict(waveform_approximant='IMRPhenomXHM', mode_array=[[2,2],[4,-4]])\n returns the 22 and 4-4 of IMRPhenomXHM.\n\n Returns\n =======\n dict: A dictionary with the plus and cross polarisation strain modes\n \"\"\"\n waveform_kwargs = dict(\n waveform_approximant='IMRPhenomPv2_NRTidal', reference_frequency=50.0,\n minimum_frequency=20.0, maximum_frequency=frequency_array[-1],\n catch_waveform_errors=False, pn_spin_order=-1, pn_tidal_order=-1,\n pn_phase_order=-1, pn_amplitude_order=0)\n waveform_kwargs.update(kwargs)\n return _base_lal_cbc_fd_waveform(\n frequency_array=frequency_array, mass_1=mass_1, mass_2=mass_2,\n luminosity_distance=luminosity_distance, theta_jn=theta_jn, phase=phase,\n a_1=a_1, a_2=a_2, tilt_1=tilt_1, tilt_2=tilt_2, phi_12=phi_12,\n phi_jl=phi_jl, lambda_1=lambda_1, lambda_2=lambda_2, **waveform_kwargs)\n\n\ndef lal_eccentric_binary_black_hole_no_spins(\n frequency_array, mass_1, mass_2, eccentricity, luminosity_distance,\n theta_jn, phase, **kwargs):\n \"\"\" Eccentric binary black hole waveform model using lalsimulation (EccentricFD)\n\n Parameters\n ==========\n frequency_array: array_like\n The frequencies at which we want to calculate the strain\n mass_1: float\n The mass of the heavier object in solar masses\n mass_2: float\n The mass of the lighter object in solar masses\n eccentricity: float\n The orbital eccentricity of the system\n luminosity_distance: float\n The luminosity distance in megaparsec\n theta_jn: float\n Orbital inclination\n phase: float\n The phase at coalescence\n kwargs: dict\n Optional keyword arguments\n Supported arguments:\n\n - waveform_approximant\n - reference_frequency\n - minimum_frequency\n - maximum_frequency\n - catch_waveform_errors\n - pn_spin_order\n - pn_tidal_order\n - pn_phase_order\n - pn_amplitude_order\n - mode_array:\n Activate a specific mode array and evaluate the model using those\n modes only. e.g. waveform_arguments =\n dict(waveform_approximant='IMRPhenomHM', mode_array=[[2,2],[2,-2])\n returns the 22 and 2-2 modes only of IMRPhenomHM. You can only\n specify modes that are included in that particular model. e.g.\n waveform_arguments = dict(waveform_approximant='IMRPhenomHM',\n mode_array=[[2,2],[2,-2],[5,5],[5,-5]]) is not allowed because the\n 55 modes are not included in this model. Be aware that some models\n only take positive modes and return the positive and the negative\n mode together, while others need to call both. 
e.g.\n waveform_arguments = dict(waveform_approximant='IMRPhenomHM',\n mode_array=[[2,2],[4,-4]]) returns the 22 and 2-2 of IMRPhenomHM.\n However, waveform_arguments =\n dict(waveform_approximant='IMRPhenomXHM', mode_array=[[2,2],[4,-4]])\n returns the 22 and 4-4 of IMRPhenomXHM.\n\n Returns\n =======\n dict: A dictionary with the plus and cross polarisation strain modes\n \"\"\"\n waveform_kwargs = dict(\n waveform_approximant='EccentricFD', reference_frequency=10.0,\n minimum_frequency=10.0, maximum_frequency=frequency_array[-1],\n catch_waveform_errors=False, pn_spin_order=-1, pn_tidal_order=-1,\n pn_phase_order=-1, pn_amplitude_order=0)\n waveform_kwargs.update(kwargs)\n return _base_lal_cbc_fd_waveform(\n frequency_array=frequency_array, mass_1=mass_1, mass_2=mass_2,\n luminosity_distance=luminosity_distance, theta_jn=theta_jn, phase=phase,\n eccentricity=eccentricity, **waveform_kwargs)\n\n\ndef _base_lal_cbc_fd_waveform(\n frequency_array, mass_1, mass_2, luminosity_distance, theta_jn, phase,\n a_1=0.0, a_2=0.0, tilt_1=0.0, tilt_2=0.0, phi_12=0.0, phi_jl=0.0,\n lambda_1=0.0, lambda_2=0.0, eccentricity=0.0, **waveform_kwargs):\n \"\"\" Generate a cbc waveform model using lalsimulation\n\n Parameters\n ==========\n frequency_array: array_like\n The frequencies at which we want to calculate the strain\n mass_1: float\n The mass of the heavier object in solar masses\n mass_2: float\n The mass of the lighter object in solar masses\n luminosity_distance: float\n The luminosity distance in megaparsec\n a_1: float\n Dimensionless primary spin magnitude\n tilt_1: float\n Primary tilt angle\n phi_12: float\n Azimuthal angle between the component spins\n a_2: float\n Dimensionless secondary spin magnitude\n tilt_2: float\n Secondary tilt angle\n phi_jl: float\n Azimuthal angle between the total and orbital angular momenta\n theta_jn: float\n Orbital inclination\n phase: float\n The phase at coalescence\n eccentricity: float\n Binary eccentricity\n lambda_1: float\n Tidal deformability of the more massive object\n lambda_2: float\n Tidal deformability of the less massive object\n kwargs: dict\n Optional keyword arguments\n\n Returns\n =======\n dict: A dictionary with the plus and cross polarisation strain modes\n \"\"\"\n import lal\n import lalsimulation as lalsim\n\n waveform_approximant = waveform_kwargs['waveform_approximant']\n reference_frequency = waveform_kwargs['reference_frequency']\n minimum_frequency = waveform_kwargs['minimum_frequency']\n maximum_frequency = waveform_kwargs['maximum_frequency']\n catch_waveform_errors = waveform_kwargs['catch_waveform_errors']\n pn_spin_order = waveform_kwargs['pn_spin_order']\n pn_tidal_order = waveform_kwargs['pn_tidal_order']\n pn_phase_order = waveform_kwargs['pn_phase_order']\n pn_amplitude_order = waveform_kwargs['pn_amplitude_order']\n waveform_dictionary = waveform_kwargs.get(\n 'lal_waveform_dictionary', lal.CreateDict()\n )\n\n approximant = lalsim_GetApproximantFromString(waveform_approximant)\n\n if pn_amplitude_order != 0:\n start_frequency = lalsim.SimInspiralfLow2fStart(\n minimum_frequency, int(pn_amplitude_order), approximant)\n else:\n start_frequency = minimum_frequency\n\n delta_frequency = frequency_array[1] - frequency_array[0]\n\n frequency_bounds = ((frequency_array >= minimum_frequency) *\n (frequency_array <= maximum_frequency))\n\n luminosity_distance = luminosity_distance * 1e6 * utils.parsec\n mass_1 = mass_1 * utils.solar_mass\n mass_2 = mass_2 * utils.solar_mass\n\n iota, spin_1x, spin_1y, spin_1z, spin_2x, 
spin_2y, spin_2z = bilby_to_lalsimulation_spins(\n theta_jn=theta_jn, phi_jl=phi_jl, tilt_1=tilt_1, tilt_2=tilt_2,\n phi_12=phi_12, a_1=a_1, a_2=a_2, mass_1=mass_1, mass_2=mass_2,\n reference_frequency=reference_frequency, phase=phase)\n\n longitude_ascending_nodes = 0.0\n mean_per_ano = 0.0\n\n lalsim.SimInspiralWaveformParamsInsertPNSpinOrder(\n waveform_dictionary, int(pn_spin_order))\n lalsim.SimInspiralWaveformParamsInsertPNTidalOrder(\n waveform_dictionary, int(pn_tidal_order))\n lalsim.SimInspiralWaveformParamsInsertPNPhaseOrder(\n waveform_dictionary, int(pn_phase_order))\n lalsim.SimInspiralWaveformParamsInsertPNAmplitudeOrder(\n waveform_dictionary, int(pn_amplitude_order))\n lalsim_SimInspiralWaveformParamsInsertTidalLambda1(\n waveform_dictionary, lambda_1)\n lalsim_SimInspiralWaveformParamsInsertTidalLambda2(\n waveform_dictionary, lambda_2)\n\n for key, value in waveform_kwargs.items():\n func = getattr(lalsim, \"SimInspiralWaveformParamsInsert\" + key, None)\n if func is not None:\n func(waveform_dictionary, value)\n\n if waveform_kwargs.get('numerical_relativity_file', None) is not None:\n lalsim.SimInspiralWaveformParamsInsertNumRelData(\n waveform_dictionary, waveform_kwargs['numerical_relativity_file'])\n\n if ('mode_array' in waveform_kwargs) and waveform_kwargs['mode_array'] is not None:\n mode_array = waveform_kwargs['mode_array']\n mode_array_lal = lalsim.SimInspiralCreateModeArray()\n for mode in mode_array:\n lalsim.SimInspiralModeArrayActivateMode(mode_array_lal, mode[0], mode[1])\n lalsim.SimInspiralWaveformParamsInsertModeArray(waveform_dictionary, mode_array_lal)\n\n if lalsim.SimInspiralImplementedFDApproximants(approximant):\n wf_func = lalsim_SimInspiralChooseFDWaveform\n else:\n wf_func = lalsim_SimInspiralFD\n try:\n hplus, hcross = wf_func(\n mass_1, mass_2, spin_1x, spin_1y, spin_1z, spin_2x, spin_2y,\n spin_2z, luminosity_distance, iota, phase,\n longitude_ascending_nodes, eccentricity, mean_per_ano, delta_frequency,\n start_frequency, maximum_frequency, reference_frequency,\n waveform_dictionary, approximant)\n except Exception as e:\n if not catch_waveform_errors:\n raise\n else:\n EDOM = (e.args[0] == 'Internal function call failed: Input domain error')\n if EDOM:\n failed_parameters = dict(mass_1=mass_1, mass_2=mass_2,\n spin_1=(spin_1x, spin_2y, spin_1z),\n spin_2=(spin_2x, spin_2y, spin_2z),\n luminosity_distance=luminosity_distance,\n iota=iota, phase=phase,\n eccentricity=eccentricity,\n start_frequency=start_frequency)\n logger.warning(\"Evaluating the waveform failed with error: {}\\n\".format(e) +\n \"The parameters were {}\\n\".format(failed_parameters) +\n \"Likelihood will be set to -inf.\")\n return None\n else:\n raise\n\n h_plus = np.zeros_like(frequency_array, dtype=complex)\n h_cross = np.zeros_like(frequency_array, dtype=complex)\n\n if len(hplus.data.data) > len(frequency_array):\n logger.debug(\"LALsim waveform longer than bilby's `frequency_array`\" +\n \"({} vs {}), \".format(len(hplus.data.data), len(frequency_array)) +\n \"probably because padded with zeros up to the next power of two length.\" +\n \" Truncating lalsim array.\")\n h_plus = hplus.data.data[:len(h_plus)]\n h_cross = hcross.data.data[:len(h_cross)]\n else:\n h_plus[:len(hplus.data.data)] = hplus.data.data\n h_cross[:len(hcross.data.data)] = hcross.data.data\n\n h_plus *= frequency_bounds\n h_cross *= frequency_bounds\n\n if wf_func == lalsim_SimInspiralFD:\n dt = 1 / hplus.deltaF + (hplus.epoch.gpsSeconds + hplus.epoch.gpsNanoSeconds * 1e-9)\n time_shift = 
np.exp(-1j * 2 * np.pi * dt * frequency_array[frequency_bounds])\n h_plus[frequency_bounds] *= time_shift\n h_cross[frequency_bounds] *= time_shift\n\n return dict(plus=h_plus, cross=h_cross)\n\n\ndef binary_black_hole_roq(\n frequency_array, mass_1, mass_2, luminosity_distance, a_1, tilt_1,\n phi_12, a_2, tilt_2, phi_jl, theta_jn, phase, **waveform_arguments):\n waveform_kwargs = dict(\n waveform_approximant='IMRPhenomPv2', reference_frequency=20.0)\n waveform_kwargs.update(waveform_arguments)\n return _base_roq_waveform(\n frequency_array=frequency_array, mass_1=mass_1, mass_2=mass_2,\n luminosity_distance=luminosity_distance, theta_jn=theta_jn, phase=phase,\n a_1=a_1, a_2=a_2, tilt_1=tilt_1, tilt_2=tilt_2, phi_jl=phi_jl,\n phi_12=phi_12, lambda_1=0.0, lambda_2=0.0, **waveform_kwargs)\n\n\ndef binary_neutron_star_roq(\n frequency_array, mass_1, mass_2, luminosity_distance, a_1, tilt_1,\n phi_12, a_2, tilt_2, phi_jl, lambda_1, lambda_2, theta_jn, phase,\n **waveform_arguments):\n waveform_kwargs = dict(\n waveform_approximant='IMRPhenomD_NRTidal', reference_frequency=20.0)\n waveform_kwargs.update(waveform_arguments)\n return _base_roq_waveform(\n frequency_array=frequency_array, mass_1=mass_1, mass_2=mass_2,\n luminosity_distance=luminosity_distance, theta_jn=theta_jn, phase=phase,\n a_1=a_1, a_2=a_2, tilt_1=tilt_1, tilt_2=tilt_2, phi_jl=phi_jl,\n phi_12=phi_12, lambda_1=lambda_1, lambda_2=lambda_2, **waveform_kwargs)\n\n\ndef _base_roq_waveform(\n frequency_array, mass_1, mass_2, luminosity_distance, a_1, tilt_1,\n phi_12, a_2, tilt_2, lambda_1, lambda_2, phi_jl, theta_jn, phase,\n **waveform_arguments):\n \"\"\"\n See https://git.ligo.org/lscsoft/lalsuite/blob/master/lalsimulation/src/LALSimInspiral.c#L1460\n\n Parameters\n ==========\n frequency_array: np.array\n This input is ignored for the roq source model\n mass_1: float\n The mass of the heavier object in solar masses\n mass_2: float\n The mass of the lighter object in solar masses\n luminosity_distance: float\n The luminosity distance in megaparsec\n a_1: float\n Dimensionless primary spin magnitude\n tilt_1: float\n Primary tilt angle\n phi_12: float\n\n a_2: float\n Dimensionless secondary spin magnitude\n tilt_2: float\n Secondary tilt angle\n phi_jl: float\n\n theta_jn: float\n Orbital inclination\n phase: float\n The phase at coalescence\n\n Waveform arguments\n ===================\n Non-sampled extra data used in the source model calculation\n frequency_nodes_linear: np.array\n frequency_nodes_quadratic: np.array\n reference_frequency: float\n approximant: str\n\n Note: for the frequency_nodes_linear and frequency_nodes_quadratic arguments,\n if using data from https://git.ligo.org/lscsoft/ROQ_data, this should be\n loaded as `np.load(filename).T`.\n\n Returns\n =======\n waveform_polarizations: dict\n Dict containing plus and cross modes evaluated at the linear and\n quadratic frequency nodes.\n \"\"\"\n from lal import CreateDict\n frequency_nodes_linear = waveform_arguments['frequency_nodes_linear']\n frequency_nodes_quadratic = waveform_arguments['frequency_nodes_quadratic']\n reference_frequency = waveform_arguments['reference_frequency']\n approximant = lalsim_GetApproximantFromString(\n waveform_arguments['waveform_approximant'])\n\n luminosity_distance = luminosity_distance * 1e6 * utils.parsec\n mass_1 = mass_1 * utils.solar_mass\n mass_2 = mass_2 * utils.solar_mass\n\n waveform_dictionary = CreateDict()\n lalsim_SimInspiralWaveformParamsInsertTidalLambda1(\n waveform_dictionary, lambda_1)\n 
lalsim_SimInspiralWaveformParamsInsertTidalLambda2(\n waveform_dictionary, lambda_2)\n\n iota, spin_1x, spin_1y, spin_1z, spin_2x, spin_2y, spin_2z = bilby_to_lalsimulation_spins(\n theta_jn=theta_jn, phi_jl=phi_jl, tilt_1=tilt_1, tilt_2=tilt_2,\n phi_12=phi_12, a_1=a_1, a_2=a_2, mass_1=mass_1, mass_2=mass_2,\n reference_frequency=reference_frequency, phase=phase)\n\n h_linear_plus, h_linear_cross = lalsim_SimInspiralChooseFDWaveformSequence(\n phase, mass_1, mass_2, spin_1x, spin_1y, spin_1z, spin_2x, spin_2y,\n spin_2z, reference_frequency, luminosity_distance, iota,\n waveform_dictionary, approximant, frequency_nodes_linear)\n\n waveform_dictionary = CreateDict()\n lalsim_SimInspiralWaveformParamsInsertTidalLambda1(\n waveform_dictionary, lambda_1)\n lalsim_SimInspiralWaveformParamsInsertTidalLambda2(\n waveform_dictionary, lambda_2)\n\n h_quadratic_plus, h_quadratic_cross = lalsim_SimInspiralChooseFDWaveformSequence(\n phase, mass_1, mass_2, spin_1x, spin_1y, spin_1z, spin_2x, spin_2y,\n spin_2z, reference_frequency, luminosity_distance, iota,\n waveform_dictionary, approximant, frequency_nodes_quadratic)\n\n waveform_polarizations = dict()\n waveform_polarizations['linear'] = dict(\n plus=h_linear_plus.data.data, cross=h_linear_cross.data.data)\n waveform_polarizations['quadratic'] = dict(\n plus=h_quadratic_plus.data.data, cross=h_quadratic_cross.data.data)\n\n return waveform_polarizations\n\n\ndef sinegaussian(frequency_array, hrss, Q, frequency, **kwargs):\n tau = Q / (np.sqrt(2.0) * np.pi * frequency)\n temp = Q / (4.0 * np.sqrt(np.pi) * frequency)\n fm = frequency_array - frequency\n fp = frequency_array + frequency\n\n h_plus = ((hrss / np.sqrt(temp * (1 + np.exp(-Q**2)))) *\n ((np.sqrt(np.pi) * tau) / 2.0) *\n (np.exp(-fm**2 * np.pi**2 * tau**2) +\n np.exp(-fp**2 * np.pi**2 * tau**2)))\n\n h_cross = (-1j * (hrss / np.sqrt(temp * (1 - np.exp(-Q**2)))) *\n ((np.sqrt(np.pi) * tau) / 2.0) *\n (np.exp(-fm**2 * np.pi**2 * tau**2) -\n np.exp(-fp**2 * np.pi**2 * tau**2)))\n\n return{'plus': h_plus, 'cross': h_cross}\n\n\ndef supernova(\n frequency_array, realPCs, imagPCs, file_path, luminosity_distance, **kwargs):\n \"\"\" A supernova NR simulation for injections \"\"\"\n\n realhplus, imaghplus, realhcross, imaghcross = np.loadtxt(\n file_path, usecols=(0, 1, 2, 3), unpack=True)\n\n # waveform in file at 10kpc\n scaling = 1e-3 * (10.0 / luminosity_distance)\n\n h_plus = scaling * (realhplus + 1.0j * imaghplus)\n h_cross = scaling * (realhcross + 1.0j * imaghcross)\n return {'plus': h_plus, 'cross': h_cross}\n\n\ndef supernova_pca_model(\n frequency_array, pc_coeff1, pc_coeff2, pc_coeff3, pc_coeff4, pc_coeff5,\n luminosity_distance, **kwargs):\n \"\"\" Supernova signal model \"\"\"\n\n realPCs = kwargs['realPCs']\n imagPCs = kwargs['imagPCs']\n\n pc1 = realPCs[:, 0] + 1.0j * imagPCs[:, 0]\n pc2 = realPCs[:, 1] + 1.0j * imagPCs[:, 1]\n pc3 = realPCs[:, 2] + 1.0j * imagPCs[:, 2]\n pc4 = realPCs[:, 3] + 1.0j * imagPCs[:, 3]\n pc5 = realPCs[:, 4] + 1.0j * imagPCs[:, 5]\n\n # file at 10kpc\n scaling = 1e-23 * (10.0 / luminosity_distance)\n\n h_plus = scaling * (pc_coeff1 * pc1 + pc_coeff2 * pc2 + pc_coeff3 * pc3 +\n pc_coeff4 * pc4 + pc_coeff5 * pc5)\n h_cross = scaling * (pc_coeff1 * pc1 + pc_coeff2 * pc2 + pc_coeff3 * pc3 +\n pc_coeff4 * pc4 + pc_coeff5 * pc5)\n\n return {'plus': h_plus, 'cross': h_cross}\n\n\nprecession_only = {\n \"tilt_1\", \"tilt_2\", \"phi_12\", \"phi_jl\", \"chi_1_in_plane\", \"chi_2_in_plane\",\n}\n\nspin = {\n \"a_1\", \"a_2\", \"tilt_1\", \"tilt_2\", \"phi_12\", 
\"phi_jl\", \"chi_1\", \"chi_2\",\n \"chi_1_in_plane\", \"chi_2_in_plane\",\n}\nmass = {\n \"chirp_mass\", \"mass_ratio\", \"total_mass\", \"mass_1\", \"mass_2\",\n \"symmetric_mass_ratio\",\n}\nprimary_spin_and_q = {\n \"a_1\", \"chi_1\", \"mass_ratio\"\n}\ntidal = {\n \"lambda_1\", \"lambda_2\", \"lambda_tilde\", \"delta_lambda_tilde\"\n}\nphase = {\n \"phase\", \"delta_phase\",\n}\nextrinsic = {\n \"azimuth\", \"zenith\", \"luminosity_distance\", \"psi\", \"theta_jn\",\n \"cos_theta_jn\", \"geocent_time\", \"time_jitter\", \"ra\", \"dec\",\n \"H1_time\", \"L1_time\", \"V1_time\",\n}\n\nPARAMETER_SETS = dict(\n spin=spin, mass=mass, phase=phase, extrinsic=extrinsic,\n tidal=tidal, primary_spin_and_q=primary_spin_and_q,\n intrinsic=spin.union(mass).union(phase).union(tidal),\n precession_only=precession_only,\n)\n",
"#!/usr/bin/env python\n\"\"\"\nA script to demonstrate how to use your own source model\n\"\"\"\nimport bilby\nimport numpy as np\n\n# First set up logging and some output directories and labels\noutdir = 'outdir'\nlabel = 'create_your_own_source_model'\nsampling_frequency = 4096\nduration = 1\n\n\n# Here we define out source model - this is the sine-Gaussian model in the\n# frequency domain.\ndef sine_gaussian(f, A, f0, tau, phi0, geocent_time, ra, dec, psi):\n arg = -(np.pi * tau * (f - f0))**2 + 1j * phi0\n plus = np.sqrt(np.pi) * A * tau * np.exp(arg) / 2.\n cross = plus * np.exp(1j * np.pi / 2)\n return {'plus': plus, 'cross': cross}\n\n\n# We now define some parameters that we will inject\ninjection_parameters = dict(A=1e-23, f0=100, tau=1, phi0=0, geocent_time=0,\n ra=0, dec=0, psi=0)\n\n# Now we pass our source function to the WaveformGenerator\nwaveform_generator = bilby.gw.waveform_generator.WaveformGenerator(\n duration=duration, sampling_frequency=sampling_frequency,\n frequency_domain_source_model=sine_gaussian)\n\n# Set up interferometers.\nifos = bilby.gw.detector.InterferometerList(['H1', 'L1'])\nifos.set_strain_data_from_power_spectral_densities(\n sampling_frequency=sampling_frequency, duration=duration,\n start_time=injection_parameters['geocent_time'] - 3)\nifos.inject_signal(waveform_generator=waveform_generator,\n parameters=injection_parameters)\n\n# Here we define the priors for the search. We use the injection parameters\n# except for the amplitude, f0, and geocent_time\nprior = injection_parameters.copy()\nprior['A'] = bilby.core.prior.LogUniform(minimum=1e-25, maximum=1e-21, name='A')\nprior['f0'] = bilby.core.prior.Uniform(90, 110, 'f')\n\nlikelihood = bilby.gw.likelihood.GravitationalWaveTransient(\n interferometers=ifos, waveform_generator=waveform_generator)\n\nresult = bilby.core.sampler.run_sampler(\n likelihood, prior, sampler='dynesty', outdir=outdir, label=label,\n resume=False, sample='unif', injection_parameters=injection_parameters)\nresult.plot_corner()\n"
] | [
[
"numpy.sqrt",
"numpy.zeros_like",
"numpy.exp",
"numpy.loadtxt"
],
[
"numpy.sqrt",
"numpy.exp"
]
] |
ajclaros/rl_legged_walker | [
"26d0e124ef38045943449c2772b966571117683b"
] | [
"vis/plot_2d_neural_outputs.py"
] | [
"\nfrom jason.ctrnn import CTRNN\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport numpy as np\nimport random\nimport sys\nimport json\nimport os\nimport math\nfrom util.fitness_functions import fitness_maximize_output_change, fitness_frequency_match\n\n\ndef main():\n trial_seed=1\n sol_seed=6\n size=2\n directory=f\"data/perturbed_networks/nnsize-{size}_sol-seed-{sol_seed}/seed{trial_seed}/\"\n directory=f\"jason/data/ctrnn_snapshots_recovery/\"\n \n\n #plot_2d_neural_outputs\n\n \n #filename = f\"data/evolved_solutions/mga_pop-20_gen-1000/ALL/discovery_mga_best_nn{size}_seed-{seed}.json\" \n plot_2d_neural_outputs( directory, size=2)\n\n\n\ndef plot_2d_neural_outputs(directory, size=2, stepsize=0.01):\n \n filenames = os.listdir(directory)\n\n rows=int( math.ceil( math.sqrt(len(filenames)) ))\n print(rows)\n fig, axs = plt.subplots(rows, rows)\n\n count=0\n for filename in filenames:\n count+=1\n #r=count/2\n #c=count%2+1\n\n filepath=f\"{directory}{filename}\"\n\n ctrnn = CTRNN( size)\n ctrnn.load_json( filepath )\n mid_point=50\n\n fitness, output_history = simulate_ctrnn(ctrnn, stepsize=0.01, init_duration=0, test_duration=100)\n output_history = output_history.transpose(1,0)\n ax1 = plt.subplot(rows,rows,count)\n \n start_of_test=int(mid_point/stepsize)\n\n ax1.plot(output_history[0][0:start_of_test],output_history[1][0:start_of_test], color='r')\n ax1.plot(output_history[0][start_of_test:],output_history[1][start_of_test:], color='b')\n ax1.set_xlim(0,1)\n ax1.set_ylim(0,1)\n\n ax1.set_title(f\"{filename}\\n{fitness:0.2f}\")\n plt.show()\n\n\ndef simulate_ctrnn(ctrnn, stepsize=0.01, init_duration=0, test_duration=10):\n \"\"\"This function simply provides an average change in output per neuron per time. Optionally can include initial duration to prevent transient changes at start of simulation.\"\"\"\n\n\n init_time = np.arange(0.0, init_duration, stepsize)\n test_time = np.arange(0.0, test_duration, stepsize)\n\n output_history=np.zeros((len(test_time),ctrnn.size))\n\n #allow transients to clear\n ctrnn.initializeState( np.zeros( ctrnn.size ))\n\n #ctrnn.initializeState( np.asarray( [-5.0, 10.0] ))\n\n for i in range(len(init_time)):\n ctrnn.step(stepsize)\n \n #evaluate after transient period\n change_in_output=0\n for i in range(len(test_time)):\n output_history[i] = ctrnn.outputs\n pastOutputs = ctrnn.outputs\n ctrnn.step(stepsize)\n currentOutputs = ctrnn.outputs\n change_in_output += np.sum(abs(currentOutputs - pastOutputs) ) \n \n #average over time and per neuron\n return change_in_output / ctrnn.size / test_duration, output_history\n\n\n\nmain()\n"
] | [
[
"numpy.zeros",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
]
] |
RaphaelMeudec/tf-explain | [
"1a75841762985e55abe19107d09279f68f5731c8"
] | [
"examples/callbacks/mnist.py"
] | [
"import numpy as np\nimport tensorflow as tf\nimport tf_explain\n\nINPUT_SHAPE = (28, 28, 1)\nNUM_CLASSES = 10\n\nAVAILABLE_DATASETS = {\n 'mnist': tf.keras.datasets.mnist,\n 'fashion_mnist': tf.keras.datasets.fashion_mnist,\n}\nDATASET_NAME = 'fashion_mnist' # Choose between \"mnist\" and \"fashion_mnist\"\n\n# Load dataset\ndataset = AVAILABLE_DATASETS[DATASET_NAME]\n(train_images, train_labels), (test_images, test_labels) = dataset.load_data()\n\n# Convert from (28, 28) images to (28, 28, 1)\ntrain_images = train_images[..., tf.newaxis]\ntest_images = test_images[..., tf.newaxis]\n\n# One hot encore labels 0, 1, .., 9 to [0, 0, .., 1, 0, 0]\ntrain_labels = tf.keras.utils.to_categorical(train_labels, num_classes=NUM_CLASSES)\ntest_labels = tf.keras.utils.to_categorical(test_labels, num_classes=NUM_CLASSES)\n\n# Create model\nimg_input = tf.keras.Input(INPUT_SHAPE)\n\nx = tf.keras.layers.Conv2D(filters=32, kernel_size=(3, 3), activation='relu')(img_input)\nx = tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation='relu', name='target_layer')(x)\nx = tf.keras.layers.MaxPool2D(pool_size=(2, 2))(x)\n\nx = tf.keras.layers.Dropout(0.25)(x)\nx = tf.keras.layers.Flatten()(x)\n\nx = tf.keras.layers.Dense(128, activation='relu')(x)\nx = tf.keras.layers.Dropout(0.5)(x)\n\nx = tf.keras.layers.Dense(NUM_CLASSES, activation='softmax')(x)\n\nmodel = tf.keras.Model(img_input, x)\nmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n\n# Select a subset of the validation data to examine\n# Here, we choose 5 elements with label \"0\" == [1, 0, 0, .., 0]\nvalidation_class_zero = (np.array([\n el for el, label in zip(test_images, test_labels)\n if np.all(label == np.array([1] + [0] * 9))\n][0:5]), None)\n# Select a subset of the validation data to examine\n# Here, we choose 5 elements with label \"4\" == [0, 0, 0, 0, 1, 0, 0, 0, 0, 0]\nvalidation_class_fours = (np.array([\n el for el, label in zip(test_images, test_labels)\n if np.all(label == np.array([0] * 4 + [1] + [0] * 5))\n][0:5]), None)\n\n# Instantiate callbacks\n# class_index value should match the validation_data selected above\ncallbacks = [\n tf_explain.callbacks.GradCAMCallback(validation_class_zero, 'target_layer', class_index=0),\n tf_explain.callbacks.GradCAMCallback(validation_class_fours, 'target_layer', class_index=4),\n tf_explain.callbacks.ActivationsVisualizationCallback(validation_class_zero, layers_name=['target_layer']),\n tf_explain.callbacks.SmoothGradCallback(validation_class_zero, class_index=0, num_samples=15, noise=1.),\n tf_explain.callbacks.IntegratedGradientsCallback(validation_class_zero, class_index=0, n_steps=10),\n]\n\n# Start training\nmodel.fit(train_images, train_labels, epochs=5, callbacks=callbacks)\n"
] | [
[
"tensorflow.keras.utils.to_categorical",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.Model",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.MaxPool2D",
"numpy.array",
"tensorflow.keras.Input"
]
] |
SanjayMarreddi/GameInShape | [
"64d64c2cb7bc472c3319b949f6f0b6b67ea15910"
] | [
"Web_App/GesturePredictor.py"
] | [
"# Importing relevant libraries\r\nimport cv2\r\nimport imutils\r\nimport tflearn\r\nimport numpy as np\r\nfrom PIL import Image\r\nimport tensorflow as tf\r\n\r\nfrom tensorflow.python.framework import ops\r\nfrom tflearn.layers.estimator import regression\r\nfrom tflearn.layers.conv import conv_2d, max_pool_2d\r\nfrom tflearn.layers.core import input_data, dropout, fully_connected\r\n\r\n# Import all the functionalities from Switch_Control\r\nfrom Web_App.Switch_Control import *\r\n\r\n# global variables\r\nbg = None\r\n\r\n\r\ndef resizeImage(imageName):\r\n basewidth = 100\r\n img = Image.open(imageName)\r\n wpercent = (basewidth/float(img.size[0]))\r\n hsize = int((float(img.size[1])*float(wpercent)))\r\n img = img.resize((basewidth, hsize), Image.ANTIALIAS)\r\n img.save(imageName)\r\n\r\n\r\ndef run_avg(image, aWeight):\r\n global bg\r\n # initialize the background\r\n if bg is None:\r\n bg = image.copy().astype(\"float\")\r\n return\r\n\r\n # compute weighted average, accumulate it and update the background\r\n cv2.accumulateWeighted(image, bg, aWeight)\r\n\r\n\r\ndef segment(image, threshold=25):\r\n global bg\r\n # find the absolute difference between background and current frame\r\n diff = cv2.absdiff(bg.astype(\"uint8\"), image)\r\n\r\n # threshold the diff image so that we get the foreground\r\n thresholded = cv2.threshold(diff,\r\n threshold,\r\n 255,\r\n cv2.THRESH_BINARY)[1]\r\n\r\n # get the contours in the thresholded image\r\n (cnts, _) = cv2.findContours(thresholded.copy(),\r\n cv2.RETR_EXTERNAL,\r\n cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n # return None, if no contours detected\r\n if len(cnts) == 0:\r\n return\r\n else:\r\n # based on contour area, get the maximum contour which is the hand\r\n segmented = max(cnts, key=cv2.contourArea)\r\n return (thresholded, segmented)\r\n\r\n\r\ndef getPredictedClass():\r\n\r\n image = cv2.imread('Temp.png')\r\n gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n gray_image = cv2.resize(gray_image, (89, 100))\r\n prediction = model.predict([gray_image.reshape(89, 100, 1)])\r\n return np.argmax(prediction), (np.amax(prediction) / (prediction[0][0] + prediction[0][1] + prediction[0][2]))\r\n\r\n\r\n# Model defined\r\nops.reset_default_graph()\r\nconvnet = input_data(shape=[None, 89, 100, 1], name='input')\r\nconvnet = conv_2d(convnet, 32, 2, activation='relu')\r\nconvnet = max_pool_2d(convnet, 2)\r\nconvnet = conv_2d(convnet, 64, 2, activation='relu')\r\nconvnet = max_pool_2d(convnet, 2)\r\n\r\nconvnet = conv_2d(convnet, 128, 2, activation='relu')\r\nconvnet = max_pool_2d(convnet, 2)\r\n\r\nconvnet = conv_2d(convnet, 256, 2, activation='relu')\r\nconvnet = max_pool_2d(convnet, 2)\r\n\r\nconvnet = conv_2d(convnet, 256, 2, activation='relu')\r\nconvnet = max_pool_2d(convnet, 2)\r\n\r\nconvnet = conv_2d(convnet, 128, 2, activation='relu')\r\nconvnet = max_pool_2d(convnet, 2)\r\n\r\nconvnet = conv_2d(convnet, 64, 2, activation='relu')\r\nconvnet = max_pool_2d(convnet, 2)\r\n\r\nconvnet = fully_connected(convnet, 1000, activation='relu')\r\nconvnet = dropout(convnet, 0.75)\r\n\r\nconvnet = fully_connected(convnet, 3, activation='softmax')\r\n\r\nconvnet = regression(convnet, optimizer='adam', learning_rate=0.001,\r\n loss='categorical_crossentropy', name='regression')\r\n\r\nmodel = tflearn.DNN(convnet, tensorboard_verbose=0)\r\n\r\n# Load Saved Model\r\nmodel.load(\"Web_App/TrainedModel/GestureRecogModel.tfl\")\r\n\r\n\r\n\"\"\" \r\nNote: This Trained Model for Hand Gesture Recognition is taken from\r\n 
https://github.com/SparshaSaha/Hand-Gesture-Recognition-Using-Background-Elllimination-and-Convolution-Neural-Network \r\n\"\"\"\r\n"
] | [
[
"numpy.amax",
"numpy.argmax",
"tensorflow.python.framework.ops.reset_default_graph"
]
] |
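The `Web_App/GesturePredictor.py` record above segments the hand by maintaining a running-average background model and thresholding the absolute difference against each new frame. A minimal sketch of that pipeline, assuming a grayscale `gray_frame` array and OpenCV 4.x (the function names follow the record's own calls; the 0.5 weight and 25 threshold are illustrative defaults, not values taken from the record):

```python
import cv2
import numpy as np

bg = None  # running-average background estimate, built up over the first frames


def update_background(gray_frame, weight=0.5):
    """Accumulate a floating-point background model with cv2.accumulateWeighted."""
    global bg
    if bg is None:
        bg = gray_frame.copy().astype("float")
        return
    cv2.accumulateWeighted(gray_frame, bg, weight)


def segment_hand(gray_frame, threshold=25):
    """Threshold |frame - background| and keep the largest contour as the hand."""
    diff = cv2.absdiff(bg.astype("uint8"), gray_frame)
    thresholded = cv2.threshold(diff, threshold, 255, cv2.THRESH_BINARY)[1]
    # OpenCV 4.x returns (contours, hierarchy); 3.x adds a leading image element.
    contours, _ = cv2.findContours(thresholded.copy(),
                                   cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    if not contours:
        return None
    return thresholded, max(contours, key=cv2.contourArea)
```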
int-brain-lab/ibllib | [
"93be6b98848758e05cdc9398caaf19e6a68f7386"
] | [
"brainbox/plot.py"
] | [
"\"\"\"\nPlots metrics that assess quality of single units. Some functions here generate plots for the\noutput of functions in the brainbox `single_units.py` module.\n\nRun the following to set-up the workspace to run the docstring examples:\n>>> from brainbox import processing\n>>> import alf.io as aio\n>>> import numpy as np\n>>> import matplotlib.pyplot as plt\n>>> import ibllib.ephys.spikes as e_spks\n# (*Note, if there is no 'alf' directory, make 'alf' directory from 'ks2' output directory):\n>>> e_spks.ks2_to_alf(path_to_ks_out, path_to_alf_out)\n# Load the alf spikes bunch and clusters bunch, and get a units bunch.\n>>> spks_b = aio.load_object(path_to_alf_out, 'spikes')\n>>> clstrs_b = aio.load_object(path_to_alf_out, 'clusters')\n>>> units_b = processing.get_units_bunch(spks_b) # may take a few mins to compute\n\"\"\"\n\nimport time\nfrom warnings import warn\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\n\n# from matplotlib.ticker import StrMethodFormatter\nfrom brainbox import singlecell\nfrom brainbox.metrics import single_units\nfrom brainbox.processing import bincount2D\nfrom brainbox.io.spikeglx import extract_waveforms\nfrom ibllib.io import spikeglx\n\n\ndef feat_vars(units_b, units=None, feat_name='amps', dist='norm', test='ks', cmap_name='coolwarm',\n ax=None):\n '''\n Plots the coefficients of variation of a particular spike feature for all units as a bar plot,\n where each bar is color-coded corresponding to the depth of the max amplitude channel of the\n respective unit.\n\n Parameters\n ----------\n units_b : bunch\n A units bunch containing fields with spike information (e.g. cluster IDs, times, features,\n etc.) for all units.\n units : array-like (optional)\n A subset of all units for which to create the bar plot. (If `None`, all units are used)\n feat_name : string (optional)\n The spike feature to plot.\n dist : string (optional)\n The type of hypothetical null distribution from which the empirical spike feature\n distributions are presumed to belong to.\n test : string (optional)\n The statistical test used to calculate the probability that the empirical spike feature\n distributions come from `dist`.\n cmap_name : string (optional)\n The name of the colormap associated with the plot.\n ax : axessubplot (optional)\n The axis handle to plot the histogram on. 
(if `None`, a new figure and axis is created)\n\n Returns\n -------\n cv_vals : ndarray\n The coefficients of variation of `feat_name` for each unit.\n p_vals : ndarray\n The probabilites that the distribution for `feat_name` for each unit comes from a\n `dist` distribution based on the `test` statistical test.\n\n See Also\n --------\n metrics.unit_stability\n\n Examples\n --------\n 1) Create a bar plot of the coefficients of variation of the spike amplitudes for all units.\n >>> fig, var_vals, p_vals = bb.plot.feat_vars(units_b)\n '''\n\n # Get units.\n if not (units is None): # we're using a subset of all units\n unit_list = list(units_b['depths'].keys())\n # For each unit in `unit_list`, remove unit from `units_b` if not in `units`.\n [units_b['depths'].pop(unit) for unit in unit_list if not (int(unit) in units)]\n unit_list = list(units_b['depths'].keys()) # get new `unit_list` after removing unit\n\n # Calculate coefficients of variation for all units\n p_vals_b, cv_b = single_units.unit_stability(\n units_b, units=units, feat_names=[feat_name], dist=dist, test=test)\n cv_vals = np.array(tuple(cv_b[feat_name].values()))\n cv_vals = cv_vals * 1e6 if feat_name == 'amps' else cv_vals # convert to uV if amps\n p_vals = np.array(tuple(p_vals_b[feat_name].values()))\n\n # Remove any empty units. This must be done AFTER the above calculations for ALL units so that\n # we can keep direct indexing.\n empty_unit_idxs = np.where([len(units_b['times'][unit]) == 0 for unit in unit_list])[0]\n good_units = [unit for unit in unit_list if unit not in empty_unit_idxs.astype(str)]\n\n # Get mean depths of spikes for good units\n depths = np.asarray([np.mean(units_b['depths'][str(unit)]) for unit in good_units])\n\n # Create unit normalized colormap based on `depths`, sorted by depth.\n cmap = plt.cm.get_cmap(cmap_name)\n depths_norm = depths / np.max(depths)\n rgba = np.asarray([cmap(depth) for depth in np.sort(np.flip(depths_norm))])\n\n # Plot depth-color-coded h bar plot of CVs for `feature` for each unit, where units are\n # sorted descendingly by depth along y-axis.\n if ax is None:\n fig, ax = plt.subplots()\n ax.barh(y=[int(unit) for unit in good_units], width=cv_vals[np.argsort(depths)], color=rgba)\n fig = ax.figure\n cbar = fig.colorbar(plt.cm.ScalarMappable(cmap=cmap), ax=ax)\n max_d = np.max(depths)\n tick_labels = [int(max_d * tick) for tick in (0, 0.2, 0.4, 0.6, 0.8, 1.0)]\n cbar.set_ticks(cbar.get_ticks()) # must call `set_ticks` to call `set_ticklabels`\n cbar.set_ticklabels(tick_labels)\n ax.set_title('CV of {feat}'.format(feat=feat_name))\n ax.set_ylabel('Unit Number (sorted by depth)')\n ax.set_xlabel('CV')\n cbar.set_label('Depth', rotation=-90)\n\n return cv_vals, p_vals\n\n\ndef missed_spikes_est(feat, feat_name, spks_per_bin=20, sigma=5, min_num_bins=50, ax=None):\n '''\n Plots the pdf of an estimated symmetric spike feature distribution, with a vertical cutoff line\n that indicates the approximate fraction of spikes missing from the distribution, assuming the\n true distribution is symmetric.\n\n Parameters\n ----------\n feat : ndarray\n The spikes' feature values.\n feat_name : string\n The spike feature to plot.\n spks_per_bin : int (optional)\n The number of spikes per bin from which to compute the spike feature histogram.\n sigma : int (optional)\n The standard deviation for the gaussian kernel used to compute the pdf from the spike\n feature histogram.\n min_num_bins : int (optional)\n The minimum number of bins used to compute the spike feature histogram.\n ax : 
axessubplot (optional)\n The axis handle to plot the histogram on. (if `None`, a new figure and axis is created)\n\n Returns\n -------\n fraction_missing : float\n The fraction of missing spikes (0-0.5). *Note: If more than 50% of spikes are missing, an\n accurate estimate isn't possible.\n\n See Also\n --------\n single_units.feature_cutoff\n\n Examples\n --------\n 1) Plot cutoff line indicating the fraction of spikes missing from a unit based on the recorded\n unit's spike amplitudes, assuming the distribution of the unit's spike amplitudes is symmetric.\n >>> feat = units_b['amps']['1']\n >>> fraction_missing = bb.plot.missed_spikes_est(feat, feat_name='amps', unit=1)\n '''\n\n # Calculate the feature distribution histogram and fraction of spikes missing.\n fraction_missing, pdf, cutoff_idx = \\\n single_units.missed_spikes_est(feat, spks_per_bin, sigma, min_num_bins)\n\n # Plot.\n if ax is None: # create two axes\n fig, ax = plt.subplots(nrows=1, ncols=2)\n if ax is None or len(ax) == 2: # plot histogram and pdf on two separate axes\n num_bins = int(feat.size / spks_per_bin)\n ax[0].hist(feat, bins=num_bins)\n ax[0].set_xlabel('{0}'.format(feat_name))\n ax[0].set_ylabel('Count')\n ax[0].set_title('Histogram of {0}'.format(feat_name))\n ax[1].plot(pdf)\n ax[1].vlines(cutoff_idx, 0, np.max(pdf), colors='r')\n ax[1].set_xlabel('Bin Number')\n ax[1].set_ylabel('Density')\n ax[1].set_title('PDF Symmetry Cutoff\\n'\n '(estimated {:.2f}% missing spikes)'.format(fraction_missing * 100))\n else: # just plot pdf\n ax = ax[0]\n ax.plot(pdf)\n ax.vlines(cutoff_idx, 0, np.max(pdf), colors='r')\n ax.set_xlabel('Bin Number')\n ax.set_ylabel('Density')\n ax.set_title('PDF Symmetry Cutoff\\n'\n '(estimated {:.2f}% missing spikes)'.format(fraction_missing * 100))\n\n return fraction_missing\n\n\ndef wf_comp(ephys_file, ts1, ts2, ch, sr=30000, n_ch_probe=385, dtype='int16', car=True,\n col=['b', 'r'], ax=None):\n '''\n Plots two different sets of waveforms across specified channels after (optionally)\n common-average-referencing. In this way, waveforms can be compared to see if there is,\n e.g. drift during the recording, or if two units should be merged, or one unit should be split.\n\n Parameters\n ----------\n ephys_file : string\n The file path to the binary ephys data.\n ts1 : array_like\n A set of timestamps for which to compare waveforms with `ts2`.\n ts2: array_like\n A set of timestamps for which to compare waveforms with `ts1`.\n ch : array-like\n The channels to use for extracting and plotting the waveforms.\n sr : int (optional)\n The sampling rate (in hz) that the ephys data was acquired at.\n n_ch_probe : int (optional)\n The number of channels of the recording.\n dtype: str (optional)\n The datatype represented by the bytes in `ephys_file`.\n car: bool (optional)\n A flag for whether or not to perform common-average-referencing before extracting waveforms\n col: list of strings or float arrays (optional)\n Two elements in the list, where each specifies the color the `ts1` and `ts2` waveforms\n will be plotted in, respectively.\n ax : axessubplot (optional)\n The axis handle to plot the histogram on. 
(if `None`, a new figure and axis is created)\n\n Returns\n -------\n wf1 : ndarray\n The waveforms for the spikes in `ts1`: an array of shape (#spikes, #samples, #channels).\n wf2 : ndarray\n The waveforms for the spikes in `ts2`: an array of shape (#spikes, #samples, #channels).\n s : float\n The similarity score between the two sets of waveforms, calculated by\n `single_units.wf_similarity`\n\n See Also\n --------\n io.extract_waveforms\n single_units.wf_similarity\n\n Examples\n --------\n 1) Compare first and last 100 spike waveforms for unit1, across 20 channels around the channel\n of max amplitude, and compare the waveforms in the first minute to the waveforms in the fourth\n minutes for unit2, across 10 channels around the mean.\n # Get first and last 100 spikes, and 20 channels around channel of max amp for unit 1:\n >>> ts1 = units_b['times']['1'][:100]\n >>> ts2 = units_b['times']['1'][-100:]\n >>> max_ch = clstrs_b['channels'][1]\n >>> if max_ch < n_c_ch: # take only channels greater than `max_ch`.\n >>> ch = np.arange(max_ch, max_ch + 20)\n >>> elif (max_ch + n_c_ch) > n_ch_probe: # take only channels less than `max_ch`.\n >>> ch = np.arange(max_ch - 20, max_ch)\n >>> else: # take `n_c_ch` around `max_ch`.\n >>> ch = np.arange(max_ch - 10, max_ch + 10)\n >>> wf1, wf2, s = bb.plot.wf_comp(path_to_ephys_file, ts1, ts2, ch)\n # Plot waveforms for unit2 from the first and fourth minutes across 10 channels.\n >>> ts = units_b['times']['2']\n >>> ts1_2 = ts[np.where(ts<60)[0]]\n >>> ts2_2 = ts[np.where(ts>180)[0][:len(ts1)]]\n >>> max_ch = clstrs_b['channels'][2]\n >>> if max_ch < n_c_ch: # take only channels greater than `max_ch`.\n >>> ch = np.arange(max_ch, max_ch + 10)\n >>> elif (max_ch + n_c_ch) > n_ch_probe: # take only channels less than `max_ch`.\n >>> ch = np.arange(max_ch - 10, max_ch)\n >>> else: # take `n_c_ch` around `max_ch`.\n >>> ch = np.arange(max_ch - 5, max_ch + 5)\n >>> wf1_2, wf2_2, s_2 = bb.plot.wf_comp(path_to_ephys_file, ts1_2, ts2_2, ch)\n '''\n\n # Ensure `ch` is ndarray\n ch = np.asarray(ch)\n ch = ch.reshape((ch.size, 1)) if ch.size == 1 else ch\n\n # Extract the waveforms for these timestamps and compute similarity score.\n wf1 = extract_waveforms(ephys_file, ts1, ch, sr=sr, n_ch_probe=n_ch_probe, dtype=dtype,\n car=car)\n wf2 = extract_waveforms(ephys_file, ts2, ch, sr=sr, n_ch_probe=n_ch_probe, dtype=dtype,\n car=car)\n s = single_units.wf_similarity(wf1, wf2)\n\n # Plot these waveforms against each other.\n n_ch = ch.size\n if ax is None:\n fig, ax = plt.subplots(nrows=n_ch, ncols=2) # left col is all waveforms, right col is mean\n for cur_ax, cur_ch in enumerate(ch):\n ax[cur_ax][0].plot(wf1[:, :, cur_ax].T, c=col[0])\n ax[cur_ax][0].plot(wf2[:, :, cur_ax].T, c=col[1])\n ax[cur_ax][1].plot(np.mean(wf1[:, :, cur_ax], axis=0), c=col[0])\n ax[cur_ax][1].plot(np.mean(wf2[:, :, cur_ax], axis=0), c=col[1])\n ax[cur_ax][0].set_ylabel('Ch {0}'.format(cur_ch))\n ax[0][0].set_title('All Waveforms. 
S = {:.2f}'.format(s))\n ax[0][1].set_title('Mean Waveforms')\n plt.legend(['1st spike set', '2nd spike set'])\n\n return wf1, wf2, s\n\n\ndef amp_heatmap(ephys_file, ts, ch, sr=30000, n_ch_probe=385, dtype='int16', cmap_name='RdBu',\n car=True, ax=None):\n '''\n Plots a heatmap of the normalized voltage values over time and space for given timestamps and\n channels, after (optionally) common-average-referencing.\n\n Parameters\n ----------\n ephys_file : string\n The file path to the binary ephys data.\n ts: array_like\n A set of timestamps for which to get the voltage values.\n ch : array-like\n The channels to use for extracting the voltage values.\n sr : int (optional)\n The sampling rate (in hz) that the ephys data was acquired at.\n n_ch_probe : int (optional)\n The number of channels of the recording.\n dtype: str (optional)\n The datatype represented by the bytes in `ephys_file`.\n cmap_name : string (optional)\n The name of the colormap associated with the plot.\n car: bool (optional)\n A flag for whether or not to perform common-average-referencing before extracting waveforms\n ax : axessubplot (optional)\n The axis handle to plot the histogram on. (if `None`, a new figure and axis is created)\n\n Returns\n -------\n v_vals : ndarray\n The voltage values.\n\n Examples\n --------\n 1) Plot a heatmap of the spike amplitudes across 20 channels around the channel of max\n amplitude for all spikes in unit 1.\n >>> ts = units_b['times']['1']\n >>> max_ch = clstrs_b['channels'][1]\n >>> if max_ch < n_c_ch: # take only channels greater than `max_ch`.\n >>> ch = np.arange(max_ch, max_ch + 20)\n >>> elif (max_ch + n_c_ch) > n_ch_probe: # take only channels less than `max_ch`.\n >>> ch = np.arange(max_ch - 20, max_ch)\n >>> else: # take `n_c_ch` around `max_ch`.\n >>> ch = np.arange(max_ch - 10, max_ch + 10)\n >>> bb.plot.amp_heatmap(path_to_ephys_file, ts, ch)\n '''\n # Ensure `ch` is ndarray\n ch = np.asarray(ch)\n ch = ch.reshape((ch.size, 1)) if ch.size == 1 else ch\n\n # Get memmapped array of `ephys_file`\n s_reader = spikeglx.Reader(ephys_file, open=True)\n file_m = s_reader.data\n\n # Get voltage values for each peak amplitude sample for `ch`.\n max_amp_samples = (ts * sr).astype(int)\n # Currently this is an annoying way to calculate `v_vals` b/c indexing with multiple values\n # is currently unsupported.\n v_vals = np.zeros((max_amp_samples.size, ch.size))\n for sample in range(max_amp_samples.size):\n v_vals[sample] = file_m[max_amp_samples[sample]:max_amp_samples[sample] + 1, ch]\n if car: # compute spatial noise in chunks, and subtract from `v_vals`.\n # Get subset of time (from first to last max amp sample)\n n_chunk_samples = 5e6 # number of samples per chunk\n n_chunks = np.ceil((max_amp_samples[-1] - max_amp_samples[0]) /\n n_chunk_samples).astype('int')\n # Get samples that make up each chunk. e.g. `chunk_sample[1] - chunk_sample[0]` are the\n # samples that make up the first chunk.\n chunk_sample = np.arange(max_amp_samples[0], max_amp_samples[-1], n_chunk_samples,\n dtype=int)\n chunk_sample = np.append(chunk_sample, max_amp_samples[-1])\n noise_s_chunks = np.zeros((n_chunks, ch.size), dtype=np.int16) # spatial noise array\n # Give time estimate for computing `noise_s_chunks`.\n t0 = time.perf_counter()\n np.median(file_m[chunk_sample[0]:chunk_sample[1], ch], axis=0)\n dt = time.perf_counter() - t0\n print('Performing spatial CAR before waveform extraction. 
Estimated time is {:.2f} mins.'\n ' ({})'.format(dt * n_chunks / 60, time.ctime()))\n # Compute noise for each chunk, then take the median noise of all chunks.\n for chunk in range(n_chunks):\n noise_s_chunks[chunk, :] = np.median(\n file_m[chunk_sample[chunk]:chunk_sample[chunk + 1], ch], axis=0)\n noise_s = np.median(noise_s_chunks, axis=0)\n v_vals -= noise_s[None, :]\n print('Done. ({})'.format(time.ctime()))\n s_reader.close()\n\n # Plot heatmap.\n if ax is None:\n fig, ax = plt.subplots()\n v_vals_norm = (v_vals / np.max(abs(v_vals))).T\n cbar_map = ax.imshow(v_vals_norm, cmap=cmap_name, aspect='auto',\n extent=[ts[0], ts[-1], ch[0], ch[-1]], origin='lower')\n ax.set_yticks(np.arange(ch[0], ch[-1], 5))\n ax.set_ylabel('Channel Numbers')\n ax.set_xlabel('Time (s)')\n ax.set_title('Voltage Heatmap')\n fig = ax.figure\n cbar = fig.colorbar(cbar_map, ax=ax)\n cbar.set_label('V', rotation=-90)\n\n return v_vals\n\n\ndef firing_rate(ts, hist_win=0.01, fr_win=0.5, n_bins=10, show_fr_cv=True, ax=None):\n '''\n Plots the instantaneous firing rate of for given spike timestamps over time, and optionally\n overlays the value of the coefficient of variation of the firing rate for a specified number\n of bins.\n\n Parameters\n ----------\n ts : ndarray\n The spike timestamps from which to compute the firing rate.\n hist_win : float (optional)\n The time window (in s) to use for computing spike counts.\n fr_win : float (optional)\n The time window (in s) to use as a moving slider to compute the instantaneous firing rate.\n n_bins : int (optional)\n The number of bins in which to compute coefficients of variation of the firing rate.\n show_fr_cv : bool (optional)\n A flag for whether or not to compute and show the coefficients of variation of the firing\n rate for `n_bins`.\n ax : axessubplot (optional)\n The axis handle to plot the histogram on. (if `None`, a new figure and axis is created)\n\n Returns\n -------\n fr: ndarray\n The instantaneous firing rate over time (in hz).\n cv: float\n The mean coefficient of variation of the firing rate of the `n_bins` number of coefficients\n computed. Can only be returned if `show_fr_cv` is True.\n cvs: ndarray\n The coefficients of variation of the firing for each bin of `n_bins`. 
Can only be returned\n if `show_fr_cv` is True.\n\n See Also\n --------\n single_units.firing_rate_cv\n singecell.firing_rate\n\n Examples\n --------\n 1) Plot the firing rate for unit 1 from the time of its first to last spike, showing the cv\n of the firing rate for 10 evenly spaced bins.\n >>> ts = units_b['times']['1']\n >>> fr, cv, cvs = bb.plot.firing_rate(ts)\n '''\n\n if ax is None:\n fig, ax = plt.subplots()\n if not (show_fr_cv): # compute just the firing rate\n fr = singlecell.firing_rate(ts, hist_win=hist_win, fr_win=fr_win)\n else: # compute firing rate and coefficients of variation\n cv, cvs, fr = single_units.firing_rate_coeff_var(ts, hist_win=hist_win, fr_win=fr_win,\n n_bins=n_bins)\n x = np.arange(fr.size) * hist_win\n ax.plot(x, fr)\n ax.set_title('Firing Rate')\n ax.set_xlabel('Time (s)')\n ax.set_ylabel('Rate (s$^-1$)')\n\n if not (show_fr_cv):\n return fr\n else: # show coefficients of variation\n y_max = np.max(fr) * 1.05\n x_l = x[int(x.size / n_bins)]\n # Plot vertical lines separating plots into `n_bins`.\n [ax.vlines((x_l * i), 0, y_max, linestyles='dashed', linewidth=2)\n for i in range(1, n_bins)]\n # Plot text with cv of firing rate for each bin.\n [ax.text(x_l * (i + 1), y_max, 'cv={0:.2f}'.format(cvs[i]), fontsize=9, ha='right')\n for i in range(n_bins)]\n return fr, cv, cvs\n\n\ndef peri_event_time_histogram(\n spike_times, spike_clusters, events, cluster_id, # Everything you need for a basic plot\n t_before=0.2, t_after=0.5, bin_size=0.025, smoothing=0.025, as_rate=True,\n include_raster=False, n_rasters=None, error_bars='std', ax=None,\n pethline_kwargs={'color': 'blue', 'lw': 2},\n errbar_kwargs={'color': 'blue', 'alpha': 0.5},\n eventline_kwargs={'color': 'black', 'alpha': 0.5},\n raster_kwargs={'color': 'black', 'lw': 0.5}, **kwargs):\n \"\"\"\n Plot peri-event time histograms, with the meaning firing rate of units centered on a given\n series of events. Can optionally add a raster underneath the PETH plot of individual spike\n trains about the events.\n\n Parameters\n ----------\n spike_times : array_like\n Spike times (in seconds)\n spike_clusters : array-like\n Cluster identities for each element of spikes\n events : array-like\n Times to align the histogram(s) to\n cluster_id : int\n Identity of the cluster for which to plot a PETH\n\n t_before : float, optional\n Time before event to plot (default: 0.2s)\n t_after : float, optional\n Time after event to plot (default: 0.5s)\n bin_size :float, optional\n Width of bin for histograms (default: 0.025s)\n smoothing : float, optional\n Sigma of gaussian smoothing to use in histograms. (default: 0.025s)\n as_rate : bool, optional\n Whether to use spike counts or rates in the plot (default: `True`, uses rates)\n include_raster : bool, optional\n Whether to put a raster below the PETH of individual spike trains (default: `False`)\n n_rasters : int, optional\n If include_raster is True, the number of rasters to include. If `None`\n will default to plotting rasters around all provided events. (default: `None`)\n error_bars : {'std', 'sem', 'none'}, optional\n Defines which type of error bars to plot. Options are:\n -- `'std'` for 1 standard deviation\n -- `'sem'` for standard error of the mean\n -- `'none'` for only plotting the mean value\n (default: `'std'`)\n ax : matplotlib axes, optional\n If passed, the function will plot on the passed axes. 
Note: current\n behavior causes whatever was on the axes to be cleared before plotting!\n (default: `None`)\n pethline_kwargs : dict, optional\n Dict containing line properties to define PETH plot line. Default\n is a blue line with weight of 2. Needs to have color. See matplotlib plot documentation\n for more options.\n (default: `{'color': 'blue', 'lw': 2}`)\n errbar_kwargs : dict, optional\n Dict containing fill-between properties to define PETH error bars.\n Default is a blue fill with 50 percent opacity.. Needs to have color. See matplotlib\n fill_between documentation for more options.\n (default: `{'color': 'blue', 'alpha': 0.5}`)\n eventline_kwargs : dict, optional\n Dict containing fill-between properties to define line at event.\n Default is a black line with 50 percent opacity.. Needs to have color. See matplotlib\n vlines documentation for more options.\n (default: `{'color': 'black', 'alpha': 0.5}`)\n raster_kwargs : dict, optional\n Dict containing properties defining lines in the raster plot.\n Default is black lines with line width of 0.5. See matplotlib vlines for more options.\n (default: `{'color': 'black', 'lw': 0.5}`)\n\n Returns\n -------\n ax : matplotlib axes\n Axes with all of the plots requested.\n \"\"\"\n\n # Check to make sure if we fail, we fail in an informative way\n if not len(spike_times) == len(spike_clusters):\n raise ValueError('Spike times and clusters are not of the same shape')\n if len(events) == 1:\n raise ValueError('Cannot make a PETH with only one event.')\n if error_bars not in ('std', 'sem', 'none'):\n raise ValueError('Invalid error bar type was passed.')\n if not all(np.isfinite(events)):\n raise ValueError('There are NaN or inf values in the list of events passed. '\n ' Please remove non-finite data points and try again.')\n\n # Compute peths\n peths, binned_spikes = singlecell.calculate_peths(spike_times, spike_clusters, [cluster_id],\n events, t_before, t_after, bin_size,\n smoothing, as_rate)\n # Construct an axis object if none passed\n if ax is None:\n plt.figure()\n ax = plt.gca()\n # Plot the curve and add error bars\n mean = peths.means[0, :]\n ax.plot(peths.tscale, mean, **pethline_kwargs)\n if error_bars == 'std':\n bars = peths.stds[0, :]\n elif error_bars == 'sem':\n bars = peths.stds[0, :] / np.sqrt(len(events))\n else:\n bars = np.zeros_like(mean)\n if error_bars != 'none':\n ax.fill_between(peths.tscale, mean - bars, mean + bars, **errbar_kwargs)\n\n # Plot the event marker line. Extends to 5% higher than max value of means plus any error bar.\n plot_edge = (mean.max() + bars[mean.argmax()]) * 1.05\n ax.vlines(0., 0., plot_edge, **eventline_kwargs)\n # Set the limits on the axes to t_before and t_after. Either set the ylim to the 0 and max\n # values of the PETH, or if we want to plot a spike raster below, create an equal amount of\n # blank space below the zero where the raster will go.\n ax.set_xlim([-t_before, t_after])\n ax.set_ylim([-plot_edge if include_raster else 0., plot_edge])\n # Put y ticks only at min, max, and zero\n if mean.min() != 0:\n ax.set_yticks([0, mean.min(), mean.max()])\n else:\n ax.set_yticks([0., mean.max()])\n # Move the x axis line from the bottom of the plotting space to zero if including a raster,\n # Then plot the raster\n if include_raster:\n if n_rasters is None:\n n_rasters = len(events)\n if n_rasters > 60:\n warn(\"Number of raster traces is greater than 60. 
This might look bad on the plot.\")\n ax.axhline(0., color='black')\n tickheight = plot_edge / len(events[:n_rasters]) # How much space per trace\n tickedges = np.arange(0., -plot_edge - 1e-5, -tickheight)\n clu_spks = spike_times[spike_clusters == cluster_id]\n for i, t in enumerate(events[:n_rasters]):\n idx = np.bitwise_and(clu_spks >= t - t_before, clu_spks <= t + t_after)\n event_spks = clu_spks[idx]\n ax.vlines(event_spks - t, tickedges[i + 1], tickedges[i], **raster_kwargs)\n ax.set_ylabel('Firing Rate' if as_rate else 'Number of spikes', y=0.75)\n else:\n ax.set_ylabel('Firing Rate' if as_rate else 'Number of spikes')\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.set_xlabel('Time (s) after event')\n return ax\n\n\ndef driftmap(ts, feat, ax=None, plot_style='bincount',\n t_bin=0.01, d_bin=20, weights=None, vmax=None, **kwargs):\n \"\"\"\n Plots the values of a spike feature array (y-axis) over time (x-axis).\n Two arguments can be given for the plot_style of the drift map:\n - 'scatter' : whereby each value is plotted as a marker (up to 100'000 data point)\n - 'bincount' : whereby the values are binned (optimised to represent spike raster)\n\n Parameters\n ----------\n feat : ndarray\n The spikes' feature values.\n ts : ndarray\n The spike timestamps from which to compute the firing rate.\n ax : axessubplot (optional)\n The axis handle to plot the histogram on. (if `None`, a new figure and axis is created)\n t_bin: time bin used when plot_style='bincount'\n d_bin: depth bin used when plot_style='bincount'\n plot_style: 'scatter', 'bincount'\n **kwargs: matplotlib.imshow arguments\n\n Returns\n -------\n cd: float\n The cumulative drift of `feat`.\n md: float\n The maximum drift of `feat`.\n\n See Also\n --------\n metrics.cum_drift\n metrics.max_drift\n\n Examples\n --------\n 1) Plot the amplitude driftmap for unit 1.\n >>> ts = units_b['times']['1']\n >>> amps = units_b['amps']['1']\n >>> ax = bb.plot.driftmap(ts, amps)\n 2) Plot the depth driftmap for unit 1.\n >>> ts = units_b['times']['1']\n >>> depths = units_b['depths']['1']\n >>> ax = bb.plot.driftmap(ts, depths)\n \"\"\"\n iok = ~np.isnan(feat)\n if ax is None:\n fig, ax = plt.subplots()\n\n if plot_style == 'scatter' and len(ts) < 100000:\n print('here todo')\n if 'color' not in kwargs.keys():\n kwargs['color'] = 'k'\n ax.plot(ts, feat, **kwargs)\n else:\n # compute raster map as a function of site depth\n R, times, depths = bincount2D(\n ts[iok], feat[iok], t_bin, d_bin, weights=weights)\n # plot raster map\n ax.imshow(R, aspect='auto', cmap='binary', vmin=0, vmax=vmax or np.std(R) * 4,\n extent=np.r_[times[[0, -1]], depths[[0, -1]]], origin='lower', **kwargs)\n ax.set_xlabel('time (secs)')\n ax.set_ylabel('depth (um)')\n return ax\n\n\ndef pres_ratio(ts, hist_win=10, ax=None):\n '''\n Plots the presence ratio of spike counts: the number of bins where there is at least one\n spike, over the total number of bins, given a specified bin width.\n\n Parameters\n ----------\n ts : ndarray\n The spike timestamps from which to compute the presence ratio.\n hist_win : float\n The time window (in s) to use for computing the presence ratio.\n ax : axessubplot (optional)\n The axis handle to plot the histogram on. 
(if `None`, a new figure and axis is created)\n\n Returns\n -------\n pr : float\n The presence ratio.\n spks_bins : ndarray\n The number of spks in each bin.\n\n See Also\n --------\n metrics.pres_ratio\n\n Examples\n --------\n 1) Plot the presence ratio for unit 1, given a window of 10 s.\n >>> ts = units_b['times']['1']\n >>> pr, pr_bins = bb.plot.pres_ratio(ts)\n '''\n\n pr, spks_bins = single_units.pres_ratio(ts, hist_win)\n pr_bins = np.where(spks_bins > 0, 1, 0)\n\n if ax is None:\n fig, ax = plt.subplots()\n\n ax.plot(pr_bins)\n ax.set_xlabel('Bin Number (width={:.1f}s)'.format(hist_win))\n ax.set_ylabel('Presence')\n ax.set_title('Presence Ratio')\n\n return pr, spks_bins\n\n\ndef driftmap_color(\n clusters_depths, spikes_times,\n spikes_amps, spikes_depths, spikes_clusters,\n ax=None, axesoff=False, return_lims=False):\n\n '''\n Plots the driftmap of a session or a trial\n\n The plot shows the spike times vs spike depths.\n Each dot is a spike, whose color indicates the cluster\n and opacity indicates the spike amplitude.\n\n Parameters\n -------------\n clusters_depths: ndarray\n depths of all clusters\n spikes_times: ndarray\n spike times of all clusters\n spikes_amps: ndarray\n amplitude of each spike\n spikes_depths: ndarray\n depth of each spike\n spikes_clusters: ndarray\n cluster idx of each spike\n ax: matplotlib.axes.Axes object (optional)\n The axis object to plot the driftmap on\n (if `None`, a new figure and axis is created)\n\n Return\n ---\n ax: matplotlib.axes.Axes object\n The axis object with driftmap plotted\n x_lim: list of two elements\n range of x axis\n y_lim: list of two elements\n range of y axis\n '''\n\n color_bins = sns.color_palette(\"hls\", 500)\n new_color_bins = np.vstack(\n np.transpose(np.reshape(color_bins, [5, 100, 3]), [1, 0, 2]))\n\n # get the sorted idx of each depth, and create colors based on the idx\n\n sorted_idx = np.argsort(np.argsort(clusters_depths))\n\n colors = np.vstack(\n [np.repeat(\n new_color_bins[np.mod(idx, 500), :][np.newaxis, ...],\n n_spikes, axis=0)\n for (idx, n_spikes) in\n zip(sorted_idx, np.unique(spikes_clusters,\n return_counts=True)[1])])\n\n max_amp = np.percentile(spikes_amps, 90)\n min_amp = np.percentile(spikes_amps, 10)\n opacity = np.divide(spikes_amps - min_amp, max_amp - min_amp)\n opacity[opacity > 1] = 1\n opacity[opacity < 0] = 0\n\n colorvec = np.zeros([len(opacity), 4], dtype='float16')\n colorvec[:, 3] = opacity.astype('float16')\n colorvec[:, 0:3] = colors.astype('float16')\n\n x = spikes_times.astype('float32')\n y = spikes_depths.astype('float32')\n\n args = dict(color=colorvec, edgecolors='none')\n\n if ax is None:\n fig = plt.Figure(dpi=200, frameon=False, figsize=[10, 10])\n ax = plt.Axes(fig, [0.1, 0.1, 0.9, 0.9])\n ax.set_xlabel('Time (sec)')\n ax.set_ylabel('Distance from the probe tip (um)')\n savefig = True\n args.update(s=0.1)\n\n ax.scatter(x, y, **args)\n x_edge = (max(x) - min(x)) * 0.05\n x_lim = [min(x) - x_edge, max(x) + x_edge]\n y_lim = [min(y) - 50, max(y) + 100]\n ax.set_xlim(x_lim[0], x_lim[1])\n ax.set_ylim(y_lim[0], y_lim[1])\n\n if axesoff:\n ax.axis('off')\n\n if savefig:\n fig.add_axes(ax)\n fig.savefig('driftmap.png')\n\n if return_lims:\n return ax, x_lim, y_lim\n else:\n return ax\n"
] | [
[
"numpy.argsort",
"numpy.asarray",
"matplotlib.pyplot.Figure",
"numpy.isfinite",
"numpy.append",
"matplotlib.pyplot.cm.ScalarMappable",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.gca",
"numpy.reshape",
"numpy.isnan",
"numpy.where",
"numpy.bitwise_and",
"numpy.unique",
"numpy.mean",
"numpy.ceil",
"numpy.zeros",
"numpy.median",
"matplotlib.pyplot.subplots",
"numpy.mod",
"numpy.arange",
"matplotlib.pyplot.cm.get_cmap",
"numpy.max",
"numpy.std",
"numpy.percentile",
"numpy.zeros_like",
"matplotlib.pyplot.Axes",
"matplotlib.pyplot.legend",
"numpy.divide",
"numpy.flip"
]
] |
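The `brainbox/plot.py` record above plots per-unit summary statistics; the presence ratio, for instance, is simply the fraction of time bins that contain at least one spike. A NumPy-only sketch of that statistic, assuming `ts` holds spike times in seconds (this is an illustration of the idea, not brainbox's `single_units.pres_ratio` implementation; the 10 s window mirrors the record's default `hist_win`):

```python
import numpy as np

def presence_ratio(ts, hist_win=10.0):
    """Fraction of hist_win-sized bins, between the first and last spike,
    that contain at least one spike (1.0 = unit present throughout)."""
    ts = np.asarray(ts)
    n_bins = max(int(np.ceil((ts.max() - ts.min()) / hist_win)), 1)
    spks_per_bin, _ = np.histogram(ts, bins=n_bins)
    return np.mean(spks_per_bin > 0), spks_per_bin

# Example: a unit that goes silent between 300 s and 600 s
rng = np.random.default_rng(0)
ts = np.sort(np.concatenate([rng.uniform(0, 300, 3000),
                             rng.uniform(600, 900, 3000)]))
pr, spks_bins = presence_ratio(ts)
print(f"presence ratio ~ {pr:.2f}")  # well below 1.0 because of the silent gap
```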
jdvelasq/techMiner | [
"c611d96d2f812b0890513514d9d19787a1edfe2d"
] | [
"techminer/core/normalize_network.py"
] | [
"import numpy as np\n\n\ndef normalize_network(X, normalization=None):\n \"\"\"\n \"\"\"\n X = X.copy()\n\n if isinstance(normalization, str) and normalization == \"None\":\n normalization = None\n\n if normalization is None:\n X = X.applymap(lambda w: int(w))\n else:\n X = X.applymap(lambda w: float(w))\n\n M = X.copy()\n\n if normalization == \"Jaccard\":\n for col in M.columns:\n for row in M.index:\n X.at[row, col] = M.at[row, col] / (\n M.loc[row, row] + M.at[col, col] - M.at[row, col]\n )\n\n if normalization == \"Dice\":\n for col in M.columns:\n for row in M.index:\n X.at[row, col] = M.at[row, col] / (\n M.loc[row, row] + M.at[col, col] + 2 * M.at[row, col]\n )\n\n if normalization == \"Salton/Cosine\":\n for col in M.columns:\n for row in M.index:\n X.at[row, col] = M.at[row, col] / np.sqrt(\n (M.loc[row, row] * M.at[col, col])\n )\n\n if normalization == \"Equivalence\":\n for col in M.columns:\n for row in M.index:\n X.at[row, col] = M.at[row, col] ** 2 / (\n M.loc[row, row] * M.at[col, col]\n )\n\n ## inclusion\n if normalization == \"Inclusion\":\n for col in M.columns:\n for row in M.index:\n X.at[row, col] = M.at[row, col] / min(M.loc[row, row], M.at[col, col])\n\n if normalization == \"Mutual Information\":\n N = len(M.columns)\n for col in M.columns:\n for row in M.index:\n X.at[row, col] = np.log(\n M.at[row, col] / (N * M.loc[row, row] * M.at[col, col])\n )\n\n if normalization == \"Association\":\n for col in M.columns:\n for row in M.index:\n X.at[row, col] = M.at[row, col] / (M.loc[row, row] * M.at[col, col])\n\n return X\n"
] | [
[
"numpy.sqrt",
"numpy.log"
]
] |
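In the `normalize_network` record above, each off-diagonal entry is rescaled by the diagonal counts; for the Jaccard option that is `c_ij / (c_ii + c_jj - c_ij)`. A small pandas example of that one case (the co-occurrence matrix here is made up purely for illustration):

```python
import pandas as pd

# Toy symmetric co-occurrence matrix: diagonals are item frequencies,
# off-diagonals are co-occurrence counts.
M = pd.DataFrame(
    [[10, 4, 1],
     [4, 8, 2],
     [1, 2, 5]],
    index=["a", "b", "c"], columns=["a", "b", "c"], dtype=float,
)

jaccard = M.copy()
for col in M.columns:
    for row in M.index:
        jaccard.at[row, col] = M.at[row, col] / (
            M.at[row, row] + M.at[col, col] - M.at[row, col]
        )

print(jaccard.round(3))  # e.g. a-b -> 4 / (10 + 8 - 4) = 0.286; diagonal -> 1.0
```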
botmatic/tacotron2 | [
"c2dee4930f6bd1cf707e0565fd0675b8646a51a1"
] | [
"parallel_wavenet_vocoder/tests/test_mixture.py"
] | [
"# coding: utf-8\nfrom __future__ import with_statement, print_function, absolute_import\n\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nimport librosa\nimport pysptk\n\nfrom wavenet_vocoder.mixture import discretized_mix_logistic_loss\nfrom wavenet_vocoder.mixture import sample_from_discretized_mix_logistic\n\n\ndef log_prob_from_logits(x):\n \"\"\" numerically stable log_softmax implementation that prevents overflow \"\"\"\n # TF ordering\n axis = len(x.size()) - 1\n m, _ = torch.max(x, dim=-1, keepdim=True)\n return x - m - torch.log(torch.sum(torch.exp(x - m), dim=axis, keepdim=True))\n\n\ndef test_log_softmax():\n x = torch.rand(2, 16000, 30)\n y = log_prob_from_logits(x)\n y_hat = F.log_softmax(x, -1)\n\n y = y.data.cpu().numpy()\n y_hat = y_hat.data.cpu().numpy()\n assert np.allclose(y, y_hat)\n\n\ndef test_mixture():\n np.random.seed(1234)\n\n x, sr = librosa.load(pysptk.util.example_audio_file(), sr=None)\n assert sr == 16000\n\n T = len(x)\n x = x.reshape(1, T, 1)\n y = torch.from_numpy(x).float()\n y_hat = torch.rand(1, 30, T).float()\n\n print(y.shape, y_hat.shape)\n\n loss = discretized_mix_logistic_loss(y_hat, y)\n print(loss)\n\n loss = discretized_mix_logistic_loss(y_hat, y, reduce=False)\n print(loss.size(), y.size())\n assert loss.size() == y.size()\n\n y = sample_from_discretized_mix_logistic(y_hat)\n print(y.shape)\n\n\ndef test_misc():\n # https://en.wikipedia.org/wiki/Logistic_distribution\n # what i have learned\n # m = (x - mu) / s\n m = torch.rand(10, 10)\n log_pdf_mid1 = -2 * torch.log(torch.exp(m / 2) + torch.exp(-m / 2))\n log_pdf_mid2 = m - 2 * F.softplus(m)\n assert np.allclose(log_pdf_mid1.data.numpy(), log_pdf_mid2.data.numpy())\n\n # Edge case for 0\n plus_in = torch.rand(10, 10)\n log_cdf_plus1 = torch.sigmoid(m).log()\n log_cdf_plus2 = m - F.softplus(m)\n assert np.allclose(log_cdf_plus1.data.numpy(), log_cdf_plus2.data.numpy())\n\n # Edge case for 255\n min_in = torch.rand(10, 10)\n log_one_minus_cdf_min1 = (1 - torch.sigmoid(min_in)).log()\n log_one_minus_cdf_min2 = -F.softplus(min_in)\n assert np.allclose(log_one_minus_cdf_min1.data.numpy(), log_one_minus_cdf_min2.data.numpy())\n"
] | [
[
"numpy.allclose",
"torch.nn.functional.log_softmax",
"torch.rand",
"numpy.random.seed",
"torch.exp",
"torch.from_numpy",
"torch.max",
"torch.nn.functional.softplus",
"torch.sigmoid"
]
] |
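The `test_mixture.py` record checks algebraic identities used by the discretized mixture-of-logistics loss, e.g. that the logistic log-pdf at the bin midpoint, `m - 2*softplus(m)`, equals the naive `-2*log(exp(m/2) + exp(-m/2))`. A standalone sketch of those checks in plain PyTorch (standard-normal inputs here are just a convenient test signal):

```python
import torch
import torch.nn.functional as F

m = torch.randn(10, 10)  # m = (x - mu) / s for a standard logistic

# Naive log-pdf of the logistic at m (up to the 1/s factor)
log_pdf_naive = -2 * torch.log(torch.exp(m / 2) + torch.exp(-m / 2))
# Numerically stabler form used inside the mixture loss
log_pdf_stable = m - 2 * F.softplus(m)
assert torch.allclose(log_pdf_naive, log_pdf_stable, atol=1e-6)

# Same softplus trick for the CDF terms at the edges of the value range
assert torch.allclose(torch.sigmoid(m).log(), m - F.softplus(m), atol=1e-6)
assert torch.allclose((1 - torch.sigmoid(m)).log(), -F.softplus(m), atol=1e-6)
```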
jjbrophy47/tree_deletion | [
"97041d129da335de3018b3243bc81943088abf24"
] | [
"scripts/experiments/roar.py"
] | [
"\"\"\"\nRemove and Retrain (ROAR) experiment.\n\"\"\"\nimport os\nimport sys\nimport time\nimport argparse\nfrom datetime import datetime\n\nimport numpy as np\n\nhere = os.path.abspath(os.path.dirname(__file__))\nsys.path.insert(0, here + '/../../')\nsys.path.insert(0, here + '/../')\nimport dare\nfrom utility import data_util\nfrom utility import exp_util\nfrom utility import print_util\n\nMAX_SEED_INCREASE = 1000\n\n\ndef _get_model(args):\n \"\"\"\n Return model.\n \"\"\"\n model = dare.Forest(criterion=args.criterion,\n topd=0,\n k=args.k,\n n_estimators=args.n_estimators,\n max_features=args.max_features,\n max_depth=args.max_depth,\n random_state=args.rs)\n\n return model\n\n\ndef measure_performance(sort_indices, percentages, X_test, y_test, X_train, y_train,\n logger=None):\n \"\"\"\n Measures the change in log loss as training instances are removed.\n \"\"\"\n r = {}\n aucs = []\n accs = []\n aps = []\n\n # remove training samples in batches\n for percentage in percentages:\n n_samples = int(X_train.shape[0] * (percentage / 100))\n remove_indices = sort_indices[:n_samples]\n\n new_X_train = np.delete(X_train, remove_indices, axis=0)\n new_y_train = np.delete(y_train, remove_indices)\n\n if len(np.unique(new_y_train)) == 1:\n print(percentage)\n break\n\n # train target model\n model = _get_model(args)\n label = '{}%'.format(percentage)\n model = model.fit(new_X_train, new_y_train)\n\n auc, acc, ap = exp_util.performance(model, X_test, y_test,\n logger=logger, name=label)\n aucs.append(auc)\n accs.append(acc)\n aps.append(ap)\n\n r['auc'] = aucs\n r['acc'] = accs\n r['ap'] = aps\n\n return r\n\n\ndef experiment(args, logger, out_dir):\n \"\"\"\n Obtains data, trains model, and generates instance-attribution explanations.\n \"\"\"\n\n # get data\n X_train, X_test, y_train, y_test = data_util.get_data(args.dataset, data_dir=args.data_dir)\n\n # select a subset of the test data for evaluation\n n_test_samples = args.n_test if args.n_test is not None else int(X_test.shape[0] * args.test_frac)\n np.random.seed(args.rs)\n test_indices = np.random.choice(X_test.shape[0], size=n_test_samples, replace=False)\n X_test_sub, y_test_sub = X_test[test_indices], y_test[test_indices]\n\n # choose new subset if test subset all contain the same label\n new_seed = args.rs\n while y_test_sub.sum() == len(y_test_sub) or y_test_sub.sum() == 0:\n np.random.seed(new_seed)\n new_seed += np.random.randint(MAX_SEED_INCREASE)\n np.random.seed(new_seed)\n test_indices = np.random.choice(X_test.shape[0], size=n_test_samples, replace=False)\n X_test_sub, y_test_sub = X_test[test_indices], y_test[test_indices]\n\n X_test = X_test_sub\n y_test = y_test_sub\n\n # dataset statistics\n logger.info('\\ntrain instances: {:,}'.format(X_train.shape[0]))\n logger.info('test instances: {:,}'.format(X_test.shape[0]))\n logger.info('features: {:,}'.format(X_train.shape[1]))\n\n # experiment settings\n logger.info('\\nrandom state: {}'.format(args.rs))\n logger.info('criterion: {}'.format(args.criterion))\n logger.info('n_estimators: {}'.format(args.n_estimators))\n logger.info('max_depth: {}'.format(args.max_depth))\n logger.info('k: {}'.format(args.k))\n logger.info('max_features: {}'.format(args.max_features))\n logger.info('n_test: {}\\n'.format(args.n_test))\n\n # train target model\n model = _get_model(args)\n name = 'G-DaRE'\n\n start = time.time()\n model = model.fit(X_train, y_train)\n train_time = time.time() - start\n\n logger.info('[{}] train time: {:.3f}s'.format(name, train_time))\n exp_util.performance(model, 
X_test, y_test, logger=logger, name=name)\n\n percentages = list(range(0, 100, 1))\n start = time.time()\n\n # random method\n if args.method == 'random':\n logger.info('\\nordering by random...')\n np.random.seed(args.rs)\n train_order = np.random.choice(np.arange(X_train.shape[0]), size=X_train.shape[0], replace=False)\n results = measure_performance(train_order, percentages, X_test, y_test, X_train, y_train, logger)\n\n # G-DaRE 1: ordered from biggest sum increase in positive label confidence to least\n elif args.method == 'dare1':\n logger.info('\\nordering by G-DaRE...')\n explanation = exp_util.explain_lite(model, X_train, y_train, X_test)\n train_order = np.argsort(explanation)[::-1]\n results = measure_performance(train_order, percentages, X_test, y_test, X_train, y_train, logger)\n\n # G-DaRE 2: ordered by most positively influential to least positively influential\n elif args.method == 'dare2':\n logger.info('\\nordering by G-DaRE 2...')\n explanation = exp_util.explain_lite(model, X_train, y_train, X_test, y_test=y_test)\n train_order = np.argsort(explanation)[::-1]\n results = measure_performance(train_order, percentages, X_test, y_test, X_train, y_train, logger)\n\n # G-DaRE 3: ordered by biggest sum of absolute change in predictions\n elif args.method == 'dart3':\n logger.info('\\nordering by G-DaRE 3...')\n explanation = exp_util.explain_lite(model, X_train, y_train, X_test, use_abs=True)\n train_order = np.argsort(explanation)[::-1]\n results = measure_performance(train_order, percentages, X_test, y_test, X_train, y_train, logger)\n\n logger.info('time: {:3f}s'.format(time.time() - start))\n\n results['percentage'] = percentages\n np.save(os.path.join(out_dir, 'results.npy'), results)\n\n\ndef main(args):\n\n # create output dir\n out_dir = os.path.join(args.out_dir,\n args.dataset,\n args.criterion,\n args.method,\n 'rs_{}'.format(args.rs))\n\n log_fp = os.path.join(out_dir, 'log.txt')\n os.makedirs(out_dir, exist_ok=True)\n\n # skip experiment if results already exist\n if args.append_results and os.path.exists(os.path.join(out_dir, 'results.npy')):\n return\n\n # create logger\n logger = print_util.get_logger(log_fp)\n logger.info(args)\n logger.info(datetime.now())\n\n # run experiment\n experiment(args, logger, out_dir)\n\n # remove logger\n print_util.remove_logger(logger)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n # I/O settings\n parser.add_argument('--out_dir', type=str, default='output/roar/', help='output directory.')\n parser.add_argument('--data_dir', type=str, default='data', help='data directory.')\n parser.add_argument('--dataset', default='surgical', help='dataset to use for the experiment.')\n parser.add_argument('--append_results', action='store_true', default=False, help='add results.')\n\n # experiment settings\n parser.add_argument('--rs', type=int, default=1, help='seed to enhance reproducibility.')\n parser.add_argument('--n_test', type=int, default=50, help='no. 
test instances')\n parser.add_argument('--method', type=str, default='dare1', help='method to use.')\n\n # tree hyperparameters\n parser.add_argument('--n_estimators', type=int, default=100, help='number of trees in the forest.')\n parser.add_argument('--max_depth', type=int, default=20, help='maximum depth of the tree.')\n parser.add_argument('--k', type=int, default=25, help='number of thresholds to consider.')\n parser.add_argument('--max_features', type=str, default='sqrt', help='maximum features to sample.')\n parser.add_argument('--criterion', type=str, default='gini', help='splitting criterion.')\n\n args = parser.parse_args()\n main(args)\n"
] | [
[
"numpy.random.seed",
"numpy.random.choice",
"numpy.argsort",
"numpy.arange",
"numpy.delete",
"numpy.random.randint",
"numpy.unique"
]
] |
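The `roar.py` record above implements Remove-and-Retrain: rank training points by an importance score, delete the top fraction, retrain, and track how test performance degrades. A condensed sketch of that loop using scikit-learn, where the random forest and the random ordering are stand-ins for the record's DaRE model and its explanation-based orderings:

```python
import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=2000, n_features=20, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# Importance ordering: here just a random permutation (the 'random' baseline);
# ROAR compares this curve against orderings derived from influence estimates.
rng = np.random.default_rng(0)
train_order = rng.permutation(X_train.shape[0])

accs = []
for percentage in range(0, 50, 10):
    n_remove = int(X_train.shape[0] * percentage / 100)
    keep_X = np.delete(X_train, train_order[:n_remove], axis=0)
    keep_y = np.delete(y_train, train_order[:n_remove])
    if len(np.unique(keep_y)) == 1:  # stop if only one class remains
        break
    model = RandomForestClassifier(n_estimators=100, random_state=0)
    model.fit(keep_X, keep_y)
    accs.append((percentage, model.score(X_test, y_test)))

print(accs)  # accuracy vs. % of training data removed
```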
iostermann/deeplab2 | [
"e0f7eecfac5d35c3e9e66f061098d5f5f15a7152"
] | [
"data/dataset_utils_test.py"
] | [
"# coding=utf-8\n# Copyright 2021 The Deeplab2 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for dataset_utils.\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom deeplab2.data import dataset_utils\n\n\nclass DatasetUtilsTest(tf.test.TestCase):\n\n def _get_test_labels(self, num_classes, shape, label_divisor):\n num_ids_per_class = 35\n semantic_labels = np.random.randint(num_classes, size=shape)\n panoptic_labels = np.random.randint(\n num_ids_per_class, size=shape) + semantic_labels * label_divisor\n\n semantic_labels = tf.convert_to_tensor(semantic_labels, dtype=tf.int32)\n panoptic_labels = tf.convert_to_tensor(panoptic_labels, dtype=tf.int32)\n\n return panoptic_labels, semantic_labels\n\n def setUp(self):\n super().setUp()\n self._first_thing_class = 9\n self._num_classes = 19\n self._dataset_info = {\n 'panoptic_label_divisor': 1000,\n 'class_has_instances_list': tf.range(self._first_thing_class,\n self._num_classes)\n }\n self._num_ids = 37\n self._labels, self._semantic_classes = self._get_test_labels(\n self._num_classes, [2, 33, 33],\n self._dataset_info['panoptic_label_divisor'])\n\n def test_get_panoptic_and_semantic_label(self):\n # Note: self._labels contains one crowd instance per class.\n (returned_sem_labels, returned_pan_labels, returned_thing_mask,\n returned_crowd_region) = (\n dataset_utils.get_semantic_and_panoptic_label(\n self._dataset_info, self._labels, ignore_label=255))\n\n expected_semantic_labels = self._semantic_classes\n condition = self._labels % self._dataset_info['panoptic_label_divisor'] == 0\n condition = tf.logical_and(\n condition,\n tf.math.greater_equal(expected_semantic_labels,\n self._first_thing_class))\n expected_crowd_labels = tf.where(condition, 1.0, 0.0)\n expected_pan_labels = tf.where(\n condition, 255 * self._dataset_info['panoptic_label_divisor'],\n self._labels)\n expected_thing_mask = tf.where(\n tf.math.greater_equal(expected_semantic_labels,\n self._first_thing_class), 1.0, 0.0)\n\n self.assertListEqual(returned_sem_labels.shape.as_list(),\n expected_semantic_labels.shape.as_list())\n self.assertListEqual(returned_pan_labels.shape.as_list(),\n expected_pan_labels.shape.as_list())\n self.assertListEqual(returned_crowd_region.shape.as_list(),\n expected_crowd_labels.shape.as_list())\n self.assertListEqual(returned_thing_mask.shape.as_list(),\n expected_thing_mask.shape.as_list())\n np.testing.assert_equal(returned_sem_labels.numpy(),\n expected_semantic_labels.numpy())\n np.testing.assert_equal(returned_pan_labels.numpy(),\n expected_pan_labels.numpy())\n np.testing.assert_equal(returned_crowd_region.numpy(),\n expected_crowd_labels.numpy())\n np.testing.assert_equal(returned_thing_mask.numpy(),\n expected_thing_mask.numpy())\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [
[
"tensorflow.math.greater_equal",
"tensorflow.range",
"tensorflow.where",
"tensorflow.convert_to_tensor",
"numpy.random.randint",
"tensorflow.test.main"
]
] |
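The deeplab2 test above builds panoptic labels as `semantic * panoptic_label_divisor + instance_id`, so the two components can be recovered with integer division and modulo. A small NumPy illustration of that encoding (the divisor of 1000 matches the test's `panoptic_label_divisor`; the toy arrays are illustrative):

```python
import numpy as np

label_divisor = 1000                      # as in the test's dataset_info
semantic = np.array([[7, 7], [12, 12]])   # class id per pixel
instance = np.array([[0, 3], [1, 1]])     # instance id within each class

panoptic = semantic * label_divisor + instance   # e.g. 7003, 12001

# Recover both components from the packed label
recovered_semantic = panoptic // label_divisor
recovered_instance = panoptic % label_divisor

assert np.array_equal(recovered_semantic, semantic)
assert np.array_equal(recovered_instance, instance)
print(panoptic)
```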
jwfromm/relax | [
"f120282007778706199243ee88b50697c2b9550c"
] | [
"python/tvm/relay/frontend/pytorch.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=import-self, too-many-lines, len-as-condition, no-else-return, unused-variable, too-many-nested-blocks\n# pylint: disable=consider-iterating-dictionary, invalid-name, unused-argument, unused-variable, broad-except\n# pylint: disable=import-outside-toplevel, simplifiable-if-expression, cell-var-from-loop, unnecessary-lambda\n# pylint: disable=missing-function-docstring\n\"\"\"PT: PyTorch frontend.\"\"\"\nimport functools\nimport itertools\nimport math\nimport sys\nimport logging\n\nimport numpy as np\nimport tvm\nfrom tvm.ir import IRModule\nfrom tvm.topi.utils import get_const_tuple\n\nfrom .. import analysis as _analysis\nfrom .. import expr as _expr\nfrom .. import function as _function\nfrom .. import op as _op\nfrom .. import qnn, transform\nfrom ..expr_functor import ExprMutator\nfrom ..loops import while_loop\nfrom ..prelude import Prelude, StaticTensorArrayOps\nfrom ..ty import Any, TensorType, TupleType\nfrom . import qnn_torch\nfrom .common import AttrCvt, get_relay_op, gru_cell, logger\nfrom .common import infer_shape as _infer_shape\nfrom .common import infer_value as _infer_value\nfrom .common import infer_value_simulated as _infer_value_simulated\nfrom .common import lstm_cell, try_infer_value, unbind\nfrom .pytorch_utils import is_version_greater_than\n\n__all__ = [\"from_pytorch\"]\n\n# This returns a \"subgraph\" which puts variables whenever\n# the type is known. 
It also records things to map the input\n# nodes to the extracted graph's nodes.\n# As Python objects are not round-trippable through C++, and\n# our type annotations only live in Python, we need to map\n# the we need to map the nodes we get in visiting to the nodes\n# we used to construct the graph (they are the same in C++,\n# match each other in dictionary lookups, but are not the same\n# in Python) by using the hint dictionary filled as\n# {node: node for node in nodes} to get the type annotations.\n# https://discuss.tvm.apache.org/t/round-tripping-objects-through-the-ffi/8440\nclass _TypeFinder(ExprMutator):\n def __init__(self, types):\n super().__init__()\n self.counter = 0\n self.vars = {}\n self.types = types\n self.leave = set() # some variables are not inputs\n\n def visit_let(self, let):\n self.leave.add(let.var)\n return super().visit_let(let)\n\n def visit_function(self, fn):\n self.leave.update(fn.params)\n return super().visit_function(fn)\n\n def visit(self, expr):\n if expr in self.leave:\n return super().visit(expr)\n if expr in self.vars:\n return self.vars[expr]\n if isinstance(expr, tvm.relay.Var):\n self.vars[expr] = expr\n return expr\n if expr in self.types:\n ty = self.types[expr]\n v = tvm.relay.var(f\"_{self.counter}\", type_annotation=ty)\n self.counter += 1\n self.vars[expr] = v\n return v\n v = super().visit(expr)\n return v\n\n\ndef _should_construct_dynamic_list(list_construct_node):\n # if this list is element-accessed or modified at runtime, generate List ADT\n def inplace_add_to_add(op_name):\n if op_name == \"aten::add_\":\n return \"aten::add\"\n else:\n return op_name\n\n uses = _get_uses(list_construct_node)\n\n for loop_use in filter(lambda use: use.user.kind() == \"prim::Loop\", uses):\n block_input_index = loop_use.offset - 1\n block = list(loop_use.user.blocks())[0]\n list_loop_var = list(block.inputs())[block_input_index]\n uses += _get_uses(list_loop_var.node())\n\n op_names = map(inplace_add_to_add, set(use.user.kind() for use in uses))\n\n list_ops = set([\"aten::add\", \"aten::__getitem__\"])\n intersect = list_ops.intersection(op_names)\n\n if len(intersect) > 0 and intersect != set([\"aten::add\"]):\n return True\n\n # if add op outputs list, it is dynamic so we need to construct List ADT\n for use in filter(lambda use: use.user.kind() in [\"aten::add\", \"aten::add_\"], uses):\n output_type = _get_node_type(use.user)\n if output_type == \"ListType\":\n return True\n\n return False\n\n\ndef _is_int_seq(seq):\n # TODO (t-vi): handle non-int constants? 
(like numpy.intXX)\n return len(seq) > 0 and all([isinstance(i, int) for i in seq])\n\n\n# operator implementation\nclass PyTorchOpConverter:\n \"\"\"A helper class for holding PyTorch op converters.\"\"\"\n\n def __init__(self, prelude, default_dtype):\n self.prelude = prelude\n self.default_dtype = default_dtype\n self.create_convert_map()\n self.types = {} # map from nodes to (Relay) type annotations\n\n # this incrementally infers the type, see the comments on the type visitor\n # above.\n def infer_type(self, node, mod=None):\n \"\"\"An incremental method to infer the type of a node in the relay graph.\"\"\"\n\n if node in self.types:\n return self.types[node]\n if isinstance(node, tvm.relay.Var):\n return node.type_annotation\n\n tf = _TypeFinder(types=self.types)\n new_node = tf.visit(node)\n fn = _function.Function(list(tf.vars.values()), new_node)\n new_mod = IRModule({\"main\": fn})\n if mod is not None:\n new_mod.update(mod)\n new_mod = transform.RemoveUnusedFunctions()(new_mod)\n new_mod = transform.InferType()(new_mod)\n entry = new_mod[\"main\"]\n ty = entry.body.checked_type\n self.types[node] = ty\n return self.types[node]\n\n def infer_type_with_prelude(self, val):\n body = self.infer_type(val, self.prelude.mod)\n return body\n\n # list ADT utilities\n def convert_to_list_adt(self, py_lst):\n elem_tys = [self.infer_type_with_prelude(elem) for elem in py_lst]\n msg = \"List elements should have identical types\"\n assert all(map(lambda ty: ty == elem_tys[0], elem_tys)), msg\n\n # get_type returns type_name, ctor1, ..., ctorN\n # 1 is nil\n _, cons, nil = self.prelude.mod.get_type(\"List\")\n adt_lst = nil()\n for elem in reversed(py_lst):\n adt_lst = cons(elem, adt_lst)\n return adt_lst\n\n def map_tensor_array_constructor(self, adt_lst, shape):\n static_tensor_array_ops = StaticTensorArrayOps(self.prelude, \"float32\", shape)\n static_tensor_array_ops.register()\n tensor_create = self.prelude.get_tensor_ctor_static(\"tensor_constructor\", \"float32\", shape)\n return self.prelude.map(tensor_create, adt_lst)\n\n def convert_to_tensor_array(self, adt_lst):\n _, cons, nil = self.prelude.mod.get_type(\"List\")\n if self.prelude.length(adt_lst) == 0:\n return nil()\n\n checked_type = self.infer_type_with_prelude(self.prelude.hd(adt_lst))\n shape = checked_type.shape\n tensor_array = self.map_tensor_array_constructor(adt_lst, shape)\n return tensor_array, tuple(shape)\n\n def infer_shape(self, inputs, mod=None):\n \"\"\"A method to get the output type of an intermediate node in the graph.\"\"\"\n typ = self.infer_type(inputs, mod=mod)\n if hasattr(typ, \"shape\"):\n # Regular operator that outputs tensors\n return get_const_tuple(typ.shape)\n # The return type is not a tensor, for example List\n return typ\n\n def infer_shape_with_prelude(self, inputs):\n return self.infer_shape(inputs, mod=self.prelude.mod)\n\n def record_output_type(self, output):\n if isinstance(output, tuple):\n cleaned_output = [o for o in output if o is not None]\n types = self.infer_type_with_prelude(_expr.Tuple(cleaned_output))\n for o, t in zip(cleaned_output, types.fields):\n self.types[o] = t\n elif isinstance(output, _expr.Expr):\n self.infer_type_with_prelude(output)\n # it can also happen that the type is int or so\n\n def pytorch_promote_types(self, inputs, dtypes):\n \"\"\"This promotes TVM inputs with TVM dtypes passed like PyTorch would\"\"\"\n actual_dtypes = []\n for i, inp in enumerate(inputs):\n if isinstance(inp, _expr.Expr):\n idt = self.infer_type(inp).dtype\n actual_dtypes.append(idt)\n 
else:\n actual_dtypes.append(dtypes[i])\n dtypes = actual_dtypes\n tensor_dtypes = [dt for inp, dt in zip(inputs, dtypes) if not np.isscalar(inp)]\n non_tensor_inputs = [inp for inp in inputs if np.isscalar(inp)]\n result_type = _pytorch_result_type(tensor_dtypes, non_tensor_inputs)\n results = []\n for inp, dt in zip(inputs, dtypes):\n if np.isscalar(inp):\n results.append(_expr.const(inp, dtype=result_type))\n elif dt == result_type:\n results.append(inp)\n else:\n results.append(_op.cast(inp, result_type))\n return results\n\n def is_quantized_tensor(self, data):\n # If a quantized Torch module is saved and loaded back, dtype will be dropped\n # Since dtypes from Torch tensors are not reliable in such cases, we use\n # Relay's type inference result to decide if an input tensor is quantized\n ty = self.infer_type_with_prelude(data)\n return ty.dtype == \"uint8\"\n\n # Operator implementations\n def make_elemwise(self, name):\n def elemwise(inputs, input_types):\n data0, data1 = self.pytorch_promote_types(inputs[:2], input_types[:2])\n return get_relay_op(name)(data0, data1)\n\n return elemwise\n\n def min_max_common(self, name_elemwise, name_reduce, inputs, input_types):\n if len(inputs) == 1:\n data = self.pytorch_promote_types(inputs[:1], input_types[:1])\n return get_relay_op(name_reduce)(data[0])\n elif len(inputs) >= 2 and isinstance(inputs[1], int):\n data = self.pytorch_promote_types(inputs[:1], input_types[:1])\n dim = inputs[1]\n keepdims = inputs[2] if len(inputs) > 2 else False\n # also return dummy indices\n return get_relay_op(name_reduce)(data[0], axis=dim, keepdims=keepdims), None\n else:\n data0, data1 = self.pytorch_promote_types(inputs[:2], input_types[:2])\n return get_relay_op(name_elemwise)(data0, data1)\n\n def max(self, inputs, input_types):\n return self.min_max_common(\"maximum\", \"max\", inputs, input_types)\n\n def min(self, inputs, input_types):\n return self.min_max_common(\"minimum\", \"min\", inputs, input_types)\n\n def make_unary(self, name):\n def unary(inputs, input_types):\n # this is just to ensure tensor input\n (data,) = self.pytorch_promote_types(inputs[:1], input_types[:1])\n return get_relay_op(name)(data)\n\n return unary\n\n def log1p(self, inputs, input_types):\n # 1_plus_log x = log(x + 1)\n (dtype,) = input_types\n one = _expr.const(1, dtype=dtype)\n return _op.log(inputs[0] + one)\n\n def arange(self, inputs, input_types):\n def _get_value(val, dtype):\n # dtype is a tvm dtype\n if isinstance(val, _expr.Expr):\n inp = _op.cast(val, dtype)\n ret, _ = try_infer_value(inp, lambda ret: _expr.const(ret, dtype))\n else:\n ret = _create_typed_const(val, dtype)\n return ret\n\n def _get_type(val, inp_type):\n if isinstance(val, _expr.Expr):\n dtype = str(self.infer_type(val))\n return dtype\n return inp_type\n\n # PyTorch arange uses the following type semantics:\n # - if a dtype is given, start, stop, step are converted to that dtype\n # - if no dtype is given and all args are integral, dtype is int64\n # - if no dtype is given and there is a float arg, dtype is float32\n if len(inputs) == 5:\n dtype0 = _get_type(inputs[0], input_types[0])\n if inputs[1] is not None:\n dtype = _convert_dtype_value(inputs[1])\n elif dtype0.startswith(\"float\"):\n dtype = \"float32\"\n else:\n dtype = \"int64\"\n start = _expr.const(0, dtype)\n stop = _get_value(inputs[0], dtype)\n step = _expr.const(1, dtype)\n elif len(inputs) == 7:\n types = [_get_type(inputs[i], input_types[i]) for i in range(3)]\n if inputs[3] is not None:\n dtype = 
_convert_dtype_value(inputs[3])\n elif any([t.startswith(\"float\") for t in types]):\n dtype = \"float32\"\n else:\n dtype = \"int64\"\n start = _get_value(inputs[0], dtype)\n stop = _get_value(inputs[1], dtype)\n step = _get_value(inputs[2], dtype)\n else:\n msg = \"Unknown number of arguments (%d) to parse.\" % (len(inputs))\n raise AssertionError(msg)\n\n return _op.transform.arange(start=start, stop=stop, step=step, dtype=dtype)\n\n def squeeze(self, inputs, input_types):\n data = inputs[0]\n if len(inputs) == 1:\n axis = None\n else:\n # TODO (t-vi): why is the cast to int needed? similarly elsewhere\n axis = [int(inputs[1])]\n\n return _op.transform.squeeze(data, axis)\n\n def unsqueeze(self, inputs, input_types):\n data = inputs[0]\n axis = inputs[1]\n\n return _op.transform.expand_dims(data, int(axis), 1)\n\n def concatenate(self, inputs, input_types):\n def tensor_array_concat(lst, axis):\n assert axis == 0, \"Tensor array concat supported only for axis 0\"\n tensor_array, shape = self.convert_to_tensor_array(lst)\n concat_shape = (Any(),) + shape[1:]\n concat = self.prelude.get_global_var_static(\"tensor_array_concat\", \"float32\", shape)\n concatenated = concat(tensor_array)\n\n static_tensor_array_ops = StaticTensorArrayOps(self.prelude, \"float32\", concat_shape)\n static_tensor_array_ops.register()\n get_tensor = self.prelude.get_global_var_static(\n \"tensor_get_data\", \"float32\", concat_shape\n )\n return get_tensor(concatenated)\n\n data = inputs[0]\n axis = inputs[1]\n\n if not isinstance(data, list):\n return tensor_array_concat(data, axis)\n\n if isinstance(data, _expr.Expr):\n data = [data]\n\n return _op.tensor.concatenate(data, int(axis))\n\n def slice(self, inputs, input_types):\n axis_dtype = \"int64\"\n index_size_limit = sys.maxsize\n data = inputs[0]\n dshape = self.infer_shape(data)\n ndim = len(dshape)\n dim = int(inputs[1])\n stride = inputs[4]\n\n target_begin, is_begin_const = try_infer_value(\n inputs[2], lambda ret: ret.astype(np.int).item(0)\n )\n target_end, is_end_const = try_infer_value(\n inputs[3], lambda ret: ret.astype(np.int).item(0)\n )\n\n # A fast path when slicing is nop.\n if (\n isinstance(target_begin, int)\n and isinstance(target_end, int)\n and target_begin == 0\n and target_end >= index_size_limit\n and stride == 1\n ):\n return data\n\n if target_begin is None and target_end is None:\n return data\n\n # Process begin\n begin = [0] * ndim\n\n if target_begin is not None:\n begin[dim] = target_begin\n\n if target_begin is not None and not isinstance(begin[dim], int):\n tmp = []\n for b in begin:\n if isinstance(b, int):\n tmp.append(_op.expand_dims(_expr.const(b, axis_dtype), axis=0))\n else:\n tmp.append(_op.cast(_op.expand_dims(b, axis=0), axis_dtype))\n begin = _op.concatenate(tmp, axis=0)\n btype = self.infer_type(begin).dtype\n if str(btype) != axis_dtype:\n begin = _op.cast(begin, axis_dtype)\n\n # Process end\n if isinstance(target_end, int) and target_end >= index_size_limit:\n target_end = dshape[dim]\n\n if any([isinstance(d, tvm.tir.Any) for d in dshape]):\n end = _op.shape_of(data)\n else:\n end = dshape\n\n if isinstance(target_end, int):\n if isinstance(end, list):\n end[dim] = target_end\n else:\n all_static = True\n for i, shape_dim in enumerate(dshape):\n if i != dim and isinstance(shape_dim, tvm.tir.Any):\n all_static = False\n\n if all_static:\n end = list(get_const_tuple(dshape))\n end[dim] = target_end\n else:\n target_end = _expr.const(target_end)\n end = _op.scatter(\n end,\n _op.expand_dims(_expr.const(dim), 
axis=0),\n _op.expand_dims(target_end, axis=0),\n axis=0,\n )\n else:\n end = _op.cast(_op.shape_of(data), axis_dtype)\n if target_end is not None and not isinstance(target_end, tvm.tir.Any):\n ttype = self.infer_type(target_end).dtype\n if str(ttype) != axis_dtype:\n target_end = _op.cast(target_end, axis_dtype)\n end = _op.scatter(\n end,\n _op.expand_dims(_expr.const(dim), axis=0),\n _op.expand_dims(target_end, axis=0),\n axis=0,\n )\n\n if not isinstance(end, list):\n etype = self.infer_type(end).dtype\n if str(etype) != axis_dtype:\n end = _op.cast(end, axis_dtype)\n\n strides = [1] * ndim\n strides[dim] = stride\n\n return _op.transform.strided_slice(\n data, begin=begin, end=end, strides=strides, slice_mode=\"end\"\n )\n\n def narrow(self, inputs, input_types):\n # Inputs are:\n # 0 - the tensor to narrow\n # 1 - the dimension along which to narrow\n # 2 - the starting dimension\n # 3 - the distance to the ending dimension\n # Lets find the ending dimension\n end = self.add(inputs[2:4], input_types[2:4])\n stride = 1\n slice_input = inputs[:3] + [end, stride]\n slice_types = input_types + [\"int32\"]\n return self.slice(slice_input, slice_types)\n\n def split(self, inputs, input_types):\n data = inputs[0]\n split_size = int(inputs[1])\n dim = int(inputs[2])\n\n split_index = split_size\n indices = []\n while split_index < self.infer_shape(data)[dim]:\n indices.append(split_index)\n split_index += split_size\n\n return _op.split(data, indices, dim)\n\n def split_with_sizes(self, inputs, input_types):\n data = inputs[0]\n sections = inputs[1]\n dim = int(inputs[2])\n\n if len(sections) == 1:\n # a special case used in torchvision detection models\n return _expr.TupleWrapper(_expr.Tuple([data]), 1)\n\n split_index = 0\n indices = []\n for i in range(len(sections) - 1):\n index, _ = try_infer_value(sections[i], lambda ret: int(ret))\n split_index += index\n indices.append(split_index)\n\n return _op.split(data, indices, dim)\n\n def select(self, inputs, input_types):\n data = inputs[0]\n dim = int(inputs[1])\n index = _wrap_const(inputs[2])\n return _op.transform.take(data, index, axis=dim, mode=\"wrap\")\n\n def take(self, inputs, input_types):\n data = inputs[0]\n indices = _op.cast(inputs[1], \"int32\")\n\n return _op.transform.take(data, indices=indices, mode=\"wrap\")\n\n def topk(self, inputs, input_types):\n data = inputs[0]\n axis = int(inputs[2])\n is_ascend = not bool(inputs[3])\n sort = bool(inputs[4])\n\n if isinstance(inputs[1], _expr.Expr):\n k, _ = try_infer_value(inputs[1], lambda ret: ret.tolist())\n else:\n k = inputs[1]\n\n if not sort:\n msg = \"Currently supports only sorted output for topk operator.\"\n raise AssertionError(msg)\n\n outs = _op.topk(data, k=k, axis=axis, is_ascend=is_ascend, ret_type=\"both\", dtype=\"int64\")\n\n return outs[0], outs[1]\n\n def reciprocal(self, inputs, input_types):\n data = inputs[0]\n return _expr.const(1.0, dtype=input_types[0]) / data\n\n def repeat(self, inputs, input_types):\n data = inputs[0]\n reps = []\n for r in inputs[1]:\n if isinstance(r, int):\n reps.append(r)\n else:\n reps.append(int(_infer_value(r, {}).numpy()))\n\n return _op.transform.tile(data, reps=reps)\n\n def repeat_interleave(self, inputs, input_types):\n data = inputs[0]\n if isinstance(inputs[1], int):\n repeats = inputs[1]\n axis = inputs[2]\n elif isinstance(inputs[1], _expr.Expr):\n if isinstance(inputs[1], _expr.Constant):\n repeats = int(inputs[1].data.numpy())\n else:\n repeats, _ = try_infer_value(inputs[1], lambda ret: ret.tolist())\n axis = 
inputs[2]\n else:\n msg = \"Only repeat with one value as repeat is currently supported.\"\n raise AssertionError(msg)\n if axis is None: # Flatten the data if no axis is given from torch\n data = _op.transform.reshape(data, [-1])\n axis = 0\n return _op.transform.repeat(data, repeats=repeats, axis=axis)\n\n def addcdiv(self, inputs, input_types):\n data, t1, t2, c = self.pytorch_promote_types(inputs[:4], input_types[:4])\n return data + (c * (t1 / t2))\n\n def addcmul(self, inputs, input_types):\n data, t1, t2, c = self.pytorch_promote_types(inputs[:4], input_types[:4])\n return data + (c * (t1 * t2))\n\n def where(self, inputs, input_types):\n if len(inputs) == 1:\n return self.nonzero([inputs[0], True], input_types)\n\n cond = inputs[0]\n x, y = self.pytorch_promote_types(inputs[1:3], input_types[1:3])\n return _op.where(cond, x, y)\n\n def full_impl(self, data, fill_value, dtype):\n size = []\n need_reshape = False\n new_shape = []\n for dim in data:\n if isinstance(dim, _expr.Expr):\n if isinstance(dim, _expr.Constant):\n dim = int(dim.data.numpy())\n if isinstance(size, list):\n size.append(dim)\n new_shape.append(dim)\n else:\n dim, success = try_infer_value(dim, lambda ret: int(ret), lambda: 0)\n new_shape.append(dim)\n\n if success:\n if isinstance(size, list):\n size.append(dim)\n else:\n size = None\n need_reshape = True\n else:\n if isinstance(size, list):\n size.append(dim)\n new_shape.append(dim)\n\n if size is None:\n tmp = []\n for dim in data:\n tmp.append(_op.cast(_op.expand_dims(dim, axis=0), \"int64\"))\n size = _op.concatenate(tmp, axis=0)\n\n out = _op.full(_expr.const(fill_value), size, dtype=dtype)\n if need_reshape:\n out = _op.reshape(out, new_shape)\n return out\n\n def ones(self, inputs, input_types):\n data = inputs[0]\n\n import torch\n\n if not isinstance(data, (_expr.Expr, list, torch.Tensor, np.ndarray)):\n msg = \"Data type %s could not be parsed in ones op\" % (type(data))\n raise AssertionError(msg)\n\n if inputs[1] is not None:\n dtype = _convert_dtype_value(inputs[1])\n else:\n dtype = self.default_dtype\n return self.full_impl(data, 1, dtype)\n\n def ones_like(self, inputs, input_types):\n data = inputs[0]\n out = _op.ones_like(data)\n\n # If the input and the output datatype is different, do a cast\n if inputs[1] is not None:\n dtype = _convert_dtype_value(inputs[1])\n else:\n dtype = self.default_dtype\n if input_types[0] != dtype:\n out = _op.cast(out, dtype)\n\n return out\n\n def zeros(self, inputs, input_types):\n data = inputs[0]\n\n import torch\n\n if not isinstance(data, (_expr.Expr, list, torch.Tensor, np.ndarray)):\n msg = \"Data type %s could not be parsed in zeros op\" % (type(data))\n raise AssertionError(msg)\n\n if inputs[1] is not None:\n dtype = _convert_dtype_value(inputs[1])\n else:\n dtype = self.default_dtype\n return self.full_impl(data, 0, dtype)\n\n def zeros_like(self, inputs, input_types):\n data = inputs[0]\n out = _op.zeros_like(data)\n\n # If the input and the output datatype is different, do a cast\n if inputs[1] is not None:\n dtype = _convert_dtype_value(inputs[1])\n else:\n dtype = self.default_dtype\n if input_types[0] not in dtype:\n out = _op.cast(out, dtype)\n\n return out\n\n def full(self, inputs, input_types):\n data = inputs[0]\n fill_value = inputs[1]\n\n import torch\n\n if not isinstance(data, (_expr.Expr, list, torch.Tensor, np.ndarray)):\n msg = \"Data type %s could not be parsed in full op\" % (type(data))\n raise AssertionError(msg)\n\n if inputs[2] is not None: # dtype given\n dtype = 
_convert_dtype_value(inputs[2])\n else:\n # if dtype is None, torch uses a global default set by torch.set_default_tensor_type()\n dtype = self.default_dtype\n\n return self.full_impl(data, fill_value, dtype)\n\n def full_like(self, inputs, input_types):\n data = inputs[0]\n fill_value = inputs[1]\n\n out = _op.full_like(data, _expr.const(fill_value))\n\n # If the input and the output datatype is different, do a cast\n if inputs[2] is not None: # dtype given\n dtype = _convert_dtype_value(inputs[2])\n else:\n # if dtype is None, torch uses a global default set by torch.set_default_tensor_type()\n dtype = self.default_dtype\n if input_types[0] not in dtype:\n out = _op.cast(out, dtype)\n\n return out\n\n def linspace(self, inputs, input_types):\n start = inputs[0]\n stop = inputs[1]\n step = inputs[2]\n\n # Find the spacing between values as step\n if step != 1:\n step = (stop - start) / (step - 1)\n stop = stop + step\n else:\n stop = start + step\n\n if inputs[3] is None:\n import torch\n\n dtype = _convert_data_type(str(torch.get_default_dtype()))\n else:\n dtype = _convert_dtype_value(inputs[3])\n\n start = _create_typed_const(start, dtype)\n stop = _create_typed_const(stop, dtype)\n step = _create_typed_const(step, dtype)\n\n return _op.transform.arange(start=start, stop=stop, step=step, dtype=dtype)\n\n def relu(self, inputs, input_types):\n data = inputs[0]\n if self.is_quantized_tensor(data):\n assert len(inputs) == 3, \"Input quant param not found in op inputs\"\n input_zero_point = _expr.const(inputs[2], dtype=\"int32\")\n return qnn_torch.quantized_relu(data, input_zero_point)\n return _op.nn.relu(data)\n\n def prelu(self, inputs, input_types):\n # Reference: https://pytorch.org/docs/stable/generated/torch.nn.PReLU.html#torch.nn.PReLU\n data = inputs[0]\n dim = self.get_dims(data)\n ndims = len(dim)\n axis = 0 if ndims == 1 else 1\n alpha = _op.broadcast_to(inputs[1], (dim[axis]))\n return _op.nn.prelu(data, alpha, axis)\n\n def leaky_relu(self, inputs, input_types):\n data = inputs[0]\n alpha = float(inputs[1])\n return _op.nn.leaky_relu(data, alpha)\n\n def elu(self, inputs, input_types):\n data = inputs[0]\n dtype = input_types[0]\n alpha = _expr.const(-float(inputs[1]), dtype=dtype)\n return alpha * _op.nn.relu(_expr.const(1, dtype=dtype) - _op.exp(data)) + _op.nn.relu(data)\n\n def celu(self, inputs, input_types):\n data = inputs[0]\n dtype = input_types[0]\n alpha = _expr.const(float(inputs[1]), dtype=dtype)\n return alpha * _op.nn.relu(\n _expr.const(1, dtype=dtype) - _op.exp(data / alpha)\n ) + _op.nn.relu(data)\n\n def gelu(self, inputs, input_types):\n data = inputs[0]\n dtype = input_types[0]\n # gelu is data * normcdf(data)\n # normcdf expressed as erf because we don't currently have that intrinsic\n # note that there is also a fastgelu variant approximating normcdf\n # with tanh and third order polynomials, but this is \"true\" gelu\n return data * (\n _expr.const(0.5, dtype=dtype)\n + _op.erf(data * _expr.const(0.5 ** 0.5, dtype=dtype)) * _expr.const(0.5, dtype=dtype)\n )\n\n def selu(self, inputs, input_types):\n data = inputs[0]\n # https://pytorch.org/docs/stable/nn.html#selu\n dtype = input_types[0]\n alpha = _expr.const(-1.6732632423543772848170429916717, dtype=dtype)\n gamma = _expr.const(1.0507009873554804934193349852946, dtype=dtype)\n return gamma * (\n alpha * _op.nn.relu(_expr.const(1.0, dtype=dtype) - _op.exp(data)) + _op.nn.relu(data)\n )\n\n def silu(self, inputs, input_types):\n data = inputs[0]\n return data * _op.tensor.sigmoid(data)\n\n def 
log_sigmoid(self, inputs, input_types):\n data = inputs[0]\n return _op.log(_op.tensor.sigmoid(data))\n\n def hard_sigmoid(self, inputs, input_types):\n def _relu6(x):\n return _op.tensor.clip(x, 0.0, 6.0)\n\n def func(x):\n return _relu6(x + _expr.const(3.0)) / _expr.const(6.0)\n\n if self.is_quantized_tensor(inputs[0]):\n input_scale = _expr.const(inputs[1])\n input_zero_point = _expr.const(inputs[2])\n # PyTorch seems to use the following output qparams, but accuracy\n # is broken if we use this.\n # TODO(masahi): Revisit this parameter choice\n #\n # Taken from src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp\n # output_scale = _expr.const(0.00390625) # 1.0 / 2^8\n # output_zero_point = _expr.const(-128)\n output_scale = input_scale\n output_zero_point = input_zero_point\n\n data = qnn.op.dequantize(inputs[0], input_scale, input_zero_point, axis=1)\n out = func(data)\n return qnn.op.quantize(out, output_scale, output_zero_point, out_dtype=\"uint8\")\n\n return func(inputs[0])\n\n def hard_swish(self, inputs, input_types):\n data = inputs[0]\n return data * self.hard_sigmoid(inputs, input_types)\n\n def adaptive_avg_pool(self, op, inputs, input_types):\n data = inputs[0]\n output_size = inputs[1]\n\n def func(x):\n return op(x, output_size=output_size)\n\n if self.is_quantized_tensor(data):\n return qnn_torch.apply_with_upcast(data, func)\n\n return func(data)\n\n def adaptive_max_pool(self, op, inputs, input_types):\n data = inputs[0]\n output_size = inputs[1]\n # returns dummy indices too\n return op(data, output_size=output_size), None\n\n @staticmethod\n def convert_const_list(data):\n if isinstance(data, list):\n for i, _ in enumerate(data):\n if isinstance(data[i], _expr.Expr):\n data[i] = int(_infer_value_simulated(data[i], {}).numpy())\n return data\n\n def maxpool_2d(self, inputs, input_types):\n data = inputs[0]\n\n pool_size = self.convert_const_list(inputs[1])\n strides = self.convert_const_list(inputs[2] if inputs[2] else pool_size)\n padding = inputs[3]\n dilation = inputs[4]\n ceil_mode = int(inputs[5])\n\n return _op.nn.max_pool2d(\n data,\n pool_size=pool_size,\n strides=strides,\n dilation=dilation,\n padding=padding,\n layout=\"NCHW\",\n ceil_mode=ceil_mode,\n )\n\n def maxpool_2d_with_indices(self, inputs, input_types):\n # returns dummy indices too\n return self.maxpool_2d(inputs, input_types), None\n\n def maxpool_1d(self, inputs, input_types):\n data = inputs[0]\n\n pool_size = inputs[1]\n strides = inputs[2] if inputs[2] else pool_size\n padding = inputs[3]\n dilation = inputs[4]\n ceil_mode = int(inputs[5])\n\n return _op.nn.max_pool1d(\n data,\n pool_size=pool_size,\n strides=strides,\n dilation=dilation,\n padding=padding,\n layout=\"NCW\",\n ceil_mode=ceil_mode,\n )\n\n def maxpool_3d(self, inputs, input_types):\n data = inputs[0]\n\n pool_size = inputs[1]\n strides = inputs[2] if inputs[2] else pool_size\n padding = inputs[3]\n dilation = inputs[4]\n ceil_mode = int(inputs[5])\n\n return _op.nn.max_pool3d(\n data,\n pool_size=pool_size,\n strides=strides,\n dilation=dilation,\n padding=padding,\n ceil_mode=ceil_mode,\n )\n\n def hardtanh(self, inputs, input_types):\n a = inputs[0]\n tanh_min = float(inputs[1])\n tanh_max = float(inputs[2])\n return _op.tensor.clip(a, tanh_min, tanh_max)\n\n def convolution(self, inputs, input_types):\n # Use transpose or normal\n use_transpose = True if inputs[6] == 1 else False\n\n data = inputs[0]\n weight = inputs[1]\n bias = inputs[2]\n strides = tuple(inputs[3])\n padding = tuple(inputs[4])\n dilation = 
tuple(inputs[5])\n\n if isinstance(weight, _expr.Expr):\n inferred_shape = self.infer_shape(weight)\n weight_shape = []\n for infer in inferred_shape:\n weight_shape.append(infer)\n else:\n msg = \"Data type %s could not be parsed in conv op\" % (type(weight))\n raise AssertionError(msg)\n\n # Transposed convolutions have IOHW layout.\n if use_transpose:\n weight_shape[0], weight_shape[1] = weight_shape[1], weight_shape[0]\n\n channels = weight_shape[0]\n groups = int(inputs[8])\n\n # Check if this is depth wise convolution\n # We need to reshape weight so that Relay could recognize this is depth wise\n # weight_shape[1] is always in_channels // groups\n # For depthwise, in_channels == groups, so weight_shape[1] == 1\n # If groups > 1 but weight_shape[1] != 1, this is group convolution\n if groups > 1 and weight_shape[1] == 1:\n channel_multiplier = channels // groups\n new_weight_shape = (groups, channel_multiplier) + tuple(weight_shape[2:])\n weight = _op.transform.reshape(weight, new_weight_shape)\n\n kernel_size = weight_shape[2:]\n use_bias = isinstance(bias, _expr.Expr)\n\n # We are trying to invoke various relay operations through a single conv_op variable.\n # However the function signatures for some operations have additional attributes so we\n # pass these in along with the standard ones.\n additional_arguments = dict()\n\n if use_transpose:\n if len(kernel_size) == 3:\n conv_op = _op.nn.conv3d_transpose\n elif len(kernel_size) == 2:\n conv_op = _op.nn.conv2d_transpose\n else:\n conv_op = _op.nn.conv1d_transpose\n output_padding = tuple(inputs[7])\n additional_arguments[\"output_padding\"] = output_padding\n\n else:\n if len(kernel_size) == 3:\n conv_op = _op.nn.conv3d\n elif len(kernel_size) == 2:\n conv_op = _op.nn.conv2d\n else:\n conv_op = _op.nn.conv1d\n\n if len(kernel_size) == 3:\n data_layout = \"NCDHW\"\n kernel_layout = \"OIDHW\"\n elif len(kernel_size) == 2:\n data_layout = \"NCHW\"\n kernel_layout = \"OIHW\"\n if use_transpose:\n # Transposed convolutions have IOHW layout.\n kernel_layout = \"IOHW\"\n else:\n data_layout = \"NCW\"\n kernel_layout = \"OIW\"\n\n # Conv1d does not currently support grouped convolution so we convert it to conv2d\n is_grouped_conv1d = False\n if groups > 1 and len(kernel_size) == 1 and not use_transpose:\n is_grouped_conv1d = True\n conv_op = _op.nn.conv2d\n kernel_size = [1] + kernel_size\n strides = (1,) + strides\n padding = (0,) + padding\n dilation = (1,) + dilation\n data = _op.expand_dims(data, axis=2)\n weight = _op.expand_dims(weight, axis=2)\n data_layout = \"NCHW\"\n kernel_layout = \"OIHW\"\n\n conv_out = conv_op(\n data,\n weight,\n strides=strides,\n padding=padding,\n dilation=dilation,\n groups=groups,\n channels=channels,\n kernel_size=kernel_size,\n data_layout=data_layout,\n kernel_layout=kernel_layout,\n out_layout=\"\",\n out_dtype=\"\",\n **additional_arguments,\n )\n if use_bias:\n res = _op.nn.bias_add(conv_out, bias)\n else:\n res = conv_out\n if is_grouped_conv1d:\n # Because we conducted grouped conv1d convolution through conv2d we must\n # squeeze the output to get the correct result.\n res = _op.squeeze(res, axis=[2])\n return res\n\n def softmax(self, inputs, input_types):\n data = inputs[0]\n axis = inputs[1]\n if isinstance(axis, str):\n axis = int(axis)\n\n return _op.nn.softmax(data, axis=axis)\n\n def threshold(self, inputs, input_types):\n data = inputs[0]\n return _op.nn.relu(data)\n\n def contiguous(self, inputs, input_types):\n return inputs[0]\n\n def batch_norm(self, inputs, input_types):\n data = 
inputs[0]\n data_type = input_types[0]\n\n channels = self.infer_shape(data)\n\n if isinstance(inputs[1], _expr.Expr) and isinstance(inputs[2], _expr.Expr):\n scale = center = True\n weight = inputs[1]\n beta = inputs[2]\n gamma = weight\n else:\n scale = center = False\n\n if not scale:\n gamma = _create_typed_const(np.ones([int(channels[1])]), data_type)\n\n if not center:\n beta = _create_typed_const(np.zeros([int(channels[1])]), data_type)\n\n moving_mean = inputs[3]\n moving_var = inputs[4]\n epsilon = float(inputs[7])\n\n return _op.nn.batch_norm(\n data,\n gamma,\n beta,\n moving_mean,\n moving_var,\n axis=1,\n epsilon=epsilon,\n center=center,\n scale=scale,\n )[0]\n\n def instance_norm(self, inputs, input_types):\n data = inputs[0]\n data_type = input_types[0]\n channels = self.infer_shape(data)\n\n if isinstance(inputs[1], _expr.Expr) and isinstance(inputs[2], _expr.Expr):\n scale = center = True\n weight = inputs[1]\n beta = inputs[2]\n gamma = weight\n else:\n scale = center = False\n\n if not scale:\n gamma = _create_typed_const(np.ones([int(channels[1])]), data_type)\n\n if not center:\n beta = _create_typed_const(np.zeros([int(channels[1])]), data_type)\n\n epsilon = float(inputs[7])\n return _op.nn.instance_norm(\n data, gamma, beta, axis=1, epsilon=epsilon, center=center, scale=scale\n )\n\n def get_dims(self, data):\n import torch\n\n if isinstance(data, _expr.Expr):\n dims = self.infer_shape(data)\n elif isinstance(data, list):\n dims = data\n elif isinstance(data, (torch.Tensor, np.ndarray)):\n dims = data.shape\n else:\n msg = \"Data type %s could not be parsed\" % type(data)\n raise AssertionError(msg)\n return dims\n\n def layer_norm(self, inputs, input_types):\n data = inputs[0]\n ndims = len(self.get_dims(inputs[1]))\n assert ndims == 1, \"Support only normalization over last one dimension.\"\n\n return _op.nn.layer_norm(\n data,\n gamma=inputs[2],\n beta=inputs[3],\n axis=-1,\n epsilon=float(inputs[4]),\n center=True,\n scale=True,\n )\n\n def group_norm(self, inputs, input_types):\n data = inputs[0]\n gamma = inputs[2]\n beta = inputs[3]\n num_groups = inputs[1]\n epsilon = float(inputs[4])\n\n return _op.nn.group_norm(\n data,\n gamma=gamma,\n beta=beta,\n num_groups=num_groups,\n axis=1,\n epsilon=epsilon,\n center=True,\n scale=True,\n )\n\n def transpose(self, inputs, input_types):\n data = inputs[0]\n\n import torch\n\n if isinstance(data, _expr.Expr):\n ndims = len(self.infer_shape_with_prelude(data))\n elif isinstance(data, list):\n ndims = data\n elif isinstance(data, (torch.Tensor, np.ndarray)):\n ndims = data.shape\n else:\n msg = \"Data type %s could not be parsed in transpose op\" % (type(data))\n raise AssertionError(msg)\n\n if isinstance(data, tvm.runtime.NDArray):\n ndims = len(data.shape)\n axes = list(range(ndims))\n\n num_inputs = len(inputs)\n\n if num_inputs == 1:\n if ndims >= 2:\n axes[-1] = ndims - 2\n axes[-2] = ndims - 1\n if not isinstance(data, _expr.Expr):\n data = _expr.const(data)\n\n elif num_inputs == 3:\n parse = lambda i: ndims * (i < 0) + i\n src, dst = [parse(int(inputs[i])) for i in [1, 2]]\n axes[src] = dst\n axes[dst] = src\n else:\n axes = inputs[1]\n return _op.transform.transpose(data, axes)\n\n def flatten(self, inputs, input_types):\n data = inputs[0]\n start = int(inputs[1])\n end = int(inputs[2])\n dshape = get_const_tuple(self.infer_shape_with_prelude(data))\n ndim = len(dshape)\n if end < 0:\n end += ndim\n new_shape = [0] * start\n\n new_shape.append(-1)\n squeeze_axes = []\n for i in range(start + 1, end + 1):\n 
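# dims in (start, end] are emitted as size-1 placeholders and squeezed away after the reshape\n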
new_shape.append(1)\n squeeze_axes.append(i)\n for _ in range(end + 1, ndim):\n new_shape.append(0)\n out = _op.reshape(data, new_shape)\n if squeeze_axes:\n out = _op.squeeze(out, axis=squeeze_axes)\n return out\n\n def addmm(self, inputs, input_types):\n input_mat = inputs[0]\n mat1 = inputs[1]\n data_type = input_types[1]\n mat2 = inputs[2]\n\n beta = inputs[3]\n alpha = inputs[4]\n\n if not isinstance(alpha, _expr.Expr) and alpha != 1:\n alpha = _create_typed_const(alpha, data_type)\n mat1 *= alpha\n\n if not isinstance(beta, _expr.Expr) and beta != 1:\n beta = _create_typed_const(beta, data_type)\n mat2 *= beta\n\n transposed_mat2 = _op.transform.transpose(mat2, axes=[1, 0])\n\n units = self.infer_shape(transposed_mat2)[0]\n dense_out = _op.nn.dense(mat1, transposed_mat2, units=units)\n\n return dense_out + input_mat\n\n def size(self, inputs, input_types):\n shape = self.infer_shape_with_prelude(inputs[0])\n axis = None\n if len(inputs) > 1:\n axis = int(inputs[1])\n\n if any(map(lambda s: isinstance(s, tvm.tir.expr.Any), shape)):\n if axis is None or isinstance(shape[axis], tvm.tir.expr.Any):\n shape_dynamic = _op.shape_of(inputs[0], dtype=\"int32\")\n if axis is not None:\n return _op.take(shape_dynamic, _expr.const(axis), 0)\n return shape_dynamic\n\n if axis is not None:\n return _expr.const(shape[axis])\n return _expr.const(shape)\n\n def numtotensor(self, inputs, input_types):\n val = inputs[0]\n dtype = input_types[0]\n\n if isinstance(val, _expr.Expr):\n return val\n\n if isinstance(val, tvm.tir.IntImm):\n val = val.__int__()\n dtype = int\n\n arr = val * np.ones([]).astype(dtype)\n return arr\n\n def tensortonum(self, inputs, input_types):\n return inputs[0]\n\n def view(self, inputs, input_types):\n data = inputs[0]\n\n if len(inputs) == 3:\n shape_inp = [inputs[1], self.infer_shape(inputs[2])[0]]\n else:\n if isinstance(inputs[1], list):\n shape_inp = inputs[1]\n else:\n shape_inp = self.infer_shape(inputs[1])\n new_shape = shape_inp\n for i, shape in enumerate(shape_inp):\n if isinstance(shape, _expr.Expr):\n val = _infer_value_simulated(shape, {})\n new_shape[i] = val.numpy().item(0)\n\n return _op.transform.reshape(data, new_shape)\n\n def reshape(self, inputs, input_types):\n data = inputs[0]\n new_shape = inputs[1]\n\n tmp_shape = []\n is_dyn = False\n for s in new_shape:\n if isinstance(s, _expr.Constant):\n tmp_shape.append(int(s.data.numpy()))\n elif isinstance(s, _expr.Expr):\n dim, success = try_infer_value(s, lambda ret: int(ret))\n tmp_shape.append(dim)\n\n if not success:\n is_dyn = True\n else:\n tmp_shape.append(s)\n\n if is_dyn:\n new_shape = []\n for i, s in enumerate(tmp_shape):\n if not isinstance(s, _expr.Expr):\n s = _expr.const(s, \"int64\")\n else:\n s = _op.cast(s, \"int64\")\n new_shape.append(_op.expand_dims(s, axis=0))\n new_shape = _op.concatenate(new_shape, axis=0)\n else:\n new_shape = tmp_shape\n return _op.transform.reshape(data, new_shape)\n\n def pixel_shuffle(self, inputs, input_types):\n data = inputs[0]\n upscale_factor = inputs[1]\n upscale_squared = upscale_factor * upscale_factor\n b, c, h, w = self.infer_shape(data)\n assert (\n c % upscale_squared == 0\n ), \"input channel should be divisible by square of upscale_factor\"\n\n ndims = len(self.infer_shape_with_prelude(data))\n axes = list(range(ndims))\n num_inputs = len(inputs)\n oc = c // upscale_squared\n oh = h * upscale_factor\n ow = w * upscale_factor\n\n new_shape = [b, oc, upscale_factor, upscale_factor, h, w]\n out_shape = [b, oc, oh, ow]\n\n data = 
_op.transform.reshape(data, new_shape)\n # The data will be transposed to\n # [b, oc, h, upscale_factor, w, upscale_factor]\n # for further reshape\n axes = [0, 1, 4, 2, 5, 3]\n data = _op.transform.transpose(data, axes)\n return _op.transform.reshape(data, out_shape)\n\n def clone(self, inputs, input_types):\n data = inputs[0]\n return _op.tensor.copy(data)\n\n def log_softmax(self, inputs, input_types):\n data = inputs[0]\n axis = int(inputs[1])\n return _op.nn.log_softmax(data, axis)\n\n def sigmoid(self, inputs, input_types):\n data = inputs[0]\n return _op.tensor.sigmoid(data)\n\n def softplus(self, inputs, input_types):\n data = inputs[0]\n dtype = input_types[0]\n beta = _expr.const(float(inputs[1]), dtype=dtype)\n return _op.log(_op.exp(inputs[0] * beta) + _expr.const(1.0, dtype=dtype)) / beta\n\n def make_avg_pool(self, dim):\n def avg_pool(inputs, input_types):\n data = inputs[0]\n\n pool_size = self.convert_const_list(inputs[1])\n strides = self.convert_const_list(inputs[2] if inputs[2] else pool_size)\n padding = inputs[3]\n ceil_mode = int(inputs[4])\n count_include_pad = int(inputs[5])\n\n def func(x):\n if dim == 1:\n return _op.nn.avg_pool1d(\n x,\n pool_size=pool_size,\n strides=strides,\n padding=padding,\n dilation=(1,),\n ceil_mode=ceil_mode,\n count_include_pad=count_include_pad,\n )\n elif dim == 2:\n return _op.nn.avg_pool2d(\n x,\n pool_size=pool_size,\n strides=strides,\n padding=padding,\n dilation=(1, 1),\n ceil_mode=ceil_mode,\n count_include_pad=count_include_pad,\n )\n elif dim == 3:\n return _op.nn.avg_pool3d(\n x,\n pool_size=pool_size,\n strides=strides,\n padding=padding,\n dilation=(1, 1, 1),\n ceil_mode=ceil_mode,\n count_include_pad=count_include_pad,\n )\n else:\n msg = \"Average Pooling dimension should be between 1 and 3\"\n raise RuntimeError(msg)\n\n if self.is_quantized_tensor(data):\n return qnn_torch.apply_with_upcast(data, func)\n\n return func(data)\n\n return avg_pool\n\n def linear(self, inputs, input_types):\n # https://pytorch.org/docs/stable/nn.functional.html#linear\n # 0 - input\n # 1 - weight\n bias = inputs[2]\n a_shape = self.infer_shape_with_prelude(inputs[0])\n b_shape = self.infer_shape_with_prelude(inputs[1])\n if len(a_shape) == 2 and len(b_shape) == 2:\n mm_out = _op.nn.dense(inputs[0], inputs[1])\n elif len(b_shape) == 1:\n mm_out = self.matmul([inputs[0], inputs[1]], input_types[:2])\n else:\n mm_out = self.matmul(\n [inputs[0], _op.transpose(inputs[1], axes=(1, 0))], input_types[:2]\n )\n if isinstance(bias, _expr.Expr):\n bias_ndims = len(self.infer_shape_with_prelude(bias))\n if bias_ndims == 1:\n return _op.nn.bias_add(mm_out, bias, axis=-1)\n mm_dtype = self.infer_type_with_prelude(mm_out).dtype\n return self.add([mm_out, bias], [mm_dtype, input_types[2]])\n return mm_out\n\n def dropout(self, inputs, input_types):\n data = inputs[0]\n rate = float(inputs[1])\n\n return _op.nn.dropout(data, rate)\n\n def make_reduce(self, name):\n def reduce(inputs, input_types):\n data = inputs[0]\n axis = None\n keepdims = False\n\n if len(inputs) > 2: # default, torch have only data, axis=None, keepdims=False\n if isinstance(inputs[1], int):\n axis = int(inputs[1])\n elif _is_int_seq(inputs[1]):\n axis = inputs[1]\n else:\n axis = list(self.infer_shape(inputs[1]))\n keepdims = bool(inputs[2])\n\n return get_relay_op(name)(data, axis=axis, keepdims=keepdims)\n\n return reduce\n\n def norm(self, inputs, input_types):\n data = inputs[0]\n dtype = input_types[0]\n axis = None\n keepdims = False\n if len(inputs) > 3:\n axis = inputs[2]\n 
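# inputs[2] / inputs[3] carry the optional dim and keepdim arguments of torch.norm\n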
keepdims = bool(inputs[3])\n\n order = inputs[1]\n if order == np.inf:\n return _op.reduce.max(_op.abs(data), axis=axis, keepdims=keepdims)\n elif order == np.NINF:\n return _op.reduce.min(_op.abs(data), axis=axis, keepdims=keepdims)\n else:\n reci_order = _expr.const(1.0 / order, dtype=dtype)\n order = _expr.const(order)\n return _op.power(\n _op.reduce.sum(_op.power(_op.abs(data), order), axis=axis, keepdims=keepdims),\n reci_order,\n )\n\n def frobenius_norm(self, inputs, input_types):\n data = inputs[0]\n axis = None\n keepdims = False\n if len(inputs) > 2:\n axis = inputs[1] if len(inputs[1]) > 0 else None\n keepdims = bool(inputs[2])\n\n return _op.sqrt(_op.reduce.sum((data * data), axis=axis, keepdims=keepdims))\n\n def std(self, inputs, input_types):\n data = inputs[0]\n if len(inputs) == 2:\n axis = None\n keepdims = False\n unbiased = bool(inputs[1])\n else:\n axis = inputs[1]\n keepdims = bool(inputs[3])\n unbiased = bool(inputs[2])\n\n return _op.reduce.std(data, axis=axis, keepdims=keepdims, unbiased=unbiased)\n\n def variance(self, inputs, input_types):\n data = inputs[0]\n if len(inputs) == 2:\n axis = None\n keepdims = False\n unbiased = bool(inputs[1])\n else:\n axis = inputs[1]\n keepdims = bool(inputs[3])\n unbiased = bool(inputs[2])\n\n return _op.reduce.variance(data, axis=axis, keepdims=keepdims, unbiased=unbiased)\n\n def mean(self, inputs, input_types):\n data = inputs[0]\n\n if inputs[1]:\n axis = inputs[1]\n else:\n axis = None\n\n if len(inputs) > 2 and inputs[2]:\n keepdims = int(inputs[2])\n else:\n keepdims = False\n if len(inputs) > 3 and inputs[3]:\n exclude = int(inputs[3])\n else:\n exclude = False\n\n def func(x):\n return _op.mean(x, axis, keepdims, exclude)\n\n if self.is_quantized_tensor(data):\n assert len(inputs) == 6, \"Input quant param not found in op inputs\"\n input_scale = _expr.const(inputs[4])\n input_zero_point = _expr.const(inputs[5])\n return qnn_torch.quantized_mean(data, input_scale, input_zero_point, func)\n\n return func(data)\n\n def chunk(self, inputs, input_types):\n data = inputs[0]\n\n num_chunks = int(inputs[1])\n axis = int(inputs[2])\n\n if isinstance(data, _expr.Expr):\n inferred_shape = self.infer_shape_with_prelude(data)\n\n shape = []\n for infer in inferred_shape:\n shape.append(infer)\n\n dim = int(shape[axis])\n\n if dim % num_chunks:\n unif_size = int(dim / (num_chunks - 1))\n else:\n unif_size = int(dim / num_chunks)\n\n indeces = []\n for i in range(unif_size, dim, unif_size):\n indeces.append(i)\n\n return _op.split(data, indeces, axis)\n\n def matmul(self, inputs, input_types):\n\n inputs_0 = inputs[0]\n inputs_1 = inputs[1]\n\n # Need to check input shape as batch matmul must be supported.\n a_shape = self.infer_shape_with_prelude(inputs_0)\n b_shape = self.infer_shape_with_prelude(inputs_1)\n\n # When performing a batch matmul, we need to properly handle N-dim shapes.\n if len(a_shape) > 2 and len(b_shape) > 2:\n # Convert a into a 3 dimensional tensors.\n need_reshape_output = False\n if len(a_shape) != 3:\n a = _op.reshape(inputs_0, [-1, a_shape[-2], a_shape[-1]])\n need_reshape_output = True\n else:\n a = inputs_0\n\n # Transpose matrix dimensions of b.\n trans_axes = list(range(len(b_shape)))\n trans_axes[-2], trans_axes[-1] = trans_axes[-1], trans_axes[-2]\n b = _op.transpose(inputs_1, trans_axes)\n\n # Convert b into a 3 dimensional tensor. 
Note that the last two dimensions\n # are transposed.\n if len(b_shape) != 3:\n b = _op.reshape(b, [-1, b_shape[-1], b_shape[-2]])\n\n # Perform a batch matmul.\n output = _op.nn.batch_matmul(a, b)\n\n # Reshape output to original dimensions.\n if need_reshape_output:\n return _op.reshape(output, [*a_shape[:-2], a_shape[-2], b_shape[-1]])\n return output\n elif len(a_shape) > 2:\n inputs_0 = _op.reshape(inputs_0, [-1, a_shape[-1]])\n\n if len(b_shape) > 2:\n trans_axes = list(range(len(b_shape)))\n trans_axes[-2], trans_axes[-1] = trans_axes[-1], trans_axes[-2]\n input_1 = _op.reshape(_op.transpose(inputs_1, trans_axes), [-1, b_shape[-2]])\n elif len(b_shape) == 2:\n input_1 = _op.transpose(inputs_1, axes=(1, 0))\n elif len(b_shape) == 1:\n input_1 = _op.expand_dims(inputs_1, 0, 1)\n\n out = _op.nn.dense(inputs_0, input_1)\n\n if len(b_shape) == 1:\n out = _op.squeeze(out, axis=[-1])\n\n # Reshape output into a N dimensional tensor when a or b dim > 2\n if len(a_shape) > 2:\n out = _op.reshape(out, [*a_shape[:-1], b_shape[-1]])\n elif len(b_shape) > 2:\n out = _op.reshape(out, [a_shape[-2], -1, b_shape[-1]])\n out = _op.reshape(\n _op.transpose(out, [1, 0, 2]), [*b_shape[:-2], a_shape[-2], b_shape[-1]]\n )\n\n return out\n\n def expand(self, inputs, input_types):\n data_in = inputs[0]\n shape = list(self.infer_shape(data_in))\n\n ndims = len(shape)\n sizes = inputs[1]\n out = data_in\n\n out_dims = len(sizes)\n if ndims < out_dims:\n num_newaxis = out_dims - ndims\n out = _op.expand_dims(out, axis=0, num_newaxis=num_newaxis)\n shape = [1] * num_newaxis + shape\n\n for i in range(out_dims):\n if sizes[i] != -1 and shape[i] == 1:\n if not isinstance(sizes[i], int):\n sizes[i] = int(_infer_value(sizes[i], {}).numpy())\n out = _op.repeat(out, sizes[i], axis=i)\n\n return out\n\n def int(self, inputs, input_types):\n if isinstance(inputs[0], _expr.Expr):\n return inputs[0]\n return int(inputs[0])\n\n def identity(self, inputs, input_types):\n return inputs[0]\n\n def none(self, inputs, input_types):\n return None\n\n def make_pad(self, mode):\n def pad(inputs, input_types):\n data = inputs[0]\n if isinstance(inputs[1], list):\n pad_list = inputs[1]\n else:\n pad_list = list(self.infer_shape(inputs[1]))\n\n # initialize paddings based on input len\n pad_len = len(self.infer_shape(data)) * 2\n paddings = [0] * pad_len\n\n if len(pad_list) >= 2:\n paddings[-1] = pad_list[1]\n paddings[-2] = pad_list[0]\n if len(pad_list) >= 4:\n paddings[-3] = pad_list[3]\n paddings[-4] = pad_list[2]\n if len(pad_list) >= 6:\n paddings[-5] = pad_list[5]\n paddings[-6] = pad_list[4]\n\n # group into tuple of 2 ints\n paddings = [paddings[i : i + 2] for i in range(0, len(paddings), 2)]\n\n const_paddings = []\n non_zero_found = False\n for pad in paddings:\n const_paddings.append([])\n for p in pad:\n if not isinstance(p, int):\n p = int(_infer_value(p, {}).numpy())\n const_paddings[-1].append(p)\n if p != 0:\n non_zero_found = True\n\n if not non_zero_found:\n return data\n elif mode == \"constant\":\n return _op.nn.pad(data, const_paddings, pad_value=inputs[2], pad_mode=mode)\n else:\n return _op.nn.pad(data, const_paddings, pad_mode=mode)\n\n return pad\n\n def clamp(self, inputs, input_types):\n data = inputs[0]\n\n def get_v(v, default_v):\n if isinstance(v, _expr.Constant):\n return float(v.data.numpy())\n if isinstance(v, _expr.Expr):\n infer_v, success = try_infer_value(v, lambda ret: float(ret))\n if success:\n return infer_v\n if v is not None:\n return v\n return default_v\n\n amin = get_v(inputs[1], 
np.finfo(np.float32).min)\n amax = get_v(inputs[2], np.finfo(np.float32).max)\n return _op.clip(data, amin, amax)\n\n def to(self, inputs, input_types):\n data = inputs[0]\n dtype = inputs[1] if inputs[1] is not None and not isinstance(inputs[1], str) else inputs[2]\n # special handling for aten::to(data, 6, _, _, _) case\n # 6 means dtype = float\n # this happens when converting upsampling with scale factor\n cast_map = {\n 5: \"float16\",\n 6: \"float32\",\n 7: \"float64\",\n 3: \"int32\",\n 4: \"int64\",\n }\n\n cast_func = {5: float, 6: float, 7: float, 3: int, 4: int}\n\n ret = data\n if isinstance(data, _expr.Expr):\n actual_dtype = str(self.infer_type(data).dtype)\n if dtype in cast_map and cast_map[dtype] != actual_dtype:\n ret = _op.cast(data, cast_map[dtype])\n elif dtype in cast_map:\n ret = cast_func[dtype](data)\n\n return ret\n\n def get_upsample_out_size(self, inputs, method):\n # This assumes a static shape\n out_size = []\n if inputs[1] is not None:\n for size in inputs[1]:\n if not isinstance(size, int):\n out_size.append(int(_infer_value(size, {}).numpy()))\n else:\n out_size.append(size)\n else:\n scale_index = 3 if method != \"nearest_neighbor\" else 2\n scales = inputs[scale_index]\n assert scales is not None, \"neither out size nor scale provided\"\n assert isinstance(scales, list)\n ishape = self.infer_shape(inputs[0])\n for i, scale in enumerate(scales):\n out_size.append(int(math.floor(float(ishape[2 + i]) * scale)))\n\n return out_size\n\n def make_upsample(self, method):\n def upsample(inputs, input_types):\n data = inputs[0]\n out_size = self.get_upsample_out_size(inputs, method)\n\n if len(inputs) > 2 and method != \"nearest_neighbor\":\n align_corners = inputs[2]\n else:\n align_corners = False\n\n if method == \"nearest_neighbor\":\n coord_trans = \"asymmetric\"\n elif align_corners:\n coord_trans = \"align_corners\"\n else:\n coord_trans = \"half_pixel\"\n\n def func(x):\n return _op.image.resize2d(\n x, out_size, None, \"NCHW\", method, coord_trans, cubic_alpha=-0.75\n )\n\n if self.is_quantized_tensor(data):\n # input qparams are manually appended by us\n assert isinstance(inputs[-2], float)\n assert isinstance(inputs[-1], int)\n input_scale = _expr.const(inputs[-2])\n input_zero_point = _expr.const(inputs[-1])\n return qnn_torch.quantized_upsample(data, input_scale, input_zero_point, func)\n\n return func(data)\n\n return upsample\n\n def make_upsample3d(self, method):\n def upsample3d(inputs, input_types):\n data = inputs[0]\n out_size = self.get_upsample_out_size(inputs, method)\n\n if len(inputs) > 2 and method == \"linear\":\n align_corners = inputs[2]\n else:\n align_corners = False\n\n if method == \"nearest_neighbor\":\n coord_trans = \"asymmetric\"\n elif align_corners:\n coord_trans = \"align_corners\"\n else:\n coord_trans = \"half_pixel\"\n\n return _op.image.resize3d(data, out_size, None, \"NCDHW\", method, coord_trans)\n\n return upsample3d\n\n def expand_as(self, inputs, input_types):\n target = inputs[1]\n t0 = self.infer_type(inputs[0]).dtype\n t1 = self.infer_type(inputs[1]).dtype\n if str(t0) != str(t1):\n target = _op.cast(target, t0)\n return _op.broadcast_to_like(inputs[0], target)\n\n def Bool(self, inputs, input_types):\n assert len(inputs) == 1\n return inputs[0]\n\n def Float(self, inputs, input_types):\n assert len(inputs) == 1\n return _op.cast(inputs[0], \"float32\")\n\n def bitwise_not(self, inputs, input_types):\n data = inputs[0]\n # The input tensor must be of integral or Boolean types.\n # For bool tensors, it computes 
the logical NOT\n if input_types[0] == \"bool\":\n out = _op.logical_not(_op.cast(data, \"bool\"))\n else:\n out = _op.bitwise_not(_op.cast(data, \"int\"))\n\n return out\n\n def bitwise_xor(self, inputs, input_types):\n lhs = inputs[0]\n rhs = inputs[1]\n lhs = _op.cast(lhs, \"bool\") if input_types[0] == \"bool\" else _op.cast(lhs, \"int\")\n rhs = _op.cast(rhs, \"bool\") if input_types[1] == \"bool\" else _op.cast(rhs, \"int\")\n\n return _op.bitwise_xor(lhs, rhs)\n\n def logical_not(self, inputs, input_types):\n data = _wrap_const(inputs[0])\n return _op.logical_not(_op.cast(data, \"bool\"))\n\n def logical_xor(self, inputs, input_types):\n lhs = _op.cast(inputs[0], \"bool\")\n rhs = _op.cast(inputs[1], \"bool\")\n\n return _op.logical_xor(lhs, rhs)\n\n def list_getitem(self, inputs, input_types):\n return self.prelude.nth(inputs[0], _wrap_const(inputs[1]))\n\n def list_len(self, inputs, input_types):\n return self.prelude.length(inputs[0])\n\n def type_as(self, inputs, input_types):\n assert len(inputs) == 2\n assert len(input_types) == 2\n return _op.cast(inputs[0], input_types[1])\n\n def gather(self, inputs, input_types):\n data = inputs[0]\n axis = inputs[1]\n indices = inputs[2]\n\n return _op.gather(data, axis, indices)\n\n def add(self, inputs, input_types):\n # add_ is overloaded for tensor add and list concat\n if input_types[0] == \"ListType\":\n return self.prelude.concat(inputs[0], inputs[1])\n return self.make_elemwise(\"add\")(inputs, input_types)\n\n def tensor_array_stack(self, inputs, input_types):\n dim = inputs[1]\n assert dim == 0, \"stacking on a dynamic tensor list only supported on a first axis\"\n tensor_array, shape = self.convert_to_tensor_array(inputs[0])\n\n stacked_shape = (Any(),) + shape\n stack = self.prelude.get_global_var_static(\"tensor_array_stack\", \"float32\", shape)\n stacked = stack(tensor_array)\n\n static_tensor_array_ops = StaticTensorArrayOps(self.prelude, \"float32\", stacked_shape)\n static_tensor_array_ops.register()\n get_tensor = self.prelude.get_global_var_static(\"tensor_get_data\", \"float32\", stacked_shape)\n return get_tensor(stacked)\n\n def stack(self, inputs, input_types):\n if isinstance(inputs[0], list):\n # a static python list of tensors\n dim = inputs[1]\n return _op.stack(inputs[0], dim)\n else:\n # List ADT case\n assert isinstance(inputs[0], _expr.Expr)\n ty = self.infer_type_with_prelude(inputs[0])\n list_ty = self.prelude.mod.get_global_type_var(\"List\")\n msg = \"The input list is expected to be List ADT\"\n assert isinstance(ty, tvm.ir.TypeCall) and ty.func == list_ty, msg\n return self.tensor_array_stack(inputs, input_types)\n\n def rsub(self, inputs, input_types):\n data0, data1 = self.pytorch_promote_types(inputs[:2], input_types[:2])\n\n # TODO (t-vi): should this also be part of the type promotion?\n alpha = _expr.const(float(inputs[2]))\n\n # note: rsub means data0 and data1 swap places\n return get_relay_op(\"subtract\")(data1, alpha * data0)\n\n def embedding(self, inputs, input_types):\n weight = inputs[0]\n indices = inputs[1]\n\n return _op.take(weight, indices.astype(\"int32\"), axis=0)\n\n def one_hot(self, inputs, input_types):\n indices = inputs[0].astype(\"int32\")\n num_classes = inputs[1]\n if num_classes == -1:\n msg = \"Inferring the number of classes is not yet supported.\"\n raise NotImplementedError(msg)\n\n dtype = \"int32\"\n on_value = tvm.relay.const(1.0, dtype)\n off_value = tvm.relay.const(0.0, dtype)\n\n return _op.one_hot(indices, on_value, off_value, num_classes, -1, dtype)\n\n def 
index(self, inputs, input_types):\n data = inputs[0]\n indices = inputs[1]\n return _op.adv_index([data] + indices)\n\n def meshgrid(self, inputs, input_types):\n data = inputs[0]\n return _op.meshgrid(data, indexing=\"ij\")\n\n def nms(self, inputs, input_types):\n boxes = inputs[0]\n scores = inputs[1]\n iou_threshold = inputs[2]\n\n # TVM NMS assumes score > 0\n scores = scores - _op.min(scores) + _op.const(1.0)\n\n num_boxes = _op.shape_of(scores)\n # PyTorch NMS doesn't have score_threshold, so no need to run get_valid_count\n indices = _op.transform.arange(_op.squeeze(num_boxes), dtype=\"int32\")\n indices = _op.expand_dims(indices, 0, 1)\n\n # Generate data with shape (1, num_anchors, 5)\n scores = AttrCvt(op_name=\"expand_dims\", extras={\"axis\": -1, \"num_newaxis\": 1})([scores], {})\n data = _op.concatenate([scores, boxes], -1)\n data = _op.expand_dims(data, 0, 1)\n\n # Perform Non-Maximum Suppression,\n # PyTorch NMS doesn't have parameter top_k and max_output_size\n score_index = 0\n top_k = max_out_size = -1\n nms_ret = get_relay_op(\"non_max_suppression\")(\n data=data,\n valid_count=num_boxes,\n indices=indices,\n max_output_size=max_out_size,\n iou_threshold=iou_threshold,\n force_suppress=True,\n top_k=top_k,\n coord_start=1,\n score_index=score_index,\n id_index=-1,\n return_indices=True,\n invalid_to_bottom=False,\n )\n\n # squeeze the two outputs of nms for strided_slice\n size = get_relay_op(\"squeeze\")(nms_ret[1], axis=[1])\n data_slice = get_relay_op(\"squeeze\")(nms_ret[0], axis=[0])\n\n # strided slice to get the dynamic result\n ret = get_relay_op(\"strided_slice\")(\n data_slice, begin=_expr.const([0]), end=size, slice_mode=\"size\"\n )\n # in torchvision, indices from nms are int64\n return _op.cast(ret, \"int64\")\n\n def logsumexp(self, inputs, input_types):\n data = self.pytorch_promote_types(inputs[:1], input_types[:1])\n dim_list = inputs[1]\n keepdim = inputs[2] if len(inputs) > 2 else False\n # dim is output of prim::ListConstruct, even if it is int in python code\n assert isinstance(dim_list, list), \"dim is expected to be a list\"\n return _op.logsumexp(data[0], axis=dim_list, keepdims=keepdim)\n\n def roi_align(self, inputs, input_types):\n data = inputs[0]\n boxes = inputs[1]\n\n output_size = (inputs[3], inputs[4])\n spatial_scale = inputs[2]\n sample_ratio = inputs[5]\n aligned = False if len(inputs) < 7 else inputs[6]\n\n if aligned:\n boxes -= _expr.const(0.5 / spatial_scale)\n\n return _op.vision.roi_align(data, boxes, output_size, spatial_scale, sample_ratio)\n\n def deform_conv2d(self, inputs, input_types):\n data = inputs[0]\n weight = inputs[1]\n offset = inputs[2]\n\n if len(inputs) > 12:\n strides_offset = 5\n bias = inputs[4]\n logging.warning(\"mask argument in deformable conv2d is not supported and ignored\")\n else:\n strides_offset = 4\n bias = inputs[3]\n\n strides = (inputs[strides_offset], inputs[strides_offset + 1])\n padding = (inputs[strides_offset + 2], inputs[strides_offset + 3])\n dilation = (inputs[strides_offset + 4], inputs[strides_offset + 5])\n groups = inputs[strides_offset + 6]\n deformable_groups = inputs[strides_offset + 7]\n weight_shape = self.infer_shape(weight)\n output_channels = weight_shape[0]\n kernel_size = (weight_shape[2], weight_shape[3])\n\n conv_out = _op.nn.deformable_conv2d(\n data,\n offset,\n weight,\n strides,\n padding,\n dilation,\n deformable_groups,\n groups,\n output_channels,\n kernel_size,\n )\n\n return _op.nn.bias_add(conv_out, bias)\n\n def unbind(self, inputs, input_types):\n data = 
inputs[0]\n axis = int(inputs[1])\n return unbind(data, axis)\n\n def shape_as_tensor(self, inputs, input_types):\n is_symbolic_shape = False\n input_shape = self.infer_shape(inputs[0], self.prelude.mod)\n for axis in input_shape:\n if not isinstance(axis, (int, tvm.tir.IntImm)):\n is_symbolic_shape = True\n break\n\n if is_symbolic_shape:\n ret = _op.shape_of(inputs[0], dtype=\"int64\")\n else:\n ret = _expr.const(np.array(input_shape), dtype=\"int64\")\n\n return ret\n\n def logical_and(self, inputs, input_types):\n lhs = _op.cast(inputs[0], \"bool\")\n rhs = _op.cast(inputs[1], \"bool\")\n\n return _op.logical_and(lhs, rhs)\n\n def nonzero(self, inputs, input_types, is_numpy_style=False):\n data = inputs[0]\n ret = _op.transform.argwhere(data)\n if is_numpy_style or (len(inputs) > 1 and inputs[1]):\n return unbind(ret, 1)\n return ret\n\n def nonzero_numpy(self, inputs, input_types):\n return self.nonzero(inputs, input_types, is_numpy_style=False)\n\n def scatter(self, inputs, input_types):\n data = inputs[0]\n axis = int(inputs[1])\n index = inputs[2]\n src = inputs[3]\n return _op.transform.scatter(data, index, src, axis)\n\n def index_put(self, inputs, input_types):\n in_tensor = inputs[0]\n indices = inputs[1]\n values = inputs[2]\n accumulate = inputs[3]\n if not accumulate:\n mode = \"update\"\n else:\n mode = \"add\"\n # Combine array of index tensors into one index tensor with shape (N,_)\n index_tensor = _op.stack(indices, axis=0)\n return _op.transform.scatter_nd(in_tensor, index_tensor, values, mode)\n\n def scalar_tensor(self, inputs, input_types):\n data = inputs[0]\n cast_map = {\n 6: \"float32\",\n 7: \"float64\",\n 3: \"int32\",\n 4: \"int64\",\n }\n type_key = inputs[1]\n if isinstance(data, _expr.Constant):\n data = data.data.numpy().tolist()\n return _expr.const(data, cast_map[type_key])\n\n def interpolate(self, inputs, input_types):\n if isinstance(inputs[1], _expr.Expr):\n out_size = inputs[1]\n elif isinstance(inputs[1], list):\n out_size = []\n for i in [0, 1]:\n size, _ = try_infer_value(\n inputs[1][i],\n lambda ret: ret.astype(np.int),\n lambda: _op.expand_dims(inputs[1][i], axis=0),\n )\n out_size.append(size)\n out_size = _op.concatenate(out_size, axis=0)\n\n data = inputs[0]\n align_corners = inputs[4]\n method = inputs[3]\n if method.startswith(\"nearest\"):\n method = \"nearest_neighbor\"\n elif method[0:2] == \"bi\":\n method = method[2:]\n\n if method == \"nearest_neighbor\":\n coord_trans = \"asymmetric\"\n elif align_corners:\n coord_trans = \"align_corners\"\n else:\n coord_trans = \"half_pixel\"\n\n return _op.image.resize2d(\n data, out_size, None, \"NCHW\", method, coord_trans, cubic_alpha=-0.75\n )\n\n def numel(self, inputs, input_types):\n return _op.ndarray_size(inputs[0])\n\n def empty(self, inputs, input_types):\n shape = inputs[0]\n return _op.zeros(shape, _convert_dtype_value(inputs[1]))\n\n def bincount(self, inputs, input_types):\n data = inputs[0]\n weights = inputs[1]\n input_type = self.infer_type(data).dtype\n if input_type == \"int64\":\n logger.warning(\n \"Casting an int64 input to int32, since we do not have int64 atomic add\"\n \"needed for bincount yet.\"\n )\n data = _op.cast(data, \"int32\")\n maximum = _op.max(data)\n dim = maximum + _expr.const(1, dtype=\"int32\")\n if weights:\n weight_type = self.infer_type(weights)\n out_dtype = weight_type.dtype\n updates = weights\n else:\n out_dtype = \"int32\"\n updates = _op.ones_like(data)\n\n counts = _op.zeros(_op.reshape(dim, [1]), out_dtype)\n out = _op.scatter_add(counts, 
data, updates, axis=0)\n if input_type == \"int32\":\n # Torch always outputs int64 results for bincount\n return _op.cast(out, \"int64\")\n return out\n\n def scatter_add(self, inputs, input_types):\n data = inputs[0]\n axis = inputs[1]\n index = inputs[2]\n src = inputs[3]\n return _op.scatter_add(data, index, src, axis=axis)\n\n def cumsum(self, inputs, input_types):\n data = inputs[0]\n dim = inputs[1]\n dtype = inputs[2]\n\n if inputs[2] is not None:\n dtype = _convert_dtype_value(inputs[2])\n\n return _op.cumsum(data, axis=dim, dtype=dtype)\n\n def masked_fill(self, inputs, input_types):\n mask = inputs[1]\n value = _op.cast(_wrap_const(inputs[2]), input_types[0])\n return _op.where(mask, value, inputs[0])\n\n def masked_select(self, inputs, input_types):\n mask = inputs[1]\n indices = self.nonzero([mask], input_types, is_numpy_style=True)\n return _op.adv_index([inputs[0]] + [indices[i] for i in range(indices.size)])\n\n def sort(self, inputs, input_types):\n data = inputs[0]\n dim = inputs[1]\n is_descending = inputs[2]\n # pytorch sort returns both sorted indices and values\n indices = _op.argsort(data, dim, not is_descending)\n return _op.gather(data, dim, indices), indices\n\n def argsort(self, inputs, input_types):\n data = inputs[0]\n dim = inputs[1]\n is_descending = inputs[2]\n return _op.argsort(data, dim, not is_descending)\n\n def is_floating_point(self, inputs, input_types):\n assert len(inputs) == 1\n\n if isinstance(inputs[0], _expr.Expr):\n input_type = self.infer_type(inputs[0]).dtype\n else:\n input_type = input_types[0]\n\n is_float = input_type in [\"float32\", \"float64\", \"float16\", \"bfloat16\"]\n return _expr.const(is_float)\n\n def unique(self, inputs, input_types):\n assert len(inputs) == 4\n [data, is_sorted, return_inverse, return_counts] = inputs\n if not is_sorted:\n logger.warning(\"TVM always assumes sorted=True for torch.unique\")\n is_sorted = True\n if return_counts:\n [unique, indices, inverse_indices, num_uniq, counts] = _op.unique(\n data, is_sorted=is_sorted, return_counts=True\n )\n unique_sliced = _op.strided_slice(unique, begin=[0], end=num_uniq, slice_mode=\"size\")\n counts_sliced = _op.strided_slice(counts, begin=[0], end=num_uniq, slice_mode=\"size\")\n return (unique_sliced, inverse_indices, counts_sliced)\n else:\n [unique, indices, inverse_indices, num_uniq] = _op.unique(\n data, is_sorted=is_sorted, return_counts=False\n )\n unique_sliced = _op.strided_slice(unique, begin=[0], end=num_uniq, slice_mode=\"size\")\n return (unique_sliced, inverse_indices)\n\n def nll_loss(self, inputs, input_types):\n assert len(inputs) == 5\n [predictions, targets, weights, reduction, ignore_index] = inputs\n num_class = self.infer_shape(predictions)[1]\n if reduction == 0:\n reduction = \"none\"\n elif reduction == 1:\n reduction = \"mean\"\n else:\n reduction = \"sum\"\n if weights is None:\n weights = _op.full(_expr.const(1), (num_class,), dtype=input_types[0])\n return _op.nn.nll_loss(predictions, targets, weights, reduction, ignore_index)\n\n def flip(self, inputs, input_types):\n data = inputs[0]\n axis = inputs[1]\n return _op.transform.reverse(data, axis=axis[0])\n\n def bidir_gru_cell(\n self,\n input_seqs,\n weights_dicts,\n ):\n \"\"\"\n Bidirectional GRU cell\n \"\"\"\n seq_len = len(input_seqs)\n forward_outputs, fw_H_t = gru_cell(\n input_seqs,\n **weights_dicts[0],\n )\n\n reverse_outputs, rev_H_t = gru_cell(\n input_seqs,\n **weights_dicts[1],\n backwards=True,\n )\n\n final_outputs = []\n for i in range(seq_len):\n 
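# reverse-direction outputs run backwards in time, so index seq_len - 1 - i pairs them with the matching forward step\n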
final_outputs.append(\n _op.concatenate([forward_outputs[i], reverse_outputs[seq_len - 1 - i]], axis=-1)\n )\n\n return final_outputs, _op.stack([fw_H_t, rev_H_t], axis=0)\n\n def gru_layers(self, input_data, layer_weights_dicts, bidirectional, dropout_p=0.0):\n \"\"\"\n Methods iterates layers for Stacked GRU\n \"\"\"\n layers_num = len(layer_weights_dicts)\n # split input sequence to samples set\n input_seqs = unbind(input_data, 0) # [seq_num, (batch, feature_size)]\n output_hiddens = []\n for i in range(layers_num):\n weights_dicts = layer_weights_dicts[i]\n # input_seqs shape = [seq_num, (batch, feature_size)] or\n # [seq_num, (batch, 2*feature_size)] for bidirectional\n if bidirectional:\n input_seqs, H_t = self.bidir_gru_cell(input_seqs, weights_dicts)\n else:\n input_seqs, H_t = gru_cell(input_seqs, **weights_dicts[0])\n\n output_hiddens.append(H_t)\n\n # TODO (vvchernov): in pytorch implementation train is also checked\n # see https://github.com/pytorch/pytorch/blob/70c8daf43946b53af6493d058899ef952d27d339\n # /aten/src/ATen/native/RNN.cpp#L1054\n if dropout_p != 0 and i < layers_num - 1:\n # for input in input_seqs:\n # input = _op.dropout(input, dropout_p)\n raise NotImplementedError(\"Dropout for GRU has not been supported yet!\")\n\n return _op.stack(input_seqs, 0), _op.stack(output_hiddens, 0)\n\n def gru(self, inputs, input_types):\n \"\"\"\n Description of GRU in pytorch:\n https://pytorch.org/docs/stable/generated/torch.nn.GRU.html?highlight=gru#torch.nn.GRU\n \"\"\"\n # TODO (vvchernov): support dropout\n assert len(inputs) == 9, \"Input of size 9 is expected\"\n # Unpack inputs, note that if optional and not provided then value will be None.\n _X = inputs[0]\n # _X shape (seq_num, batch, feature_size) or (batch, seq_num, feature_size)\n\n hidden_state = inputs[1]\n # Hidden state shape (hidden_layers_num, batch, hidden_size)\n\n _weights = inputs[2]\n # Wi layer[0] shape (3 * hidden_size, feature_size)\n # Wh layer[0] shape (3 * hidden_size, hidden_size)\n # Bi layer[0] shape (3 * hidden_size)\n # Bh layer[0] shape (3 * hidden_size)\n\n # Wi layer[>0] shape (3 * hidden_size, hidden_size * num_directions)\n # Wh layer[>0] shape (3 * hidden_size, hidden_size)\n # Bi layer[>0] shape (3 * hidden_size)\n # Bh layer[>0] shape (3 * hidden_size)\n\n # Scalar inputs\n has_biases = inputs[3]\n num_layers = inputs[4]\n dropout_p = inputs[5] # dropout probability, if 0.0 it means there is no dropout\n # train = inputs[6]\n bidirectional = inputs[7]\n batch_first = inputs[8]\n\n num_directions = 1\n if bidirectional:\n num_directions = 2\n\n rsd = len(_weights) % num_layers\n assert rsd == 0, \"The number of weights must be a multiple of the number of layers!\"\n rsd = (len(_weights) / num_layers) % num_directions\n assert (\n rsd == 0\n ), \"The number of weights in layer must be a multiple of the number of directions!\"\n\n weights_num = int(len(_weights) / num_layers / num_directions)\n if has_biases:\n assert weights_num == 4, \"The weights number in layer is expected equal to 4\"\n else:\n assert weights_num == 2, \"The weights number in layer is expected equal to 2\"\n\n X = _op.transpose(_X, (1, 0, 2)) if batch_first else _X\n # TODO (vvchernov): Which data type should be used? 
from input or weights?\n # Instead of it _infer_type(X).checked_type.dtype can be used\n X_dtype = input_types[0]\n X_shape = _infer_shape(X) # (seq_num, batch, feature_size)\n\n hidden_size = int(_infer_shape(_weights[0])[0] / 3)\n batch_size = X_shape[1]\n\n # Initialize hidden states if not provided.\n layers_h = []\n hidden_layers_num = num_directions * num_layers\n if hidden_state is None:\n h_0 = _op.zeros((batch_size, hidden_size), X_dtype)\n for i in range(hidden_layers_num):\n layers_h.append(h_0)\n else:\n layers_h = unbind(hidden_state, 0)\n\n layer_weights_dicts = []\n k = 0 # layer counter\n if has_biases:\n names = [\"hidden_state\", \"w_inp\", \"w_hid\", \"b_inp\", \"b_hid\"]\n if bidirectional:\n rsd = len(_weights) % (2 * weights_num)\n assert rsd == 0, \"got an incorrect number of GRU weights\"\n for i in range(0, len(_weights), 2 * weights_num):\n fw_tensors = [layers_h[2 * k], *_weights[i : i + 4]]\n fw_weights_dict = dict(zip(names, fw_tensors))\n j = i + weights_num\n rev_tensors = [layers_h[2 * k + 1], *_weights[j : j + 4]]\n rev_weights_dict = dict(zip(names, rev_tensors))\n layer_weights_dicts.append([fw_weights_dict, rev_weights_dict])\n k += 1\n else:\n assert len(_weights) % weights_num == 0, \"got an incorrect number of GRU weights\"\n for i in range(0, len(_weights), weights_num):\n fw_tensors = [layers_h[k], *_weights[i : i + 4]]\n fw_weights_dict = dict(zip(names, fw_tensors))\n layer_weights_dicts.append([fw_weights_dict])\n k += 1\n else:\n names = [\"hidden_state\", \"w_inp\", \"w_hid\"]\n if bidirectional:\n rsd = len(_weights) % (2 * weights_num)\n assert rsd == 0, \"got an incorrect number of GRU weights\"\n for i in range(0, len(_weights), 2 * weights_num):\n fw_tensors = [layers_h[2 * k], *_weights[i : i + 2]]\n fw_weights_dict = dict(zip(names, fw_tensors))\n j = i + weights_num\n rev_tensors = [layers_h[2 * k + 1], *_weights[j : j + 2]]\n rev_weights_dict = dict(zip(names, rev_tensors))\n layer_weights_dicts.append([fw_weights_dict, rev_weights_dict])\n k += 1\n else:\n assert len(_weights) % weights_num == 0, \"got an incorrect number of GRU weights\"\n for i in range(0, len(_weights), weights_num):\n fw_tensors = [layers_h[k], *_weights[i : i + 2]]\n fw_weights_dict = dict(zip(names, fw_tensors))\n layer_weights_dicts.append([fw_weights_dict])\n k += 1\n assert (\n len(layer_weights_dicts) == num_layers and k == num_layers\n ), \"For stacked GRU number of weights sets should be the same as number of layers!\"\n\n output, out_hidden_state = self.gru_layers(\n X,\n layer_weights_dicts,\n bidirectional,\n dropout_p=dropout_p,\n )\n\n # output shape = (seq_num, batch, hidden_size) or\n # (seq_num, batch, 2*feature_size) for bidirectional\n if batch_first:\n output = _op.transpose(output, (1, 0, 2))\n\n return (output, out_hidden_state)\n\n def bidir_lstm_cell(\n self,\n input_seqs,\n weights_dicts,\n ):\n \"\"\"\n Bidirectional LSTM cell\n \"\"\"\n seq_len = len(input_seqs)\n forward_outputs, fw_H_t, fw_C_t = lstm_cell(\n input_seqs,\n **weights_dicts[0],\n )\n\n reverse_outputs, rev_H_t, rev_C_t = lstm_cell(\n input_seqs,\n **weights_dicts[1],\n backwards=True,\n )\n\n final_outputs = []\n for i in range(seq_len):\n final_outputs.append(\n _op.concatenate([forward_outputs[i], reverse_outputs[seq_len - 1 - i]], axis=-1)\n )\n\n return final_outputs, (fw_H_t, fw_C_t), (rev_H_t, rev_C_t)\n\n def lstm_layers(self, input_data, layer_weights_dicts, bidirectional, dtype, dropout_p=0.0):\n \"\"\"\n Methods iterates layers for Stacked LSTM\n \"\"\"\n 
layers_num = len(layer_weights_dicts)\n # split input sequence to samples set\n input_seqs = unbind(input_data, 0) # [seq_num, (batch, feature_size)]\n output_hiddens = []\n for i in range(layers_num):\n weights_dicts = layer_weights_dicts[i]\n # input_seqs shape = [seq_num, (batch, feature_size)] or\n # [seq_num, (batch, 2*feature_size)] for bidirectional\n if bidirectional:\n input_seqs, H_t, C_t = self.bidir_lstm_cell(input_seqs, weights_dicts)\n else:\n input_seqs, H_t, C_t = lstm_cell(input_seqs, **weights_dicts[0])\n\n output_hiddens.append((H_t, C_t))\n\n # TODO (vvchernov): in pytorch implementation train is also checked\n # see https://github.com/pytorch/pytorch/blob/70c8daf43946b53af6493d058899ef952d27d339\n # /aten/src/ATen/native/RNN.cpp#L1054\n if dropout_p != 0 and i < layers_num - 1:\n # for input in input_seqs:\n # input = _op.dropout(input, dropout_p)\n raise NotImplementedError(\"Dropout for LSTM has not been supported yet!\")\n final_hiddens = []\n if bidirectional:\n for output_hidden in output_hiddens:\n final_hiddens.append(output_hidden[0])\n final_hiddens.append(output_hidden[1])\n else:\n final_hiddens = output_hiddens\n\n return _op.stack(input_seqs, 0), final_hiddens\n\n def lstm(self, inputs, input_types):\n \"\"\"\n Description of LSTM in pytorch:https://pytorch.org/docs/stable/generated/torch.nn.LSTM.html\n Native implementation for torch version less than 1.8.0 (projection is unsupported):\n https://github.com/pytorch/pytorch/blob/70c8daf43946b53af6493d058899ef952d27d339/aten/ \\\n src/ATen/native/RNN.cpp#L1396\n Native implementation for torch version from 1.8.0 and higher (projection is supported):\n https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/RNN.cpp#L1483\n \"\"\"\n # TODO (vvchernov): support dropout\n assert len(inputs) == 9, \"Input of size 9 is expected\"\n # Unpack inputs, note that if optional and not provided then value will be None.\n _X = inputs[0]\n # _X shape (seq_num, batch, feature_size) or (batch, seq_num, feature_size)\n\n hidden_states = inputs[1]\n assert len(hidden_states) == 2, \"lstm expects two hidden states\"\n h_0 = hidden_states[0]\n c_0 = hidden_states[1]\n # H0 shape (hidden_layers_num, batch, proj_size) if projection\n # else (hidden_layers_num, batch, hidden_size)\n # C0 shape (hidden_layers_num, batch, hidden_size)\n\n _weights = inputs[2]\n # If no projection\n # Wi layer[0] shape (4 * hidden_size, feature_size)\n # Wh layer[0] shape (4 * hidden_size, hidden_size)\n # Bi layer[0] shape (4 * hidden_size)\n # Bh layer[0] shape (4 * hidden_size)\n\n # Wi layer[>0] shape (4 * hidden_size, hidden_size * num_directions)\n # Wh layer[>0] shape (4 * hidden_size, hidden_size)\n # Bi layer[>0] shape (4 * hidden_size)\n # Bh layer[>0] shape (4 * hidden_size)\n\n # If projection\n # Wi layer[0] shape (4 * hidden_size, feature_size)\n # Wh layer[0] shape (4 * hidden_size, proj_size)\n # Bi layer[0] shape (4 * hidden_size)\n # Bh layer[0] shape (4 * hidden_size)\n # P layer[0] shape (proj_size, hidden_size)\n\n # Wi layer[>0] shape (4 * hidden_size, proj_size * num_directions)\n # Wh layer[>0] shape (4 * hidden_size, proj_size)\n # Bi layer[>0] shape (4 * hidden_size)\n # Bh layer[>0] shape (4 * hidden_size)\n # P layer[>0] shape (proj_size, hidden_size)\n\n # Scalar inputs\n has_biases = inputs[3]\n num_layers = inputs[4]\n dropout_p = inputs[5] # dropout probability, if 0.0 it means there is no dropout\n # train = inputs[6]\n bidirectional = inputs[7]\n batch_first = inputs[8]\n\n num_directions = 1\n if 
bidirectional:\n num_directions = 2\n\n rsd = len(_weights) % num_layers\n assert rsd == 0, \"The number of weights must be a multiple of the number of layers!\"\n rsd = (len(_weights) / num_layers) % num_directions\n assert (\n rsd == 0\n ), \"The number of weights in layer must be a multiple of the number of directions!\"\n has_proj = False\n proj_size = 0\n weights_num = int(len(_weights) / num_layers / num_directions)\n if has_biases:\n if weights_num == 5:\n has_proj = True\n proj_size = _infer_shape(_weights[4])[0]\n else:\n assert weights_num == 4, \"The weights number in layer is expected equal to 4\"\n else:\n if weights_num == 3:\n has_proj = True\n proj_size = _infer_shape(_weights[2])[0]\n else:\n assert weights_num == 2, \"The weights number in layer is expected equal to 2\"\n\n X = _op.transpose(_X, (1, 0, 2)) if batch_first else _X\n # TODO (vvchernov): Which data type should be used? from input or weights?\n # Instead of it _infer_type(X).checked_type.dtype can be used\n X_dtype = input_types[0]\n X_shape = _infer_shape(X) # (seq_num, batch, feature_size)\n\n hidden_size = _infer_shape(_weights[0])[0] / 4\n batch_size = X_shape[1]\n\n # Initialize hidden states if not provided.\n layers_h = []\n layers_c = []\n hidden_layers_num = num_directions * num_layers\n if h_0 is None:\n if has_proj:\n h_0 = _op.zeros((batch_size, proj_size), X_dtype)\n else:\n h_0 = _op.zeros((batch_size, hidden_size), X_dtype)\n for i in range(hidden_layers_num):\n layers_h.append(h_0)\n else:\n layers_h = unbind(h_0, 0)\n if c_0 is None:\n c_0 = _op.zeros((batch_size, hidden_size), X_dtype)\n for i in range(hidden_layers_num):\n layers_c.append(c_0)\n else:\n layers_c = unbind(c_0, 0)\n\n layer_weights_dicts = []\n k = 0 # layer counter\n if has_biases:\n names = [\"hidden_state\", \"cell_state\", \"w_inp\", \"w_hid\", \"b_inp\", \"b_hid\"]\n if bidirectional:\n rsd = len(_weights) % (2 * weights_num)\n assert rsd == 0, \"got an incorrect number of LSTM weights\"\n for i in range(0, len(_weights), 2 * weights_num):\n fw_tensors = [layers_h[2 * k], layers_c[2 * k], *_weights[i : i + 4]]\n fw_weights_dict = dict(zip(names, fw_tensors))\n if has_proj:\n fw_weights_dict[\"proj\"] = _weights[i + 4]\n j = i + weights_num\n rev_tensors = [layers_h[2 * k + 1], layers_c[2 * k + 1], *_weights[j : j + 4]]\n rev_weights_dict = dict(zip(names, rev_tensors))\n if has_proj:\n rev_weights_dict[\"proj\"] = _weights[j + 4]\n layer_weights_dicts.append([fw_weights_dict, rev_weights_dict])\n k += 1\n else:\n assert len(_weights) % weights_num == 0, \"got an incorrect number of LSTM weights\"\n for i in range(0, len(_weights), weights_num):\n fw_tensors = [layers_h[k], layers_c[k], *_weights[i : i + 4]]\n fw_weights_dict = dict(zip(names, fw_tensors))\n if has_proj:\n fw_weights_dict[\"proj\"] = _weights[i + 4]\n layer_weights_dicts.append([fw_weights_dict])\n k += 1\n else:\n names = [\"hidden_state\", \"cell_state\", \"w_inp\", \"w_hid\"]\n if bidirectional:\n rsd = len(_weights) % (2 * weights_num)\n assert rsd == 0, \"got an incorrect number of LSTM weights\"\n for i in range(0, len(_weights), 2 * weights_num):\n fw_tensors = [layers_h[2 * k], layers_c[2 * k], *_weights[i : i + 2]]\n fw_weights_dict = dict(zip(names, fw_tensors))\n if has_proj:\n fw_weights_dict[\"proj\"] = _weights[i + 2]\n j = i + weights_num\n rev_tensors = [layers_h[2 * k + 1], layers_c[2 * k + 1], *_weights[j : j + 2]]\n rev_weights_dict = dict(zip(names, rev_tensors))\n if has_proj:\n rev_weights_dict[\"proj\"] = _weights[j + 2]\n 
layer_weights_dicts.append([fw_weights_dict, rev_weights_dict])\n k += 1\n else:\n assert len(_weights) % weights_num == 0, \"got an incorrect number of LSTM weights\"\n for i in range(0, len(_weights), weights_num):\n fw_tensors = [layers_h[k], layers_c[k], *_weights[i : i + 2]]\n fw_weights_dict = dict(zip(names, fw_tensors))\n if has_proj:\n fw_weights_dict[\"proj\"] = _weights[i + 2]\n layer_weights_dicts.append([fw_weights_dict])\n k += 1\n assert (\n len(layer_weights_dicts) == num_layers and k == num_layers\n ), \"For stacked LSTM number of weights sets should be the same as number of layers!\"\n\n outputs = self.lstm_layers(\n X,\n layer_weights_dicts,\n bidirectional,\n dtype=X_dtype,\n dropout_p=dropout_p,\n )\n\n # output shape = (seq_num, batch, hidden_size) or\n # (seq_num, batch, 2*feature_size) for bidirectional\n output = outputs[0]\n\n hy = []\n cy = []\n for hidden in outputs[1]:\n hy.append(hidden[0])\n cy.append(hidden[1])\n\n if batch_first:\n output = _op.transpose(output, (1, 0, 2))\n\n return (output, _op.stack(hy, 0), _op.stack(cy, 0))\n\n def all_any_common(self, op, inputs, input_types):\n dim = inputs[1]\n keepdim = inputs[2]\n if self.infer_type(inputs[0]).dtype != \"bool\":\n # The input dtype can be uint8.\n inp = _op.cast(inputs[0], \"bool\")\n else:\n inp = inputs[0]\n return op(inp, axis=dim, keepdims=keepdim)\n\n def searchsorted_common(self, sorted_sequence, values, out_int32, right):\n dtype = \"int32\" if out_int32 else \"int64\"\n values_shape = _infer_shape(values)\n\n if len(values_shape) == 0:\n values = _op.expand_dims(values, 0)\n\n out = _op.searchsorted(sorted_sequence, values, right=right, dtype=dtype)\n\n if len(values_shape) == 0:\n return _op.squeeze(out)\n\n return out\n\n def searchsorted(self, inputs, input_types):\n return self.searchsorted_common(*inputs)\n\n def bucketize(self, inputs, input_types):\n return self.searchsorted_common(inputs[1], inputs[0], inputs[2], inputs[3])\n\n def roll(self, inputs, input_types):\n def slide_axes(inp, shape, ax):\n axes = list(range(len(shape)))\n axes = axes[:ax] + [-1] + axes[ax:-1]\n return _op.transpose(inp, axes)\n\n x = inputs[0]\n shifts = inputs[1]\n dims = inputs[2]\n shape = self.infer_shape(x)\n start = _expr.const(0, \"int64\")\n step = _expr.const(1, \"int64\")\n\n out = x\n for i, dim in enumerate(dims):\n roll_dim = _expr.const(shape[dim], \"int64\")\n indices_1d = _op.mod(\n _op.transform.arange(start, roll_dim, step, \"int64\")\n - _expr.const(shifts[i], \"int64\")\n + roll_dim,\n roll_dim,\n )\n # First fill in the last axis with roll indices, and then do transpose to\n # bring the roll indices into the desired axis.\n indices = slide_axes(\n _op.tile(indices_1d, shape[:dim] + shape[dim + 1 :] + (1,)),\n shape,\n dim,\n )\n out = _op.gather(out, dim, indices)\n\n return out\n\n def einsum(self, inputs, input_types):\n equation, data = inputs\n return _op.einsum(data, equation)\n\n def dot(self, inputs, _):\n lhs, rhs = inputs\n return _op.sum(_op.multiply(lhs, rhs))\n\n def mv(self, inputs, _):\n lhs, rhs = inputs\n\n # Convert the 1D matrix (vector) into a 2D matrix with the extra\n # dimension=1\n rhs_matrix = _op.transform.expand_dims(rhs, 0)\n\n # Run multiplication\n dense_result = _op.nn.dense(lhs, rhs_matrix, units=None)\n\n # Chop off the extra result dimension\n return _op.transform.squeeze(dense_result)\n\n # Operator mappings\n def create_convert_map(self):\n self.convert_map = {\n \"aten::is_floating_point\": self.is_floating_point,\n \"aten::pixel_shuffle\": 
self.pixel_shuffle,\n \"aten::device\": self.none,\n \"prim::device\": self.none,\n \"aten::sub\": self.make_elemwise(\"subtract\"),\n \"aten::max\": self.max,\n \"aten::min\": self.min,\n \"aten::mul\": self.make_elemwise(\"multiply\"),\n \"aten::pow\": self.make_elemwise(\"power\"),\n \"aten::arange\": self.arange,\n \"aten::meshgrid\": self.meshgrid,\n \"aten::div\": self.make_elemwise(\"divide\"),\n \"aten::floor_divide\": self.make_elemwise(\"floor_divide\"),\n \"aten::true_divide\": self.make_elemwise(\"divide\"),\n \"aten::addcdiv\": self.addcdiv,\n \"aten::addcmul\": self.addcmul,\n \"aten::ones\": self.ones,\n \"aten::ones_like\": self.ones_like,\n \"aten::zeros\": self.zeros,\n \"aten::zeros_like\": self.zeros_like,\n \"aten::full\": self.full,\n \"aten::full_like\": self.full_like,\n \"aten::linspace\": self.linspace,\n \"aten::reciprocal\": self.reciprocal,\n \"aten::repeat\": self.repeat,\n \"aten::repeat_interleave\": self.repeat_interleave,\n \"aten::to\": self.to,\n \"aten::squeeze\": self.squeeze,\n \"aten::unsqueeze\": self.unsqueeze,\n \"aten::cat\": self.concatenate,\n \"aten::slice\": self.slice,\n \"aten::narrow\": self.narrow,\n \"aten::split\": self.split,\n \"aten::split_with_sizes\": self.split_with_sizes,\n \"aten::select\": self.select,\n \"aten::take\": self.take,\n \"aten::where\": self.where,\n \"aten::topk\": self.topk,\n \"aten::relu\": self.relu,\n \"aten::prelu\": self.prelu,\n \"aten::leaky_relu\": self.leaky_relu,\n \"aten::elu\": self.elu,\n \"aten::celu\": self.celu,\n \"aten::gelu\": self.gelu,\n \"aten::selu\": self.selu,\n \"aten::silu\": self.silu,\n \"aten::log_sigmoid\": self.log_sigmoid,\n \"aten::adaptive_avg_pool1d\": functools.partial(\n self.adaptive_avg_pool, _op.nn.adaptive_avg_pool1d\n ),\n \"aten::adaptive_avg_pool2d\": functools.partial(\n self.adaptive_avg_pool, _op.nn.adaptive_avg_pool2d\n ),\n \"aten::adaptive_avg_pool3d\": functools.partial(\n self.adaptive_avg_pool, _op.nn.adaptive_avg_pool3d\n ),\n \"aten::adaptive_max_pool1d\": functools.partial(\n self.adaptive_max_pool, _op.nn.adaptive_max_pool1d\n ),\n \"aten::adaptive_max_pool2d\": functools.partial(\n self.adaptive_max_pool, _op.nn.adaptive_max_pool2d\n ),\n \"aten::adaptive_max_pool3d\": functools.partial(\n self.adaptive_max_pool, _op.nn.adaptive_max_pool3d\n ),\n \"aten::max_pool2d\": self.maxpool_2d,\n \"aten::max_pool2d_with_indices\": self.maxpool_2d_with_indices,\n \"aten::max_pool1d\": self.maxpool_1d,\n \"aten::max_pool3d\": self.maxpool_3d,\n \"aten::hardtanh\": self.hardtanh,\n \"aten::_convolution\": self.convolution,\n \"aten::softmax\": self.softmax,\n \"aten::threshold\": self.threshold,\n \"aten::contiguous\": self.contiguous,\n \"aten::batch_norm\": self.batch_norm,\n \"aten::instance_norm\": self.instance_norm,\n \"aten::layer_norm\": self.layer_norm,\n \"aten::group_norm\": self.group_norm,\n \"aten::transpose\": self.transpose,\n \"aten::t\": self.transpose,\n \"aten::flatten\": self.flatten,\n \"aten::addmm\": self.addmm,\n \"aten::size\": self.size,\n \"aten::view\": self.view,\n \"aten::reshape\": self.reshape,\n \"aten::clone\": self.clone,\n \"aten::log_softmax\": self.log_softmax,\n \"aten::sigmoid\": self.sigmoid,\n \"aten::softplus\": self.softplus,\n \"aten::avg_pool1d\": self.make_avg_pool(1),\n \"aten::avg_pool2d\": self.make_avg_pool(2),\n \"aten::avg_pool3d\": self.make_avg_pool(3),\n \"aten::linear\": self.linear,\n \"aten::dropout\": self.dropout,\n \"aten::feature_dropout\": self.dropout,\n \"aten::alpha_dropout\": self.dropout,\n 
\"aten::mean\": self.mean,\n \"aten::chunk\": self.chunk,\n \"aten::unsafe_chunk\": self.chunk,\n \"aten::matmul\": self.matmul,\n \"aten::bmm\": self.matmul,\n \"aten::expand\": self.expand,\n \"aten::Int\": self.int,\n \"prim::NumToTensor\": self.numtotensor,\n \"prim::ImplicitTensorToNum\": self.tensortonum,\n \"aten::ScalarImplicit\": self.tensortonum,\n \"aten::constant_pad_nd\": self.make_pad(\"constant\"),\n \"aten::reflection_pad1d\": self.make_pad(\"reflect\"),\n \"aten::reflection_pad2d\": self.make_pad(\"reflect\"),\n \"aten::replication_pad1d\": self.make_pad(\"edge\"),\n \"aten::replication_pad2d\": self.make_pad(\"edge\"),\n \"aten::replication_pad3d\": self.make_pad(\"edge\"),\n \"aten::permute\": self.transpose,\n \"aten::sum\": self.make_reduce(\"sum\"),\n \"aten::prod\": self.make_reduce(\"prod\"),\n \"aten::argmin\": self.make_reduce(\"argmin\"),\n \"aten::argmax\": self.make_reduce(\"argmax\"),\n \"aten::norm\": self.norm,\n \"aten::frobenius_norm\": self.frobenius_norm,\n \"aten::std\": self.std,\n \"aten::var\": self.variance,\n \"aten::abs\": self.make_unary(\"abs\"),\n \"aten::neg\": self.make_unary(\"negative\"),\n \"aten::cos\": self.make_unary(\"cos\"),\n \"aten::cosh\": self.make_unary(\"cosh\"),\n \"aten::sin\": self.make_unary(\"sin\"),\n \"aten::sinh\": self.make_unary(\"sinh\"),\n \"aten::tan\": self.make_unary(\"tan\"),\n \"aten::tanh\": self.make_unary(\"tanh\"),\n \"aten::acos\": self.make_unary(\"acos\"),\n \"aten::asin\": self.make_unary(\"asin\"),\n \"aten::atan\": self.make_unary(\"atan\"),\n \"aten::log\": self.make_unary(\"log\"),\n \"aten::log2\": self.make_unary(\"log2\"),\n \"aten::log10\": self.make_unary(\"log10\"),\n \"aten::log1p\": self.log1p,\n \"aten::exp\": self.make_unary(\"exp\"),\n \"aten::erf\": self.make_unary(\"erf\"),\n \"aten::trunc\": self.make_unary(\"trunc\"),\n \"aten::sign\": self.make_unary(\"sign\"),\n \"aten::sqrt\": self.make_unary(\"sqrt\"),\n \"aten::rsqrt\": self.make_unary(\"rsqrt\"),\n \"aten::ceil\": self.make_unary(\"ceil\"),\n \"aten::floor\": self.make_unary(\"floor\"),\n \"aten::round\": self.make_unary(\"round\"),\n \"aten::isfinite\": self.make_unary(\"isfinite\"),\n \"aten::isinf\": self.make_unary(\"isinf\"),\n \"aten::isnan\": self.make_unary(\"isnan\"),\n \"aten::clamp\": self.clamp,\n \"aten::detach\": self.identity,\n \"aten::upsample_bilinear2d\": self.make_upsample(\"linear\"),\n \"aten::upsample_bicubic2d\": self.make_upsample(\"cubic\"),\n \"aten::upsample_nearest2d\": self.make_upsample(\"nearest_neighbor\"),\n \"aten::upsample_trilinear3d\": self.make_upsample3d(\"linear\"),\n \"aten::upsample_nearest3d\": self.make_upsample3d(\"nearest_neighbor\"),\n \"aten::expand_as\": self.expand_as,\n \"aten::lt\": self.make_elemwise(\"less\"),\n \"aten::gt\": self.make_elemwise(\"greater\"),\n \"aten::le\": self.make_elemwise(\"less_equal\"),\n \"aten::ge\": self.make_elemwise(\"greater_equal\"),\n \"aten::ne\": self.make_elemwise(\"not_equal\"),\n \"aten::eq\": self.make_elemwise(\"equal\"),\n \"aten::logical_not\": self.logical_not,\n \"aten::logical_xor\": self.logical_xor,\n \"aten::bitwise_not\": self.bitwise_not,\n \"aten::bitwise_xor\": self.bitwise_xor,\n \"aten::Bool\": self.Bool,\n \"aten::Float\": self.Float,\n \"aten::rsub\": self.rsub,\n \"aten::embedding\": self.embedding,\n \"aten::one_hot\": self.one_hot,\n \"aten::mm\": self.matmul,\n \"aten::add\": self.add,\n \"aten::stack\": self.stack,\n \"aten::__getitem__\": self.list_getitem,\n \"aten::len\": self.list_len,\n \"aten::type_as\": 
self.type_as,\n \"aten::gather\": self.gather,\n \"aten::index_select\": self.select,\n \"aten::index\": self.index,\n \"torchvision::nms\": self.nms,\n \"aten::logsumexp\": self.logsumexp,\n \"torchvision::roi_align\": self.roi_align,\n \"torchvision::deform_conv2d\": self.deform_conv2d,\n \"aten::unbind\": self.unbind,\n \"aten::__and__\": self.logical_and,\n \"aten::logical_and\": self.logical_and,\n \"aten::_shape_as_tensor\": self.shape_as_tensor,\n \"aten::nonzero\": self.nonzero,\n \"aten::nonzero_numpy\": self.nonzero_numpy,\n \"aten::scatter\": self.scatter,\n \"aten::index_put\": self.index_put,\n \"aten::scalar_tensor\": self.scalar_tensor,\n \"aten::__interpolate\": self.interpolate,\n \"aten::IntImplicit\": self.identity,\n \"aten::tensor\": self.identity, # used for example in tensor(1.0)\n \"aten::numel\": self.numel,\n \"aten::empty\": self.empty,\n \"aten::bincount\": self.bincount,\n \"aten::scatter_add\": self.scatter_add,\n \"aten::__not__\": self.logical_not,\n \"aten::hardswish\": self.hard_swish,\n \"aten::hardsigmoid\": self.hard_sigmoid,\n \"aten::cumsum\": self.cumsum,\n \"aten::masked_fill\": self.masked_fill,\n \"aten::masked_select\": self.masked_select,\n \"aten::argsort\": self.argsort,\n \"aten::sort\": self.sort,\n \"aten::_unique2\": self.unique,\n \"aten::nll_loss\": self.nll_loss,\n \"aten::nll_loss2d\": self.nll_loss,\n \"aten::nll_loss_nd\": self.nll_loss,\n \"aten::flip\": self.flip,\n \"aten::gru\": self.gru,\n \"aten::lstm\": self.lstm,\n \"aten::all\": functools.partial(self.all_any_common, _op.all),\n \"aten::any\": functools.partial(self.all_any_common, _op.any),\n \"aten::searchsorted\": self.searchsorted,\n \"aten::bucketize\": self.bucketize,\n \"aten::roll\": self.roll,\n \"aten::einsum\": self.einsum,\n \"aten::dot\": self.dot,\n \"aten::mv\": self.mv,\n }\n\n def update_convert_map(self, custom_map):\n self.convert_map.update(custom_map)\n\n def report_missing_conversion(self, op_names):\n \"\"\"Check if all ops in an input graph are supported by TVM\"\"\"\n known_ops = [\n \"prim::Constant\",\n \"prim::GetAttr\",\n \"prim::ListConstruct\",\n \"prim::ListUnpack\",\n \"prim::TupleConstruct\",\n \"prim::TupleUnpack\",\n \"prim::RaiseException\",\n \"prim::If\",\n \"prim::Loop\",\n ]\n known_ops += list(self.convert_map.keys())\n known_ops += list(qnn_torch.convert_map.keys())\n\n missing = []\n\n for op_name in op_names:\n # Also take care of in-place variant ops like aten::relu_\n if op_name not in known_ops and not (\n op_name.endswith(\"_\") and op_name[:-1] in known_ops\n ):\n missing.append(op_name)\n\n if missing:\n msg = \"The following operators are not implemented: {}\".format(missing)\n raise NotImplementedError(msg)\n\n def convert_block(self, block, outputs):\n \"\"\"Translate Torch \"Block\", used for prim::If and prim::Loop\"\"\"\n ops = _get_operator_nodes(block.nodes())\n ret_names = _get_input_names(block.returnNode())\n return self.convert_operators(ops, outputs, ret_names)\n\n def convert_if(self, if_node, outputs):\n \"\"\"Translate Torch prim::If to Relay If\"\"\"\n cond = outputs[if_node.inputsAt(0).debugName()]\n blocks = list(if_node.blocks())\n true_branch = self.convert_block(blocks[0], outputs)\n false_branch = self.convert_block(blocks[1], outputs)\n assert len(true_branch) == 1 and len(false_branch) == 1\n return _expr.If(cond, true_branch[0], false_branch[0])\n\n def convert_loop(self, loop_node, outputs):\n \"\"\"Translate Torch prim::Loop to Relay while_loop\"\"\"\n\n def get_input(index):\n ivalue = 
loop_node.inputsAt(index)\n inode = ivalue.node()\n if inode.kind() == \"prim::Constant\":\n return _expr.const(_get_constant(inode))\n var_name = ivalue.debugName()\n assert var_name in outputs\n return _wrap_const(outputs[var_name])\n\n # Refer to the spec for prim::Loop below\n # https://github.com/pytorch/pytorch/blob/master/torch/csrc/jit/OVERVIEW.md#loops\n # The first input: %max_trip_count\n # The second input: %initial_condition\n # The rest of input: loop variables\n max_loop_count = get_input(0)\n init_cond = get_input(1)\n num_loop_var = len(list(loop_node.inputs())) - 2\n init_vals = [get_input(i + 2) for i in range(num_loop_var)]\n\n # while loop has always max_loop_count being int64 max\n # max_loop_count.data (tvm.runtime.NDArray) is -1, so _get_constant again\n is_while_loop = (\n isinstance(max_loop_count, _expr.Constant)\n and _get_constant(loop_node.inputsAt(0).node()) == sys.maxsize\n )\n\n if is_while_loop:\n loop_iter_dtype = \"bool\"\n # while loop with non input dependent condition such as while i < 10:\n # init_cond is int, need to cast to bool to type check\n if isinstance(init_cond, _expr.Constant):\n init_cond = _op.cast(init_cond, \"bool\")\n init_loop_iter_val = init_cond\n else:\n loop_iter_dtype = \"int32\"\n # always count from 0\n init_loop_iter_val = _expr.const(0, dtype=\"int32\")\n\n body_block = list(loop_node.blocks())[0]\n block_input_names = _get_input_names(body_block)\n num_block_inputs = len(block_input_names)\n name_val_pairs = list(zip(block_input_names, [init_loop_iter_val] + init_vals))\n outputs.update(name_val_pairs)\n\n def get_var(name, val):\n if val:\n checked_type = self.infer_type_with_prelude(val)\n if hasattr(checked_type, \"shape\"):\n shape = get_const_tuple(checked_type.shape)\n actual_shape = []\n for dim in shape:\n if isinstance(dim, int) and dim == 0:\n actual_shape.append(Any())\n else:\n actual_shape.append(dim)\n return _expr.var(name, shape=actual_shape, dtype=checked_type.dtype)\n else:\n return _expr.var(name, type_annotation=checked_type)\n return _expr.var(name)\n\n loop_iter_var = _expr.var(block_input_names[0], shape=(), dtype=loop_iter_dtype)\n loop_vars = [get_var(name, val) for name, val in name_val_pairs[1:]]\n\n # Add non constant free variables to loop variables to prevent code blow up\n # Without this, if there are two for loops in a row, which often happens\n # if the outer loop is unrolled, the computation corresponding to the first for loop\n # is inlined inside loop body, turning O(N) + O(N) computation into O(N^2).\n # This issue was found when converting from Stacked LSTM test. 
Torch does not add the\n # outputof the eariler loop into loop variables of the next loop.\n # So the variable corresponding to the first loop output appears free in the second\n # loop body.\n free_vars = [\n var\n for var in _get_free_vars_from_block(body_block)\n if var in outputs\n and not isinstance(outputs[var], (_expr.Constant, int, float, str))\n and outputs[var]\n ]\n\n prev_outputs = {}\n for name in free_vars:\n prev_output = outputs[name]\n new_loop_var = get_var(name, prev_output)\n prev_outputs[name] = prev_output\n outputs[name] = new_loop_var\n loop_vars.append(new_loop_var)\n init_vals.append(prev_output)\n\n def cond(*current_vals):\n i = current_vals[0]\n\n if is_while_loop:\n return _op.equal(i, _expr.const(True, \"bool\"))\n\n return _op.less(i, max_loop_count)\n\n def body(*current_vals):\n # Update loop variables using the prev iteration outputs\n assert len(current_vals) == num_block_inputs + len(free_vars)\n\n for (i, val) in enumerate(current_vals):\n if i < num_block_inputs:\n outputs[block_input_names[i]] = val\n else:\n outputs[free_vars[i - num_block_inputs]] = val\n\n block_outputs = self.convert_block(body_block, outputs)\n block_outputs += [outputs[name] for name in free_vars]\n\n if not is_while_loop:\n # iter var increment implicit in torch, so do it manually\n # for while loop, block_outputs[0] is already a boolean,\n # the result of termination check\n incr = _expr.const(1, dtype=\"int32\")\n block_outputs[0] = current_vals[0] + incr\n\n return block_outputs\n\n loop = while_loop(cond, [loop_iter_var] + loop_vars, body)\n loop_val = loop(init_loop_iter_val, *init_vals)\n\n # restore original output values for free vars\n outputs.update(prev_outputs)\n\n # The first element is a loop counter or boolean condition, ignore it\n return [_expr.TupleGetItem(loop_val, i + 1) for i in range(num_loop_var)]\n\n def convert_operators(self, operators, outputs, ret_names):\n \"\"\"Convert each Torch IR operators to Relay equivalent\"\"\"\n for node_name, op_node in operators:\n operator = op_node.kind()\n inputs = _get_op_inputs(op_node, outputs)\n\n if operator == \"prim::Constant\":\n outputs[node_name] = _get_constant(op_node)\n elif operator == \"prim::ListConstruct\" and _should_construct_dynamic_list(op_node):\n outputs[node_name] = self.convert_to_list_adt(inputs)\n elif operator == \"prim::ListConstruct\":\n # This assumes that no more elements will be appended to this list\n # In this case, we keep the Python list\n outputs[node_name] = inputs\n elif operator == \"prim::TupleConstruct\":\n\n def _handel_nested_input(inputs):\n inputs_list = []\n for i, _ in enumerate(inputs):\n if isinstance(inputs[i], list):\n inputs_list.append(_handel_nested_input(inputs[i]))\n else:\n assert isinstance(inputs[i], _expr.Expr)\n inputs_list.append(inputs[i])\n return _expr.Tuple(inputs_list)\n\n outputs[node_name] = _handel_nested_input(inputs)\n elif operator in [\"prim::ListUnpack\", \"prim::TupleUnpack\"]:\n assert len(inputs) == 1\n if isinstance(inputs[0], (list, _expr.TupleWrapper)):\n unpacked = inputs[0]\n else:\n unpacked = _unpack_tuple(inputs[0])\n outputs.update(zip(_get_output_names(op_node), unpacked))\n elif operator == \"prim::prim::RaiseException\":\n logger.warning(\"raising exceptions is ignored\")\n outputs[node_name] = None\n elif operator == \"prim::If\":\n if_out = self.convert_if(op_node, outputs)\n outputs[node_name] = if_out\n elif operator == \"prim::Loop\":\n loop_out = self.convert_loop(op_node, outputs)\n unpacked_names = 
_get_output_names(op_node)\n assert len(loop_out) == len(unpacked_names)\n outputs.update(zip(unpacked_names, loop_out))\n else:\n if operator not in self.convert_map:\n # At this point, the only possible ops that are not in convert_map are\n # in-place variant of ops like aten::relu_\n assert operator.endswith(\"_\")\n logger.warning(\n \"An in-place op %s found, the result will not be correct \"\n \"if the model depends on side-effects by this op.\",\n operator,\n )\n relay_op = self.convert_map[operator[:-1]]\n else:\n relay_op = self.convert_map[operator]\n\n relay_out = relay_op(\n inputs, _get_input_types(op_node, outputs, default_dtype=self.default_dtype)\n )\n self.record_output_type(relay_out)\n\n if isinstance(relay_out, tuple):\n # This is for torch operators that return multiple outputs\n # See _adaptive_max_2d above for example\n out_names = _get_output_names(op_node)\n outputs.update(zip(out_names, relay_out))\n else:\n assert op_node.outputsSize() == 1\n outputs[node_name] = relay_out\n\n return [_wrap_const(outputs[ret_name]) for ret_name in ret_names]\n\n\ndef _pytorch_result_type(dtypes, non_tensor_inputs):\n \"\"\"This promotes TVM dtypes like PyTorch would\"\"\"\n import torch\n\n dtype_map = {\n \"float64\": torch.float64,\n \"float32\": torch.float32,\n \"float16\": torch.float16,\n \"bfloat16\": torch.bfloat16,\n \"int64\": torch.int64,\n \"int32\": torch.int32,\n \"int16\": torch.int16,\n \"int8\": torch.int8,\n \"uint8\": torch.uint8,\n \"bool\": torch.bool,\n }\n if len(dtypes) > 0:\n result_type = dtypes[0]\n for dt in dtypes[1:]:\n if dt != result_type: # we don't want to work with same types as we\n # don't do quantized here (which cannot be promoted?)\n result_type = _convert_data_type(\n str(\n torch.result_type(\n torch.zeros((), dtype=dtype_map[result_type]),\n torch.zeros((), dtype=dtype_map[dt]),\n )\n )\n )\n else:\n result_type = \"bool\" # this is the smallest type...\n for inp in non_tensor_inputs:\n result_type = _convert_data_type(\n str(torch.result_type(torch.zeros((), dtype=dtype_map[result_type]), inp))\n )\n return result_type\n\n\n# Helper functions for operator implementation\ndef _convert_dtype_value(val):\n \"\"\"converts a PyTorch the PyTorch numeric type id to a torch scalar type.\"\"\"\n convert_torch_dtype_map = {\n 7: \"torch.float64\",\n 6: \"torch.float32\",\n 5: \"torch.float16\",\n 4: \"torch.int64\",\n 3: \"torch.int32\",\n 2: \"torch.int16\",\n 1: \"torch.int8\",\n 0: \"torch.unit8\",\n None: \"torch.int64\",\n } # Default is torch.int64\n if val in convert_torch_dtype_map:\n return _convert_data_type(convert_torch_dtype_map[val])\n else:\n msg = \"Torch data type value %d is not handled yet.\" % (val)\n raise NotImplementedError(msg)\n\n\ndef _convert_data_type(input_type, default_dtype=None):\n \"\"\"converts the PyTorch scalar type input_type to a TVM dtype.\n optionally, default_dtype can be a TVM dtype that is used\n if input_type is None (but not when it is unknown)\"\"\"\n if input_type is None and default_dtype is not None:\n return default_dtype\n\n input_type = input_type.lower()\n if input_type in [\"double\", \"float64\", \"torch.float64\"]:\n return \"float64\"\n elif input_type in [\"float\", \"float32\", \"torch.float32\"]:\n return \"float32\"\n elif input_type in [\"half\", \"float16\", \"torch.float16\"]:\n return \"float16\"\n elif input_type in [\"long\", \"int64\", \"torch.int64\"]:\n return \"int64\"\n elif input_type in [\"int\", \"int32\", \"torch.int32\"]:\n return \"int32\"\n elif input_type in 
[\"short\", \"int16\", \"torch.int16\"]:\n return \"int16\"\n elif input_type in [\"char\", \"int8\", \"torch.int8\"]:\n return \"int8\"\n elif input_type in [\"byte\", \"uint8\", \"torch.uint8\"]:\n return \"uint8\"\n elif input_type in [\"quint8\", \"torch.quint8\"]:\n return \"quint8\"\n elif input_type in [\"qint8\", \"torch.qint8\"]:\n return \"qint8\"\n elif input_type in [\"qint32\", \"torch.qint32\"]:\n return \"qint32\"\n elif input_type in [\"bool\", \"torch.bool\"]:\n return \"bool\"\n elif input_type in [\"str\"]:\n return \"str\"\n else:\n raise NotImplementedError(\"input_type {} is not handled yet\".format(input_type))\n return \"float32\" # Never reached\n\n\ndef _create_typed_const(data, dtype):\n \"\"\"create a (scalar) constant of given value and dtype.\n dtype should be a TVM dtype\"\"\"\n\n if dtype == \"float64\":\n typed_data = _expr.const(np.float64(data), dtype=dtype)\n elif dtype == \"float32\":\n typed_data = _expr.const(np.float32(data), dtype=dtype)\n elif dtype == \"float16\":\n typed_data = _expr.const(np.float16(data), dtype=dtype)\n elif dtype == \"int64\":\n typed_data = _expr.const(np.int64(data), dtype=dtype)\n elif dtype == \"int32\":\n typed_data = _expr.const(np.int32(data), dtype=dtype)\n elif dtype == \"int16\":\n typed_data = _expr.const(np.int16(data), dtype=dtype)\n elif dtype == \"int8\":\n typed_data = _expr.const(np.int8(data), dtype=dtype)\n elif dtype == \"uint8\":\n typed_data = _expr.const(np.uint8(data), dtype=dtype)\n else:\n raise NotImplementedError(\"input_type {} is not handled yet\".format(dtype))\n return typed_data\n\n\ndef _wrap_const(c):\n if not isinstance(c, (_expr.Expr, list, tvm.tir.expr.Any)):\n return _expr.const(c)\n return c\n\n\ndef _run_jit_passes(graph):\n \"\"\"The inline pass is necessary to unwrap prim::CallMethod\"\"\"\n # pylint: disable=c-extension-no-member\n import torch\n\n if is_version_greater_than(\"1.5.1\"):\n # This is required for torchvision detection models from 1.6 above\n # It is the same as _jit_pass_inline, except that it has some special\n # case behaviors for some ops such as aten::__interpolate()\n torch._C._jit_pass_onnx_function_substitution(graph)\n else:\n torch._C._jit_pass_inline(graph)\n\n\ndef _get_tensor_and_var(torch_tensor, name):\n tensor = tvm.nd.array(torch_tensor.cpu().numpy())\n var = _expr.var(name, shape=tensor.shape, dtype=tensor.dtype)\n return tensor, var\n\n\ndef _get_output_name(node):\n assert node.outputsSize() == 1\n return node.output().debugName()\n\n\ndef _get_output_names(node):\n return [output.debugName() for output in node.outputs()]\n\n\ndef _get_input_names(node_or_graph):\n return [inp.debugName() for inp in node_or_graph.inputs()]\n\n\ndef _get_op_inputs(op_node, outputs):\n return [outputs[name] for name in _get_input_names(op_node)]\n\n\ndef _get_node_type(node):\n assert node.outputsSize() == 1\n return node.output().type().kind()\n\n\ndef _get_uses(node):\n uses = []\n for output in node.outputs():\n uses += output.uses()\n return uses\n\n\ndef _get_users(node):\n return [use.user for use in _get_uses(node)]\n\n\ndef _getattr_attr_name(node):\n attribute_names = node.attributeNames()\n assert len(attribute_names) == 1\n attr_name = node.s(attribute_names[0])\n return attr_name\n\n\ndef _getattr_full_name(getattrs, sep=\".\"):\n return sep.join([_getattr_attr_name(node) for node in getattrs])\n\n\ndef _get_pytorch_value_type(typ, default_dtype=\"float32\"):\n kind = typ.kind()\n if kind == \"TensorType\":\n if typ.scalarType() is None:\n # Tensor's type 
can be unknown if we use torch.jit.script(...)\n # Defaults can be passed in, if not it is float32\n logger.warning(\"Untyped Tensor found, assume it is %s\", default_dtype)\n return default_dtype\n else:\n return _convert_data_type(typ.scalarType())\n\n elif kind == \"ListType\":\n return \"ListType\"\n elif kind in [\"IntType\", \"FloatType\", \"BoolType\", \"StringType\", \"OptionalType\"]:\n pt_dtype = str(typ).lower()\n dtype = pt_dtype if pt_dtype == \"OptionalType\" else _convert_data_type(pt_dtype)\n return dtype\n else:\n return \"UnsupportedType\"\n\n\ndef _get_input_types(op_node, outputs, default_dtype=\"float32\"):\n \"\"\"Returns a TVM dtype for each input nodes derived from the torch type\"\"\"\n in_types = []\n for inp in op_node.inputs():\n if inp.node().kind() == \"prim::GetAttr\":\n # GetAttr nodes always return None when we call scalarType() on it\n name = inp.debugName()\n assert name in outputs\n if isinstance(outputs[name], _expr.Var):\n in_types.append(outputs[name].type_annotation.dtype)\n else:\n # For quantized modules with parameters, here we would get\n # \"prim::GetAttr[name=\"_packed_params\"]\". Since the dtype corresponding to\n # _packed_params is not needed by quantized ops, we return an arbitrary type.\n in_types.append(default_dtype)\n else:\n in_types.append(_get_pytorch_value_type(inp.type(), default_dtype=default_dtype))\n\n return in_types\n\n\ndef _get_constant(node):\n \"\"\"Retrieve a constant associated with this prim::Constant node\"\"\"\n attribute_names = node.attributeNames()\n num_attributes = len(attribute_names)\n\n if num_attributes == 1:\n attr_name = attribute_names[0]\n ty = node.output().type().kind()\n\n if ty == \"IntType\":\n return node.i(attr_name)\n elif ty == \"BoolType\":\n return bool(node.i(attr_name))\n elif ty in [\"FloatType\", \"LongType\"]:\n return node.f(attr_name)\n elif ty in [\"TensorType\", \"CompleteTensorType\"]:\n tensor = node.t(attr_name)\n if tensor.is_cuda:\n tensor = tensor.cpu()\n if len(tensor.shape) == 0: # tensor(0.1)\n # TODO(t-vi): When is this needed?\n return tensor.item()\n return _wrap_const(tensor.numpy())\n elif ty in [\"DeviceObjType\", \"StringType\"]:\n return node.s(attr_name)\n elif ty == \"FunctionType\":\n return None\n else:\n raise NotImplementedError(\"Unsupported type: %s\" % ty)\n else:\n assert num_attributes == 0\n return None\n\n\ndef _get_operator_nodes(nodes):\n \"\"\"Returns torch IR nodes that need conversion to Relay\"\"\"\n ops = []\n # Traverse nodes and add to graph\n for node in nodes:\n if node.outputsSize() == 0:\n continue\n if node.outputsSize() > 1:\n node_name = \"_\".join(_get_output_names(node))\n else:\n node_name = _get_output_name(node)\n\n if node.kind() != \"prim::GetAttr\":\n ops.append((node_name, node))\n\n return ops\n\n\ndef _get_relay_input_vars(graph, input_infos, prelude, is_module=True, default_dtype=\"float32\"):\n \"\"\"\n Return Relay vars from input shapes and create entries based on\n expected graph inputs - to allow translation\n \"\"\"\n\n graph_inputs = list(graph.inputs())\n if is_module:\n # a module has \"self\" as first input, which we do not need/want\n graph_inputs = graph_inputs[1:]\n\n if not isinstance(input_infos, list):\n msg = \"Graph inputs input_infos should be a list\"\n raise RuntimeError(msg)\n\n if len(graph_inputs) != len(input_infos):\n msg = \"PyTorch has {} inputs and input_infos lists {}.\".format(\n len(graph_inputs), len(input_infos)\n )\n raise RuntimeError(msg)\n\n def get_relay_ty(ishape, itype, pt_type):\n if 
pt_type.kind() == \"TensorType\":\n if not (_is_int_seq(ishape) or len(ishape) == 0):\n msg = \"Shape for Tensors must be lists of ints\"\n raise RuntimeError(msg)\n if (pt_type.dim() is not None and pt_type.dim() != len(ishape)) or (\n pt_type.sizes() is not None\n and any([s1 != s2 for s1, s2 in zip(pt_type.sizes(), ishape)])\n ):\n msg = \"Shapes of input list and information in the graph do not match\"\n raise RuntimeError(msg)\n pt_dtype = pt_type.scalarType()\n if not pt_dtype and itype:\n pt_dtype = itype\n dtype = _convert_data_type(pt_dtype, default_dtype=default_dtype)\n return TensorType(ishape, dtype)\n elif pt_type.kind() == \"TupleType\":\n if not isinstance(ishape, tuple):\n msg = \"Shapes for tuples must be tuples\"\n raise RuntimeError(msg)\n return TupleType(\n [get_relay_ty(elem, itype, pt_t) for elem, pt_t in zip(ishape, pt_type.elements())]\n )\n elif pt_type.kind() == \"ListType\":\n if not isinstance(ishape, list):\n msg = \"Shapes for lists must be lists\"\n raise RuntimeError(msg)\n pt_elemtype = pt_type.getElementType()\n elem_tys = [get_relay_ty(s, itype, pt_elemtype) for s in ishape]\n if len(elem_tys) > 0 and not all(map(lambda ty: ty == elem_tys[0], elem_tys)):\n msg = \"List elements need have identical types\"\n raise RuntimeError(msg)\n rlist, _, _ = prelude.mod.get_type(\"List\")\n return rlist(elem_tys[0])\n elif pt_type.kind() == \"OptionalType\":\n # we do not support None yet, so we fill in the type\n return get_relay_ty(ishape, itype, pt_type.getElementType())\n # TODO: scalar inputs\n raise NotImplementedError(\"unsupported input type\")\n\n input_vars = {}\n\n new_input_infos = []\n for num, inp in enumerate(input_infos):\n if not isinstance(inp, tuple):\n msg = \"Graph input {} is not a tuple\".format(num)\n raise RuntimeError(msg)\n if len(inp) != 2 or not isinstance(inp[0], str):\n msg = (\n \"Graph input {} is not valid,\"\n \" expected ('name', shape) or ('name', (shape, dtype))\".format(inp)\n )\n raise RuntimeError(msg)\n if not isinstance(inp[1], tuple) or len(inp[1]) == 0 or not isinstance(inp[1][-1], str):\n new_input_infos.append((inp[0], (inp[1], default_dtype)))\n else:\n new_input_infos.append(inp)\n\n input_types = [\n (name, get_relay_ty(info[0], info[1], gi.type()))\n for (name, info), gi in zip(new_input_infos, graph_inputs)\n ]\n\n ir_inputs = [i.debugName() for i in graph_inputs]\n for ir_input, (name, itype) in zip(ir_inputs, input_types):\n inp = _expr.var(name, type_annotation=itype)\n # Translate from graph input to user input name\n input_vars[ir_input] = inp\n\n return input_vars\n\n\ndef _unpack_tuple(tup):\n def unpack(tup, num_fields):\n return [_expr.TupleGetItem(tup, i) for i in range(num_fields)]\n\n if isinstance(tup, _expr.Tuple):\n return unpack(tup, len(tup.fields))\n elif isinstance(tup.type_annotation, TupleType):\n return unpack(tup, len(tup.type_annotation.fields))\n # shouldn't happen\n assert False\n\n\ndef _get_free_vars_from_block(block):\n block_inp_names = _get_input_names(block)\n bound_names = block_inp_names\n free_vars = set()\n\n for node in block.nodes():\n inp_names = _get_input_names(node)\n list_diff = [name for name in inp_names if name not in bound_names]\n free_vars.update(list_diff)\n bound_names += _get_output_names(node)\n\n return free_vars\n\n\ndef get_use_chains(root_node, terminate=lambda _: False):\n \"\"\"\n Track a chain of users of this node forward, returning a list of chains\n See get_attr_chains below for its usage\n \"\"\"\n\n def concat_lists(lists):\n return 
itertools.chain.from_iterable(lists)\n\n def inner(current, accum):\n users = _get_users(current)\n\n if not users or terminate(users):\n return [accum]\n\n return concat_lists([inner(nxt, accum + [nxt]) for nxt in users])\n\n return inner(root_node, [root_node])\n\n\ndef get_attr_chains(root_getattr_node):\n \"\"\"Returns chains of attribute access starting from root_getattr_node\n\n For example, given attribute \"block\", as in \"self.block\" when \"self\" points\n to the top level torch.nn.Module, it returns lists of attribute \"chains\",\n e.g. ['block', '2'], ['block', '1'], ['block', '0', '_packed_params']\n\n These sets of attributes form full attribute accessors. For example,\n \"self.block.1\", \"self.block.2\" will return the second and third submodule,\n and \"self.block.0._packed_params\" will return the parameters of the first\n submodule.\n \"\"\"\n\n def terminate(users):\n next_attrs = [user for user in users if user.kind() == \"prim::GetAttr\"]\n return len(next_attrs) == 0\n\n return get_use_chains(root_getattr_node, terminate)\n\n\ndef convert_params(graph, state_dict, use_parser_friendly_name=False):\n \"\"\"\n Return Relay vars and TVM NDArrays for input parameters\n A chain of prim::GetAttr nodes is processed one at a time\n \"\"\"\n getattr_nodes = graph.findAllNodes(\"prim::GetAttr\", recurse=True)\n params = {}\n param_tensors = {}\n packed_param_map = {}\n vars_by_name = {}\n seen = set()\n attr_name_sep = \"_\" if use_parser_friendly_name else \".\"\n\n for node in getattr_nodes:\n if _get_output_name(node) in seen:\n continue\n\n for getattrs in get_attr_chains(node):\n seen.update(map(_get_output_name, getattrs))\n\n full_attr = _getattr_full_name(getattrs, attr_name_sep)\n full_attr_node_name = _get_output_name(getattrs[-1])\n\n if full_attr.endswith(\"_packed_params\"): # for quantized models\n packed_param_map[full_attr_node_name] = full_attr\n elif full_attr in state_dict:\n if full_attr in vars_by_name:\n var = vars_by_name[full_attr]\n else:\n torch_tensor = state_dict[full_attr]\n tensor, var = _get_tensor_and_var(torch_tensor, full_attr)\n param_tensors[full_attr] = tensor\n vars_by_name[full_attr] = var\n params[full_attr_node_name] = var\n\n return params, param_tensors, packed_param_map\n\n\ndef get_all_op_names(graph):\n \"\"\"Return all operator names in the input graph\"\"\"\n nodes = list(graph.nodes())\n prim_with_blocks = [\"prim::If\", \"prim::Loop\"]\n for prim in prim_with_blocks:\n prim_nodes = graph.findAllNodes(prim, recurse=True)\n for prim_node in prim_nodes:\n for block in prim_node.blocks():\n nodes += block.nodes()\n return set(node.kind() for node in nodes)\n\n\ndef from_pytorch(\n script_module,\n input_infos,\n custom_convert_map=None,\n default_dtype=\"float32\",\n use_parser_friendly_name=False,\n keep_quantized_weight=False,\n):\n \"\"\"Load PyTorch model in the form of a scripted PyTorch model and convert into relay.\n The companion parameters will be handled automatically.\n\n Parameters\n ----------\n script_module : TopLevelTracedModule object\n TorchScripted PyTorch graph\n Note: We currently only support traces (ie: torch.jit.trace(model, input))\n\n input_infos : List of tuples\n Can be (input name, input shape) or (input name, (input shape, input types))\n Graph level input shape and type list\n The same input names need to be used for deployment, so choose easy to\n remember names (such as: input0, input1)\n e.g.\n [('input0', (1, 2)), ('input1', (3, 4))]\n or\n [('input0', ((1, 2), 'int')), ('input1', ((3, 4), 
'float'))]\n\n custom_convert_map : Dictionary of str to Relay op\n A custom op conversion map in the same format as _convert_map above\n\n default_type : str\n The default dtype to use when type information is not provided by PyTorch.\n\n use_parser_friendly_name : bool\n When True, replace '.' with `_' in a original parameter name.\n The Relay text parser treats a variable name followed by a period as a tuple element access,\n so a variable name like \"dense.weight\" cannot be parsed correctly.\n Use this option when you want to run the AnnotateSpans pass on the imported module.\n\n keep_quantized_weight : bool\n Return quantized weights and bias, rather than float ones. PyTorch stores quantized weights\n in a custom format, so we cannot directly access 8 bit weights as Numpy arrays. We use\n a PyTorch function to unpack quantized weights into float32 arrays and quantization\n parameters. By default, we return float32 weights and rely on the QNN lowering and the\n Relay constant folding pass to quantize weights at compile time. In BYOC use cases, however,\n we cannot apply the constant folding pass on a QNN graph. If keep_quantized_weight is True,\n we quantize weights in the frontend using a function that is equivalent to\n qnn.op.quantize(...) operating on Numpy arrays.\n\n Returns\n -------\n mod : tvm.IRModule\n The module that optimizations will be performed on.\n\n params : dict of str to tvm.runtime.NDArray\n Dict of converted parameters stored in tvm.runtime.ndarray format\n \"\"\"\n import torch\n\n mod = tvm.IRModule()\n prelude = Prelude(mod)\n\n converter = PyTorchOpConverter(prelude, default_dtype)\n\n graph = script_module.graph.copy()\n _run_jit_passes(graph)\n\n if custom_convert_map:\n converter.update_convert_map(custom_convert_map)\n\n op_names = get_all_op_names(graph)\n converter.report_missing_conversion(op_names)\n\n is_module = isinstance(script_module, torch.jit.ScriptModule)\n params = script_module.state_dict() if is_module else {}\n outputs = _get_relay_input_vars(\n graph, input_infos, prelude, default_dtype=default_dtype, is_module=is_module\n )\n\n if use_parser_friendly_name:\n new_names = [key.replace(\".\", \"_\") for key in params.keys()]\n params = dict(zip(new_names, params.values()))\n\n param_vars, tensors, packed_param_map = convert_params(graph, params, use_parser_friendly_name)\n\n tvm_params = {k: tvm.nd.array(v) for k, v in tensors.items()}\n\n outputs.update(param_vars)\n ret_name = _get_input_names(graph.return_node())\n\n # For quantized models\n quantized_ops = set([\"aten::quantize_per_tensor\", \"quantized::linear_dynamic\"])\n if len(quantized_ops.intersection(set(op_names))) > 0:\n weight_quant_params = qnn_torch.get_weight_quant_params(\n script_module, packed_param_map.values()\n )\n input_scales_for_bias = qnn_torch.add_input_quant_params_to_op_inputs(graph)\n qnn_torch.add_quant_params_to_outputs(\n outputs,\n packed_param_map,\n weight_quant_params,\n input_scales_for_bias,\n keep_quantized_weight,\n )\n qnn_torch.add_quant_params(tvm_params, weight_quant_params)\n converter.update_convert_map(qnn_torch.convert_map)\n\n ret = converter.convert_operators(_get_operator_nodes(graph.nodes()), outputs, ret_name)[0]\n if isinstance(ret, list):\n # ListConstruct kept original python list. 
Convert to tuple.\n ret = _expr.Tuple(ret)\n\n # Separate data inputs and parameters to make sure data inputs come first.\n func_args = []\n data_inputs = []\n for arg in _analysis.free_vars(ret):\n if arg.name_hint not in tvm_params.keys():\n data_inputs.append(arg)\n else:\n func_args.append(arg)\n func_args = data_inputs + func_args\n\n mod[\"main\"] = tvm.relay.Function(func_args, ret)\n\n return transform.RemoveUnusedFunctions()(mod), tvm_params\n"
] | [
[
"numpy.int8",
"numpy.ones",
"torch.get_default_dtype",
"numpy.float32",
"torch._C._jit_pass_onnx_function_substitution",
"torch.zeros",
"numpy.int64",
"numpy.float16",
"numpy.int32",
"numpy.int16",
"numpy.array",
"numpy.finfo",
"numpy.isscalar",
"torch._C._jit_pass_inline",
"numpy.float64",
"numpy.uint8"
]
] |
alexandresalvatierra/python-data-preprocessing | [
"48c3f465db0b5b5785d217d0f8c9524bb3980c48"
] | [
"missing-data-one-hot-encoding.py"
] | [
"import pandas as pd\npd.set_option( 'display.max_columns', 24 )\n\nfile_path = 'C:/Users/profAlexandre/Desktop/inteligencia artificial/data-preprocessing/dataset/'\nfile_name = 'traffic-collision-data-from-2010-to-present.csv'\n\ndf = pd.read_csv( file_path + file_name )\n\n# show head from dataset\n#print( df.head() )\n\n# show total rows and columns from dataset\n#print( df.shape )\n\n# encoding 'Area Name'\nencoding = pd.get_dummies( df['Area Name'] )\n\n#print( encoding.head() )\n\n# concat encoding to dataset\nconcat = pd.concat( [df, encoding], axis = 1 )\n\n# remove column Area Name\nconcat.drop( 'Area Name', axis = 1 )\n\nprint( concat.head() )"
] | [
[
"pandas.read_csv",
"pandas.concat",
"pandas.get_dummies",
"pandas.set_option"
]
] |
giprayogo/binyard | [
"c1cfa880cb9907416da2363fa0e4ca2de920543e"
] | [
"binyard/plot_column.py"
] | [
"#!/usr/bin/env python\n# for all to often task: text data file, plot single column\n\nimport matplotlib\nmatplotlib.rc('axes.formatter', useoffset=False)\nimport matplotlib.pyplot as plt\nimport argparse\nfrom numpy import arange\nfrom numpy import loadtxt\nfrom numpy import transpose\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-f', '--file', required=True, nargs='+')\nparser.add_argument('-d', '--domain-column')\nparser.add_argument('-c', '--columns', nargs='+', required=True)\nargs = parser.parse_args()\n\ndatafiles = args.file\n\nfig, ax = plt.subplots()\nfor datafile in datafiles:\n columns = map(int, args.columns)\n data = transpose(loadtxt(datafile, comments='#', usecols=columns))\n\n try:\n domain_column = int(args.domain_column)\n domain = loadtxt(datafile, comments='#', usecols=domain_column)\n except TypeError:\n domain = arange(0, data.shape[-1])\n\n if len(data.shape) > 1:\n for _ in data:\n ax.plot(domain, _)\n else:\n ax.plot(domain, data)\n\nfig.tight_layout()\nplt.show()\n"
] | [
[
"matplotlib.pyplot.subplots",
"matplotlib.rc",
"numpy.arange",
"matplotlib.pyplot.show",
"numpy.loadtxt"
]
] |
c-hydro/hmc | [
"66470234e126e4a727e1faf2fe64bd58547220ed"
] | [
"tools/processing_tool_ensemble_maker/lib_utils_time.py"
] | [
"\"\"\"\nLibrary Features:\n\nName: lib_utils_time\nAuthor(s): Fabio Delogu ([email protected])\nDate: '20211208'\nVersion: '1.0.0'\n\"\"\"\n\n#######################################################################################\n# Libraries\nimport logging\nimport pandas as pd\nfrom datetime import date\n\nfrom tools.processing_tool_datasets_merger.lib_info_args import logger_name\n\n# Logging\nlog_stream = logging.getLogger(logger_name)\n#######################################################################################\n\n# -------------------------------------------------------------------------------------\n# Method to set time run\ndef set_time(time_run_args=None, time_run_file=None, time_format='%Y-%m-%d %H:$M',\n time_period=1, time_frequency='H', time_rounding='H', time_reverse=True):\n\n logging.info(' ---> Set time run ... ')\n if time_run_args is not None:\n time_run = time_run_args\n logging.info(' ----> Time ' + time_run + ' set by argument')\n elif (time_run_args is None) and (time_run_file is not None):\n time_run = time_run_file\n logging.info(' ----> Time ' + time_run + ' set by user')\n elif (time_run_args is None) and (time_run_file is None):\n time_now = date.today()\n time_run = time_now.strftime(time_format)\n logging.info(' ----> Time ' + time_run + ' set by system')\n else:\n logging.info(' ---> Set time run ... FAILED')\n logging.error(' ===> Time is not correctly set')\n raise IOError('Time type or format is wrong')\n\n logging.info(' ---> Set time run ... DONE')\n\n time_tmp = pd.Timestamp(time_run)\n time_run = time_tmp.floor(time_rounding)\n\n time_now = time_tmp.floor('H')\n\n if time_period > 0:\n time_range = pd.date_range(end=time_run, periods=time_period, freq=time_frequency)\n else:\n logging.warning(' ===> TimePeriod must be greater then 0. TimePeriod is set automatically to 1')\n time_range = pd.DatetimeIndex([time_now], freq=time_frequency)\n\n if time_reverse:\n time_range = time_range[::-1]\n\n return time_now, time_run, time_range\n\n# -------------------------------------------------------------------------------------\n\n"
] | [
[
"pandas.DatetimeIndex",
"pandas.Timestamp",
"pandas.date_range"
]
] |
JurriaanBerger/AdventOfCode2021 | [
"666d9b36d87bfaec3da8b700832bf0a5791b840a"
] | [
"day11/flash.py"
] | [
"import numpy as np\n\n\ndef read_octo():\n octo_map = []\n\n file = open('octo.txt', \"r\")\n f1 = file.read()\n file.close()\n\n f2 = f1.split(\"\\n\")\n\n for h in f2:\n list_h = []\n list_h[:0] = h\n octo_map.append([int(l) for l in list_h])\n\n return np.array(octo_map)\n\n\ndef energy_increase(cur_energy_map):\n new_energy_map = cur_energy_map + np.ones([len(cur_energy_map), len(cur_energy_map)])\n return new_energy_map\n\n\ndef find_flash(cur_energy_map,flashed_map):\n i = 0\n\n while i < len(cur_energy_map[0]):\n flashed = False\n for j in range(len(cur_energy_map)):\n if flashed_map[i, j] == 0 and cur_energy_map[i, j] > 9:\n flashed = True\n flashed_map[i, j] = 1\n for p in range(max(i - 1, 0), min(i + 2,len(cur_energy_map))):\n for q in range(max(j - 1, 0), min(j + 2, len(cur_energy_map))):\n if flashed_map[p, q] == 0: cur_energy_map[p, q] += 1\n break\n\n if flashed: i = 0\n else: i += 1\n\n return cur_energy_map, flashed_map\n\n\ndef reset_flashed(cur_energy_map, flashed_map):\n to_subtract = cur_energy_map*flashed_map\n new_energy_map = cur_energy_map-to_subtract\n return new_energy_map\n\n\n# Part 1\nenergy_map = read_octo()\n\ntotal_sum = 0\nfor e in range(0, 100):\n energy_map_increase = energy_increase(energy_map)\n did_flash_map = np.zeros([len(energy_map), len(energy_map)])\n energy_map, did_flash_map = find_flash(energy_map_increase, did_flash_map)\n energy_map = reset_flashed(energy_map, did_flash_map)\n total_sum += np.sum(did_flash_map)\n\n\nprint('ANSWER 1:', total_sum)\n\n\n# Part 2\nenergy_map = read_octo()\ndid_flash_map = np.zeros([len(energy_map), len(energy_map)])\n\nstep_counter = 0\nwhile np.sum(did_flash_map) < 100:\n energy_map_increase = energy_increase(energy_map)\n did_flash_map = np.zeros([len(energy_map), len(energy_map)])\n energy_map, did_flash_map = find_flash(energy_map_increase, did_flash_map)\n energy_map = reset_flashed(energy_map, did_flash_map)\n step_counter += 1\n\nprint('ANSWER 2:', step_counter)\n"
] | [
[
"numpy.array",
"numpy.sum"
]
] |
7D24H/testGPU | [
"d9341e1a60d51c895c7e5090990bc13920ba7257"
] | [
"ops.py"
] | [
"\nfrom glob import glob\nimport os\nimport tensorflow as tf\nimport numpy as np\nimport cv2\n\n\ndef block_patch(input, patch_size=15, margin=5):\n\tshape = input.get_shape().as_list()\n\n\t#create patch in random size\n\tpad_size = tf.random_uniform([2], minval=int(patch_size*0.1), maxval=patch_size, dtype=tf.int32)\n\tpatch = tf.zeros([pad_size[0], pad_size[1], shape[-1]], dtype=tf.float32)\n\n\th_ = tf.random_uniform([1], minval=margin, maxval=shape[0]-pad_size[0]-margin, dtype=tf.int32)[0]\n\tw_ = tf.random_uniform([1], minval=margin, maxval=shape[1]-pad_size[1]-margin, dtype=tf.int32)[0]\n\n\tpadding = [[h_, shape[0]-h_-pad_size[0]], [w_, shape[1]-w_-pad_size[1]], [0, 0]]\n\tpadded = tf.pad(patch, padding, \"CONSTANT\", constant_values=1)\n\n\tcoord = h_, w_\n\n\tres = tf.multiply(input, padded)\n\n\treturn res, padded, coord, pad_size\n\n#function to get training data\ndef load_train_data(args):\n\tpaths = os.path.join(args.data, \"img_align_celeba/*.jpg\")\n\tdata_count = len(glob(paths))\n\n\tfilename_queue = tf.train.string_input_producer(tf.train.match_filenames_once(paths))\n\n\timage_reader = tf.WholeFileReader()\n\t_, image_file = image_reader.read(filename_queue)\n\timages = tf.image.decode_jpeg(image_file, channels=3)\n\n\n\t#input image range from -1 to 1\n\t#center crop 32x32 since raw images are not center cropped.\n\timages = tf.image.central_crop(images, 0.75)\n\timages = tf.image.resize_images(images ,[args.input_height, args.input_width])\n\timages = tf.image.convert_image_dtype(images, dtype=tf.float32) / 127.5 - 1\n\n\torig_images = images\n\timages, mask, coord, pad_size = block_patch(images, patch_size=args.patch_size, margin=args.margin)\n\tmask = tf.reshape(mask, [args.input_height, args.input_height, 3])\n\n\t#flip mask values\n\tmask = -(mask - 1)\n\timages += mask\n\n\torig_imgs, perturbed_imgs, mask, coord, pad_size = tf.train.shuffle_batch([orig_images, images, mask, coord, pad_size],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t batch_size=args.batch_size,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t capacity=args.batch_size*2,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t min_after_dequeue=args.batch_size\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t )\n\n\n\treturn orig_imgs, perturbed_imgs, mask, coord, pad_size, data_count\n\ndef load_test_data(args):\n\tpaths = glob(\"./data/test/*.jpg\")\n\tdata_count = len(paths)\n\n\tfilename_queue = tf.train.string_input_producer(tf.train.match_filenames_once(paths))\n\n\timage_reader = tf.WholeFileReader()\n\t_, image_file = image_reader.read(filename_queue)\n\timages = tf.image.decode_jpeg(image_file, channels=3)\n\n\n\t#input image range from -1 to 1\n\t# uncomment to center crop\n\t# images = tf.image.central_crop(images, 0.5)\n\timages = tf.image.resize_images(images ,[args.input_height, args.input_width])\n\timages = tf.image.convert_image_dtype(images, dtype=tf.float32) / 127.5 - 1\n\n\torig_images = images\n\timages, mask, coord, pad_size = block_patch(images, margin=args.margin)\n\tmask = tf.reshape(mask, [args.input_height, args.input_height, 3])\n\n\t#flip mask values\n\tmask = -(mask - 1)\n\timages += mask\n\n\torig_imgs, mask, test_imgs = tf.train.batch([orig_images, mask, images],\n\t\t\t\t\t\t\t\t\t\t\t\tbatch_size=args.batch_size,\n\t\t\t\t\t\t\t\t\t\t\t\tcapacity=args.batch_size,\n\t\t\t\t\t\t\t\t\t\t\t )\n\n\n\treturn orig_imgs, test_imgs, mask, data_count\n\n\n#function to save images in tile\n#comment this function block if you don't have opencv\ndef img_tile(epoch, args, imgs, aspect_ratio=1.0, tile_shape=None, 
border=1, border_color=0):\n\tif imgs.ndim != 3 and imgs.ndim != 4:\n\t\traise ValueError('imgs has wrong number of dimensions.')\n\tn_imgs = imgs.shape[0]\n\n\ttile_shape = None\n\t# Grid shape\n\timg_shape = np.array(imgs.shape[1:3])\n\tif tile_shape is None:\n\t\timg_aspect_ratio = img_shape[1] / float(img_shape[0])\n\t\taspect_ratio *= img_aspect_ratio\n\t\ttile_height = int(np.ceil(np.sqrt(n_imgs * aspect_ratio)))\n\t\ttile_width = int(np.ceil(np.sqrt(n_imgs / aspect_ratio)))\n\t\tgrid_shape = np.array((tile_height, tile_width))\n\telse:\n\t\tassert len(tile_shape) == 2\n\t\tgrid_shape = np.array(tile_shape)\n\n\t# Tile image shape\n\ttile_img_shape = np.array(imgs.shape[1:])\n\ttile_img_shape[:2] = (img_shape[:2] + border) * grid_shape[:2] - border\n\n\t# Assemble tile image\n\ttile_img = np.empty(tile_img_shape)\n\ttile_img[:] = border_color\n\tfor i in range(grid_shape[0]):\n\t\tfor j in range(grid_shape[1]):\n\t\t\timg_idx = j + i*grid_shape[1]\n\t\t\tif img_idx >= n_imgs:\n\t\t\t\t# No more images - stop filling out the grid.\n\t\t\t\tbreak\n\t\t\timg = imgs[img_idx]\n\t\t\timg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n\t\t\tyoff = (img_shape[0] + border) * i\n\t\t\txoff = (img_shape[1] + border) * j\n\t\t\ttile_img[yoff:yoff+img_shape[0], xoff:xoff+img_shape[1], ...] = img\n\n\tcv2.imwrite(args.images_path+\"/img_\"+str(epoch) + \".jpg\", (tile_img + 1)*127.5)\n"
] | [
[
"tensorflow.pad",
"numpy.sqrt",
"tensorflow.zeros",
"tensorflow.image.central_crop",
"numpy.array",
"tensorflow.reshape",
"numpy.empty",
"tensorflow.multiply",
"tensorflow.image.resize_images",
"tensorflow.random_uniform",
"tensorflow.train.match_filenames_once",
"tensorflow.image.convert_image_dtype",
"tensorflow.image.decode_jpeg",
"tensorflow.train.shuffle_batch",
"tensorflow.train.batch",
"tensorflow.WholeFileReader"
]
] |
Lakoc/SFC_project | [
"00b98f0d9651138c30f567ae4775a624e511a392"
] | [
"gui/utils.py"
] | [
"import numpy as np\n\n\ndef remove_values_from_list(the_list, val):\n return [value for value in the_list if value != val]\n\n\ndef remove_values_from_list_to_float(the_list, val):\n return [float(value) for value in the_list if value != val]\n\n\ndef load_3d_arr_from_string(arr):\n arr = arr.replace('[', '').replace(']', '').split('\\n')\n count = arr.count('') + 1\n\n arr = remove_values_from_list(arr, '')\n group_size = len(arr) // count\n groups = [remove_values_from_list_to_float(val.split(' '), '') for group in range(count) for val in\n arr[group * group_size: (group + 1) * group_size]]\n groups = [groups[group * group_size: (group + 1) * group_size] for group in range(count)]\n return np.array(groups)\n\n\ndef normalize_config(config):\n config['variances'] = load_3d_arr_from_string(config['variances'])\n config['means'] = load_3d_arr_from_string(config['means'])[0, :]\n config['counts'] = load_3d_arr_from_string(config['counts'])[0, 0, :]\n config['layers'] = load_3d_arr_from_string(config['layers'])[0, 0, :]\n config['layer'] = int(config['layer'])\n config['batch_size'] = int(config['batch_size'])\n config['iterations'] = int(config['iterations'])\n config['epsilon'] = float(config['epsilon'])\n config['eta'] = float(config['eta'])\n config['beta1'] = float(config['beta1'])\n config['beta2'] = float(config['beta2'])\n config['a_func'] = config['a_func'][0].casefold()\n config['optimizer'] = config['optimizer'][0]\n return config\n\n\ndef validate_config(config):\n errors = []\n n_clusters = config['counts'].shape[0]\n if config['means'].shape[0] != n_clusters or config['variances'].shape[0] != n_clusters:\n errors.append(\n f\"Count of clusters differ in mean, count and variance field - {n_clusters}, {config['means'].shape[0]}, \"\n f\"{config['variances'].shape[0]}.\")\n cluster_dimensionality = config['means'].shape[1]\n if config['variances'].shape[1] != cluster_dimensionality or config['variances'].shape[2] != cluster_dimensionality:\n errors.append(\n f\"Clusters differ in mean, and variance field - {cluster_dimensionality}, {config['variances'].shape[1:]}.\")\n if len(config['layers']) < 3:\n errors.append(\n f\"Ensure to have at least 3 layers.\")\n if config['layer'] >= len(config['layers']):\n errors.append(\n f\"Layer index out of range.\")\n elif config['layers'][config['layer']] != 2:\n errors.append(\n f\"Selected layer does not have specified dimensionality (2).\")\n if config['layers'][0] != config['layers'][-1]:\n errors.append(\n f\"Input and output layer dimensionality differs.\")\n for index, layer in enumerate(config['layers']):\n if layer < 1:\n errors.append(\n f\"Layer {index} has invalid dimensionality - {layer}.\")\n for key in ['layer', 'batch_size', 'iterations', 'epsilon', 'beta1', 'beta2', 'eta']:\n if config[key] < 0:\n errors.append(\n f\"Invalid option for {key} - {config[key]}.\")\n return errors\n"
] | [
[
"numpy.array"
]
] |
ASMDS/PATREC | [
"091df6ec20e0736340a2b2ff9a25ac81bec48259"
] | [
"utils/Dataset.py"
] | [
"\nimport pandas as pd\nimport numpy as np\nfrom utils.DatasetFilter import DatasetFilter\nfrom utils.DatasetSplitter import DatasetSplitter\n\n\nclass Dataset:\n \n def __init__(self, dataset_options):\n self.options = dataset_options;\n self.df = None;\n self.columns_df = None;\n self.data = None;\n self.columns_data = None;\n return;\n\n\n # maybe stop creating separate files for filtered datasets and just create the df on the fly\n def _filterData(self):\n diseases = self.options.getDiseaseNames();\n filter = DatasetFilter(self.options)\n options_filtering = self.options.getOptionsFiltering();\n if options_filtering in diseases:\n self.df = filter.filterDataDisease()\n elif options_filtering.split(\"_\")[0] in self.options.getCategoricalFeatures() and not self.options.getEncodingScheme() == 'categorical':\n self.df = filter.filterCategoricalColumn(options_filtering)\n else:\n self.df = filter.filterDataBinaryColumns(options_filtering)\n\n\n def _getDf(self):\n if self.options.getOptionsFiltering() is not None:\n self._filterData();\n else:\n filename = self.options.getFilename()\n df = pd.read_csv(filename);\n self.df = df;\n \n\n def _getColumnsDf(self):\n cols = list(self.df.columns);\n self.columns_df = cols;\n\n def _getColumnsData(self):\n if self.data is None:\n self._getData();\n cols = list(self.data.columns);\n self.columns_data = cols;\n\n\n def _removeNotNeededColumns(self):\n not_needed_columns = self.options.getColumnsToRemove();\n columns_data = list(self.data.columns);\n for col in not_needed_columns:\n if col in columns_data:\n try:\n self.data = self.data.drop(col, axis=1);\n except ValueError or KeyError:\n pass;\n\n\n def _normalizeNumericalColumns(self):\n if self.columns_data is None:\n self._getColumnsData();\n for feat in self.columns_data:\n max_value = self.data[feat].max()\n min_value = self.data[feat].min()\n if not max_value == min_value:\n self.data[feat] = (self.data[feat] - min_value) / (max_value - min_value)\n\n\n def _getData(self):\n if self.df is None:\n self._getDf();\n self.data = self.df.copy();\n self.data = self.data.fillna(0.0);\n self._removeNotNeededColumns();\n if self.options.getEncodingScheme() == 'categorical':\n self._normalizeNumericalColumns();\n\n\n def _splitData(self):\n if self.data is None:\n self.getData();\n\n early_readmission_flagname = self.options.getEarlyReadmissionFlagname();\n df_pos = self.data.loc[self.data[early_readmission_flagname] == 1]\n df_neg = self.data.loc[self.data[early_readmission_flagname] == 0]\n df_pos = df_pos.sample(frac=1);\n df_neg = df_neg.sample(frac=1);\n return [df_pos, df_neg];\n\n\n def _getBalancedSubset(self):\n [df_pos, df_neg] = self._splitData();\n num_pos_samples = df_pos.shape[0];\n num_neg_samples = df_neg.shape[0];\n min_num_samples = int(np.min([num_pos_samples, num_neg_samples]));\n df_pos_balanced = df_pos[:min_num_samples];\n df_neg_balanced = df_neg[:min_num_samples];\n return [df_pos_balanced, df_neg_balanced];\n\n\n def _getTrainingTesting(self):\n ratio_training_samples = self.options.getRatioTrainingSamples();\n\n [df_pos, df_neg] = self._splitData();\n num_pos_samples = df_pos.shape[0];\n num_pos_samples_training = int(round(ratio_training_samples * num_pos_samples));\n num_pos_samples_testing = num_pos_samples - num_pos_samples_training;\n\n df_pos_training = df_pos.iloc[:num_pos_samples_training, :];\n df_pos_testing = df_pos.iloc[num_pos_samples_training:, :];\n print('df_pos_training: ' + str(df_pos_training.shape))\n print('df_pos_testing: ' + str(df_pos_testing.shape))\n 
df_neg_testing = df_neg.iloc[:num_pos_samples_testing, :];\n df_neg_training = df_neg.iloc[num_pos_samples_testing:, :];\n print('df_neg_training: ' + str(df_neg_training.shape))\n print('df_neg_testing: ' + str(df_neg_testing.shape))\n training = [df_pos_training, df_neg_training];\n testing = [df_pos_testing, df_neg_testing];\n return [training, testing]\n\n\n def getColumnsDf(self):\n if self.df is None:\n self._getDf();\n if self.columns_df is None:\n self._getColumnsDf();\n return self.columns_df;\n\n\n def getColumnsData(self):\n if self.data is None:\n self._getData();\n if self.columns_data is None:\n self._getColumnsData();\n return self.columns_data;\n\n\n def getDf(self):\n if self.df is None:\n self._getDf();\n return self.df;\n\n\n def getData(self):\n if self.data is None:\n self._getData();\n return self.data;\n\n\n def getFilename(self, filteroptions=False):\n return self.options.getFilename(filteroptions);\n\n\n def getFilenameOptions(self, filteroptions=False):\n return self.options.getFilenameOptions(filteroptions);\n\n\n def getBalancedSubsetTrainingAndTesting(self):\n [df_pos, df_neg] = self._getBalancedSubset();\n ratio_training_samples = self.options.getRatioTrainingSamples();\n num_pos_samples = df_pos.shape[0];\n num_pos_samples_training = int(round(ratio_training_samples * num_pos_samples));\n num_pos_samples_testing = num_pos_samples - num_pos_samples_training;\n\n df_pos_training = df_pos.iloc[:num_pos_samples_training, :];\n df_neg_training = df_neg.iloc[:num_pos_samples_training, :];\n df_pos_testing = df_pos.iloc[-num_pos_samples_testing:, :];\n df_neg_testing = df_neg.iloc[-num_pos_samples_testing:, :];\n\n df_balanced_training = df_pos_training.append(df_neg_training);\n df_balanced_training = df_balanced_training.sample(frac=1);\n df_balanced_testing = df_pos_testing.append(df_neg_testing);\n df_balanced_testing = df_balanced_testing.sample(frac=1);\n\n return [df_balanced_training, df_balanced_testing];\n\n\n def getTrainingAndTestingSet(self):\n [training, testing] = self._getTrainingTesting();\n return [training, testing]\n\n\n def getBalancedSubSet(self):\n [df_pos, df_neg] = self._getBalancedSubset();\n df_balanced = df_pos.append(df_neg);\n df_balanced = df_balanced.sample(frac=1);\n return df_balanced;\n\n\n def splitDatasetIntoTrainingTestingSet(self):\n datasplitter = DatasetSplitter(self.options)\n datasplitter.splitDatasetIntoTrainingTesting();\n\n\n def getNumSamplesBalancedSubset(self):\n [df_pos, df_neg] = self._getBalancedSubset();\n df_balanced = df_pos.append(df_neg);\n num_samples = df_balanced.shape[0];\n return num_samples;\n\n def getNumSamples(self):\n if self.df is None:\n self._getDf();\n num_samples = self.df.shape[0];\n return num_samples;\n\n"
] | [
[
"pandas.read_csv",
"numpy.min"
]
] |
shiningsunnyday/ssd_keras | [
"3a2ea0ceaa676f59b1787ad5f5e8337520c7c056"
] | [
"data_generator/object_detection_2d_data_generator_custom.py"
] | [
"'''\nA data generator for 2D object detection.\n\nCopyright (C) 2018 Pierluigi Ferrari\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n'''\n\nfrom __future__ import division\nimport numpy as np\nimport inspect\nfrom collections import defaultdict\nimport warnings\nimport sklearn.utils\nfrom copy import deepcopy\nfrom PIL import Image\nimport cv2\nimport csv\nimport os\nimport sys\nfrom tqdm import tqdm, trange\ntry:\n import h5py\nexcept ImportError:\n warnings.warn(\"'h5py' module is missing. The fast HDF5 dataset option will be unavailable.\")\ntry:\n import json\nexcept ImportError:\n warnings.warn(\"'json' module is missing. The JSON-parser will be unavailable.\")\ntry:\n from bs4 import BeautifulSoup\nexcept ImportError:\n warnings.warn(\"'BeautifulSoup' module is missing. The XML-parser will be unavailable.\")\ntry:\n import pickle\nexcept ImportError:\n warnings.warn(\"'pickle' module is missing. You won't be able to save parsed file lists and annotations as pickled files.\")\n\nfrom ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder\nfrom data_generator.object_detection_2d_image_boxes_validation_utils import BoxFilter\n\nclass DegenerateBatchError(Exception):\n '''\n An exception class to be raised if a generated batch ends up being degenerate,\n e.g. if a generated batch is empty.\n '''\n pass\n\nclass DatasetError(Exception):\n '''\n An exception class to be raised if a anything is wrong with the dataset,\n in particular if you try to generate batches when no dataset was loaded.\n '''\n pass\n\nimport pdb\n\nclass DataGenerator:\n '''\n A generator to generate batches of samples and corresponding labels indefinitely.\n\n Can shuffle the dataset consistently after each complete pass.\n\n Currently provides three methods to parse annotation data: A general-purpose CSV parser,\n an XML parser for the Pascal VOC datasets, and a JSON parser for the MS COCO datasets.\n If the annotations of your dataset are in a format that is not supported by these parsers,\n you could just add another parser method and still use this generator.\n\n Can perform image transformations for data conversion and data augmentation,\n for details please refer to the documentation of the `generate()` method.\n '''\n\n def __init__(self,\n load_images_into_memory=False,\n hdf5_dataset_path=None,\n filenames=None,\n filenames_type='text',\n images_dir=None,\n labels=None,\n image_ids=None,\n eval_neutral=None,\n labels_output_format=('class_id', 'xmin', 'ymin', 'xmax', 'ymax'),\n verbose=True):\n self.class_counts = None\n '''\n Initializes the data generator. You can either load a dataset directly here in the constructor,\n e.g. 
an HDF5 dataset, or you can use one of the parser methods to read in a dataset.\n\n Arguments:\n load_images_into_memory (bool, optional): If `True`, the entire dataset will be loaded into memory.\n This enables noticeably faster data generation than loading batches of images into memory ad hoc.\n Be sure that you have enough memory before you activate this option.\n hdf5_dataset_path (str, optional): The full file path of an HDF5 file that contains a dataset in the\n format that the `create_hdf5_dataset()` method produces. If you load such an HDF5 dataset, you\n don't need to use any of the parser methods anymore, the HDF5 dataset already contains all relevant\n data.\n filenames (string or list, optional): `None` or either a Python list/tuple or a string representing\n a filepath. If a list/tuple is passed, it must contain the file names (full paths) of the\n images to be used. Note that the list/tuple must contain the paths to the images,\n not the images themselves. If a filepath string is passed, it must point either to\n (1) a pickled file containing a list/tuple as described above. In this case the `filenames_type`\n argument must be set to `pickle`.\n Or\n (2) a text file. Each line of the text file contains the file name (basename of the file only,\n not the full directory path) to one image and nothing else. In this case the `filenames_type`\n argument must be set to `text` and you must pass the path to the directory that contains the\n images in `images_dir`.\n filenames_type (string, optional): In case a string is passed for `filenames`, this indicates what\n type of file `filenames` is. It can be either 'pickle' for a pickled file or 'text' for a\n plain text file.\n images_dir (string, optional): In case a text file is passed for `filenames`, the full paths to\n the images will be composed from `images_dir` and the names in the text file, i.e. this\n should be the directory that contains the images to which the text file refers.\n If `filenames_type` is not 'text', then this argument is irrelevant.\n labels (string or list, optional): `None` or either a Python list/tuple or a string representing\n the path to a pickled file containing a list/tuple. The list/tuple must contain Numpy arrays\n that represent the labels of the dataset.\n image_ids (string or list, optional): `None` or either a Python list/tuple or a string representing\n the path to a pickled file containing a list/tuple. The list/tuple must contain the image\n IDs of the images in the dataset.\n eval_neutral (string or list, optional): `None` or either a Python list/tuple or a string representing\n the path to a pickled file containing a list/tuple. The list/tuple must contain for each image\n a list that indicates for each ground truth object in the image whether that object is supposed\n to be treated as neutral during an evaluation.\n labels_output_format (list, optional): A list of five strings representing the desired order of the five\n items class ID, xmin, ymin, xmax, ymax in the generated ground truth data (if any). 
The expected\n strings are 'xmin', 'ymin', 'xmax', 'ymax', 'class_id'.\n verbose (bool, optional): If `True`, prints out the progress for some constructor operations that may\n take a bit longer.\n '''\n self.labels_output_format = labels_output_format\n self.labels_format={'class_id': labels_output_format.index('class_id'),\n 'xmin': labels_output_format.index('xmin'),\n 'ymin': labels_output_format.index('ymin'),\n 'xmax': labels_output_format.index('xmax'),\n 'ymax': labels_output_format.index('ymax')} # This dictionary is for internal use.\n\n self.dataset_size = 0 # As long as we haven't loaded anything yet, the dataset size is zero.\n self.load_images_into_memory = load_images_into_memory\n self.images = None # The only way that this list will not stay `None` is if `load_images_into_memory == True`.\n\n # `self.filenames` is a list containing all file names of the image samples (full paths).\n # Note that it does not contain the actual image files themselves. This list is one of the outputs of the parser methods.\n # In case you are loading an HDF5 dataset, this list will be `None`.\n if not filenames is None:\n if isinstance(filenames, (list, tuple)):\n self.filenames = filenames\n elif isinstance(filenames, str):\n with open(filenames, 'rb') as f:\n if filenames_type == 'pickle':\n self.filenames = pickle.load(f)\n elif filenames_type == 'text':\n self.filenames = [os.path.join(images_dir, line.strip()) for line in f]\n else:\n raise ValueError(\"`filenames_type` can be either 'text' or 'pickle'.\")\n else:\n raise ValueError(\"`filenames` must be either a Python list/tuple or a string representing a filepath (to a pickled or text file). The value you passed is neither of the two.\")\n self.dataset_size = len(self.filenames)\n self.dataset_indices = np.arange(self.dataset_size, dtype=np.int32)\n if load_images_into_memory:\n self.images = []\n if verbose: it = tqdm(self.filenames, desc='Loading images into memory', file=sys.stdout)\n else: it = self.filenames\n for filename in it:\n with Image.open(filename) as image:\n self.images.append(np.array(image, dtype=np.uint8))\n else:\n self.filenames = None\n\n # In case ground truth is available, `self.labels` is a list containing for each image a list (or NumPy array)\n # of ground truth bounding boxes for that image.\n if not labels is None:\n if isinstance(labels, str):\n with open(labels, 'rb') as f:\n self.labels = pickle.load(f)\n elif isinstance(labels, (list, tuple)):\n self.labels = labels\n else:\n raise ValueError(\"`labels` must be either a Python list/tuple or a string representing the path to a pickled file containing a list/tuple. The value you passed is neither of the two.\")\n else:\n self.labels = None\n\n if not image_ids is None:\n if isinstance(image_ids, str):\n with open(image_ids, 'rb') as f:\n self.image_ids = pickle.load(f)\n elif isinstance(image_ids, (list, tuple)):\n self.image_ids = image_ids\n else:\n raise ValueError(\"`image_ids` must be either a Python list/tuple or a string representing the path to a pickled file containing a list/tuple. The value you passed is neither of the two.\")\n else:\n self.image_ids = None\n\n if not eval_neutral is None:\n if isinstance(eval_neutral, str):\n with open(eval_neutral, 'rb') as f:\n self.eval_neutral = pickle.load(f)\n elif isinstance(eval_neutral, (list, tuple)):\n self.eval_neutral = eval_neutral\n else:\n raise ValueError(\"`image_ids` must be either a Python list/tuple or a string representing the path to a pickled file containing a list/tuple. 
The value you passed is neither of the two.\")\n else:\n self.eval_neutral = None\n\n if not hdf5_dataset_path is None:\n self.hdf5_dataset_path = hdf5_dataset_path\n self.load_hdf5_dataset(verbose=verbose)\n else:\n self.hdf5_dataset = None\n\n\n def load_hdf5_dataset(self, verbose=True):\n '''\n Loads an HDF5 dataset that is in the format that the `create_hdf5_dataset()` method\n produces.\n\n Arguments:\n verbose (bool, optional): If `True`, prints out the progress while loading\n the dataset.\n\n Returns:\n None.\n '''\n\n self.hdf5_dataset = h5py.File(self.hdf5_dataset_path, 'r')\n self.dataset_size = len(self.hdf5_dataset['images'])\n self.dataset_indices = np.arange(self.dataset_size, dtype=np.int32) # Instead of shuffling the HDF5 dataset or images in memory, we will shuffle this index list.\n\n if self.load_images_into_memory:\n self.images = []\n if verbose: tr = trange(self.dataset_size, desc='Loading images into memory', file=sys.stdout)\n else: tr = range(self.dataset_size)\n for i in tr:\n self.images.append(self.hdf5_dataset['images'][i].reshape(self.hdf5_dataset['image_shapes'][i]))\n\n if self.hdf5_dataset.attrs['has_labels']:\n self.labels = []\n labels = self.hdf5_dataset['labels']\n label_shapes = self.hdf5_dataset['label_shapes']\n if verbose: tr = trange(self.dataset_size, desc='Loading labels', file=sys.stdout)\n else: tr = range(self.dataset_size)\n for i in tr:\n self.labels.append(labels[i].reshape(label_shapes[i]))\n\n if self.hdf5_dataset.attrs['has_image_ids']:\n self.image_ids = []\n image_ids = self.hdf5_dataset['image_ids']\n if verbose: tr = trange(self.dataset_size, desc='Loading image IDs', file=sys.stdout)\n else: tr = range(self.dataset_size)\n for i in tr:\n self.image_ids.append(image_ids[i])\n\n if self.hdf5_dataset.attrs['has_eval_neutral']:\n self.eval_neutral = []\n eval_neutral = self.hdf5_dataset['eval_neutral']\n if verbose: tr = trange(self.dataset_size, desc='Loading evaluation-neutrality annotations', file=sys.stdout)\n else: tr = range(self.dataset_size)\n for i in tr:\n self.eval_neutral.append(eval_neutral[i])\n\n def parse_csv(self,\n images_dir,\n labels_filename,\n input_format,\n include_classes='all',\n random_sample=False,\n ret=False,\n verbose=True):\n '''\n Arguments:\n images_dir (str): The path to the directory that contains the images.\n labels_filename (str): The filepath to a CSV file that contains one ground truth bounding box per line\n and each line contains the following six items: image file name, class ID, xmin, xmax, ymin, ymax.\n The six items do not have to be in a specific order, but they must be the first six columns of\n each line. The order of these items in the CSV file must be specified in `input_format`.\n The class ID is an integer greater than zero. Class ID 0 is reserved for the background class.\n `xmin` and `xmax` are the left-most and right-most absolute horizontal coordinates of the box,\n `ymin` and `ymax` are the top-most and bottom-most absolute vertical coordinates of the box.\n The image name is expected to be just the name of the image file without the directory path\n at which the image is located.\n input_format (list): A list of six strings representing the order of the six items\n image file name, class ID, xmin, xmax, ymin, ymax in the input CSV file. The expected strings\n are 'image_name', 'xmin', 'xmax', 'ymin', 'ymax', 'class_id'.\n include_classes (list, optional): Either 'all' or a list of integers containing the class IDs that\n are to be included in the dataset. 
If 'all', all ground truth boxes will be included in the dataset.\n random_sample (float, optional): Either `False` or a float in `[0,1]`. If this is `False`, the\n full dataset will be used by the generator. If this is a float in `[0,1]`, a randomly sampled\n fraction of the dataset will be used, where `random_sample` is the fraction of the dataset\n to be used. For example, if `random_sample = 0.2`, 20 precent of the dataset will be randomly selected,\n the rest will be ommitted. The fraction refers to the number of images, not to the number\n of boxes, i.e. each image that will be added to the dataset will always be added with all\n of its boxes.\n ret (bool, optional): Whether or not to return the outputs of the parser.\n verbose (bool, optional): If `True`, prints out the progress for operations that may take a bit longer.\n\n Returns:\n None by default, optionally lists for whichever are available of images, image filenames, labels, and image IDs.\n '''\n\n # Set class members.\n self.images_dir = images_dir\n self.labels_filename = labels_filename\n self.input_format = input_format\n self.include_classes = include_classes\n\n # Before we begin, make sure that we have a labels_filename and an input_format\n if self.labels_filename is None or self.input_format is None:\n raise ValueError(\"`labels_filename` and/or `input_format` have not been set yet. You need to pass them as arguments.\")\n\n # Erase data that might have been parsed before\n self.filenames = []\n self.image_ids = []\n self.labels = []\n\n # First, just read in the CSV file lines and sort them.\n\n data = []\n\n with open(self.labels_filename,mode='r') as csvfile:\n csvread = csv.reader(csvfile, delimiter=',')\n next(csvread) # Skip the header row.\n for row in csvread: # For every line (i.e for every bounding box) in the CSV file...\n \n if self.include_classes == 'all' or int(row[self.input_format.index('class_id')].strip()) in self.include_classes: # If the class_id is among the classes that are to be included in the dataset...\n box = [] # Store the box class and coordinates here\n box.append(row[self.input_format.index('image_name')].strip()) # Select the image name column in the input format and append its content to `box`\n for element in self.labels_output_format: # For each element in the output format (where the elements are the class ID and the four box coordinates)...\n box.append(int(row[self.input_format.index(element)].strip())) # ...select the respective column in the input format and append it to `box`.\n data.append(box)\n\n data = sorted(data) # The data needs to be sorted, otherwise the next step won't give the correct result\n\n # Now that we've made sure that the data is sorted by file names,\n # we can compile the actual samples and labels lists\n\n current_file = data[0][0] # The current image for which we're collecting the ground truth boxes\n current_image_id = data[0][0].split('.')[0] # The image ID will be the portion of the image name before the first dot.\n current_labels = [] # The list where we collect all ground truth boxes for a given image\n add_to_dataset = False\n for i, box in enumerate(data):\n\n if box[0] == current_file: # If this box (i.e. 
this line of the CSV file) belongs to the current image file\n current_labels.append(box[1:])\n if i == len(data)-1: # If this is the last line of the CSV file\n if random_sample: # In case we're not using the full dataset, but a random sample of it.\n p = np.random.uniform(0,1)\n if p >= (1-random_sample):\n self.labels.append(np.stack(current_labels, axis=0))\n self.filenames.append(os.path.join(self.images_dir, current_file))\n self.image_ids.append(current_image_id)\n else:\n self.labels.append(np.stack(current_labels, axis=0))\n self.filenames.append(os.path.join(self.images_dir, current_file))\n self.image_ids.append(current_image_id)\n else: # If this box belongs to a new image file\n if random_sample: # In case we're not using the full dataset, but a random sample of it.\n p = np.random.uniform(0,1)\n if p >= (1-random_sample):\n self.labels.append(np.stack(current_labels, axis=0))\n self.filenames.append(os.path.join(self.images_dir, current_file))\n self.image_ids.append(current_image_id)\n else:\n self.labels.append(np.stack(current_labels, axis=0))\n self.filenames.append(os.path.join(self.images_dir, current_file))\n self.image_ids.append(current_image_id)\n current_labels = [] # Reset the labels list because this is a new file.\n current_file = box[0]\n current_image_id = box[0].split('.')[0]\n current_labels.append(box[1:])\n if i == len(data)-1: # If this is the last line of the CSV file\n if random_sample: # In case we're not using the full dataset, but a random sample of it.\n p = np.random.uniform(0,1)\n if p >= (1-random_sample):\n self.labels.append(np.stack(current_labels, axis=0))\n self.filenames.append(os.path.join(self.images_dir, current_file))\n self.image_ids.append(current_image_id)\n else:\n self.labels.append(np.stack(current_labels, axis=0))\n self.filenames.append(os.path.join(self.images_dir, current_file))\n self.image_ids.append(current_image_id)\n\n self.dataset_size = len(self.filenames)\n self.dataset_indices = np.arange(self.dataset_size, dtype=np.int32)\n if self.load_images_into_memory:\n self.images = []\n if verbose: it = tqdm(self.filenames, desc='Loading images into memory', file=sys.stdout)\n else: it = self.filenames\n for filename in it:\n with Image.open(filename) as image:\n self.images.append(np.array(image, dtype=np.uint8))\n\n if ret: # In case we want to return these\n return self.images, self.filenames, self.labels, self.image_ids\n\n def parse_xml(self,\n images_dirs,\n image_set_filenames,\n annotations_dirs=[],\n classes=['background',\n 'aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat',\n 'chair', 'cow', 'diningtable', 'dog',\n 'horse', 'motorbike', 'person', 'pottedplant',\n 'sheep', 'sofa', 'train', 'tvmonitor'],\n include_classes = 'all',\n exclude_truncated=False,\n exclude_difficult=False,\n ret=False,\n verbose=True):\n self.class_counts = np.zeros(len(classes))\n '''\n This is an XML parser for the Pascal VOC datasets. It might be applicable to other datasets with minor changes to\n the code, but in its current form it expects the data format and XML tags of the Pascal VOC datasets.\n\n Arguments:\n images_dirs (list): A list of strings, where each string is the path of a directory that\n contains images that are to be part of the dataset. This allows you to aggregate multiple datasets\n into one (e.g. 
one directory that contains the images for Pascal VOC 2007, another that contains\n the images for Pascal VOC 2012, etc.).\n image_set_filenames (list): A list of strings, where each string is the path of the text file with the image\n set to be loaded. Must be one file per image directory given. These text files define what images in the\n respective image directories are to be part of the dataset and simply contains one image ID per line\n and nothing else.\n annotations_dirs (list, optional): A list of strings, where each string is the path of a directory that\n contains the annotations (XML files) that belong to the images in the respective image directories given.\n The directories must contain one XML file per image and the name of an XML file must be the image ID\n of the image it belongs to. The content of the XML files must be in the Pascal VOC format.\n classes (list, optional): A list containing the names of the object classes as found in the\n `name` XML tags. Must include the class `background` as the first list item. The order of this list\n defines the class IDs.\n include_classes (list, optional): Either 'all' or a list of integers containing the class IDs that\n are to be included in the dataset. If 'all', all ground truth boxes will be included in the dataset.\n exclude_truncated (bool, optional): If `True`, excludes boxes that are labeled as 'truncated'.\n exclude_difficult (bool, optional): If `True`, excludes boxes that are labeled as 'difficult'.\n ret (bool, optional): Whether or not to return the outputs of the parser.\n verbose (bool, optional): If `True`, prints out the progress for operations that may take a bit longer.\n\n Returns:\n None by default, optionally lists for whichever are available of images, image filenames, labels, image IDs,\n and a list indicating which boxes are annotated with the label \"difficult\".\n '''\n # Set class members.\n self.images_dirs = images_dirs\n self.annotations_dirs = annotations_dirs\n self.image_set_filenames = image_set_filenames\n self.classes = classes\n self.include_classes = include_classes\n\n # Erase data that might have been parsed before.\n self.filenames = []\n self.image_ids = []\n self.labels = []\n self.eval_neutral = []\n if not annotations_dirs:\n self.labels = None\n self.eval_neutral = None\n annotations_dirs = [None] * len(images_dirs)\n\n for images_dir, image_set_filename, annotations_dir in zip(images_dirs, image_set_filenames, annotations_dirs):\n # Read the image set file that so that we know all the IDs of all the images to be included in the dataset.\n with open(image_set_filename) as f:\n image_ids = [line.strip() for line in f] # Note: These are strings, not integers.\n self.image_ids += image_ids\n\n if verbose: it = tqdm(image_ids, desc=\"Processing image set '{}'\".format(os.path.basename(image_set_filename)), file=sys.stdout)\n else: it = image_ids\n\n # Loop over all images in this dataset.\n for image_id in it:\n\n filename = '{}'.format(image_id) + '.jpg'\n self.filenames.append(os.path.join(images_dir, filename))\n\n if not annotations_dir is None:\n # Parse the XML file for this image.\n with open(os.path.join(annotations_dir, image_id + '.xml')) as f:\n soup = BeautifulSoup(f, 'xml')\n\n folder = soup.folder.text # In case we want to return the folder in addition to the image file name. 
Relevant for determining which dataset an image belongs to.\n #filename = soup.filename.text\n\n boxes = [] # We'll store all boxes for this image here.\n eval_neutr = [] # We'll store whether a box is annotated as \"difficult\" here.\n objects = soup.find_all('object') # Get a list of all objects in this image.\n\n # Parse the data for each object.\n for obj in objects:\n class_name = obj.find('name', recursive=False).text\n try:\n class_id = self.classes.index(class_name)\n class_name = self.classes[class_id]\n except ValueError:\n class_id = -1\n for c in classes:\n if class_name.find(c) > -1:\n class_id = self.classes.index(c)\n class_name = self.classes[class_id]\n if class_id < 0:\n class_id = 0;\n class_name = '0samples'\n\n self.class_counts[class_id] += 1\n # Check whether this class is supposed to be included in the dataset.\n if (not self.include_classes == 'all') and (not class_id in self.include_classes): continue\n pose = obj.find('pose', recursive=False).text\n truncated = int(obj.find('truncated', recursive=False).text)\n if exclude_truncated and (truncated == 1): continue\n difficult = int(obj.find('difficult', recursive=False).text)\n if exclude_difficult and (difficult == 1): continue\n # Get the bounding box coordinates.\n bndbox = obj.find('bndbox', recursive=False)\n xmin = int(bndbox.xmin.text)\n ymin = int(bndbox.ymin.text)\n xmax = int(bndbox.xmax.text)\n ymax = int(bndbox.ymax.text)\n item_dict = {'folder': folder,\n 'image_name': filename,\n 'image_id': image_id,\n 'class_name': class_name,\n 'class_id': class_id,\n 'pose': pose,\n 'truncated': truncated,\n 'difficult': difficult,\n 'xmin': xmin,\n 'ymin': ymin,\n 'xmax': xmax,\n 'ymax': ymax}\n box = []\n for item in self.labels_output_format:\n box.append(item_dict[item])\n boxes.append(box)\n if difficult: eval_neutr.append(True)\n else: eval_neutr.append(False)\n\n self.labels.append(boxes)\n self.eval_neutral.append(eval_neutr)\n\n self.dataset_size = len(self.filenames)\n self.dataset_indices = np.arange(self.dataset_size, dtype=np.int32)\n if self.load_images_into_memory:\n self.images = []\n if verbose: it = tqdm(self.filenames, desc='Loading images into memory', file=sys.stdout)\n else: it = self.filenames\n for filename in it:\n with Image.open(filename) as image:\n self.images.append(np.array(image, dtype=np.uint8))\n\n if ret:\n return self.images, self.filenames, self.labels, self.image_ids, self.eval_neutral\n\n def parse_json(self,\n images_dirs,\n annotations_filenames,\n ground_truth_available=False,\n include_classes='all',\n ret=False,\n verbose=True):\n '''\n This is an JSON parser for the MS COCO datasets. It might be applicable to other datasets with minor changes to\n the code, but in its current form it expects the JSON format of the MS COCO datasets.\n\n Arguments:\n images_dirs (list, optional): A list of strings, where each string is the path of a directory that\n contains images that are to be part of the dataset. This allows you to aggregate multiple datasets\n into one (e.g. one directory that contains the images for MS COCO Train 2014, another one for MS COCO\n Val 2014, another one for MS COCO Train 2017 etc.).\n annotations_filenames (list): A list of strings, where each string is the path of the JSON file\n that contains the annotations for the images in the respective image directories given, i.e. one\n JSON file per image directory that contains the annotations for all images in that directory.\n The content of the JSON files must be in MS COCO object detection format. 
Note that these annotations\n files do not necessarily need to contain ground truth information. MS COCO also provides annotations\n files without ground truth information for the test datasets, called `image_info_[...].json`.\n ground_truth_available (bool, optional): Set `True` if the annotations files contain ground truth information.\n include_classes (list, optional): Either 'all' or a list of integers containing the class IDs that\n are to be included in the dataset. If 'all', all ground truth boxes will be included in the dataset.\n ret (bool, optional): Whether or not to return the outputs of the parser.\n verbose (bool, optional): If `True`, prints out the progress for operations that may take a bit longer.\n\n Returns:\n None by default, optionally lists for whichever are available of images, image filenames, labels and image IDs.\n '''\n self.images_dirs = images_dirs\n self.annotations_filenames = annotations_filenames\n self.include_classes = include_classes\n # Erase data that might have been parsed before.\n self.filenames = []\n self.image_ids = []\n self.labels = []\n if not ground_truth_available:\n self.labels = None\n\n # Build the dictionaries that map between class names and class IDs.\n with open(annotations_filenames[0], 'r') as f:\n annotations = json.load(f)\n # Unfortunately the 80 MS COCO class IDs are not all consecutive. They go\n # from 1 to 90 and some numbers are skipped. Since the IDs that we feed\n # into a neural network must be consecutive, we'll save both the original\n # (non-consecutive) IDs as well as transformed maps.\n # We'll save both the map between the original\n self.cats_to_names = {} # The map between class names (values) and their original IDs (keys)\n self.classes_to_names = [] # A list of the class names with their indices representing the transformed IDs\n self.classes_to_names.append('background') # Need to add the background class first so that the indexing is right.\n self.cats_to_classes = {} # A dictionary that maps between the original (keys) and the transformed IDs (values)\n self.classes_to_cats = {} # A dictionary that maps between the transformed (keys) and the original IDs (values)\n for i, cat in enumerate(annotations['categories']):\n self.cats_to_names[cat['id']] = cat['name']\n self.classes_to_names.append(cat['name'])\n self.cats_to_classes[cat['id']] = i + 1\n self.classes_to_cats[i + 1] = cat['id']\n\n # Iterate over all datasets.\n for images_dir, annotations_filename in zip(self.images_dirs, self.annotations_filenames):\n # Load the JSON file.\n with open(annotations_filename, 'r') as f:\n annotations = json.load(f)\n\n if ground_truth_available:\n # Create the annotations map, a dictionary whose keys are the image IDs\n # and whose values are the annotations for the respective image ID.\n image_ids_to_annotations = defaultdict(list)\n for annotation in annotations['annotations']:\n image_ids_to_annotations[annotation['image_id']].append(annotation)\n\n if verbose: it = tqdm(annotations['images'], desc=\"Processing '{}'\".format(os.path.basename(annotations_filename)), file=sys.stdout)\n else: it = annotations['images']\n\n # Loop over all images in this dataset.\n for img in it:\n\n self.filenames.append(os.path.join(images_dir, img['file_name']))\n self.image_ids.append(img['id'])\n\n if ground_truth_available:\n # Get all annotations for this image.\n annotations = image_ids_to_annotations[img['id']]\n boxes = []\n for annotation in annotations:\n cat_id = annotation['category_id']\n # Check if this class is 
supposed to be included in the dataset.\n if (not self.include_classes == 'all') and (not cat_id in self.include_classes): continue\n # Transform the original class ID to fit in the sequence of consecutive IDs.\n class_id = self.cats_to_classes[cat_id]\n xmin = annotation['bbox'][0]\n ymin = annotation['bbox'][1]\n width = annotation['bbox'][2]\n height = annotation['bbox'][3]\n # Compute `xmax` and `ymax`.\n xmax = xmin + width\n ymax = ymin + height\n item_dict = {'image_name': img['file_name'],\n 'image_id': img['id'],\n 'class_id': class_id,\n 'xmin': xmin,\n 'ymin': ymin,\n 'xmax': xmax,\n 'ymax': ymax}\n box = []\n for item in self.labels_output_format:\n box.append(item_dict[item])\n boxes.append(box)\n self.labels.append(boxes)\n\n self.dataset_size = len(self.filenames)\n self.dataset_indices = np.arange(self.dataset_size, dtype=np.int32)\n if self.load_images_into_memory:\n self.images = []\n if verbose: it = tqdm(self.filenames, desc='Loading images into memory', file=sys.stdout)\n else: it = self.filenames\n for filename in it:\n with Image.open(filename) as image:\n self.images.append(np.array(image, dtype=np.uint8))\n\n if ret:\n return self.images, self.filenames, self.labels, self.image_ids\n\n def create_hdf5_dataset(self,\n file_path='dataset.h5',\n resize=False,\n variable_image_size=True,\n verbose=True):\n '''\n Converts the currently loaded dataset into a HDF5 file. This HDF5 file contains all\n images as uncompressed arrays in a contiguous block of memory, which allows for them\n to be loaded faster. Such an uncompressed dataset, however, may take up considerably\n more space on your hard drive than the sum of the source images in a compressed format\n such as JPG or PNG.\n\n It is recommended that you always convert the dataset into an HDF5 dataset if you\n have enugh hard drive space since loading from an HDF5 dataset accelerates the data\n generation noticeably.\n\n Note that you must load a dataset (e.g. via one of the parser methods) before creating\n an HDF5 dataset from it.\n\n The created HDF5 dataset will remain open upon its creation so that it can be used right\n away.\n\n Arguments:\n file_path (str, optional): The full file path under which to store the HDF5 dataset.\n You can load this output file via the `DataGenerator` constructor in the future.\n resize (tuple, optional): `False` or a 2-tuple `(height, width)` that represents the\n target size for the images. All images in the dataset will be resized to this\n target size before they will be written to the HDF5 file. 
If `False`, no resizing\n will be performed.\n variable_image_size (bool, optional): The only purpose of this argument is that its\n value will be stored in the HDF5 dataset in order to be able to quickly find out\n whether the images in the dataset all have the same size or not.\n verbose (bool, optional): Whether or not prit out the progress of the dataset creation.\n\n Returns:\n None.\n '''\n\n self.hdf5_dataset_path = file_path\n\n dataset_size = len(self.filenames)\n\n # Create the HDF5 file.\n hdf5_dataset = h5py.File(file_path, 'w')\n\n # Create a few attributes that tell us what this dataset contains.\n # The dataset will obviously always contain images, but maybe it will\n # also contain labels, image IDs, etc.\n hdf5_dataset.attrs.create(name='has_labels', data=False, shape=None, dtype=np.bool_)\n hdf5_dataset.attrs.create(name='has_image_ids', data=False, shape=None, dtype=np.bool_)\n hdf5_dataset.attrs.create(name='has_eval_neutral', data=False, shape=None, dtype=np.bool_)\n # It's useful to be able to quickly check whether the images in a dataset all\n # have the same size or not, so add a boolean attribute for that.\n if variable_image_size and not resize:\n hdf5_dataset.attrs.create(name='variable_image_size', data=True, shape=None, dtype=np.bool_)\n else:\n hdf5_dataset.attrs.create(name='variable_image_size', data=False, shape=None, dtype=np.bool_)\n\n # Create the dataset in which the images will be stored as flattened arrays.\n # This allows us, among other things, to store images of variable size.\n hdf5_images = hdf5_dataset.create_dataset(name='images',\n shape=(dataset_size,),\n maxshape=(None),\n dtype=h5py.special_dtype(vlen=np.uint8))\n\n # Create the dataset that will hold the image heights, widths and channels that\n # we need in order to reconstruct the images from the flattened arrays later.\n hdf5_image_shapes = hdf5_dataset.create_dataset(name='image_shapes',\n shape=(dataset_size, 3),\n maxshape=(None, 3),\n dtype=np.int32)\n\n if not (self.labels is None):\n\n # Create the dataset in which the labels will be stored as flattened arrays.\n hdf5_labels = hdf5_dataset.create_dataset(name='labels',\n shape=(dataset_size,),\n maxshape=(None),\n dtype=h5py.special_dtype(vlen=np.int32))\n\n # Create the dataset that will hold the dimensions of the labels arrays for\n # each image so that we can restore the labels from the flattened arrays later.\n hdf5_label_shapes = hdf5_dataset.create_dataset(name='label_shapes',\n shape=(dataset_size, 2),\n maxshape=(None, 2),\n dtype=np.int32)\n\n hdf5_dataset.attrs.modify(name='has_labels', value=True)\n\n if not (self.image_ids is None):\n\n hdf5_image_ids = hdf5_dataset.create_dataset(name='image_ids',\n shape=(dataset_size,),\n maxshape=(None),\n dtype=h5py.special_dtype(vlen=str))\n\n hdf5_dataset.attrs.modify(name='has_image_ids', value=True)\n\n if not (self.eval_neutral is None):\n\n # Create the dataset in which the labels will be stored as flattened arrays.\n hdf5_eval_neutral = hdf5_dataset.create_dataset(name='eval_neutral',\n shape=(dataset_size,),\n maxshape=(None),\n dtype=h5py.special_dtype(vlen=np.bool_))\n\n hdf5_dataset.attrs.modify(name='has_eval_neutral', value=True)\n\n if verbose:\n tr = trange(dataset_size, desc='Creating HDF5 dataset', file=sys.stdout)\n else:\n tr = range(dataset_size)\n\n # Iterate over all images in the dataset.\n for i in tr:\n\n # Store the image.\n with Image.open(self.filenames[i]) as image:\n\n image = np.asarray(image, dtype=np.uint8)\n\n # Make sure all images end up having 
three channels.\n if image.ndim == 2:\n image = np.stack([image] * 3, axis=-1)\n elif image.ndim == 3:\n if image.shape[2] == 1:\n image = np.concatenate([image] * 3, axis=-1)\n elif image.shape[2] == 4:\n image = image[:,:,:3]\n\n if resize:\n image = cv2.resize(image, dsize=(resize[1], resize[0]))\n\n # Flatten the image array and write it to the images dataset.\n hdf5_images[i] = image.reshape(-1)\n # Write the image's shape to the image shapes dataset.\n hdf5_image_shapes[i] = image.shape\n\n # Store the ground truth if we have any.\n if not (self.labels is None):\n\n labels = np.asarray(self.labels[i])\n # Flatten the labels array and write it to the labels dataset.\n hdf5_labels[i] = labels.reshape(-1)\n # Write the labels' shape to the label shapes dataset.\n hdf5_label_shapes[i] = labels.shape\n\n # Store the image ID if we have one.\n if not (self.image_ids is None):\n\n hdf5_image_ids[i] = self.image_ids[i]\n\n # Store the evaluation-neutrality annotations if we have any.\n if not (self.eval_neutral is None):\n\n hdf5_eval_neutral[i] = self.eval_neutral[i]\n\n hdf5_dataset.close()\n self.hdf5_dataset = h5py.File(file_path, 'r')\n self.hdf5_dataset_path = file_path\n self.dataset_size = len(self.hdf5_dataset['images'])\n self.dataset_indices = np.arange(self.dataset_size, dtype=np.int32) # Instead of shuffling the HDF5 dataset, we will shuffle this index list.\n\n def generate(self,\n batch_size=32,\n shuffle=True,\n transformations=[],\n label_encoder=None,\n returns={'processed_images', 'encoded_labels'},\n keep_images_without_gt=False,\n degenerate_box_handling='remove'):\n '''\n Generates batches of samples and (optionally) corresponding labels indefinitely.\n\n Can shuffle the samples consistently after each complete pass.\n\n Optionally takes a list of arbitrary image transformations to apply to the\n samples ad hoc.\n\n Arguments:\n batch_size (int, optional): The size of the batches to be generated.\n shuffle (bool, optional): Whether or not to shuffle the dataset before each pass.\n This option should always be `True` during training, but it can be useful to turn shuffling off\n for debugging or if you're using the generator for prediction.\n transformations (list, optional): A list of transformations that will be applied to the images and labels\n in the given order. Each transformation is a callable that takes as input an image (as a Numpy array)\n and optionally labels (also as a Numpy array) and returns an image and optionally labels in the same\n format.\n label_encoder (callable, optional): Only relevant if labels are given. A callable that takes as input the\n labels of a batch (as a list of Numpy arrays) and returns some structure that represents those labels.\n The general use case for this is to convert labels from their input format to a format that a given object\n detection model needs as its training targets.\n returns (set, optional): A set of strings that determines what outputs the generator yields. The generator's output\n is always a tuple that contains the outputs specified in this set and only those. If an output is not available,\n it will be `None`. The output tuple can contain the following outputs according to the specified keyword strings:\n * 'processed_images': An array containing the processed images. Will always be in the outputs, so it doesn't\n matter whether or not you include this keyword in the set.\n * 'encoded_labels': The encoded labels tensor. 
Will always be in the outputs if a label encoder is given,\n so it doesn't matter whether or not you include this keyword in the set if you pass a label encoder.\n * 'matched_anchors': Only available if `labels_encoder` is an `SSDInputEncoder` object. The same as 'encoded_labels',\n but containing anchor box coordinates for all matched anchor boxes instead of ground truth coordinates.\n This can be useful to visualize what anchor boxes are being matched to each ground truth box. Only available\n in training mode.\n * 'processed_labels': The processed, but not yet encoded labels. This is a list that contains for each\n batch image a Numpy array with all ground truth boxes for that image. Only available if ground truth is available.\n * 'filenames': A list containing the file names (full paths) of the images in the batch.\n * 'image_ids': A list containing the integer IDs of the images in the batch. Only available if there\n are image IDs available.\n * 'evaluation-neutral': A nested list of lists of booleans. Each list contains `True` or `False` for every ground truth\n bounding box of the respective image depending on whether that bounding box is supposed to be evaluation-neutral (`True`)\n or not (`False`). May return `None` if there exists no such concept for a given dataset. An example for\n evaluation-neutrality are the ground truth boxes annotated as \"difficult\" in the Pascal VOC datasets, which are\n usually treated to be neutral in a model evaluation.\n * 'inverse_transform': A nested list that contains a list of \"inverter\" functions for each item in the batch.\n These inverter functions take (predicted) labels for an image as input and apply the inverse of the transformations\n that were applied to the original image to them. This makes it possible to let the model make predictions on a\n transformed image and then convert these predictions back to the original image. This is mostly relevant for\n evaluation: If you want to evaluate your model on a dataset with varying image sizes, then you are forced to\n transform the images somehow (e.g. by resizing or cropping) to make them all the same size. Your model will then\n predict boxes for those transformed images, but for the evaluation you will need predictions with respect to the\n original images, not with respect to the transformed images. This means you will have to transform the predicted\n box coordinates back to the original image sizes. Note that for each image, the inverter functions for that\n image need to be applied in the order in which they are given in the respective list for that image.\n * 'original_images': A list containing the original images in the batch before any processing.\n * 'original_labels': A list containing the original ground truth boxes for the images in this batch before any\n processing. Only available if ground truth is available.\n The order of the outputs in the tuple is the order of the list above. If `returns` contains a keyword for an\n output that is unavailable, that output omitted in the yielded tuples and a warning will be raised.\n keep_images_without_gt (bool, optional): If `False`, images for which there aren't any ground truth boxes before\n any transformations have been applied will be removed from the batch. If `True`, such images will be kept\n in the batch.\n degenerate_box_handling (str, optional): How to handle degenerate boxes, which are boxes that have `xmax <= xmin` and/or\n `ymax <= ymin`. 
Degenerate boxes can sometimes be in the dataset, or non-degenerate boxes can become degenerate\n after they were processed by transformations. Note that the generator checks for degenerate boxes after all\n transformations have been applied (if any), but before the labels were passed to the `label_encoder` (if one was given).\n Can be one of 'warn' or 'remove'. If 'warn', the generator will merely print a warning to let you know that there\n are degenerate boxes in a batch. If 'remove', the generator will remove degenerate boxes from the batch silently.\n\n Yields:\n The next batch as a tuple of items as defined by the `returns` argument.\n '''\n\n if self.dataset_size == 0:\n raise DatasetError(\"Cannot generate batches because you did not load a dataset.\")\n\n #############################################################################################\n # Warn if any of the set returns aren't possible.\n #############################################################################################\n\n if self.labels is None:\n if any([ret in returns for ret in ['original_labels', 'processed_labels', 'encoded_labels', 'matched_anchors', 'evaluation-neutral']]):\n warnings.warn(\"Since no labels were given, none of 'original_labels', 'processed_labels', 'evaluation-neutral', 'encoded_labels', and 'matched_anchors' \" +\n \"are possible returns, but you set `returns = {}`. The impossible returns will be `None`.\".format(returns))\n elif label_encoder is None:\n if any([ret in returns for ret in ['encoded_labels', 'matched_anchors']]):\n warnings.warn(\"Since no label encoder was given, 'encoded_labels' and 'matched_anchors' aren't possible returns, \" +\n \"but you set `returns = {}`. The impossible returns will be `None`.\".format(returns))\n elif not isinstance(label_encoder, SSDInputEncoder):\n if 'matched_anchors' in returns:\n warnings.warn(\"`label_encoder` is not an `SSDInputEncoder` object, therefore 'matched_anchors' is not a possible return, \" +\n \"but you set `returns = {}`. 
The impossible returns will be `None`.\".format(returns))\n\n #############################################################################################\n # Do a few preparatory things like maybe shuffling the dataset initially.\n #############################################################################################\n\n if shuffle:\n objects_to_shuffle = [self.dataset_indices]\n if not (self.filenames is None):\n objects_to_shuffle.append(self.filenames)\n if not (self.labels is None):\n objects_to_shuffle.append(self.labels)\n if not (self.image_ids is None):\n objects_to_shuffle.append(self.image_ids)\n if not (self.eval_neutral is None):\n objects_to_shuffle.append(self.eval_neutral)\n shuffled_objects = sklearn.utils.shuffle(*objects_to_shuffle)\n for i in range(len(objects_to_shuffle)):\n objects_to_shuffle[i][:] = shuffled_objects[i]\n\n if degenerate_box_handling == 'remove':\n box_filter = BoxFilter(check_overlap=False,\n check_min_area=False,\n check_degenerate=True,\n labels_format=self.labels_format)\n\n # Override the labels formats of all the transformations to make sure they are set correctly.\n if not (self.labels is None):\n for transform in transformations:\n transform.labels_format = self.labels_format\n\n #############################################################################################\n # Generate mini batches.\n #############################################################################################\n\n current = 0\n\n while True:\n\n batch_X, batch_y = [], []\n\n if current >= self.dataset_size:\n current = 0\n\n #########################################################################################\n # Maybe shuffle the dataset if a full pass over the dataset has finished.\n #########################################################################################\n\n if shuffle:\n objects_to_shuffle = [self.dataset_indices]\n if not (self.filenames is None):\n objects_to_shuffle.append(self.filenames)\n if not (self.labels is None):\n objects_to_shuffle.append(self.labels)\n if not (self.image_ids is None):\n objects_to_shuffle.append(self.image_ids)\n if not (self.eval_neutral is None):\n objects_to_shuffle.append(self.eval_neutral)\n shuffled_objects = sklearn.utils.shuffle(*objects_to_shuffle)\n for i in range(len(objects_to_shuffle)):\n objects_to_shuffle[i][:] = shuffled_objects[i]\n\n #########################################################################################\n # Get the images, (maybe) image IDs, (maybe) labels, etc. 
for this batch.\n #########################################################################################\n\n # We prioritize our options in the following order:\n # 1) If we have the images already loaded in memory, get them from there.\n # 2) Else, if we have an HDF5 dataset, get the images from there.\n # 3) Else, if we have neither of the above, we'll have to load the individual image\n # files from disk.\n batch_indices = self.dataset_indices[current:current+batch_size]\n if not (self.images is None):\n for i in batch_indices:\n batch_X.append(self.images[i])\n if not (self.filenames is None):\n batch_filenames = self.filenames[current:current+batch_size]\n else:\n batch_filenames = None\n elif not (self.hdf5_dataset is None):\n for i in batch_indices:\n batch_X.append(self.hdf5_dataset['images'][i].reshape(self.hdf5_dataset['image_shapes'][i]))\n if not (self.filenames is None):\n batch_filenames = self.filenames[current:current+batch_size]\n else:\n batch_filenames = None\n else:\n batch_filenames = self.filenames[current:current+batch_size]\n for filename in batch_filenames:\n with Image.open(filename) as image:\n batch_X.append(np.array(image, dtype=np.uint8))\n\n # Get the labels for this batch (if there are any).\n if not (self.labels is None):\n batch_y = deepcopy(self.labels[current:current+batch_size])\n else:\n batch_y = None\n\n if not (self.eval_neutral is None):\n batch_eval_neutral = self.eval_neutral[current:current+batch_size]\n else:\n batch_eval_neutral = None\n\n # Get the image IDs for this batch (if there are any).\n if not (self.image_ids is None):\n batch_image_ids = self.image_ids[current:current+batch_size]\n else:\n batch_image_ids = None\n\n if 'original_images' in returns:\n batch_original_images = deepcopy(batch_X) # The original, unaltered images\n if 'original_labels' in returns:\n batch_original_labels = deepcopy(batch_y) # The original, unaltered labels\n\n current += batch_size\n\n #########################################################################################\n # Maybe perform image transformations.\n #########################################################################################\n\n batch_items_to_remove = [] # In case we need to remove any images from the batch, store their indices in this list.\n batch_inverse_transforms = []\n\n for i in range(len(batch_X)):\n\n if not (self.labels is None):\n # Convert the labels for this image to an array (in case they aren't already).\n batch_y[i] = np.array(batch_y[i])\n # If this image has no ground truth boxes, maybe we don't want to keep it in the batch.\n if (batch_y[i].size == 0) and not keep_images_without_gt:\n batch_items_to_remove.append(i)\n batch_inverse_transforms.append([])\n continue\n\n # Apply any image transformations we may have received.\n if transformations:\n\n inverse_transforms = []\n\n for transform in transformations:\n\n if not (self.labels is None):\n \n if ('inverse_transform' in returns) and ('return_inverter' in inspect.signature(transform).parameters):\n batch_X[i], batch_y[i], inverse_transform = transform(batch_X[i], batch_y[i], return_inverter=True)\n inverse_transforms.append(inverse_transform)\n else:\n batch_X[i], batch_y[i] = transform(batch_X[i], batch_y[i])\n\n if batch_X[i] is None: # In case the transform failed to produce an output image, which is possible for some random transforms.\n batch_items_to_remove.append(i)\n batch_inverse_transforms.append([])\n continue\n\n else:\n\n if ('inverse_transform' in returns) and ('return_inverter' in 
inspect.signature(transform).parameters):\n batch_X[i], inverse_transform = transform(batch_X[i], return_inverter=True)\n inverse_transforms.append(inverse_transform)\n else:\n batch_X[i] = transform(batch_X[i])\n\n batch_inverse_transforms.append(inverse_transforms[::-1])\n\n #########################################################################################\n # Check for degenerate boxes in this batch item.\n #########################################################################################\n\n if not (self.labels is None):\n\n xmin = self.labels_format['xmin']\n ymin = self.labels_format['ymin']\n xmax = self.labels_format['xmax']\n ymax = self.labels_format['ymax']\n\n if np.any(batch_y[i][:,xmax] - batch_y[i][:,xmin] <= 0) or np.any(batch_y[i][:,ymax] - batch_y[i][:,ymin] <= 0):\n if degenerate_box_handling == 'warn':\n warnings.warn(\"Detected degenerate ground truth bounding boxes for batch item {} with bounding boxes {}, \".format(i, batch_y[i]) +\n \"i.e. bounding boxes where xmax <= xmin and/or ymax <= ymin. \" +\n \"This could mean that your dataset contains degenerate ground truth boxes, or that any image transformations you may apply might \" +\n \"result in degenerate ground truth boxes, or that you are parsing the ground truth in the wrong coordinate format.\" +\n \"Degenerate ground truth bounding boxes may lead to NaN errors during the training.\")\n elif degenerate_box_handling == 'remove':\n batch_y[i] = box_filter(batch_y[i])\n if (batch_y[i].size == 0) and not keep_images_without_gt:\n batch_items_to_remove.append(i)\n\n #########################################################################################\n # Remove any items we might not want to keep from the batch.\n #########################################################################################\n\n if batch_items_to_remove:\n for j in sorted(batch_items_to_remove, reverse=True):\n # This isn't efficient, but it hopefully shouldn't need to be done often anyway.\n batch_X.pop(j)\n batch_filenames.pop(j)\n if batch_inverse_transforms: batch_inverse_transforms.pop(j)\n if not (self.labels is None): batch_y.pop(j)\n if not (self.image_ids is None): batch_image_ids.pop(j)\n if not (self.eval_neutral is None): batch_eval_neutral.pop(j)\n if 'original_images' in returns: batch_original_images.pop(j)\n if 'original_labels' in returns and not (self.labels is None): batch_original_labels.pop(j)\n\n #########################################################################################\n\n # CAUTION: Converting `batch_X` into an array will result in an empty batch if the images have varying sizes\n # or varying numbers of channels. At this point, all images must have the same size and the same\n # number of channels.\n batch_X = np.array(batch_X)\n if (batch_X.size == 0):\n raise DegenerateBatchError(\"You produced an empty batch. This might be because the images in the batch vary \" +\n \"in their size and/or number of channels. 
Note that after all transformations \" +\n \"(if any were given) have been applied to all images in the batch, all images \" +\n \"must be homogenous in size along all axes.\")\n\n #########################################################################################\n # If we have a label encoder, encode our labels.\n #########################################################################################\n\n if not (label_encoder is None or self.labels is None):\n\n if ('matched_anchors' in returns) and isinstance(label_encoder, SSDInputEncoder):\n batch_y_encoded, batch_matched_anchors = label_encoder(batch_y, diagnostics=True)\n else:\n batch_y_encoded = label_encoder(batch_y, diagnostics=False)\n batch_matched_anchors = None\n\n else:\n batch_y_encoded = None\n batch_matched_anchors = None\n\n #########################################################################################\n # Compose the output.\n #########################################################################################\n\n ret = []\n\n if 'bankofamerica/img00002' in batch_image_ids:\n pdb.set_trace()\n if 'processed_images' in returns: ret.append(batch_X)\n if 'encoded_labels' in returns: ret.append(batch_y_encoded)\n if 'matched_anchors' in returns: ret.append(batch_matched_anchors)\n if 'processed_labels' in returns: ret.append(batch_y)\n if 'filenames' in returns: ret.append(batch_filenames)\n if 'image_ids' in returns: ret.append(batch_image_ids)\n if 'evaluation-neutral' in returns: ret.append(batch_eval_neutral)\n if 'inverse_transform' in returns: ret.append(batch_inverse_transforms)\n if 'original_images' in returns: ret.append(batch_original_images)\n if 'original_labels' in returns: ret.append(batch_original_labels)\n\n yield ret\n\n def save_dataset(self,\n filenames_path='filenames.pkl',\n labels_path=None,\n image_ids_path=None,\n eval_neutral_path=None):\n '''\n Writes the current `filenames`, `labels`, and `image_ids` lists to the specified files.\n This is particularly useful for large datasets with annotations that are\n parsed from XML files, which can take quite long. If you'll be using the\n same dataset repeatedly, you don't want to have to parse the XML label\n files every time.\n\n Arguments:\n filenames_path (str): The path under which to save the filenames pickle.\n labels_path (str): The path under which to save the labels pickle.\n image_ids_path (str, optional): The path under which to save the image IDs pickle.\n eval_neutral_path (str, optional): The path under which to save the pickle for\n the evaluation-neutrality annotations.\n '''\n with open(filenames_path, 'wb') as f:\n pickle.dump(self.filenames, f)\n if not labels_path is None:\n with open(labels_path, 'wb') as f:\n pickle.dump(self.labels, f)\n if not image_ids_path is None:\n with open(image_ids_path, 'wb') as f:\n pickle.dump(self.image_ids, f)\n if not eval_neutral_path is None:\n with open(eval_neutral_path, 'wb') as f:\n pickle.dump(self.eval_neutral, f)\n\n def get_dataset(self):\n '''\n Returns:\n 4-tuple containing lists and/or `None` for the filenames, labels, image IDs,\n and evaluation-neutrality annotations.\n '''\n return self.filenames, self.labels, self.image_ids, self.eval_neutral\n\n def get_dataset_size(self):\n '''\n Returns:\n The number of images in the dataset.\n '''\n return self.dataset_size\n"
] | [
[
"numpy.random.uniform",
"numpy.any",
"numpy.asarray",
"numpy.arange",
"numpy.stack",
"numpy.concatenate",
"numpy.array"
]
] |
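The record above stores an object-detection data generator that can serialize a dataset of variable-size images into a single HDF5 file (`create_hdf5_dataset`) and then yield transformed mini-batches (`generate`). The sketch below shows how those two methods might fit together. It is only a sketch: the class name `DataGenerator`, the import path, the constructor arguments and all file paths are assumptions made for illustration, since the record does not show the class constructor. Note also that the stored `generate()` still contains a leftover `pdb.set_trace()` guard keyed on a hard-coded image ID, which assumes `batch_image_ids` is not `None`; that line would need to be removed (or image IDs loaded) before a run like this completes cleanly.

# Minimal usage sketch for the generator stored above. The class name
# `DataGenerator`, the import path, the constructor arguments and the file
# paths are assumptions -- only `create_hdf5_dataset()` and `generate()`
# appear in the record itself.
from data_generator.object_detection_2d_data_generator import DataGenerator  # assumed import path

dataset = DataGenerator(load_images_into_memory=False,  # assumed constructor signature
                        hdf5_dataset_path=None)

# Serialize the loaded images into one HDF5 file so that later epochs can
# skip decoding individual image files from disk.
dataset.create_hdf5_dataset(file_path='dataset.h5',   # hypothetical output path
                            resize=(300, 300),        # (height, width); pass False to keep original sizes
                            variable_image_size=False,
                            verbose=True)

# Request only the outputs we need; per the docstring, outputs listed in
# `returns` but unavailable for this dataset come back as `None`.
generator = dataset.generate(batch_size=8,
                             shuffle=True,
                             transformations=[],
                             label_encoder=None,
                             returns={'processed_images', 'filenames'},
                             keep_images_without_gt=False,
                             degenerate_box_handling='remove')

batch_images, batch_filenames = next(generator)
print(batch_images.shape, len(batch_filenames))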
Yeachan-Heo/ray | [
"a73c488c74b1e01da3961db2eb538c43c29753f5",
"a73c488c74b1e01da3961db2eb538c43c29753f5"
] | [
"rllib/agents/dqn/dqn_torch_model.py",
"python/ray/serve/master.py"
] | [
"import numpy as np\n\nfrom ray.rllib.models.torch.torch_modelv2 import TorchModelV2\nfrom ray.rllib.utils import try_import_torch\n\ntorch, nn = try_import_torch()\n\n\nclass DQNTorchModel(TorchModelV2):\n \"\"\"Extension of standard TorchModelV2 to provide dueling-Q functionality.\n \"\"\"\n\n def __init__(\n self,\n obs_space,\n action_space,\n num_outputs,\n model_config,\n name,\n *,\n dueling=False,\n q_hiddens=(256, ),\n dueling_activation=\"relu\",\n use_noisy=False,\n sigma0=0.5,\n # TODO(sven): Move `add_layer_norm` into ModelCatalog as\n # generic option, then error if we use ParameterNoise as\n # Exploration type and do not have any LayerNorm layers in\n # the net.\n add_layer_norm=False):\n \"\"\"Initialize variables of this model.\n\n Extra model kwargs:\n dueling (bool): Whether to build the advantage(A)/value(V) heads\n for DDQN. If True, Q-values are calculated as:\n Q = (A - mean[A]) + V. If False, raw NN output is interpreted\n as Q-values.\n q_hiddens (List[int]): List of layer-sizes after(!) the\n Advantages(A)/Value(V)-split. Hence, each of the A- and V-\n branches will have this structure of Dense layers. To define\n the NN before this A/V-split, use - as always -\n config[\"model\"][\"fcnet_hiddens\"].\n dueling_activation (str): The activation to use for all dueling\n layers (A- and V-branch). One of \"relu\", \"tanh\", \"linear\".\n use_noisy (bool): use noisy nets\n sigma0 (float): initial value of noisy nets\n add_layer_norm (bool): Enable layer norm (for param noise).\n \"\"\"\n\n super(DQNTorchModel, self).__init__(obs_space, action_space,\n num_outputs, model_config, name)\n\n self.dueling = dueling\n ins = num_outputs\n\n # Dueling case: Build the shared (advantages and value) fc-network.\n advantage_module = nn.Sequential()\n value_module = None\n if self.dueling:\n value_module = nn.Sequential()\n for i, n in enumerate(q_hiddens):\n advantage_module.add_module(\"dueling_A_{}\".format(i),\n nn.Linear(ins, n))\n value_module.add_module(\"dueling_V_{}\".format(i),\n nn.Linear(ins, n))\n # Add activations if necessary.\n if dueling_activation == \"relu\":\n advantage_module.add_module(\"dueling_A_act_{}\".format(i),\n nn.ReLU())\n value_module.add_module(\"dueling_V_act_{}\".format(i),\n nn.ReLU())\n elif dueling_activation == \"tanh\":\n advantage_module.add_module(\"dueling_A_act_{}\".format(i),\n nn.Tanh())\n value_module.add_module(\"dueling_V_act_{}\".format(i),\n nn.Tanh())\n\n # Add LayerNorm after each Dense.\n if add_layer_norm:\n advantage_module.add_module(\"LayerNorm_A_{}\".format(i),\n nn.LayerNorm(n))\n value_module.add_module(\"LayerNorm_V_{}\".format(i),\n nn.LayerNorm(n))\n ins = n\n # Actual Advantages layer (nodes=num-actions) and\n # value layer (nodes=1).\n advantage_module.add_module(\"A\", nn.Linear(ins, action_space.n))\n value_module.add_module(\"V\", nn.Linear(ins, 1))\n # Non-dueling:\n # Q-value layer (use main module's outputs as Q-values).\n else:\n pass\n\n self.advantage_module = advantage_module\n self.value_module = value_module\n\n def get_advantages_or_q_values(self, model_out):\n \"\"\"Returns distributional values for Q(s, a) given a state embedding.\n\n Override this in your custom model to customize the Q output head.\n\n Arguments:\n model_out (Tensor): embedding from the model layers\n\n Returns:\n (action_scores, logits, dist) if num_atoms == 1, otherwise\n (action_scores, z, support_logits_per_action, logits, dist)\n \"\"\"\n\n return self.advantage_module(model_out)\n\n def get_state_value(self, model_out):\n 
\"\"\"Returns the state value prediction for the given state embedding.\"\"\"\n\n return self.value_module(model_out)\n\n def _noisy_layer(self, action_in, out_size, sigma0, non_linear=True):\n \"\"\"\n a common dense layer: y = w^{T}x + b\n a noisy layer: y = (w + \\\\epsilon_w*\\\\sigma_w)^{T}x +\n (b+\\\\epsilon_b*\\\\sigma_b)\n where \\epsilon are random variables sampled from factorized normal\n distributions and \\\\sigma are trainable variables which are expected to\n vanish along the training procedure\n \"\"\"\n in_size = int(action_in.shape[1])\n\n epsilon_in = torch.normal(\n mean=torch.zeros([in_size]), std=torch.ones([in_size]))\n epsilon_out = torch.normal(\n mean=torch.zeros([out_size]), std=torch.ones([out_size]))\n epsilon_in = self._f_epsilon(epsilon_in)\n epsilon_out = self._f_epsilon(epsilon_out)\n epsilon_w = torch.matmul(\n torch.unsqueeze(epsilon_in, -1),\n other=torch.unsqueeze(epsilon_out, 0))\n epsilon_b = epsilon_out\n\n sigma_w = torch.Tensor(\n data=np.random.uniform(\n low=-1.0 / np.sqrt(float(in_size)),\n high=1.0 / np.sqrt(float(in_size)),\n size=[in_size, out_size]),\n dtype=torch.float32,\n requires_grad=True)\n # TF noise generation can be unreliable on GPU\n # If generating the noise on the CPU,\n # lowering sigma0 to 0.1 may be helpful\n sigma_b = torch.Tensor(\n data=np.full(\n shape=[out_size], fill_value=sigma0 / np.sqrt(float(in_size))),\n requires_grad=True)\n w = torch.Tensor(\n data=np.full(\n shape=[in_size, out_size],\n fill_value=6 / np.sqrt(float(in_size) + float(out_size))),\n requires_grad=True)\n b = torch.Tensor(data=np.zeros([out_size]), requires_grad=True)\n action_activation = torch.matmul(action_in, w + sigma_w * epsilon_w) \\\n + b + sigma_b * epsilon_b\n\n if not non_linear:\n return action_activation\n return nn.functional.relu(action_activation)\n\n def _f_epsilon(self, x):\n return torch.sign(x) * torch.pow(torch.abs(x), 0.5)\n",
"import asyncio\nfrom collections import defaultdict\nimport os\nimport random\nimport time\n\nimport ray\nimport ray.cloudpickle as pickle\nfrom ray.serve.backend_worker import create_backend_worker\nfrom ray.serve.constants import (ASYNC_CONCURRENCY, SERVE_ROUTER_NAME,\n SERVE_PROXY_NAME, SERVE_METRIC_SINK_NAME)\nfrom ray.serve.http_proxy import HTTPProxyActor\nfrom ray.serve.kv_store import RayInternalKVStore\nfrom ray.serve.metric.exporter import MetricExporterActor\nfrom ray.serve.router import Router\nfrom ray.serve.utils import (async_retryable, format_actor_name,\n get_random_letters, logger)\n\nimport numpy as np\n\n# Used for testing purposes only. If this is set, the master actor will crash\n# after writing each checkpoint with the specified probability.\n_CRASH_AFTER_CHECKPOINT_PROBABILITY = 0.0\nCHECKPOINT_KEY = \"serve-master-checkpoint\"\n\n\[email protected]\nclass ServeMaster:\n \"\"\"Responsible for managing the state of the serving system.\n\n The master actor implements fault tolerance by persisting its state in\n a new checkpoint each time a state change is made. If the actor crashes,\n the latest checkpoint is loaded and the state is recovered. Checkpoints\n are written/read using a provided KV-store interface.\n\n All hard state in the system is maintained by this actor and persisted via\n these checkpoints. Soft state required by other components is fetched by\n those actors from this actor on startup and updates are pushed out from\n this actor.\n\n All other actors started by the master actor are named, detached actors\n so they will not fate share with the master if it crashes.\n\n The following guarantees are provided for state-changing calls to the\n master actor:\n - If the call succeeds, the change was made and will be reflected in\n the system even if the master actor or other actors die unexpectedly.\n - If the call fails, the change may have been made but isn't guaranteed\n to have been. The client should retry in this case. Note that this\n requires all implementations here to be idempotent.\n \"\"\"\n\n async def __init__(self, cluster_name, start_http_proxy, http_node_id,\n http_proxy_host, http_proxy_port,\n metric_exporter_class):\n # Unique name of the serve cluster managed by this actor. 
Used to\n # namespace child actors and checkpoints.\n self.cluster_name = cluster_name\n # Used to read/write checkpoints.\n self.kv_store = RayInternalKVStore()\n # path -> (endpoint, methods).\n self.routes = {}\n # backend -> (backend_worker, backend_config, replica_config).\n self.backends = {}\n # backend -> replica_tags.\n self.replicas = defaultdict(list)\n # replicas that should be started if recovering from a checkpoint.\n self.replicas_to_start = defaultdict(list)\n # replicas that should be stopped if recovering from a checkpoint.\n self.replicas_to_stop = defaultdict(list)\n # backends that should be removed from the router if recovering from a\n # checkpoint.\n self.backends_to_remove = list()\n # endpoints that should be removed from the router if recovering from a\n # checkpoint.\n self.endpoints_to_remove = list()\n # endpoint -> traffic_dict\n self.traffic_policies = dict()\n # Dictionary of backend tag to dictionaries of replica tag to worker.\n # TODO(edoakes): consider removing this and just using the names.\n self.workers = defaultdict(dict)\n\n # Used to ensure that only a single state-changing operation happens\n # at any given time.\n self.write_lock = asyncio.Lock()\n\n # Cached handles to actors in the system.\n self.router = None\n self.http_proxy = None\n self.metric_exporter = None\n\n # If starting the actor for the first time, starts up the other system\n # components. If recovering, fetches their actor handles.\n self._get_or_start_metric_exporter(metric_exporter_class)\n self._get_or_start_router()\n if start_http_proxy:\n self._get_or_start_http_proxy(http_node_id, http_proxy_host,\n http_proxy_port)\n\n # NOTE(edoakes): unfortunately, we can't completely recover from a\n # checkpoint in the constructor because we block while waiting for\n # other actors to start up, and those actors fetch soft state from\n # this actor. Because no other tasks will start executing until after\n # the constructor finishes, if we were to run this logic in the\n # constructor it could lead to deadlock between this actor and a child.\n # However we do need to guarantee that we have fully recovered from a\n # checkpoint before any other state-changing calls run. We address this\n # by acquiring the write_lock and then posting the task to recover from\n # a checkpoint to the event loop. 
Other state-changing calls acquire\n # this lock and will be blocked until recovering from the checkpoint\n # finishes.\n checkpoint_key = CHECKPOINT_KEY\n if self.cluster_name is not None:\n checkpoint_key = \"{}:{}\".format(self.cluster_name, checkpoint_key)\n checkpoint = self.kv_store.get(checkpoint_key)\n if checkpoint is None:\n logger.debug(\"No checkpoint found\")\n else:\n await self.write_lock.acquire()\n asyncio.get_event_loop().create_task(\n self._recover_from_checkpoint(checkpoint))\n\n def _get_or_start_router(self):\n \"\"\"Get the router belonging to this serve cluster.\n\n If the router does not already exist, it will be started.\n \"\"\"\n router_name = format_actor_name(SERVE_ROUTER_NAME, self.cluster_name)\n try:\n self.router = ray.util.get_actor(router_name)\n except ValueError:\n logger.info(\"Starting router with name '{}'\".format(router_name))\n self.router = async_retryable(ray.remote(Router)).options(\n detached=True,\n name=router_name,\n max_concurrency=ASYNC_CONCURRENCY,\n max_restarts=-1,\n ).remote(cluster_name=self.cluster_name)\n\n def get_router(self):\n \"\"\"Returns a handle to the router managed by this actor.\"\"\"\n return [self.router]\n\n def _get_or_start_http_proxy(self, node_id, host, port):\n \"\"\"Get the HTTP proxy belonging to this serve cluster.\n\n If the HTTP proxy does not already exist, it will be started.\n \"\"\"\n proxy_name = format_actor_name(SERVE_PROXY_NAME, self.cluster_name)\n try:\n self.http_proxy = ray.util.get_actor(proxy_name)\n except ValueError:\n logger.info(\n \"Starting HTTP proxy with name '{}' on node '{}'\".format(\n proxy_name, node_id))\n self.http_proxy = async_retryable(HTTPProxyActor).options(\n detached=True,\n name=proxy_name,\n max_concurrency=ASYNC_CONCURRENCY,\n max_restarts=-1,\n resources={\n node_id: 0.01\n },\n ).remote(\n host, port, cluster_name=self.cluster_name)\n\n def get_http_proxy(self):\n \"\"\"Returns a handle to the HTTP proxy managed by this actor.\"\"\"\n return [self.http_proxy]\n\n def get_http_proxy_config(self):\n \"\"\"Called by the HTTP proxy on startup to fetch required state.\"\"\"\n return self.routes, self.get_router()\n\n def _get_or_start_metric_exporter(self, metric_exporter_class):\n \"\"\"Get the metric exporter belonging to this serve cluster.\n\n If the metric exporter does not already exist, it will be started.\n \"\"\"\n metric_sink_name = format_actor_name(SERVE_METRIC_SINK_NAME,\n self.cluster_name)\n try:\n self.metric_exporter = ray.util.get_actor(metric_sink_name)\n except ValueError:\n logger.info(\"Starting metric exporter with name '{}'\".format(\n metric_sink_name))\n self.metric_exporter = MetricExporterActor.options(\n detached=True,\n name=metric_sink_name).remote(metric_exporter_class)\n\n def get_metric_exporter(self):\n \"\"\"Returns a handle to the metric exporter managed by this actor.\"\"\"\n return [self.metric_exporter]\n\n def _checkpoint(self):\n \"\"\"Checkpoint internal state and write it to the KV store.\"\"\"\n logger.debug(\"Writing checkpoint\")\n start = time.time()\n checkpoint = pickle.dumps(\n (self.routes, self.backends, self.traffic_policies, self.replicas,\n self.replicas_to_start, self.replicas_to_stop,\n self.backends_to_remove, self.endpoints_to_remove))\n\n self.kv_store.put(CHECKPOINT_KEY, checkpoint)\n logger.debug(\"Wrote checkpoint in {:.2f}\".format(time.time() - start))\n\n if random.random() < _CRASH_AFTER_CHECKPOINT_PROBABILITY:\n logger.warning(\"Intentionally crashing after checkpoint\")\n os._exit(0)\n\n async def 
_recover_from_checkpoint(self, checkpoint_bytes):\n \"\"\"Recover the cluster state from the provided checkpoint.\n\n Performs the following operations:\n 1) Deserializes the internal state from the checkpoint.\n 2) Pushes the latest configuration to the HTTP proxy and router\n in case we crashed before updating them.\n 3) Starts/stops any worker replicas that are pending creation or\n deletion.\n\n NOTE: this requires that self.write_lock is already acquired and will\n release it before returning.\n \"\"\"\n assert self.write_lock.locked()\n\n start = time.time()\n logger.info(\"Recovering from checkpoint\")\n\n # Load internal state from the checkpoint data.\n (\n self.routes,\n self.backends,\n self.traffic_policies,\n self.replicas,\n self.replicas_to_start,\n self.replicas_to_stop,\n self.backends_to_remove,\n self.endpoints_to_remove,\n ) = pickle.loads(checkpoint_bytes)\n\n # Fetch actor handles for all of the backend replicas in the system.\n # All of these workers are guaranteed to already exist because they\n # would not be written to a checkpoint in self.workers until they\n # were created.\n for backend_tag, replica_tags in self.replicas.items():\n for replica_tag in replica_tags:\n replica_name = format_actor_name(replica_tag,\n self.cluster_name)\n self.workers[backend_tag][replica_tag] = ray.util.get_actor(\n replica_name)\n\n # Push configuration state to the router.\n # TODO(edoakes): should we make this a pull-only model for simplicity?\n for endpoint, traffic_policy in self.traffic_policies.items():\n await self.router.set_traffic.remote(endpoint, traffic_policy)\n\n for backend_tag, replica_dict in self.workers.items():\n for replica_tag, worker in replica_dict.items():\n await self.router.add_new_worker.remote(\n backend_tag, replica_tag, worker)\n\n for backend, (_, backend_config, _) in self.backends.items():\n await self.router.set_backend_config.remote(\n backend, backend_config)\n\n # Push configuration state to the HTTP proxy.\n await self.http_proxy.set_route_table.remote(self.routes)\n\n # Start/stop any pending backend replicas.\n await self._start_pending_replicas()\n await self._stop_pending_replicas()\n\n # Remove any pending backends and endpoints.\n await self._remove_pending_backends()\n await self._remove_pending_endpoints()\n\n logger.info(\n \"Recovered from checkpoint in {:.3f}s\".format(time.time() - start))\n\n self.write_lock.release()\n\n def get_backend_configs(self):\n \"\"\"Fetched by the router on startup.\"\"\"\n backend_configs = {}\n for backend, (_, backend_config, _) in self.backends.items():\n backend_configs[backend] = backend_config\n return backend_configs\n\n def get_traffic_policies(self):\n \"\"\"Fetched by the router on startup.\"\"\"\n return self.traffic_policies\n\n def _list_replicas(self, backend_tag):\n \"\"\"Used only for testing.\"\"\"\n return self.replicas[backend_tag]\n\n def get_traffic_policy(self, endpoint):\n \"\"\"Fetched by serve handles.\"\"\"\n return self.traffic_policies[endpoint]\n\n async def _start_backend_worker(self, backend_tag, replica_tag):\n \"\"\"Creates a backend worker and waits for it to start up.\n\n Assumes that the backend configuration has already been registered\n in self.backends.\n \"\"\"\n logger.debug(\"Starting worker '{}' for backend '{}'.\".format(\n replica_tag, backend_tag))\n (backend_worker, backend_config,\n replica_config) = self.backends[backend_tag]\n\n replica_name = format_actor_name(replica_tag, self.cluster_name)\n worker_handle = 
async_retryable(ray.remote(backend_worker)).options(\n detached=True,\n name=replica_name,\n max_restarts=-1,\n **replica_config.ray_actor_options).remote(\n backend_tag,\n replica_tag,\n replica_config.actor_init_args,\n cluster_name=self.cluster_name)\n # TODO(edoakes): we should probably have a timeout here.\n await worker_handle.ready.remote()\n return worker_handle\n\n async def _start_pending_replicas(self):\n \"\"\"Starts the pending backend replicas in self.replicas_to_start.\n\n Starts the worker, then pushes an update to the router to add it to\n the proper backend. If the worker has already been started, only\n updates the router.\n\n Clears self.replicas_to_start.\n \"\"\"\n for backend_tag, replicas_to_create in self.replicas_to_start.items():\n for replica_tag in replicas_to_create:\n # NOTE(edoakes): the replicas may already be created if we\n # failed after creating them but before writing a checkpoint.\n try:\n worker_handle = ray.util.get_actor(replica_tag)\n except ValueError:\n worker_handle = await self._start_backend_worker(\n backend_tag, replica_tag)\n\n self.replicas[backend_tag].append(replica_tag)\n self.workers[backend_tag][replica_tag] = worker_handle\n\n # Register the worker with the router.\n await self.router.add_new_worker.remote(\n backend_tag, replica_tag, worker_handle)\n\n self.replicas_to_start.clear()\n\n async def _stop_pending_replicas(self):\n \"\"\"Stops the pending backend replicas in self.replicas_to_stop.\n\n Stops workers by telling the router to remove them.\n\n Clears self.replicas_to_stop.\n \"\"\"\n for backend_tag, replicas_to_stop in self.replicas_to_stop.items():\n for replica_tag in replicas_to_stop:\n # NOTE(edoakes): the replicas may already be stopped if we\n # failed after stopping them but before writing a checkpoint.\n try:\n # Remove the replica from router.\n # This will also submit __ray_terminate__ on the worker.\n # NOTE(edoakes): we currently need to kill the worker from\n # the router to guarantee that the router won't submit any\n # more requests to it.\n await self.router.remove_worker.remote(\n backend_tag, replica_tag)\n except ValueError:\n pass\n\n self.replicas_to_stop.clear()\n\n async def _remove_pending_backends(self):\n \"\"\"Removes the pending backends in self.backends_to_remove.\n\n Clears self.backends_to_remove.\n \"\"\"\n for backend_tag in self.backends_to_remove:\n await self.router.remove_backend.remote(backend_tag)\n self.backends_to_remove.clear()\n\n async def _remove_pending_endpoints(self):\n \"\"\"Removes the pending endpoints in self.endpoints_to_remove.\n\n Clears self.endpoints_to_remove.\n \"\"\"\n for endpoint_tag in self.endpoints_to_remove:\n await self.router.remove_endpoint.remote(endpoint_tag)\n self.endpoints_to_remove.clear()\n\n def _scale_replicas(self, backend_tag, num_replicas):\n \"\"\"Scale the given backend to the number of replicas.\n\n NOTE: this does not actually start or stop the replicas, but instead\n adds the intention to start/stop them to self.workers_to_start and\n self.workers_to_stop. 
The caller is responsible for then first writing\n a checkpoint and then actually starting/stopping the intended replicas.\n This avoids inconsistencies with starting/stopping a worker and then\n crashing before writing a checkpoint.\n \"\"\"\n logger.debug(\"Scaling backend '{}' to {} replicas\".format(\n backend_tag, num_replicas))\n assert (backend_tag in self.backends\n ), \"Backend {} is not registered.\".format(backend_tag)\n assert num_replicas >= 0, (\"Number of replicas must be\"\n \" greater than or equal to 0.\")\n\n current_num_replicas = len(self.replicas[backend_tag])\n delta_num_replicas = num_replicas - current_num_replicas\n\n if delta_num_replicas > 0:\n logger.debug(\"Adding {} replicas to backend {}\".format(\n delta_num_replicas, backend_tag))\n for _ in range(delta_num_replicas):\n replica_tag = \"{}#{}\".format(backend_tag, get_random_letters())\n self.replicas_to_start[backend_tag].append(replica_tag)\n\n elif delta_num_replicas < 0:\n logger.debug(\"Removing {} replicas from backend {}\".format(\n -delta_num_replicas, backend_tag))\n assert len(self.replicas[backend_tag]) >= delta_num_replicas\n for _ in range(-delta_num_replicas):\n replica_tag = self.replicas[backend_tag].pop()\n if len(self.replicas[backend_tag]) == 0:\n del self.replicas[backend_tag]\n del self.workers[backend_tag][replica_tag]\n if len(self.workers[backend_tag]) == 0:\n del self.workers[backend_tag]\n\n self.replicas_to_stop[backend_tag].append(replica_tag)\n\n def get_all_worker_handles(self):\n \"\"\"Fetched by the router on startup.\"\"\"\n return self.workers\n\n def get_all_backends(self):\n \"\"\"Used for validation by the API client.\"\"\"\n return list(self.backends.keys())\n\n def get_all_endpoints(self):\n \"\"\"Used for validation by the API client.\"\"\"\n return [endpoint for endpoint, methods in self.routes.values()]\n\n async def set_traffic(self, endpoint_name, traffic_policy_dictionary):\n \"\"\"Sets the traffic policy for the specified endpoint.\"\"\"\n async with self.write_lock:\n if endpoint_name not in self.get_all_endpoints():\n raise ValueError(\n \"Attempted to assign traffic for an endpoint '{}'\"\n \" that is not registered.\".format(endpoint_name))\n\n assert isinstance(traffic_policy_dictionary,\n dict), \"Traffic policy must be dictionary\"\n prob = 0\n for backend, weight in traffic_policy_dictionary.items():\n if weight < 0:\n raise ValueError(\n \"Attempted to assign a weight of {} to backend '{}'. 
\"\n \"Weights cannot be negative.\".format(weight, backend))\n prob += weight\n if backend not in self.backends:\n raise ValueError(\n \"Attempted to assign traffic to a backend '{}' that \"\n \"is not registered.\".format(backend))\n\n # These weights will later be plugged into np.random.choice, which\n # uses a tolerance of 1e-8.\n assert np.isclose(\n prob, 1, atol=1e-8\n ), \"weights must sum to 1, currently they sum to {}\".format(prob)\n\n self.traffic_policies[endpoint_name] = traffic_policy_dictionary\n\n # NOTE(edoakes): we must write a checkpoint before pushing the\n # update to avoid inconsistent state if we crash after pushing the\n # update.\n self._checkpoint()\n await self.router.set_traffic.remote(endpoint_name,\n traffic_policy_dictionary)\n\n async def create_endpoint(self, route, endpoint, methods):\n \"\"\"Create a new endpoint with the specified route and methods.\n\n If the route is None, this is a \"headless\" endpoint that will not\n be added to the HTTP proxy (can only be accessed via a handle).\n \"\"\"\n async with self.write_lock:\n # If this is a headless endpoint with no route, key the endpoint\n # based on its name.\n # TODO(edoakes): we should probably just store routes and endpoints\n # separately.\n if route is None:\n route = endpoint\n\n # TODO(edoakes): move this to client side.\n err_prefix = \"Cannot create endpoint.\"\n if route in self.routes:\n if self.routes[route] == (endpoint, methods):\n return\n else:\n raise ValueError(\n \"{} Route '{}' is already registered.\".format(\n err_prefix, route))\n\n if endpoint in self.get_all_endpoints():\n raise ValueError(\n \"{} Endpoint '{}' is already registered.\".format(\n err_prefix, endpoint))\n\n logger.info(\n \"Registering route {} to endpoint {} with methods {}.\".format(\n route, endpoint, methods))\n\n self.routes[route] = (endpoint, methods)\n\n # NOTE(edoakes): we must write a checkpoint before pushing the\n # update to avoid inconsistent state if we crash after pushing the\n # update.\n self._checkpoint()\n await self.http_proxy.set_route_table.remote(self.routes)\n\n async def delete_endpoint(self, endpoint):\n \"\"\"Delete the specified endpoint.\n\n Does not modify any corresponding backends.\n \"\"\"\n logger.info(\"Deleting endpoint '{}'\".format(endpoint))\n async with self.write_lock:\n # This method must be idempotent. 
We should validate that the\n # specified endpoint exists on the client.\n for route, (route_endpoint, _) in self.routes.items():\n if route_endpoint == endpoint:\n route_to_delete = route\n break\n else:\n logger.info(\"Endpoint '{}' doesn't exist\".format(endpoint))\n return\n\n # Remove the routing entry.\n del self.routes[route_to_delete]\n\n # Remove the traffic policy entry if it exists.\n if endpoint in self.traffic_policies:\n del self.traffic_policies[endpoint]\n\n self.endpoints_to_remove.append(endpoint)\n\n # NOTE(edoakes): we must write a checkpoint before pushing the\n # updates to the HTTP proxy and router to avoid inconsistent state\n # if we crash after pushing the update.\n self._checkpoint()\n\n # Update the HTTP proxy first to ensure no new requests for the\n # endpoint are sent to the router.\n await self.http_proxy.set_route_table.remote(self.routes)\n await self._remove_pending_endpoints()\n\n async def create_backend(self, backend_tag, backend_config,\n replica_config):\n \"\"\"Register a new backend under the specified tag.\"\"\"\n async with self.write_lock:\n backend_worker = create_backend_worker(\n replica_config.func_or_class)\n\n # Save creator that starts replicas, the arguments to be passed in,\n # and the configuration for the backends.\n self.backends[backend_tag] = (backend_worker, backend_config,\n replica_config)\n\n self._scale_replicas(backend_tag, backend_config.num_replicas)\n\n # NOTE(edoakes): we must write a checkpoint before starting new\n # or pushing the updated config to avoid inconsistent state if we\n # crash while making the change.\n self._checkpoint()\n await self._start_pending_replicas()\n\n # Set the backend config inside the router\n # (particularly for max-batch-size).\n await self.router.set_backend_config.remote(\n backend_tag, backend_config)\n\n async def delete_backend(self, backend_tag):\n async with self.write_lock:\n # This method must be idempotent. We should validate that the\n # specified backend exists on the client.\n if backend_tag not in self.backends:\n return\n\n # Check that the specified backend isn't used by any endpoints.\n for endpoint, traffic_dict in self.traffic_policies.items():\n if backend_tag in traffic_dict:\n raise ValueError(\"Backend '{}' is used by endpoint '{}' \"\n \"and cannot be deleted. Please remove \"\n \"the backend from all endpoints and try \"\n \"again.\".format(backend_tag, endpoint))\n\n # Scale its replicas down to 0. 
This will also remove the backend\n # from self.backends and self.replicas.\n self._scale_replicas(backend_tag, 0)\n\n # Remove the backend's metadata.\n del self.backends[backend_tag]\n\n # Add the intention to remove the backend from the router.\n self.backends_to_remove.append(backend_tag)\n\n # NOTE(edoakes): we must write a checkpoint before removing the\n # backend from the router to avoid inconsistent state if we crash\n # after pushing the update.\n self._checkpoint()\n await self._stop_pending_replicas()\n await self._remove_pending_backends()\n\n async def update_backend_config(self, backend_tag, config_options):\n \"\"\"Set the config for the specified backend.\"\"\"\n async with self.write_lock:\n assert (backend_tag in self.backends\n ), \"Backend {} is not registered.\".format(backend_tag)\n assert isinstance(config_options, dict)\n backend_worker, backend_config, replica_config = self.backends[\n backend_tag]\n\n backend_config.update(config_options)\n self.backends[backend_tag] = (backend_worker, backend_config,\n replica_config)\n\n # Scale the replicas with the new configuration.\n self._scale_replicas(backend_tag, backend_config.num_replicas)\n\n # NOTE(edoakes): we must write a checkpoint before pushing the\n # update to avoid inconsistent state if we crash after pushing the\n # update.\n self._checkpoint()\n\n # Inform the router about change in configuration\n # (particularly for setting max_batch_size).\n await self.router.set_backend_config.remote(\n backend_tag, backend_config)\n\n await self._start_pending_replicas()\n await self._stop_pending_replicas()\n\n def get_backend_config(self, backend_tag):\n \"\"\"Get the current config for the specified backend.\"\"\"\n assert (backend_tag in self.backends\n ), \"Backend {} is not registered.\".format(backend_tag)\n return self.backends[backend_tag][2]\n"
] | [
[
"numpy.zeros"
],
[
"numpy.isclose"
]
] |
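The first file in the record above builds the advantage (A) and value (V) heads for dueling DQN; per its docstring, Q-values are then computed as Q = (A - mean[A]) + V. The short numpy sketch below only illustrates that aggregation formula; it is not Ray's implementation, since the record defines the two heads but the aggregation itself is not part of this file.

# Standalone illustration of the dueling aggregation described in the
# DQNTorchModel docstring: Q = (A - mean[A]) + V. This is not Ray's code;
# the record above only defines the A- and V-branches.
import numpy as np

def dueling_q_values(advantages, state_value):
    # advantages: shape (batch, num_actions); state_value: shape (batch, 1)
    centered = advantages - advantages.mean(axis=1, keepdims=True)
    return centered + state_value

advantages = np.array([[1.0, 2.0, 3.0]])  # toy A-head output for 3 actions
state_value = np.array([[0.5]])           # toy V-head output
print(dueling_q_values(advantages, state_value))  # -> [[-0.5  0.5  1.5]]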
huynhngoc/deoxys | [
"b2e9936b723807e129fda36d8d6131ca00db558f"
] | [
"src/deoxys/experiment/postprocessor.py"
] | [
"from ..loaders import load_data\nfrom ..utils import load_json_config\n\n\nfrom deoxys_image.patch_sliding import get_patch_indice\nfrom deoxys_vis import read_csv\n\nimport numpy as np\nimport h5py\nimport pandas as pd\nimport os\nfrom time import time\nimport shutil\nimport matplotlib.pyplot as plt\nimport warnings\n\n\nclass H5Metric:\n def __init__(self, ref_file, save_file, metric_name='score',\n predicted_dataset='predicted',\n target_dataset='y', batch_size=4,\n map_file=None, map_column=None):\n self.metric_name = metric_name\n self.ref_file = ref_file\n\n self.predicted = predicted_dataset\n self.target = target_dataset\n\n with h5py.File(ref_file, 'r') as f:\n keys = list(f.keys())\n if target_dataset not in keys:\n self.predicted = [f'{key}/{predicted_dataset}' for key in keys]\n self.target = [f'{key}/{target_dataset}' for key in keys]\n\n self.batch_size = batch_size\n\n self.res_file = save_file\n self.map_file = map_file\n self.map_column = map_column\n\n def get_img_batch(self):\n self.scores = []\n\n if self.map_file is None:\n if type(self.predicted) == str:\n with h5py.File(self.ref_file, 'r') as f:\n size = f[self.target].shape[0]\n\n for i in range(0, size, self.batch_size):\n with h5py.File(self.ref_file, 'r') as f:\n predicted = f[self.predicted][i:i+self.batch_size]\n targets = f[self.target][i:i+self.batch_size]\n yield targets, predicted\n else:\n for pred, target in zip(self.predicted, self.target):\n with h5py.File(self.ref_file, 'r') as f:\n size = f[target].shape[0]\n\n for i in range(0, size, self.batch_size):\n with h5py.File(self.ref_file, 'r') as f:\n predicted = f[pred][i:i+self.batch_size]\n targets = f[target][i:i+self.batch_size]\n yield targets, predicted\n else: # handle 3d with different sizes\n map_df = pd.read_csv(self.map_file)\n map_data = map_df[self.map_column].values\n\n for idx in map_data:\n with h5py.File(self.ref_file, 'r') as f:\n predicted = f[self.predicted][str(idx)][:]\n targets = f[self.target][str(idx)][:]\n yield np.expand_dims(targets, axis=0), np.expand_dims(\n predicted, axis=0)\n\n def update_score(self, scores):\n self.scores.extend(scores)\n\n def save_score(self):\n if os.path.isfile(self.res_file):\n df = pd.read_csv(self.res_file)\n df[f'{self.metric_name}'] = self.scores\n else:\n df = pd.DataFrame(self.scores, columns=[f'{self.metric_name}'])\n\n df.to_csv(self.res_file, index=False)\n\n def post_process(self, **kwargs):\n for targets, prediction in self.get_img_batch():\n scores = self.calculate_metrics(\n targets, prediction, **kwargs)\n self.update_score(scores)\n\n self.save_score()\n\n def calculate_metrics(targets, predictions, **kwargs):\n raise NotImplementedError\n\n\nclass H5CalculateFScore(H5Metric):\n def __init__(self, ref_file, save_file, metric_name='f1_score',\n predicted_dataset='predicted',\n target_dataset='y', batch_size=4, beta=1, threshold=None,\n map_file=None, map_column=None):\n super().__init__(ref_file, save_file, metric_name,\n predicted_dataset,\n target_dataset, batch_size,\n map_file, map_column)\n self.threshold = 0.5 if threshold is None else threshold\n self.beta = beta\n\n def calculate_metrics(self, y_true, y_pred, **kwargs):\n assert len(y_true) == len(y_pred), \"Shape not match\"\n eps = 1e-8\n size = len(y_true.shape)\n reduce_ax = tuple(range(1, size))\n\n y_pred = (y_pred > self.threshold).astype(y_pred.dtype)\n if y_pred.ndim - y_true.ndim == 1 and y_pred.shape[-1] == 1:\n y_pred = y_pred[..., 0]\n\n true_positive = np.sum(y_pred * y_true, axis=reduce_ax)\n target_positive = 
np.sum(y_true, axis=reduce_ax)\n predicted_positive = np.sum(y_pred, axis=reduce_ax)\n\n fb_numerator = (1 + self.beta ** 2) * true_positive + eps\n fb_denominator = (\n (self.beta ** 2) * target_positive + predicted_positive + eps\n )\n\n return fb_numerator / fb_denominator\n\n\nclass H5MetaDataMapping:\n def __init__(self, ref_file, save_file, folds, fold_prefix='fold',\n dataset_names=None):\n self.ref_file = ref_file\n self.save_file = save_file\n if fold_prefix:\n self.folds = ['{}_{}'.format(\n fold_prefix, fold) for fold in folds]\n else:\n self.folds = folds\n\n self.dataset_names = dataset_names\n\n def post_process(self, *args, **kwargs):\n data = {dataset_name: [] for dataset_name in self.dataset_names}\n for fold in self.folds:\n with h5py.File(self.ref_file, 'r') as f:\n for dataset_name in self.dataset_names:\n meta_data = f[fold][dataset_name][:]\n dtype = meta_data.dtype.name\n if 'int' not in dtype and 'float' not in dtype:\n meta_data = meta_data.astype(str)\n data[dataset_name].extend(meta_data)\n\n df = pd.DataFrame(data)\n df.to_csv(self.save_file, index=False)\n\n\nclass H5Merge2dSlice:\n def __init__(self, ref_file, map_file, map_column, merge_file, save_file,\n predicted_dataset='predicted', target_dataset='y',\n input_dataset='x'):\n self.ref_file = ref_file\n self.map_file = map_file\n self.map_column = map_column\n self.merge_file = merge_file\n self.save_file = save_file\n\n self.predicted = predicted_dataset\n self.target = target_dataset\n self.inputs = input_dataset\n\n with h5py.File(ref_file, 'r') as f:\n keys = list(f.keys())\n if input_dataset not in keys:\n self.predicted = [f'{key}/{predicted_dataset}' for key in keys]\n self.target = [f'{key}/{target_dataset}' for key in keys]\n self.inputs = [f'{key}/{input_dataset}' for key in keys]\n\n def post_process(self):\n map_df = pd.read_csv(self.map_file)\n map_data = map_df[self.map_column].values\n\n unique_val = []\n\n first, last = map_data[0], map_data[-1]\n\n tmp = np.concatenate([[first], map_data, [last]])\n indice = np.where(tmp[1:] != tmp[:-1])[0]\n indice = np.concatenate([[0], indice, [len(map_data)]])\n\n if type(self.inputs) == str:\n with h5py.File(self.merge_file, 'w') as mf:\n mf.create_group(self.inputs)\n mf.create_group(self.target)\n mf.create_group(self.predicted)\n\n for i in range(len(indice) - 1):\n start = indice[i]\n end = indice[i+1]\n\n unique_val.append(map_data[start])\n\n assert map_data[start] == map_data[end-1], \"id not match\"\n\n curr_name = str(map_data[start])\n with h5py.File(self.ref_file, 'r') as f:\n img = f[self.inputs][start:end]\n with h5py.File(self.merge_file, 'a') as mf:\n mf[self.inputs].create_dataset(\n curr_name, data=img, compression=\"gzip\")\n\n with h5py.File(self.ref_file, 'r') as f:\n img = f[self.target][start:end]\n with h5py.File(self.merge_file, 'a') as mf:\n mf[self.target].create_dataset(\n curr_name, data=img, compression=\"gzip\")\n\n with h5py.File(self.ref_file, 'r') as f:\n img = f[self.predicted][start:end]\n with h5py.File(self.merge_file, 'a') as mf:\n mf[self.predicted].create_dataset(\n curr_name, data=img, compression=\"gzip\")\n else:\n inputs = self.inputs[0].split('/')[-1]\n target = self.target[0].split('/')[-1]\n predicted = self.predicted[0].split('/')[-1]\n with h5py.File(self.merge_file, 'w') as mf:\n mf.create_group(inputs)\n mf.create_group(target)\n mf.create_group(predicted)\n\n offset = 0\n curr_data_idx = 0\n\n with h5py.File(self.ref_file, 'r') as f:\n total = f[self.inputs[curr_data_idx]].shape[0]\n\n for i in 
range(len(indice) - 1):\n if indice[i] - offset >= total:\n offset = indice[i]\n curr_data_idx += 1\n\n with h5py.File(self.ref_file, 'r') as f:\n total = f[self.inputs[curr_data_idx]].shape[0]\n\n map_start, map_end = indice[i], indice[i+1]\n\n start = indice[i] - offset\n end = indice[i+1] - offset\n\n unique_val.append(map_data[map_start])\n\n assert map_data[map_start] == map_data[map_end -\n 1], \"id not match\"\n\n curr_name = str(map_data[map_start])\n\n with h5py.File(self.ref_file, 'r') as f:\n img = f[self.inputs[curr_data_idx]][start:end]\n with h5py.File(self.merge_file, 'a') as mf:\n mf[inputs].create_dataset(\n curr_name, data=img, compression=\"gzip\")\n\n with h5py.File(self.ref_file, 'r') as f:\n img = f[self.target[curr_data_idx]][start:end]\n with h5py.File(self.merge_file, 'a') as mf:\n mf[target].create_dataset(\n curr_name, data=img, compression=\"gzip\")\n\n with h5py.File(self.ref_file, 'r') as f:\n img = f[self.predicted[curr_data_idx]][start:end]\n with h5py.File(self.merge_file, 'a') as mf:\n mf[predicted].create_dataset(\n curr_name, data=img, compression=\"gzip\")\n\n df = pd.DataFrame(data=unique_val, columns=[self.map_column])\n df.to_csv(self.save_file, index=False)\n\n\nclass H5Transform3d:\n def __init__(self, ref_file, map_file, map_column, merge_file,\n predicted_dataset='predicted', target_dataset='y',\n input_dataset='x'):\n self.ref_file = ref_file\n self.map_file = map_file\n self.map_column = map_column\n self.merge_file = merge_file\n\n self.predicted = predicted_dataset\n self.target = target_dataset\n self.inputs = input_dataset\n\n with h5py.File(ref_file, 'r') as f:\n keys = list(f.keys())\n if input_dataset not in keys:\n self.predicted = [f'{key}/{predicted_dataset}' for key in keys]\n self.target = [f'{key}/{target_dataset}' for key in keys]\n self.inputs = [f'{key}/{input_dataset}' for key in keys]\n\n def post_process(self):\n map_df = pd.read_csv(self.map_file)\n map_data = map_df[self.map_column].values\n\n first, last = map_data[0], map_data[-1]\n\n tmp = np.concatenate([[first], map_data, [last]])\n indice = np.where(tmp[1:] != tmp[:-1])[0]\n indice = np.concatenate([[0], indice, [len(map_data)]])\n\n if type(self.inputs) == str:\n with h5py.File(self.merge_file, 'w') as mf:\n mf.create_group(self.inputs)\n mf.create_group(self.target)\n mf.create_group(self.predicted)\n\n for i in range(len(map_data)):\n curr_name = str(map_data[i])\n with h5py.File(self.ref_file, 'r') as f:\n img = f[self.inputs][i]\n with h5py.File(self.merge_file, 'a') as mf:\n mf[self.inputs].create_dataset(\n curr_name, data=img, compression=\"gzip\")\n\n with h5py.File(self.ref_file, 'r') as f:\n img = f[self.target][i]\n with h5py.File(self.merge_file, 'a') as mf:\n mf[self.target].create_dataset(\n curr_name, data=img, compression=\"gzip\")\n\n with h5py.File(self.ref_file, 'r') as f:\n img = f[self.predicted][i]\n with h5py.File(self.merge_file, 'a') as mf:\n mf[self.predicted].create_dataset(\n curr_name, data=img, compression=\"gzip\")\n else: # pragma: no cover\n inputs = self.inputs[0].split('/')[-1]\n target = self.target[0].split('/')[-1]\n predicted = self.predicted[0].split('/')[-1]\n with h5py.File(self.merge_file, 'w') as mf:\n mf.create_group(inputs)\n mf.create_group(target)\n mf.create_group(predicted)\n\n offset = 0\n curr_data_idx = 0\n\n with h5py.File(self.ref_file, 'r') as f:\n total = f[self.inputs[curr_data_idx]].shape[0]\n\n for i in range(len(map_data)):\n if i - offset >= total:\n offset = i\n curr_data_idx += 1\n\n with 
h5py.File(self.ref_file, 'r') as f:\n total = f[self.inputs[curr_data_idx]].shape[0]\n\n curr_name = str(map_data[i])\n\n with h5py.File(self.ref_file, 'r') as f:\n img = f[self.inputs[curr_data_idx]][i-offset]\n with h5py.File(self.merge_file, 'a') as mf:\n mf[inputs].create_dataset(\n curr_name, data=img, compression=\"gzip\")\n\n with h5py.File(self.ref_file, 'r') as f:\n img = f[self.target[curr_data_idx]][i-offset]\n with h5py.File(self.merge_file, 'a') as mf:\n mf[target].create_dataset(\n curr_name, data=img, compression=\"gzip\")\n\n with h5py.File(self.ref_file, 'r') as f:\n img = f[self.predicted[curr_data_idx]][i-offset]\n with h5py.File(self.merge_file, 'a') as mf:\n mf[predicted].create_dataset(\n curr_name, data=img, compression=\"gzip\")\n\n # df = pd.DataFrame(data=unique_val, columns=[self.map_column])\n # df.to_csv(self.save_file, index=False)\n\n\nclass H5MergePatches: # pragma: no cover\n def __init__(self, ref_file, predicted_file,\n map_column, merge_file, save_file,\n patch_size, overlap,\n folds, fold_prefix='fold',\n original_input_dataset='x',\n original_target_dataset='y',\n predicted_dataset='predicted', target_dataset='y',\n input_dataset='x'\n ):\n\n self.ref_file = ref_file\n self.predicted_file = predicted_file\n self.map_column = map_column\n self.merge_file = merge_file\n self.save_file = save_file\n\n self.ref_inputs = original_input_dataset\n self.ref_targets = original_target_dataset\n\n self.predicted = predicted_dataset\n self.target = target_dataset\n self.inputs = input_dataset\n\n if fold_prefix:\n self.folds = ['{}_{}'.format(\n fold_prefix, fold) for fold in folds]\n else:\n self.folds = folds\n\n self.patch_size = patch_size\n self.overlap = overlap\n\n print('merge images of patch', patch_size)\n\n def _save_inputs_target_to_merge_file(self, fold, meta, index):\n with h5py.File(self.ref_file, 'r') as f:\n inputs = f[fold][self.ref_inputs][index]\n targets = f[fold][self.ref_targets][index]\n\n with h5py.File(self.merge_file, 'a') as mf:\n mf[self.inputs].create_dataset(\n meta, data=inputs, compression=\"gzip\")\n mf[self.target].create_dataset(\n meta, data=targets, compression=\"gzip\")\n\n def _merge_patches_to_merge_file(self, meta, start_cursor):\n with h5py.File(self.merge_file, 'r') as mf:\n shape = mf[self.target][meta].shape[:-1]\n\n # fix patch size\n if '__iter__' not in dir(self.patch_size):\n self.patch_size = [self.patch_size] * len(shape)\n\n indice = get_patch_indice(shape, self.patch_size, self.overlap)\n next_cursor = start_cursor + len(indice)\n\n with h5py.File(self.predicted_file, 'r') as f:\n data = f[self.predicted][start_cursor: next_cursor]\n\n predicted = np.zeros(shape)\n weight = np.zeros(shape)\n\n for i in range(len(indice)):\n x, y, z = indice[i]\n w, h, d = self.patch_size\n predicted[x:x+w, y:y+h, z:z+d] = predicted[x:x+w, y:y+h, z:z+d] \\\n + data[i][..., 0]\n weight[x:x+w, y:y+h, z:z+d] = weight[x:x+w, y:y+h, z:z+d] \\\n + np.ones(self.patch_size)\n\n predicted = (predicted/weight)[..., np.newaxis]\n\n with h5py.File(self.merge_file, 'a') as mf:\n mf[self.predicted].create_dataset(\n meta, data=predicted, compression=\"gzip\")\n\n return next_cursor\n\n def post_process(self):\n # create merge file\n with h5py.File(self.merge_file, 'w') as mf:\n mf.create_group(self.inputs)\n mf.create_group(self.target)\n mf.create_group(self.predicted)\n\n data = []\n start_cursor = 0\n for fold in self.folds:\n with h5py.File(self.ref_file, 'r') as f:\n meta_data = f[fold][self.map_column][:]\n data.extend(meta_data)\n for 
index, meta in enumerate(meta_data):\n self._save_inputs_target_to_merge_file(\n fold, str(meta), index)\n start_cursor = self._merge_patches_to_merge_file(\n str(meta), start_cursor)\n\n # create map file\n df = pd.DataFrame(data, columns=[self.map_column])\n df.to_csv(self.save_file, index=False)\n\n\nclass AnalysisPerEpoch: # pragma: no cover\n _markers = ['o-', 'v-', '^-', '<-', '>-',\n '1-', '2-', 's-', 'p-', 'P-',\n '*-', '+-', 'x-', 'D-', 'd-'] * 10 + ['--']\n\n def __init__(self, save_path, log_file_templates, epochs,\n map_column='patient idx', monitor='', model_name=''):\n self.save_path = save_path\n self.log_file_templates = log_file_templates\n self.epochs = epochs\n self.map_column = map_column\n self.monitor = monitor\n self.model_name = model_name or save_path.split('/')[-2]\n\n def post_process(self):\n patient_dice_per_epoch = []\n monitor = self.monitor\n epochs = self.epochs\n map_column = self.map_column\n for epoch in epochs:\n # load each log file\n data = pd.read_csv(self.log_file_templates.format(epoch))\n\n # metric column\n if not monitor:\n monitor = data.columns[-1]\n\n patient_dice_per_epoch.append(data[monitor].values)\n\n # Plot dice per epoch\n patient_idx = data[map_column].values\n\n # print(patient_dice_per_epoch)\n all_data = np.vstack(patient_dice_per_epoch)\n\n df = pd.DataFrame(all_data, columns=patient_idx)\n df.index = epochs\n df.index.name = 'epoch'\n # df['mean'] = df.mean(axis=1)\n df['mean'] = df[[pid for pid in patient_idx]].mean(axis=1)\n best_epoch = df['mean'].idxmax()\n best_metric = df['mean'].max()\n\n plt.figure(figsize=(10, 8))\n df.plot(style=self._markers[:len(patient_idx) + 1], ax=plt.gca())\n plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))\n plt.title(\n f'Model {self.model_name}' +\n f'\\nBest Epoch {best_epoch} - Mean {monitor} {best_metric:.6f}')\n plt.savefig(self.save_path + '/dice_per_epoch.png')\n plt.savefig(self.save_path + '/dice_per_epoch.pdf')\n plt.close('all')\n\n # save to csv\n df.to_csv(self.save_path + '/dice_per_epoch.csv')\n\n violin_df = df[df.columns[:-1]]\n group_df = violin_df.reset_index().melt(\n id_vars=violin_df.columns[:-len(patient_idx)],\n var_name=map_column, value_name=monitor)\n\n def Q1(x):\n return x.quantile(0.25)\n\n def Q3(x):\n return x.quantile(0.75)\n\n def to_int(x):\n return x.astype(int)\n\n group_df.groupby('epoch').agg(\n {monitor: ['min', Q1, 'median', Q3, 'max', 'mean', 'std']})\n\n with open(self.save_path + '/val_summary.txt') as f:\n f.write(str(group_df))\n\n\nclass PostProcessor:\n MODEL_PATH = '/model'\n MODEL_NAME = '/model.{epoch:03d}.h5'\n BEST_MODEL_PATH = '/best'\n PREDICTION_PATH = '/prediction'\n PREDICTION_NAME = '/prediction.{epoch:03d}.h5'\n LOG_FILE = '/logs.csv'\n PERFORMANCE_PATH = '/performance'\n TEST_OUTPUT_PATH = '/test'\n PREDICT_TEST_NAME = '/prediction_test.h5'\n\n def __init__(self, log_base_path='logs',\n temp_base_path='',\n analysis_base_path='',\n run_test=False, new_dataset_params=None):\n self.temp_base_path = temp_base_path\n self.log_base_path = log_base_path\n\n self.update_data_reader(new_dataset_params)\n\n try:\n model_path = log_base_path + self.MODEL_PATH\n model_files = os.listdir(model_path)\n\n self.epochs = [int(filename[-6:-3])\n for filename in model_files]\n except Exception as e: # pragma: no cover\n print('No saved models', e)\n warnings.warn('load_best_model does not work')\n\n if len(self.epochs) == 0:\n print('No saved models in', model_path)\n warnings.warn('load_best_model does not work')\n\n self.run_test = 
run_test\n\n def update_data_reader(self, new_dataset_params):\n model_path = self.log_base_path + self.MODEL_PATH\n\n sample_model_filename = model_path + '/' + os.listdir(model_path)[0]\n\n with h5py.File(sample_model_filename, 'r') as f:\n config = f.attrs['deoxys_config']\n config = load_json_config(config)\n dataset_params = config['dataset_params']\n # update until level 2\n if new_dataset_params is not None:\n for key in new_dataset_params:\n if key in dataset_params:\n dataset_params[key].update(new_dataset_params[key])\n else:\n dataset_params[key] = new_dataset_params[key]\n\n self.dataset_filename = dataset_params['config']['filename']\n self.data_reader = load_data(dataset_params)\n self.dataset_params = dataset_params\n\n def _best_epoch_from_raw_log(self, monitor='', mode='max'):\n print(F'Finding best model based on the {mode}imum {monitor} from '\n 'raw logs')\n\n epochs = self.epochs\n if len(epochs) == 0:\n print('No saved models in', self.log_base_path)\n raise Exception('load_best_model does not work')\n\n logger_path = self.log_base_path + self.LOG_FILE\n if os.path.isfile(logger_path):\n df = read_csv(logger_path, usecols=['epoch', monitor])\n df['epoch'] = df['epoch'] + 1\n # only compare models that were saved\n min_df = df[df['epoch'].isin(epochs)].min()\n min_epoch = df[df['epoch'].isin(epochs)].idxmin()\n max_df = df[df['epoch'].isin(epochs)].max()\n max_epoch = df[df['epoch'].isin(epochs)].idxmax()\n if mode == 'min':\n val = min_df[monitor]\n best_epoch = min_epoch[monitor] + 1\n else:\n val = max_df[monitor]\n best_epoch = max_epoch[monitor] + 1\n else:\n warnings.warn('No log files to check for best model')\n\n print('Best epoch:', best_epoch, f', with {monitor}={val}')\n\n return best_epoch\n\n def get_best_model(self, monitor='', mode='max',\n keep_best_only=True): # pragma: no cover\n best_epoch = self._best_epoch_from_raw_log(monitor, mode)\n\n epochs = self.epochs\n\n for epoch in epochs:\n if epoch == best_epoch or not keep_best_only:\n shutil.copy(\n self.temp_base_path + self.PREDICTION_PATH +\n self.PREDICTION_NAME.format(epoch=epoch),\n self.log_base_path + self.PREDICTION_PATH +\n self.PREDICTION_NAME.format(epoch=epoch))\n\n return self.log_base_path + self.MODEL_PATH + \\\n self.MODEL_NAME.format(epoch=best_epoch)\n\n\nclass SegmentationPostProcessor(PostProcessor):\n MODEL_PATH = '/model'\n MODEL_NAME = '/model.{epoch:03d}.h5'\n BEST_MODEL_PATH = '/best'\n PREDICTION_PATH = '/prediction'\n PREDICTION_NAME = '/prediction.{epoch:03d}.h5'\n LOG_FILE = '/logs.csv'\n PERFORMANCE_PATH = '/performance'\n PREDICTED_IMAGE_PATH = '/images'\n TEST_OUTPUT_PATH = '/test'\n PREDICT_TEST_NAME = '/prediction_test.h5'\n SINGLE_MAP_PATH = '/single_map'\n SINGLE_MAP_NAME = '/logs.{epoch:03d}.csv'\n\n MAP_PATH = '/logs'\n MAP_NAME = '/logs.{epoch:03d}.csv'\n\n TEST_SINGLE_MAP_NAME = '/single_result.csv'\n TEST_MAP_NAME = '/result.csv'\n\n def __init__(self, log_base_path='logs',\n temp_base_path='',\n analysis_base_path='',\n map_meta_data=None, main_meta_data='',\n run_test=False, new_dataset_params=None):\n self.temp_base_path = temp_base_path\n self.log_base_path = log_base_path\n self.analysis_base_path = analysis_base_path or log_base_path\n\n if not os.path.exists(self.analysis_base_path):\n os.mkdir(self.analysis_base_path)\n\n if not os.path.exists(self.analysis_base_path + self.PREDICTION_PATH):\n os.mkdir(self.analysis_base_path + self.PREDICTION_PATH)\n\n self.update_data_reader(new_dataset_params)\n try:\n temp_prediction_path = temp_base_path + 
self.PREDICTION_PATH\n predicted_files = os.listdir(temp_prediction_path)\n\n self.epochs = [int(filename[-6:-3])\n for filename in predicted_files]\n except Exception as e: # pragma: no cover\n print(\"Error while getting epochs by temp folder:\", e)\n print(\"Using post-process log files as alternative\")\n try:\n log_files = os.listdir(self.log_base_path + self.MAP_PATH)\n self.epochs = [int(filename[-7:-4])\n for filename in log_files]\n except Exception as e:\n print(\"Error while getting epochs by log files:\", e)\n print(\"Using dummy epochs as alternative.\")\n self.epochs = [5]\n print(\"Post-process only works on test data.\")\n\n if map_meta_data:\n if type(map_meta_data) == str:\n self.map_meta_data = map_meta_data.split(',')\n else:\n self.map_meta_data = map_meta_data\n else:\n self.map_meta_data = ['patient_idx', 'slice_idx']\n\n if main_meta_data:\n self.main_meta_data = main_meta_data\n else:\n self.main_meta_data = self.map_meta_data[0]\n\n self.run_test = run_test\n\n # def update_data_reader(self, new_dataset_params):\n # model_path = self.log_base_path + self.MODEL_PATH\n\n # sample_model_filename = model_path + '/' + os.listdir(model_path)[0]\n\n # with h5py.File(sample_model_filename, 'r') as f:\n # config = f.attrs['deoxys_config']\n # config = load_json_config(config)\n # dataset_params = config['dataset_params']\n # # update until level 2\n # if new_dataset_params is not None:\n # for key in new_dataset_params:\n # if key in dataset_params:\n # dataset_params[key].update(new_dataset_params[key])\n # else:\n # dataset_params[key] = new_dataset_params[key]\n\n # self.dataset_filename = dataset_params['config']['filename']\n # self.data_reader = load_data(dataset_params)\n # self.dataset_params = dataset_params\n\n def map_2d_meta_data(self):\n print('mapping 2d meta data')\n if not self.run_test:\n map_folder = self.log_base_path + self.SINGLE_MAP_PATH\n\n if not os.path.exists(map_folder):\n os.makedirs(map_folder)\n map_filename = map_folder + self.SINGLE_MAP_NAME\n\n for epoch in self.epochs:\n H5MetaDataMapping(\n ref_file=self.dataset_filename,\n save_file=map_filename.format(epoch=epoch),\n folds=self.data_reader.val_folds,\n fold_prefix='',\n dataset_names=self.map_meta_data).post_process()\n else:\n test_folder = self.log_base_path + self.TEST_OUTPUT_PATH\n if not os.path.exists(test_folder):\n os.makedirs(test_folder)\n\n map_filename = test_folder + self.TEST_SINGLE_MAP_NAME\n H5MetaDataMapping(\n ref_file=self.dataset_filename,\n save_file=map_filename,\n folds=self.data_reader.test_folds,\n fold_prefix='',\n dataset_names=self.map_meta_data).post_process()\n\n return self\n\n def calculate_fscore_single(self):\n if not self.run_test:\n print('calculating dice score per items in val set')\n predicted_path = self.temp_base_path + \\\n self.PREDICTION_PATH + self.PREDICTION_NAME\n map_folder = self.log_base_path + self.SINGLE_MAP_PATH\n map_filename = map_folder + self.SINGLE_MAP_NAME\n for epoch in self.epochs:\n H5CalculateFScore(\n predicted_path.format(epoch=epoch),\n map_filename.format(epoch=epoch)\n ).post_process()\n else:\n print('calculating dice score per items in test set')\n predicted_path = self.temp_base_path + \\\n self.TEST_OUTPUT_PATH + self.PREDICT_TEST_NAME\n test_folder = self.log_base_path + self.TEST_OUTPUT_PATH\n map_filename = test_folder + self.TEST_SINGLE_MAP_NAME\n\n H5CalculateFScore(\n predicted_path,\n map_filename\n ).post_process()\n\n return self\n\n def calculate_fscore_single_3d(self):\n self.calculate_fscore_single()\n 
if not self.run_test:\n map_folder = self.log_base_path + self.SINGLE_MAP_PATH\n\n main_log_folder = self.log_base_path + self.MAP_PATH\n try:\n os.rename(map_folder, main_log_folder)\n except Exception as e:\n print(\"Files exist:\", e)\n print(\"Copying new logs file\")\n os.rename(main_log_folder,\n main_log_folder + '-' + str(time()))\n os.rename(map_folder, main_log_folder)\n\n for epoch in self.epochs:\n H5Transform3d(\n ref_file=self.temp_base_path + self.PREDICTION_PATH +\n self.PREDICTION_NAME.format(epoch=epoch),\n map_file=main_log_folder +\n self.MAP_NAME.format(epoch=epoch),\n map_column=self.main_meta_data,\n merge_file=self.log_base_path + self.PREDICTION_PATH +\n self.PREDICTION_NAME.format(epoch=epoch),\n ).post_process()\n else:\n test_folder = self.log_base_path + self.TEST_OUTPUT_PATH\n map_filename = test_folder + self.TEST_SINGLE_MAP_NAME\n\n main_result_file_name = test_folder + self.TEST_MAP_NAME\n try:\n os.rename(map_filename, main_result_file_name)\n except Exception as e:\n print(\"Files exist:\", e)\n print(\"Copying new result file\")\n os.rename(main_result_file_name,\n main_result_file_name + '-' + str(time()) + '.csv')\n os.rename(map_filename, main_result_file_name)\n\n H5Transform3d(\n ref_file=self.temp_base_path + self.TEST_OUTPUT_PATH +\n self.PREDICT_TEST_NAME,\n map_file=main_result_file_name,\n map_column=self.main_meta_data,\n merge_file=test_folder + self.PREDICT_TEST_NAME,\n ).post_process()\n\n def merge_2d_slice(self):\n print('merge 2d slice to 3d images')\n if not self.run_test:\n predicted_path = self.temp_base_path + \\\n self.PREDICTION_PATH + self.PREDICTION_NAME\n map_folder = self.log_base_path + self.SINGLE_MAP_PATH\n map_filename = map_folder + self.SINGLE_MAP_NAME\n\n merge_path = self.log_base_path + \\\n self.PREDICTION_PATH + self.PREDICTION_NAME\n\n main_log_folder = self.log_base_path + self.MAP_PATH\n\n if not os.path.exists(main_log_folder):\n os.makedirs(main_log_folder)\n main_log_filename = main_log_folder + self.MAP_NAME\n\n for epoch in self.epochs:\n H5Merge2dSlice(\n predicted_path.format(epoch=epoch),\n map_filename.format(epoch=epoch),\n self.main_meta_data,\n merge_path.format(epoch=epoch),\n main_log_filename.format(epoch=epoch)\n ).post_process()\n else:\n predicted_path = self.temp_base_path + \\\n self.TEST_OUTPUT_PATH + self.PREDICT_TEST_NAME\n test_folder = self.log_base_path + self.TEST_OUTPUT_PATH\n map_filename = test_folder + self.TEST_SINGLE_MAP_NAME\n merge_path = test_folder + self.PREDICT_TEST_NAME\n main_result_file_name = test_folder + self.TEST_MAP_NAME\n\n H5Merge2dSlice(\n predicted_path,\n map_filename,\n self.main_meta_data,\n merge_path,\n main_result_file_name\n ).post_process()\n\n return self\n\n def merge_3d_patches(self): # pragma: no cover\n print('merge 3d patches to 3d images')\n if not self.run_test:\n predicted_path = self.temp_base_path + \\\n self.PREDICTION_PATH + self.PREDICTION_NAME\n # map_folder = self.log_base_path + self.SINGLE_MAP_PATH\n # map_filename = map_folder + self.SINGLE_MAP_NAME\n\n merge_path = self.analysis_base_path + \\\n self.PREDICTION_PATH + self.PREDICTION_NAME\n\n main_log_folder = self.log_base_path + self.MAP_PATH\n\n if not os.path.exists(main_log_folder):\n os.makedirs(main_log_folder)\n main_log_filename = main_log_folder + self.MAP_NAME\n\n for epoch in self.epochs:\n H5MergePatches(\n ref_file=self.dataset_filename,\n predicted_file=predicted_path.format(epoch=epoch),\n map_column=self.main_meta_data,\n merge_file=merge_path.format(epoch=epoch),\n 
save_file=main_log_filename.format(epoch=epoch),\n patch_size=self.data_reader.patch_size,\n overlap=self.data_reader.overlap,\n folds=self.data_reader.val_folds,\n fold_prefix='',\n original_input_dataset=self.data_reader.x_name,\n original_target_dataset=self.data_reader.y_name,\n ).post_process()\n else:\n predicted_path = self.temp_base_path + \\\n self.TEST_OUTPUT_PATH + self.PREDICT_TEST_NAME\n test_folder = self.log_base_path + self.TEST_OUTPUT_PATH\n merge_path = test_folder + self.PREDICT_TEST_NAME\n main_result_file_name = test_folder + self.TEST_MAP_NAME\n\n if not os.path.exists(test_folder):\n os.makedirs(test_folder)\n\n H5MergePatches(\n ref_file=self.dataset_filename,\n predicted_file=predicted_path,\n map_column=self.main_meta_data,\n merge_file=merge_path,\n save_file=main_result_file_name,\n patch_size=self.data_reader.patch_size,\n overlap=self.data_reader.overlap,\n folds=self.data_reader.test_folds,\n fold_prefix='',\n original_input_dataset=self.data_reader.x_name,\n original_target_dataset=self.data_reader.y_name,\n ).post_process()\n\n return self\n\n def calculate_fscore(self):\n print('calculating dice score per 3d image')\n if not self.run_test:\n merge_path = self.analysis_base_path + \\\n self.PREDICTION_PATH + self.PREDICTION_NAME\n\n main_log_folder = self.log_base_path + self.MAP_PATH\n main_log_filename = main_log_folder + self.MAP_NAME\n\n for epoch in self.epochs:\n H5CalculateFScore(\n merge_path.format(epoch=epoch),\n main_log_filename.format(epoch=epoch),\n map_file=main_log_filename.format(epoch=epoch),\n map_column=self.main_meta_data\n ).post_process()\n else:\n test_folder = self.log_base_path + self.TEST_OUTPUT_PATH\n merge_path = test_folder + self.PREDICT_TEST_NAME\n main_result_file_name = test_folder + self.TEST_MAP_NAME\n\n H5CalculateFScore(\n merge_path,\n main_result_file_name,\n map_file=main_result_file_name,\n map_column=self.main_meta_data\n ).post_process()\n\n return self\n\n def get_best_model(self, monitor='', mode='max', keep_best_only=True,\n use_raw_log=False):\n print('finding best model')\n\n epochs = self.epochs\n\n if use_raw_log:\n best_epoch = self._best_epoch_from_raw_log(monitor, mode)\n\n else:\n res_df = pd.DataFrame(epochs, columns=['epochs'])\n\n results = []\n results_path = self.log_base_path + self.MAP_PATH + self.MAP_NAME\n\n for epoch in epochs:\n df = pd.read_csv(results_path.format(epoch=epoch))\n if not monitor:\n monitor = df.columns[-1]\n\n results.append(df[monitor].mean())\n\n res_df[monitor] = results\n if mode == 'max':\n best_epoch = epochs[res_df[monitor].argmax()]\n else:\n best_epoch = epochs[res_df[monitor].argmin()]\n\n res_df.to_csv(self.log_base_path + '/log_new.csv', index=False)\n\n print('Best epoch:', best_epoch)\n\n if keep_best_only:\n print('Keep best results only. 
Deleting prediction files...')\n for epoch in epochs:\n if epoch != best_epoch:\n predicted_file = self.analysis_base_path + \\\n self.PREDICTION_PATH + \\\n self.PREDICTION_NAME.format(epoch=epoch)\n if os.path.exists(predicted_file):\n os.remove(predicted_file)\n elif self.log_base_path != self.analysis_base_path:\n # move the best prediction to main folder\n if os.path.exists(self.analysis_base_path +\n self.PREDICTION_PATH +\n self.PREDICTION_NAME.format(epoch=epoch)\n ):\n shutil.copy(\n self.analysis_base_path + self.PREDICTION_PATH +\n self.PREDICTION_NAME.format(epoch=epoch),\n self.log_base_path + self.PREDICTION_PATH +\n self.PREDICTION_NAME.format(epoch=epoch))\n\n os.remove(self.analysis_base_path +\n self.PREDICTION_PATH +\n self.PREDICTION_NAME.format(epoch=epoch))\n elif self.log_base_path != self.analysis_base_path:\n # Copy the best prediction to the main folder\n shutil.copy(self.analysis_base_path + self.PREDICTION_PATH +\n self.PREDICTION_NAME.format(epoch=best_epoch),\n self.log_base_path + self.PREDICTION_PATH +\n self.PREDICTION_NAME.format(epoch=best_epoch))\n\n return self.log_base_path + self.MODEL_PATH + \\\n self.MODEL_NAME.format(epoch=best_epoch)\n\n def get_best_performance_images(self, monitor='', best_num=2, worst_num=2):\n epochs = self.epochs\n results_path = self.log_base_path + self.MAP_PATH + self.MAP_NAME\n\n results = []\n for epoch in epochs:\n # only plot things in prediction\n if os.path.exists(self.log_base_path + self.PREDICTION_PATH +\n self.PREDICTION_NAME.format(epoch=epoch)):\n df = pd.read_csv(results_path.format(epoch=epoch))\n\n if not monitor:\n monitor = df.columns[-1]\n largest_indice = df[monitor].nlargest(best_num, keep='all')\n smallest_indice = df[monitor].nsmallest(\n worst_num, keep='all')\n\n indice = list(largest_indice.index) + \\\n list(smallest_indice.index)\n\n # `values` will implicitly cast all item to the same type\n # take out each column first, then use `values`\n results.append(\n {'file_name': self.PREDICTION_NAME.format(epoch=epoch),\n 'ids': df[self.main_meta_data].values[indice],\n 'values': df[monitor].values[indice]})\n\n return results\n\n def get_best_performance_images_test_set(\n self, monitor='', best_num=2, worst_num=2):\n\n test_folder = self.log_base_path + self.TEST_OUTPUT_PATH\n main_result_file_name = test_folder + self.TEST_MAP_NAME\n\n df = pd.read_csv(main_result_file_name)\n\n if not monitor:\n monitor = df.columns[-1]\n largest_indice = df[monitor].nlargest(best_num, keep='all')\n smallest_indice = df[monitor].nsmallest(\n worst_num, keep='all')\n\n indice = list(largest_indice.index) + \\\n list(smallest_indice.index)\n\n # `values` will implicitly cast all item to the same type\n # take out each column first, then use `values`\n return {'ids': df[self.main_meta_data].values[indice],\n 'values': df[monitor].values[indice]}\n"
] | [
[
"numpy.vstack",
"numpy.sum",
"numpy.ones",
"matplotlib.pyplot.legend",
"numpy.zeros",
"pandas.read_csv",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"pandas.DataFrame",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.title",
"numpy.where",
"numpy.expand_dims",
"matplotlib.pyplot.close",
"numpy.concatenate"
]
] |
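The H5MergePatches class in the row above reconstructs full 3-D volumes by summing overlapping patch predictions into an accumulator and dividing by a per-voxel weight map. A minimal 1-D sketch of that overlap-averaging idea, using toy arrays rather than the class's HDF5-based API (all values below are made up for illustration):

import numpy as np

# Toy analogue of the loop in H5MergePatches._merge_patches_to_merge_file:
# overlapping patches are added into `pred`, contribution counts into `weight`,
# and the merged output is their ratio.
shape = (8,)
patch_size = 4
starts = [0, 2, 4]                                   # overlapping patch origins
patches = [np.full(patch_size, v) for v in (1.0, 2.0, 3.0)]

pred = np.zeros(shape)
weight = np.zeros(shape)
for start, patch in zip(starts, patches):
    pred[start:start + patch_size] += patch
    weight[start:start + patch_size] += np.ones(patch_size)

merged = pred / weight                               # overlapping cells become averages
print(merged)                                        # [1.  1.  1.5 1.5 2.5 2.5 3.  3. ]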
jimilee/image-classification | [
"abd07abbbf3ed2e38bb7fda6f4bfeb28dd7ffaae"
] | [
"utils/losses.py"
] | [
"from torch import nn, Tensor\nfrom typing import Union\nfrom torch.nn import CrossEntropyLoss\n\n\nclass LabelSmoothCrossEntropy(nn.Module):\n def __init__(self, smoothing=0.1):\n super().__init__()\n assert smoothing < 1.0\n self.smoothing = smoothing\n self.confidence = 1. - smoothing\n self.log_softmax = nn.LogSoftmax(dim=-1)\n\n def forward(self, pred: Tensor, target: Tensor) -> Tensor:\n pred = self.log_softmax(pred)\n nll_loss = -pred.gather(dim=-1, index=target.unsqueeze(1)).squeeze(1)\n smooth_loss = -pred.mean(dim=-1)\n loss = self.confidence * nll_loss + self.smoothing * smooth_loss\n return loss.mean()\n\n\nclass DistillationLoss(nn.Module):\n \"\"\"Distilling the Knowledge in a Neural Network\n https://arxiv.org/pdf/1503.02531.pdf\n \"\"\"\n def __init__(self, alpha: float = 0.95, temp: Union[float, int] = 6) -> None:\n super().__init__()\n self.alpha = alpha\n self.temp = temp\n self.kd_loss = nn.KLDivLoss()\n self.entropy_loss = nn.CrossEntropyLoss()\n self.log_softmax = nn.LogSoftmax(dim=1)\n self.softmax = nn.Softmax(dim=1)\n\n def forward(self, pred_student: Tensor, pred_teacher: Tensor, target: Tensor) -> Tensor:\n loss = self.kd_loss(self.log_softmax(pred_student / self.temp), self.softmax(pred_teacher / self.temp)) * (self.alpha * self.temp * self.temp)\n loss += self.entropy_loss(pred_student, target) * (1. - self.alpha)\n return loss\n"
] | [
[
"torch.nn.Softmax",
"torch.nn.CrossEntropyLoss",
"torch.nn.LogSoftmax",
"torch.nn.KLDivLoss"
]
] |
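The LabelSmoothCrossEntropy and DistillationLoss modules in utils/losses.py above are ordinary nn.Module losses. A minimal usage sketch for the label-smoothing loss (the import path utils.losses is assumed from the file path listed in this row; the tensors are dummy data):

import torch
from utils.losses import LabelSmoothCrossEntropy    # module path assumed from this row

criterion = LabelSmoothCrossEntropy(smoothing=0.1)
logits = torch.randn(4, 10, requires_grad=True)      # batch of 4 samples, 10 classes
targets = torch.randint(0, 10, (4,))                 # integer class labels
loss = criterion(logits, targets)                    # smoothed cross-entropy, scalar
loss.backward()                                      # gradients flow back to the logits
print(loss.item())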
kasoju2712/Science_of_success | [
"fbaa92a8d035f7869162bf45338fc9e174492b98"
] | [
"bibmatch/parse_wos.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\nimport os\nimport numpy as np\nimport pandas as pd\nfrom bibmatch.authorclass import author\n\ndef adf2author(aid, adf):\n author_dict = {}\n author_dict['all_names'] = set(adf['FullName'])\n author_dict['prefered_name'] = sorted(author_dict['all_names'], key = len)[-1]\n author_dict['articles'] = set([t for t in adf['Title'].dropna()])\n author_dict['co_authors'] = set([name.strip() for namelist in adf['CoAuthors'].dropna() for name in namelist.split('|') if len(name.strip()) > 0])\n author_dict['institutions'] = set([t for t in adf['Organization'].dropna()])\n a = author(author_dict)\n a.set_id(aid)\n a.process_names()\n return a\n\ndef parse_wos_authors(full_df, groupby_col='AuthorDAIS'):\n alist = [adf2author(aid, adf) for aid, adf in full_df.groupby(groupby_col)]\n return alist\n\ndef load_wos_data(name = 'article', year_list = None, columns = None,\n duplicate_subset = ['ArticleID'], path2rawdata = '',\n dropna = None, isindict = None, verbose = False):\n\n if year_list is None:\n year_list = [1900] + list(range(1945, 2017))\n year_list = map(str, year_list)\n\n file_df_list = []\n ifile = 0\n for year in year_list:\n for df_file in os.listdir(os.path.join(path2rawdata, name)):\n if \"WR_\" + year in df_file:\n\n fname = os.path.join(path2rawdata, name, df_file)\n subdf = pd.read_hdf(fname, mode = 'r')\n\n if type(columns) is list:\n subdf = subdf[columns]\n\n if type(dropna) is list:\n subdf.dropna(subset = dropna, inplace = True, how = 'any')\n\n if type(isindict) is dict:\n for isinkey, isinlist in isindict.items():\n subdf = subdf[isin_sorted(subdf[isinkey], isinlist)]\n\n # date tag to keep most recent entry\n filetag = df_file.split('_')[2]\n subdf['filetag'] = filetag\n\n file_df_list.append(subdf)\n ifile += 1\n if verbose and ifile % verbose == 0:\n print(ifile)\n\n df = pd.concat(file_df_list)\n\n # take most recent entries according to filetag\n df.sort_values(by = 'filetag', inplace = True)\n df.drop_duplicates(subset = duplicate_subset, keep = 'last', inplace = True)\n del df['filetag']\n\n if verbose:\n print(\"Final DF Shape\", df.shape)\n\n return df\n\ndef isin_sorted(values2check, masterlist):\n index = np.searchsorted(masterlist, values2check, side = 'left')\n index[index >= masterlist.shape[0]] = masterlist.shape[0] - 1\n return values2check == masterlist[index]"
] | [
[
"pandas.read_hdf",
"pandas.concat",
"numpy.searchsorted"
]
] |
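isin_sorted in the row above performs membership testing against a sorted master list via np.searchsorted rather than np.isin. A small self-contained illustration with toy data (the function body is copied from the row; the example arrays are invented):

import numpy as np

def isin_sorted(values2check, masterlist):
    # masterlist must be a sorted 1-D array for searchsorted to be valid
    index = np.searchsorted(masterlist, values2check, side='left')
    index[index >= masterlist.shape[0]] = masterlist.shape[0] - 1
    return values2check == masterlist[index]

master = np.array([2, 5, 7, 11])                     # sorted reference values
values = np.array([5, 6, 11, 20])
print(isin_sorted(values, master))                   # [ True False  True False]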
lynnmunday/neml | [
"2c0e3db9f849345dba01d64fc8488e2b97e477dd"
] | [
"test/test_sliprules.py"
] | [
"#!/usr/bin/env python3\n\nfrom neml import history, interpolate\nfrom neml.math import tensors, rotations\nfrom neml.cp import crystallography, slipharden, sliprules\n\nfrom common import differentiate\nfrom nicediff import *\n\nimport unittest\nimport numpy as np\nimport numpy.linalg as la\n\nclass CommonSlipRule(object):\n def test_d_slip_d_stress(self):\n for g in range(self.L.ngroup):\n for i in range(self.L.nslip(g)):\n d = self.model.d_slip_d_s(g, i, self.S, self.Q, self.H, self.L, self.T, self.fixed)\n nd = diff_scalar_symmetric(lambda s: self.model.slip(g, i, s, self.Q, self.H, \n self.L, self.T, self.fixed), self.S)\n self.assertEqual(d, nd)\n\n def test_d_slip_d_hist(self):\n for g in range(self.L.ngroup):\n for i in range(self.L.nslip(g)):\n d = np.array(self.model.d_slip_d_h(g, i, self.S, self.Q, self.H, self.L, self.T, self.fixed))\n nd = np.array(diff_history_scalar(lambda h: self.model.slip(g, i, self.S, self.Q, h,\n self.L, self.T, self.fixed), self.H))\n self.assertTrue(np.allclose(nd.reshape(d.shape), d))\n\n def test_d_hist_rate_d_stress(self):\n d = np.array(self.model.d_hist_rate_d_stress(self.S, self.Q, self.H, self.L, self.T, self.fixed))\n nd = diff_history_symmetric(lambda s: self.model.hist_rate(s, self.Q, self.H, self.L,\n self.T, self.fixed), self.S)\n self.assertTrue(np.allclose(nd.reshape(d.shape), d))\n\n def test_d_hist_rate_d_hist(self):\n d = np.array(self.model.d_hist_rate_d_hist(self.S, self.Q, self.H, self.L, self.T, self.fixed))\n nd = diff_history_history(lambda h: self.model.hist_rate(self.S, self.Q, h, self.L,\n self.T, self.fixed), self.H)\n self.assertTrue(np.allclose(nd.reshape(d.shape), d))\n\nclass CommonSlipStrengthSlipRule(object):\n def test_init_hist(self):\n H1 = history.History()\n self.model.populate_history(H1)\n self.model.init_history(H1)\n\n H2 = history.History()\n self.strengthmodel.populate_history(H2)\n self.strengthmodel.init_history(H2)\n\n self.assertTrue(np.allclose(np.array(H1),\n np.array(H2)))\n\n def test_slip(self):\n for g in range(self.L.ngroup):\n for i in range(self.L.nslip(g)):\n rs = self.L.shear(g, i, self.Q, self.S)\n strength = self.strength + self.static\n self.assertTrue(np.isclose(self.model.slip(g, i, self.S, self.Q, self.H, self.L, self.T, self.fixed),\n self.model.sslip(g, i, rs, strength, self.T)))\n\n def test_d_hist_rate(self):\n self.assertTrue(np.allclose(\n np.array(self.model.hist_rate(self.S, self.Q, self.H, self.L, self.T, self.fixed)),\n np.array(self.strengthmodel.hist(self.S, self.Q, self.H, self.L, self.T, self.model, self.fixed))))\n\n def test_d_sslip_d_tau(self):\n for g in range(self.L.ngroup):\n for i in range(self.L.nslip(g)):\n nd = differentiate(lambda t: self.model.sslip(g, i, t, self.strength, self.T),\n self.tau)\n d = self.model.d_sslip_dtau(g, i, self.tau, self.strength, self.T)\n self.assertTrue(np.isclose(nd,d))\n\n def test_d_sslip_d_strength(self):\n for g in range(self.L.ngroup):\n for i in range(self.L.nslip(g)):\n nd = differentiate(lambda s: self.model.sslip(g, i, self.tau, s, self.T), self.strength)\n d = self.model.d_sslip_dstrength(g, i, self.tau, self.strength, self.T)\n print(nd)\n print(d)\n self.assertTrue(np.isclose(nd, d))\n\nclass TestPowerLawSlip(unittest.TestCase, CommonSlipStrengthSlipRule, CommonSlipRule):\n def setUp(self):\n self.L = crystallography.CubicLattice(1.0)\n self.L.add_slip_system([1,1,0],[1,1,1])\n \n self.Q = rotations.Orientation(35.0,17.0,14.0, angle_type = \"degrees\")\n self.S = tensors.Symmetric(np.array([\n [100.0,-25.0,10.0],\n 
[-25.0,-17.0,15.0],\n [10.0, 15.0,35.0]]))\n self.strength = 35.0\n self.H = history.History()\n self.H.add_scalar(\"strength\")\n self.H.set_scalar(\"strength\", self.strength)\n\n self.T = 300.0\n\n self.tau0 = 10.0\n self.tau_sat = 50.0\n self.b = 2.5\n\n self.strengthmodel = slipharden.VoceSlipHardening(self.tau_sat, self.b, self.tau0)\n\n self.static = self.tau0\n \n self.g0 = 1.0\n self.n = 3.0\n self.model = sliprules.PowerLawSlipRule(self.strengthmodel, self.g0, self.n)\n\n self.tau = 33.0\n\n self.fixed = history.History()\n\n def test_scalar_rate(self):\n for g in range(self.L.ngroup):\n for i in range(self.L.nslip(g)):\n self.assertTrue(np.isclose(self.model.sslip(g, i, self.tau, self.strength, self.T),\n self.g0 * np.abs(self.tau/self.strength)**(self.n-1.0) * self.tau/self.strength))\n\n\nclass TestBiVoceSlip(unittest.TestCase, CommonSlipStrengthSlipRule, CommonSlipRule):\n def setUp(self):\n self.L = crystallography.CubicLattice(1.0)\n self.L.add_slip_system([1,1,0],[1,1,1])\n \n self.Q = rotations.Orientation(35.0,17.0,14.0, angle_type = \"degrees\")\n self.S = tensors.Symmetric(np.array([\n [100.0,-25.0,10.0],\n [-25.0,-17.0,15.0],\n [10.0, 15.0,35.0]]))\n self.strength_1 = 35.0\n self.strength_2 = 25.0\n self.strength = self.strength_1 + self.strength_2\n self.H = history.History()\n self.H.add_scalar(\"strength0\")\n self.H.set_scalar(\"strength0\", self.strength_1)\n self.H.add_scalar(\"strength1\")\n self.H.set_scalar(\"strength1\", self.strength_2)\n\n self.T = 300.0\n\n self.tau0 = 10.0\n self.tau_sat = 50.0\n self.b = 2.5\n\n self.strengthmodel = slipharden.SumSlipSingleStrengthHardening(\n [slipharden.VoceSlipHardening(self.tau_sat, self.b, self.tau0),\n slipharden.VoceSlipHardening(self.tau_sat/2, self.b/2, self.tau0/2)])\n\n self.static = self.tau0 + self.tau0 / 2\n \n self.g0 = 1.0\n self.n = 3.0\n self.model = sliprules.PowerLawSlipRule(self.strengthmodel, self.g0, self.n)\n\n self.tau = 33.0\n\n self.fixed = history.History()\n"
] | [
[
"numpy.array",
"numpy.isclose",
"numpy.abs"
]
] |
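The Common* test mixins above check analytic slip-rule derivatives against numerical differentiation. A standalone sketch of the same idea for the power-law slip rate exercised in TestPowerLawSlip (the central-difference helper below is an assumed stand-in for the project's common.differentiate, not its actual implementation):

import numpy as np

def sslip(tau, strength, g0=1.0, n=3.0):
    # power-law slip rate, matching the expression checked in test_scalar_rate
    return g0 * np.abs(tau / strength) ** (n - 1.0) * (tau / strength)

def differentiate(fn, x, eps=1.0e-6):
    # simple central finite difference (assumed stand-in, not common.differentiate)
    return (fn(x + eps) - fn(x - eps)) / (2.0 * eps)

tau, strength, g0, n = 33.0, 45.0, 1.0, 3.0
analytic = g0 * n * np.abs(tau / strength) ** (n - 1.0) / strength
numeric = differentiate(lambda t: sslip(t, strength, g0, n), tau)
print(np.isclose(analytic, numeric))                 # True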
hrk7531/scipy | [
"a62bf66b2a485fbb3e08fe52feecaca765bead1f"
] | [
"scipy/optimize/_linprog_util.py"
] | [
"\"\"\"\nMethod agnostic utility functions for linear progamming\n\"\"\"\n\nimport numpy as np\nimport scipy.sparse as sps\nfrom warnings import warn\nfrom .optimize import OptimizeWarning\nfrom scipy.optimize._remove_redundancy import (\n _remove_redundancy, _remove_redundancy_sparse, _remove_redundancy_dense\n )\n\n\ndef _check_sparse_inputs(options, A_ub, A_eq):\n \"\"\"\n Check the provided ``A_ub`` and ``A_eq`` matrices conform to the specified\n optional sparsity variables.\n\n Parameters\n ----------\n A_ub : 2D array, optional\n 2D array such that ``A_ub @ x`` gives the values of the upper-bound\n inequality constraints at ``x``.\n A_eq : 2D array, optional\n 2D array such that ``A_eq @ x`` gives the values of the equality\n constraints at ``x``.\n options : dict\n A dictionary of solver options. All methods accept the following\n generic options:\n\n maxiter : int\n Maximum number of iterations to perform.\n disp : bool\n Set to True to print convergence messages.\n\n For method-specific options, see :func:`show_options('linprog')`.\n\n Returns\n -------\n A_ub : 2D array, optional\n 2D array such that ``A_ub @ x`` gives the values of the upper-bound\n inequality constraints at ``x``.\n A_eq : 2D array, optional\n 2D array such that ``A_eq @ x`` gives the values of the equality\n constraints at ``x``.\n options : dict\n A dictionary of solver options. All methods accept the following\n generic options:\n\n maxiter : int\n Maximum number of iterations to perform.\n disp : bool\n Set to True to print convergence messages.\n\n For method-specific options, see :func:`show_options('linprog')`.\n \"\"\"\n # This is an undocumented option for unit testing sparse presolve\n _sparse_presolve = options.pop('_sparse_presolve', False)\n if _sparse_presolve and A_eq is not None:\n A_eq = sps.coo_matrix(A_eq)\n if _sparse_presolve and A_ub is not None:\n A_ub = sps.coo_matrix(A_ub)\n\n sparse = options.get('sparse', False)\n if not sparse and (sps.issparse(A_eq) or sps.issparse(A_ub)):\n options['sparse'] = True\n warn(\"Sparse constraint matrix detected; setting 'sparse':True.\",\n OptimizeWarning)\n return options, A_ub, A_eq\n\n\ndef _format_A_constraints(A, n_x, sparse_lhs=False):\n \"\"\"Format the left hand side of the constraints to a 2D array\n\n Parameters\n ----------\n A : 2D array\n 2D array such that ``A @ x`` gives the values of the upper-bound\n (in)equality constraints at ``x``.\n n_x : int\n The number of variables in the linear programming problem.\n sparse_lhs : bool\n Whether either of `A_ub` or `A_eq` are sparse. 
If true return a\n coo_matrix instead of a numpy array.\n\n Returns\n -------\n np.ndarray or sparse.coo_matrix\n 2D array such that ``A @ x`` gives the values of the upper-bound\n (in)equality constraints at ``x``.\n\n \"\"\"\n if sparse_lhs:\n return sps.coo_matrix(\n (0, n_x) if A is None else A, dtype=float, copy=True\n )\n elif A is None:\n return np.zeros((0, n_x), dtype=float)\n else:\n return np.array(A, dtype=float, copy=True)\n\n\ndef _format_b_constraints(b):\n \"\"\"Format the upper bounds of the constraints to a 1D array\n\n Parameters\n ----------\n b : 1D array\n 1D array of values representing the upper-bound of each (in)equality\n constraint (row) in ``A``.\n\n Returns\n -------\n 1D np.array\n 1D array of values representing the upper-bound of each (in)equality\n constraint (row) in ``A``.\n\n \"\"\"\n if b is None:\n return np.array([], dtype=float)\n b = np.array(b, dtype=float, copy=True).squeeze()\n return b if b.size != 1 else b.reshape((-1))\n\n\ndef _clean_inputs(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None,\n x0=None):\n \"\"\"\n Given user inputs for a linear programming problem, return the\n objective vector, upper bound constraints, equality constraints,\n and simple bounds in a preferred format.\n\n Parameters\n ----------\n c : 1D array\n Coefficients of the linear objective function to be minimized.\n A_ub : 2D array, optional\n 2D array such that ``A_ub @ x`` gives the values of the upper-bound\n inequality constraints at ``x``.\n b_ub : 1D array, optional\n 1D array of values representing the upper-bound of each inequality\n constraint (row) in ``A_ub``.\n A_eq : 2D array, optional\n 2D array such that ``A_eq @ x`` gives the values of the equality\n constraints at ``x``.\n b_eq : 1D array, optional\n 1D array of values representing the RHS of each equality constraint\n (row) in ``A_eq``.\n bounds : sequence, optional\n ``(min, max)`` pairs for each element in ``x``, defining\n the bounds on that parameter. Use None for one of ``min`` or\n ``max`` when there is no bound in that direction. By default\n bounds are ``(0, None)`` (non-negative).\n If a sequence containing a single tuple is provided, then ``min`` and\n ``max`` will be applied to all variables in the problem.\n x0 : 1D array, optional\n Starting values of the independent variables, which will be refined by\n the optimization algorithm.\n\n Returns\n -------\n c : 1D array\n Coefficients of the linear objective function to be minimized.\n A_ub : 2D array, optional\n 2D array such that ``A_ub @ x`` gives the values of the upper-bound\n inequality constraints at ``x``.\n b_ub : 1D array, optional\n 1D array of values representing the upper-bound of each inequality\n constraint (row) in ``A_ub``.\n A_eq : 2D array, optional\n 2D array such that ``A_eq @ x`` gives the values of the equality\n constraints at ``x``.\n b_eq : 1D array, optional\n 1D array of values representing the RHS of each equality constraint\n (row) in ``A_eq``.\n bounds : sequence of tuples\n ``(min, max)`` pairs for each element in ``x``, defining\n the bounds on that parameter. Use None for each of ``min`` or\n ``max`` when there is no bound in that direction. 
By default\n bounds are ``(0, None)`` (non-negative).\n x0 : 1D array, optional\n Starting values of the independent variables, which will be refined by\n the optimization algorithm.\n \"\"\"\n if c is None:\n raise TypeError\n\n try:\n c = np.array(c, dtype=np.float, copy=True).squeeze()\n except ValueError:\n raise TypeError(\n \"Invalid input for linprog: c must be a 1D array of numerical \"\n \"coefficients\")\n else:\n # If c is a single value, convert it to a 1D array.\n if c.size == 1:\n c = c.reshape((-1))\n\n n_x = len(c)\n if n_x == 0 or len(c.shape) != 1:\n raise ValueError(\n \"Invalid input for linprog: c must be a 1D array and must \"\n \"not have more than one non-singleton dimension\")\n if not(np.isfinite(c).all()):\n raise ValueError(\n \"Invalid input for linprog: c must not contain values \"\n \"inf, nan, or None\")\n\n sparse_lhs = sps.issparse(A_eq) or sps.issparse(A_ub)\n try:\n A_ub = _format_A_constraints(A_ub, n_x, sparse_lhs=sparse_lhs)\n except ValueError:\n raise TypeError(\n \"Invalid input for linprog: A_ub must be a 2D array \"\n \"of numerical values\")\n else:\n n_ub = A_ub.shape[0]\n if len(A_ub.shape) != 2 or A_ub.shape[1] != n_x:\n raise ValueError(\n \"Invalid input for linprog: A_ub must have exactly two \"\n \"dimensions, and the number of columns in A_ub must be \"\n \"equal to the size of c\")\n if (sps.issparse(A_ub) and not np.isfinite(A_ub.data).all()\n or not sps.issparse(A_ub) and not np.isfinite(A_ub).all()):\n raise ValueError(\n \"Invalid input for linprog: A_ub must not contain values \"\n \"inf, nan, or None\")\n\n try:\n b_ub = _format_b_constraints(b_ub)\n except ValueError:\n raise TypeError(\n \"Invalid input for linprog: b_ub must be a 1D array of \"\n \"numerical values, each representing the upper bound of an \"\n \"inequality constraint (row) in A_ub\")\n else:\n if b_ub.shape != (n_ub,):\n raise ValueError(\n \"Invalid input for linprog: b_ub must be a 1D array; b_ub \"\n \"must not have more than one non-singleton dimension and \"\n \"the number of rows in A_ub must equal the number of values \"\n \"in b_ub\")\n if not(np.isfinite(b_ub).all()):\n raise ValueError(\n \"Invalid input for linprog: b_ub must not contain values \"\n \"inf, nan, or None\")\n\n try:\n A_eq = _format_A_constraints(A_eq, n_x, sparse_lhs=sparse_lhs)\n except ValueError:\n raise TypeError(\n \"Invalid input for linprog: A_eq must be a 2D array \"\n \"of numerical values\")\n else:\n n_eq = A_eq.shape[0]\n if len(A_eq.shape) != 2 or A_eq.shape[1] != n_x:\n raise ValueError(\n \"Invalid input for linprog: A_eq must have exactly two \"\n \"dimensions, and the number of columns in A_eq must be \"\n \"equal to the size of c\")\n\n if (sps.issparse(A_eq) and not np.isfinite(A_eq.data).all()\n or not sps.issparse(A_eq) and not np.isfinite(A_eq).all()):\n raise ValueError(\n \"Invalid input for linprog: A_eq must not contain values \"\n \"inf, nan, or None\")\n\n try:\n b_eq = _format_b_constraints(b_eq)\n except ValueError:\n raise TypeError(\n \"Invalid input for linprog: b_eq must be a 1D array of \"\n \"numerical values, each representing the upper bound of an \"\n \"inequality constraint (row) in A_eq\")\n else:\n if b_eq.shape != (n_eq,):\n raise ValueError(\n \"Invalid input for linprog: b_eq must be a 1D array; b_eq \"\n \"must not have more than one non-singleton dimension and \"\n \"the number of rows in A_eq must equal the number of values \"\n \"in b_eq\")\n if not(np.isfinite(b_eq).all()):\n raise ValueError(\n \"Invalid input for linprog: b_eq must 
not contain values \"\n \"inf, nan, or None\")\n\n # x0 gives a (optional) starting solution to the solver. If x0 is None,\n # skip the checks. Initial solution will be generated automatically.\n if x0 is not None:\n try:\n x0 = np.array(x0, dtype=float, copy=True).squeeze()\n except ValueError:\n raise TypeError(\n \"Invalid input for linprog: x0 must be a 1D array of \"\n \"numerical coefficients\")\n if x0.ndim == 0:\n x0 = x0.reshape((-1))\n if len(x0) == 0 or x0.ndim != 1:\n raise ValueError(\n \"Invalid input for linprog: x0 should be a 1D array; it \"\n \"must not have more than one non-singleton dimension\")\n if not x0.size == c.size:\n raise ValueError(\n \"Invalid input for linprog: x0 and c should contain the \"\n \"same number of elements\")\n if not np.isfinite(x0).all():\n raise ValueError(\n \"Invalid input for linprog: x0 must not contain values \"\n \"inf, nan, or None\")\n\n # \"If a sequence containing a single tuple is provided, then min and max\n # will be applied to all variables in the problem.\"\n # linprog doesn't treat this right: it didn't accept a list with one tuple\n # in it\n try:\n if isinstance(bounds, str):\n raise TypeError\n if bounds is None or len(bounds) == 0:\n bounds = [(0, None)] * n_x\n elif len(bounds) == 1:\n b = bounds[0]\n if len(b) != 2:\n raise ValueError(\n \"Invalid input for linprog: exactly one lower bound and \"\n \"one upper bound must be specified for each element of x\")\n bounds = [b] * n_x\n elif len(bounds) == n_x:\n try:\n len(bounds[0])\n except BaseException:\n bounds = [(bounds[0], bounds[1])] * n_x\n for i, b in enumerate(bounds):\n if len(b) != 2:\n raise ValueError(\n \"Invalid input for linprog, bound \" +\n str(i) +\n \" \" +\n str(b) +\n \": exactly one lower bound and one upper bound must \"\n \"be specified for each element of x\")\n elif (len(bounds) == 2 and np.isreal(bounds[0])\n and np.isreal(bounds[1])):\n bounds = [(bounds[0], bounds[1])] * n_x\n else:\n raise ValueError(\n \"Invalid input for linprog: exactly one lower bound and one \"\n \"upper bound must be specified for each element of x\")\n\n clean_bounds = [] # also creates a copy so user's object isn't changed\n for i, b in enumerate(bounds):\n if b[0] is not None and b[1] is not None and b[0] > b[1]:\n raise ValueError(\n \"Invalid input for linprog, bound \" +\n str(i) +\n \" \" +\n str(b) +\n \": a lower bound must be less than or equal to the \"\n \"corresponding upper bound\")\n if b[0] == np.inf:\n raise ValueError(\n \"Invalid input for linprog, bound \" +\n str(i) +\n \" \" +\n str(b) +\n \": infinity is not a valid lower bound\")\n if b[1] == -np.inf:\n raise ValueError(\n \"Invalid input for linprog, bound \" +\n str(i) +\n \" \" +\n str(b) +\n \": negative infinity is not a valid upper bound\")\n lb = float(b[0]) if b[0] is not None and b[0] != -np.inf else None\n ub = float(b[1]) if b[1] is not None and b[1] != np.inf else None\n clean_bounds.append((lb, ub))\n bounds = clean_bounds\n except ValueError as e:\n if \"could not convert string to float\" in e.args[0]:\n raise TypeError\n else:\n raise e\n except TypeError as e:\n print(e)\n raise TypeError(\n \"Invalid input for linprog: bounds must be a sequence of \"\n \"(min,max) pairs, each defining bounds on an element of x \")\n\n return c, A_ub, b_ub, A_eq, b_eq, bounds, x0\n\n\ndef _presolve(c, A_ub, b_ub, A_eq, b_eq, bounds, x0, rr, tol=1e-9):\n \"\"\"\n Given inputs for a linear programming problem in preferred format,\n presolve the problem: identify trivial infeasibilities, 
redundancies,\n and unboundedness, tighten bounds where possible, and eliminate fixed\n variables.\n\n Parameters\n ----------\n c : 1D array\n Coefficients of the linear objective function to be minimized.\n A_ub : 2D array, optional\n 2D array such that ``A_ub @ x`` gives the values of the upper-bound\n inequality constraints at ``x``.\n b_ub : 1D array, optional\n 1D array of values representing the upper-bound of each inequality\n constraint (row) in ``A_ub``.\n A_eq : 2D array, optional\n 2D array such that ``A_eq @ x`` gives the values of the equality\n constraints at ``x``.\n b_eq : 1D array, optional\n 1D array of values representing the RHS of each equality constraint\n (row) in ``A_eq``.\n bounds : sequence of tuples\n ``(min, max)`` pairs for each element in ``x``, defining\n the bounds on that parameter. Use None for each of ``min`` or\n ``max`` when there is no bound in that direction.\n x0 : 1D array, optional\n Starting values of the independent variables, which will be refined by\n the optimization algorithm.\n rr : bool\n If ``True`` attempts to eliminate any redundant rows in ``A_eq``.\n Set False if ``A_eq`` is known to be of full row rank, or if you are\n looking for a potential speedup (at the expense of reliability).\n tol : float\n The tolerance which determines when a solution is \"close enough\" to\n zero in Phase 1 to be considered a basic feasible solution or close\n enough to positive to serve as an optimal solution.\n\n Returns\n -------\n c : 1D array\n Coefficients of the linear objective function to be minimized.\n c0 : 1D array\n Constant term in objective function due to fixed (and eliminated)\n variables.\n A_ub : 2D array, optional\n 2D array such that ``A_ub @ x`` gives the values of the upper-bound\n inequality constraints at ``x``.\n b_ub : 1D array, optional\n 1D array of values representing the upper-bound of each inequality\n constraint (row) in ``A_ub``.\n A_eq : 2D array, optional\n 2D array such that ``A_eq @ x`` gives the values of the equality\n constraints at ``x``.\n b_eq : 1D array, optional\n 1D array of values representing the RHS of each equality constraint\n (row) in ``A_eq``.\n bounds : sequence of tuples\n ``(min, max)`` pairs for each element in ``x``, defining\n the bounds on that parameter. Use None for each of ``min`` or\n ``max`` when there is no bound in that direction. Bounds have been\n tightened where possible.\n x : 1D array\n Solution vector (when the solution is trivial and can be determined\n in presolve)\n x0 : 1D array\n Starting values of the independent variables, which will be refined by\n the optimization algorithm (if solution is not determined in presolve)\n undo: list of tuples\n (index, value) pairs that record the original index and fixed value\n for each variable removed from the problem\n complete: bool\n Whether the solution is complete (solved or determined to be infeasible\n or unbounded in presolve)\n status : int\n An integer representing the exit status of the optimization::\n\n 0 : Optimization terminated successfully\n 1 : Iteration limit reached\n 2 : Problem appears to be infeasible\n 3 : Problem appears to be unbounded\n 4 : Serious numerical difficulties encountered\n\n message : str\n A string descriptor of the exit status of the optimization.\n\n References\n ----------\n .. [5] Andersen, Erling D. \"Finding all linearly dependent rows in\n large-scale linear programming.\" Optimization Methods and Software\n 6.3 (1995): 219-227.\n .. [8] Andersen, Erling D., and Knud D. Andersen. 
\"Presolving in linear\n programming.\" Mathematical Programming 71.2 (1995): 221-245.\n\n \"\"\"\n # ideas from Reference [5] by Andersen and Andersen\n # however, unlike the reference, this is performed before converting\n # problem to standard form\n # There are a few advantages:\n # * artificial variables have not been added, so matrices are smaller\n # * bounds have not been converted to constraints yet. (It is better to\n # do that after presolve because presolve may adjust the simple bounds.)\n # There are many improvements that can be made, namely:\n # * implement remaining checks from [5]\n # * loop presolve until no additional changes are made\n # * implement additional efficiency improvements in redundancy removal [2]\n\n undo = [] # record of variables eliminated from problem\n # constant term in cost function may be added if variables are eliminated\n c0 = 0\n complete = False # complete is True if detected infeasible/unbounded\n x = np.zeros(c.shape) # this is solution vector if completed in presolve\n\n status = 0 # all OK unless determined otherwise\n message = \"\"\n\n # Standard form for bounds (from _clean_inputs) is list of tuples\n # but numpy array is more convenient here\n # In retrospect, numpy array should have been the standard\n bounds = np.array(bounds)\n lb = bounds[:, 0]\n ub = bounds[:, 1]\n lb[np.equal(lb, None)] = -np.inf\n ub[np.equal(ub, None)] = np.inf\n bounds = bounds.astype(float)\n lb = lb.astype(float)\n ub = ub.astype(float)\n\n m_eq, n = A_eq.shape\n m_ub, n = A_ub.shape\n\n if (sps.issparse(A_eq)):\n A_eq = A_eq.tolil()\n A_ub = A_ub.tolil()\n\n def where(A):\n return A.nonzero()\n\n vstack = sps.vstack\n else:\n where = np.where\n vstack = np.vstack\n\n # zero row in equality constraints\n zero_row = np.array(np.sum(A_eq != 0, axis=1) == 0).flatten()\n if np.any(zero_row):\n if np.any(\n np.logical_and(\n zero_row,\n np.abs(b_eq) > tol)): # test_zero_row_1\n # infeasible if RHS is not zero\n status = 2\n message = (\"The problem is (trivially) infeasible due to a row \"\n \"of zeros in the equality constraint matrix with a \"\n \"nonzero corresponding constraint value.\")\n complete = True\n return (c, c0, A_ub, b_ub, A_eq, b_eq, bounds,\n x, x0, undo, complete, status, message)\n else: # test_zero_row_2\n # if RHS is zero, we can eliminate this equation entirely\n A_eq = A_eq[np.logical_not(zero_row), :]\n b_eq = b_eq[np.logical_not(zero_row)]\n\n # zero row in inequality constraints\n zero_row = np.array(np.sum(A_ub != 0, axis=1) == 0).flatten()\n if np.any(zero_row):\n if np.any(np.logical_and(zero_row, b_ub < -tol)): # test_zero_row_1\n # infeasible if RHS is less than zero (because LHS is zero)\n status = 2\n message = (\"The problem is (trivially) infeasible due to a row \"\n \"of zeros in the equality constraint matrix with a \"\n \"nonzero corresponding constraint value.\")\n complete = True\n return (c, c0, A_ub, b_ub, A_eq, b_eq, bounds,\n x, x0, undo, complete, status, message)\n else: # test_zero_row_2\n # if LHS is >= 0, we can eliminate this constraint entirely\n A_ub = A_ub[np.logical_not(zero_row), :]\n b_ub = b_ub[np.logical_not(zero_row)]\n\n # zero column in (both) constraints\n # this indicates that a variable isn't constrained and can be removed\n A = vstack((A_eq, A_ub))\n if A.shape[0] > 0:\n zero_col = np.array(np.sum(A != 0, axis=0) == 0).flatten()\n # variable will be at upper or lower bound, depending on objective\n x[np.logical_and(zero_col, c < 0)] = ub[\n np.logical_and(zero_col, c < 0)]\n x[np.logical_and(zero_col, 
c > 0)] = lb[\n np.logical_and(zero_col, c > 0)]\n if np.any(np.isinf(x)): # if an unconstrained variable has no bound\n status = 3\n message = (\"If feasible, the problem is (trivially) unbounded \"\n \"due to a zero column in the constraint matrices. If \"\n \"you wish to check whether the problem is infeasible, \"\n \"turn presolve off.\")\n complete = True\n return (c, c0, A_ub, b_ub, A_eq, b_eq, bounds,\n x, x0, undo, complete, status, message)\n # variables will equal upper/lower bounds will be removed later\n lb[np.logical_and(zero_col, c < 0)] = ub[\n np.logical_and(zero_col, c < 0)]\n ub[np.logical_and(zero_col, c > 0)] = lb[\n np.logical_and(zero_col, c > 0)]\n\n # row singleton in equality constraints\n # this fixes a variable and removes the constraint\n singleton_row = np.array(np.sum(A_eq != 0, axis=1) == 1).flatten()\n rows = where(singleton_row)[0]\n cols = where(A_eq[rows, :])[1]\n if len(rows) > 0:\n for row, col in zip(rows, cols):\n val = b_eq[row] / A_eq[row, col]\n if not lb[col] - tol <= val <= ub[col] + tol:\n # infeasible if fixed value is not within bounds\n status = 2\n message = (\"The problem is (trivially) infeasible because a \"\n \"singleton row in the equality constraints is \"\n \"inconsistent with the bounds.\")\n complete = True\n return (c, c0, A_ub, b_ub, A_eq, b_eq, bounds,\n x, x0, undo, complete, status, message)\n else:\n # sets upper and lower bounds at that fixed value - variable\n # will be removed later\n lb[col] = val\n ub[col] = val\n A_eq = A_eq[np.logical_not(singleton_row), :]\n b_eq = b_eq[np.logical_not(singleton_row)]\n\n # row singleton in inequality constraints\n # this indicates a simple bound and the constraint can be removed\n # simple bounds may be adjusted here\n # After all of the simple bound information is combined here, get_Abc will\n # turn the simple bounds into constraints\n singleton_row = np.array(np.sum(A_ub != 0, axis=1) == 1).flatten()\n cols = where(A_ub[singleton_row, :])[1]\n rows = where(singleton_row)[0]\n if len(rows) > 0:\n for row, col in zip(rows, cols):\n val = b_ub[row] / A_ub[row, col]\n if A_ub[row, col] > 0: # upper bound\n if val < lb[col] - tol: # infeasible\n complete = True\n elif val < ub[col]: # new upper bound\n ub[col] = val\n else: # lower bound\n if val > ub[col] + tol: # infeasible\n complete = True\n elif val > lb[col]: # new lower bound\n lb[col] = val\n if complete:\n status = 2\n message = (\"The problem is (trivially) infeasible because a \"\n \"singleton row in the upper bound constraints is \"\n \"inconsistent with the bounds.\")\n return (c, c0, A_ub, b_ub, A_eq, b_eq, bounds,\n x, x0, undo, complete, status, message)\n A_ub = A_ub[np.logical_not(singleton_row), :]\n b_ub = b_ub[np.logical_not(singleton_row)]\n\n # identical bounds indicate that variable can be removed\n i_f = np.abs(lb - ub) < tol # indices of \"fixed\" variables\n i_nf = np.logical_not(i_f) # indices of \"not fixed\" variables\n\n # test_bounds_equal_but_infeasible\n if np.all(i_f): # if bounds define solution, check for consistency\n residual = b_eq - A_eq.dot(lb)\n slack = b_ub - A_ub.dot(lb)\n if ((A_ub.size > 0 and np.any(slack < 0)) or\n (A_eq.size > 0 and not np.allclose(residual, 0))):\n status = 2\n message = (\"The problem is (trivially) infeasible because the \"\n \"bounds fix all variables to values inconsistent with \"\n \"the constraints\")\n complete = True\n return (c, c0, A_ub, b_ub, A_eq, b_eq, bounds,\n x, x0, undo, complete, status, message)\n\n ub_mod = ub\n lb_mod = lb\n if np.any(i_f):\n c0 += 
c[i_f].dot(lb[i_f])\n b_eq = b_eq - A_eq[:, i_f].dot(lb[i_f])\n b_ub = b_ub - A_ub[:, i_f].dot(lb[i_f])\n c = c[i_nf]\n x = x[i_nf]\n # user guess x0 stays separate from presolve solution x\n if x0 is not None:\n x0 = x0[i_nf]\n A_eq = A_eq[:, i_nf]\n A_ub = A_ub[:, i_nf]\n # record of variables to be added back in\n undo = [np.nonzero(i_f)[0], lb[i_f]]\n # don't remove these entries from bounds; they'll be used later.\n # but we _also_ need a version of the bounds with these removed\n lb_mod = lb[i_nf]\n ub_mod = ub[i_nf]\n\n # no constraints indicates that problem is trivial\n if A_eq.size == 0 and A_ub.size == 0:\n b_eq = np.array([])\n b_ub = np.array([])\n # test_empty_constraint_1\n if c.size == 0:\n status = 0\n message = (\"The solution was determined in presolve as there are \"\n \"no non-trivial constraints.\")\n elif (np.any(np.logical_and(c < 0, ub_mod == np.inf)) or\n np.any(np.logical_and(c > 0, lb_mod == -np.inf))):\n # test_no_constraints()\n # test_unbounded_no_nontrivial_constraints_1\n # test_unbounded_no_nontrivial_constraints_2\n status = 3\n message = (\"The problem is (trivially) unbounded \"\n \"because there are no non-trivial constraints and \"\n \"a) at least one decision variable is unbounded \"\n \"above and its corresponding cost is negative, or \"\n \"b) at least one decision variable is unbounded below \"\n \"and its corresponding cost is positive. \")\n else: # test_empty_constraint_2\n status = 0\n message = (\"The solution was determined in presolve as there are \"\n \"no non-trivial constraints.\")\n complete = True\n x[c < 0] = ub_mod[c < 0]\n x[c > 0] = lb_mod[c > 0]\n # where c is zero, set x to a finite bound or zero\n x_zero_c = ub_mod[c == 0]\n x_zero_c[np.isinf(x_zero_c)] = ub_mod[c == 0][np.isinf(x_zero_c)]\n x_zero_c[np.isinf(x_zero_c)] = 0\n x[c == 0] = x_zero_c\n # if this is not the last step of presolve, should convert bounds back\n # to array and return here\n\n # *sigh* - convert bounds back to their standard form (list of tuples)\n # again, in retrospect, numpy array would be standard form\n lb[np.equal(lb, -np.inf)] = None\n ub[np.equal(ub, np.inf)] = None\n bounds = np.hstack((lb[:, np.newaxis], ub[:, np.newaxis]))\n bounds = bounds.tolist()\n for i, row in enumerate(bounds):\n for j, col in enumerate(row):\n if str(col) == \"nan\":\n # comparing col to float(\"nan\") and np.nan doesn't work.\n # should use np.isnan\n bounds[i][j] = None\n\n # remove redundant (linearly dependent) rows from equality constraints\n n_rows_A = A_eq.shape[0]\n redundancy_warning = (\"A_eq does not appear to be of full row rank. To \"\n \"improve performance, check the problem formulation \"\n \"for redundant equality constraints.\")\n if (sps.issparse(A_eq)):\n if rr and A_eq.size > 0: # TODO: Fast sparse rank check?\n A_eq, b_eq, status, message = _remove_redundancy_sparse(A_eq, b_eq)\n if A_eq.shape[0] < n_rows_A:\n warn(redundancy_warning, OptimizeWarning)\n if status != 0:\n complete = True\n return (c, c0, A_ub, b_ub, A_eq, b_eq, bounds,\n x, x0, undo, complete, status, message)\n\n # This is a wild guess for which redundancy removal algorithm will be\n # faster. 
More testing would be good.\n small_nullspace = 5\n if rr and A_eq.size > 0:\n try: # TODO: instead use results of first SVD in _remove_redundancy\n rank = np.linalg.matrix_rank(A_eq)\n except Exception: # oh well, we'll have to go with _remove_redundancy_dense\n rank = 0\n if rr and A_eq.size > 0 and rank < A_eq.shape[0]:\n warn(redundancy_warning, OptimizeWarning)\n dim_row_nullspace = A_eq.shape[0]-rank\n if dim_row_nullspace <= small_nullspace:\n A_eq, b_eq, status, message = _remove_redundancy(A_eq, b_eq)\n if dim_row_nullspace > small_nullspace or status == 4:\n A_eq, b_eq, status, message = _remove_redundancy_dense(A_eq, b_eq)\n if A_eq.shape[0] < rank:\n message = (\"Due to numerical issues, redundant equality \"\n \"constraints could not be removed automatically. \"\n \"Try providing your constraint matrices as sparse \"\n \"matrices to activate sparse presolve, try turning \"\n \"off redundancy removal, or try turning off presolve \"\n \"altogether.\")\n status = 4\n if status != 0:\n complete = True\n return (c, c0, A_ub, b_ub, A_eq, b_eq, bounds,\n x, x0, undo, complete, status, message)\n\n\ndef _parse_linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, options, x0):\n \"\"\"\n Parse the provided linear programming problem\n\n ``_parse_linprog`` employs two main steps ``_check_sparse_inputs`` and\n ``_clean_inputs``. ``_check_sparse_inputs`` checks for sparsity in the\n provided constraints (``A_ub`` and ``A_eq) and if these match the provided\n sparsity optional values.\n\n ``_clean inputs`` checks of the provided inputs. If no violations are\n identified the objective vector, upper bound constraints, equality\n constraints, and simple bounds are returned in the expected format.\n\n Parameters\n ----------\n c : 1D array\n Coefficients of the linear objective function to be minimized.\n A_ub : 2D array, optional\n 2D array such that ``A_ub @ x`` gives the values of the upper-bound\n inequality constraints at ``x``.\n b_ub : 1D array, optional\n 1D array of values representing the upper-bound of each inequality\n constraint (row) in ``A_ub``.\n A_eq : 2D array, optional\n 2D array such that ``A_eq @ x`` gives the values of the equality\n constraints at ``x``.\n b_eq : 1D array, optional\n 1D array of values representing the RHS of each equality constraint\n (row) in ``A_eq``.\n bounds : sequence\n ``(min, max)`` pairs for each element in ``x``, defining\n the bounds on that parameter. Use None for one of ``min`` or\n ``max`` when there is no bound in that direction. By default\n bounds are ``(0, None)`` (non-negative). If a sequence containing a\n single tuple is provided, then ``min`` and ``max`` will be applied to\n all variables in the problem.\n options : dict\n A dictionary of solver options. All methods accept the following\n generic options:\n\n maxiter : int\n Maximum number of iterations to perform.\n disp : bool\n Set to True to print convergence messages.\n\n For method-specific options, see :func:`show_options('linprog')`.\n x0 : 1D array, optional\n Starting values of the independent variables, which will be refined by\n the optimization algorithm. 
Currently compatible only with the\n 'revised simplex' method, and only if x0 is a basic feasible solution\n of the problem.\n\n Returns\n -------\n c : 1D array\n Coefficients of the linear objective function to be minimized.\n A_ub : 2D array, optional\n 2D array such that ``A_ub @ x`` gives the values of the upper-bound\n inequality constraints at ``x``.\n b_ub : 1D array, optional\n 1D array of values representing the upper-bound of each inequality\n constraint (row) in ``A_ub``.\n A_eq : 2D array, optional\n 2D array such that ``A_eq @ x`` gives the values of the equality\n constraints at ``x``.\n b_eq : 1D array, optional\n 1D array of values representing the RHS of each equality constraint\n (row) in ``A_eq``.\n bounds : sequence, optional\n ``(min, max)`` pairs for each element in ``x``, defining\n the bounds on that parameter. Use None for one of ``min`` or\n ``max`` when there is no bound in that direction. By default\n bounds are ``(0, None)`` (non-negative).\n If a sequence containing a single tuple is provided, then ``min`` and\n ``max`` will be applied to all variables in the problem.\n options : dict, optional\n A dictionary of solver options. All methods accept the following\n generic options:\n\n maxiter : int\n Maximum number of iterations to perform.\n disp : bool\n Set to True to print convergence messages.\n\n For method-specific options, see :func:`show_options('linprog')`.\n x0 : 1D array, optional\n Starting values of the independent variables, which will be refined by\n the optimization algorithm. Currently compatible only with the\n 'revised simplex' method, and only if x0 is a basic feasible solution\n of the problem.\n \"\"\"\n if options is None:\n options = {}\n\n solver_options = {k: v for k, v in options.items()}\n solver_options, A_ub, A_eq = _check_sparse_inputs(solver_options, A_ub, A_eq)\n # Convert lists to numpy arrays, etc...\n c, A_ub, b_ub, A_eq, b_eq, bounds, x0 = _clean_inputs(\n c, A_ub, b_ub, A_eq, b_eq, bounds, x0)\n return c, A_ub, b_ub, A_eq, b_eq, bounds, solver_options, x0\n\n\ndef _get_Abc(c, c0=0, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None,\n x0=None, undo=[]):\n \"\"\"\n Given a linear programming problem of the form:\n\n Minimize::\n\n c @ x\n\n Subject to::\n\n A_ub @ x <= b_ub\n A_eq @ x == b_eq\n lb <= x <= ub\n\n where ``lb = 0`` and ``ub = None`` unless set in ``bounds``.\n\n Return the problem in standard form:\n\n Minimize::\n\n c @ x\n\n Subject to::\n\n A @ x == b\n x >= 0\n\n by adding slack variables and making variable substitutions as necessary.\n\n Parameters\n ----------\n c : 1D array\n Coefficients of the linear objective function to be minimized.\n Components corresponding with fixed variables have been eliminated.\n c0 : float\n Constant term in objective function due to fixed (and eliminated)\n variables.\n A_ub : 2D array, optional\n 2D array such that ``A_ub @ x`` gives the values of the upper-bound\n inequality constraints at ``x``.\n b_ub : 1D array, optional\n 1D array of values representing the upper-bound of each inequality\n constraint (row) in ``A_ub``.\n A_eq : 2D array, optional\n 2D array such that ``A_eq @ x`` gives the values of the equality\n constraints at ``x``.\n b_eq : 1D array, optional\n 1D array of values representing the RHS of each equality constraint\n (row) in ``A_eq``.\n bounds : sequence of tuples\n ``(min, max)`` pairs for each element in ``x``, defining\n the bounds on that parameter. Use None for each of ``min`` or\n ``max`` when there is no bound in that direction. 
Bounds have been\n tightened where possible.\n x0 : 1D array\n Starting values of the independent variables, which will be refined by\n the optimization algorithm\n undo: list of tuples\n (`index`, `value`) pairs that record the original index and fixed value\n for each variable removed from the problem\n\n Returns\n -------\n A : 2D array\n 2D array such that ``A`` @ ``x``, gives the values of the equality\n constraints at ``x``.\n b : 1D array\n 1D array of values representing the RHS of each equality constraint\n (row) in A (for standard form problem).\n c : 1D array\n Coefficients of the linear objective function to be minimized (for\n standard form problem).\n c0 : float\n Constant term in objective function due to fixed (and eliminated)\n variables.\n x0 : 1D array\n Starting values of the independent variables, which will be refined by\n the optimization algorithm\n\n References\n ----------\n .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. \"Introduction to linear\n programming.\" Athena Scientific 1 (1997): 997.\n\n \"\"\"\n\n if sps.issparse(A_eq):\n sparse = True\n A_eq = sps.lil_matrix(A_eq)\n A_ub = sps.lil_matrix(A_ub)\n\n def hstack(blocks):\n return sps.hstack(blocks, format=\"lil\")\n\n def vstack(blocks):\n return sps.vstack(blocks, format=\"lil\")\n\n zeros = sps.lil_matrix\n eye = sps.eye\n else:\n sparse = False\n hstack = np.hstack\n vstack = np.vstack\n zeros = np.zeros\n eye = np.eye\n\n fixed_x = set()\n if len(undo) > 0:\n # these are indices of variables removed from the problem\n # however, their bounds are still part of the bounds list\n fixed_x = set(undo[0])\n # they are needed elsewhere, but not here\n bounds = [bounds[i] for i in range(len(bounds)) if i not in fixed_x]\n # in retrospect, the standard form of bounds should have been an n x 2\n # array. maybe change it someday.\n\n # modify problem such that all variables have only non-negativity bounds\n\n bounds = np.array(bounds)\n lbs = bounds[:, 0]\n ubs = bounds[:, 1]\n m_ub, n_ub = A_ub.shape\n\n lb_none = np.equal(lbs, None)\n ub_none = np.equal(ubs, None)\n lb_some = np.logical_not(lb_none)\n ub_some = np.logical_not(ub_none)\n\n # if preprocessing is on, lb == ub can't happen\n # if preprocessing is off, then it would be best to convert that\n # to an equality constraint, but it's tricky to make the other\n # required modifications from inside here.\n\n # unbounded below: substitute xi = -xi' (unbounded above)\n l_nolb_someub = np.logical_and(lb_none, ub_some)\n i_nolb = np.nonzero(l_nolb_someub)[0]\n lbs[l_nolb_someub], ubs[l_nolb_someub] = (\n -ubs[l_nolb_someub], lbs[l_nolb_someub])\n lb_none = np.equal(lbs, None)\n ub_none = np.equal(ubs, None)\n lb_some = np.logical_not(lb_none)\n ub_some = np.logical_not(ub_none)\n c[i_nolb] *= -1\n if x0 is not None:\n x0[i_nolb] *= -1\n if len(i_nolb) > 0:\n if A_ub.shape[0] > 0: # sometimes needed for sparse arrays... 
weird\n A_ub[:, i_nolb] *= -1\n if A_eq.shape[0] > 0:\n A_eq[:, i_nolb] *= -1\n\n # upper bound: add inequality constraint\n i_newub = np.nonzero(ub_some)[0]\n ub_newub = ubs[ub_some]\n n_bounds = np.count_nonzero(ub_some)\n A_ub = vstack((A_ub, zeros((n_bounds, A_ub.shape[1]))))\n b_ub = np.concatenate((b_ub, np.zeros(n_bounds)))\n A_ub[range(m_ub, A_ub.shape[0]), i_newub] = 1\n b_ub[m_ub:] = ub_newub\n\n A1 = vstack((A_ub, A_eq))\n b = np.concatenate((b_ub, b_eq))\n c = np.concatenate((c, np.zeros((A_ub.shape[0],))))\n if x0 is not None:\n x0 = np.concatenate((x0, np.zeros((A_ub.shape[0],))))\n # unbounded: substitute xi = xi+ + xi-\n l_free = np.logical_and(lb_none, ub_none)\n i_free = np.nonzero(l_free)[0]\n n_free = len(i_free)\n A1 = hstack((A1, zeros((A1.shape[0], n_free))))\n c = np.concatenate((c, np.zeros(n_free)))\n if x0 is not None:\n x0 = np.concatenate((x0, np.zeros(n_free)))\n A1[:, range(n_ub, A1.shape[1])] = -A1[:, i_free]\n c[np.arange(n_ub, A1.shape[1])] = -c[i_free]\n if x0 is not None:\n i_free_neg = x0[i_free] < 0\n x0[np.arange(n_ub, A1.shape[1])[i_free_neg]] = -x0[i_free[i_free_neg]]\n x0[i_free[i_free_neg]] = 0\n\n # add slack variables\n A2 = vstack([eye(A_ub.shape[0]), zeros((A_eq.shape[0], A_ub.shape[0]))])\n A = hstack([A1, A2])\n\n # lower bound: substitute xi = xi' + lb\n # now there is a constant term in objective\n i_shift = np.nonzero(lb_some)[0]\n lb_shift = lbs[lb_some].astype(float)\n c0 += np.sum(lb_shift * c[i_shift])\n if sparse:\n b = b.reshape(-1, 1)\n A = A.tocsc()\n b -= (A[:, i_shift] * sps.diags(lb_shift)).sum(axis=1)\n b = b.ravel()\n else:\n b -= (A[:, i_shift] * lb_shift).sum(axis=1)\n if x0 is not None:\n x0[i_shift] -= lb_shift\n\n return A, b, c, c0, x0\n\n\ndef _display_summary(message, status, fun, iteration):\n \"\"\"\n Print the termination summary of the linear program\n\n Parameters\n ----------\n message : str\n A string descriptor of the exit status of the optimization.\n status : int\n An integer representing the exit status of the optimization::\n\n 0 : Optimization terminated successfully\n 1 : Iteration limit reached\n 2 : Problem appears to be infeasible\n 3 : Problem appears to be unbounded\n 4 : Serious numerical difficulties encountered\n\n fun : float\n Value of the objective function.\n iteration : iteration\n The number of iterations performed.\n \"\"\"\n print(message)\n if status in (0, 1):\n print(\" Current function value: {0: <12.6f}\".format(fun))\n print(\" Iterations: {0:d}\".format(iteration))\n\n\ndef _postsolve(x, c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None,\n complete=False, undo=[], tol=1e-8):\n \"\"\"\n Given solution x to presolved, standard form linear program x, add\n fixed variables back into the problem and undo the variable substitutions\n to get solution to original linear program. 
Also, calculate the objective\n function value, slack in original upper bound constraints, and residuals\n in original equality constraints.\n\n Parameters\n ----------\n x : 1D array\n Solution vector to the standard-form problem.\n c : 1D array\n Original coefficients of the linear objective function to be minimized.\n A_ub : 2D array, optional\n 2D array such that ``A_ub @ x`` gives the values of the upper-bound\n inequality constraints at ``x``.\n b_ub : 1D array, optional\n 1D array of values representing the upper-bound of each inequality\n constraint (row) in ``A_ub``.\n A_eq : 2D array, optional\n 2D array such that ``A_eq @ x`` gives the values of the equality\n constraints at ``x``.\n b_eq : 1D array, optional\n 1D array of values representing the RHS of each equality constraint\n (row) in ``A_eq``.\n bounds : sequence of tuples\n Bounds, as modified in presolve\n complete : bool\n Whether the solution is was determined in presolve (``True`` if so)\n undo: list of tuples\n (`index`, `value`) pairs that record the original index and fixed value\n for each variable removed from the problem\n tol : float\n Termination tolerance; see [1]_ Section 4.5.\n\n Returns\n -------\n x : 1D array\n Solution vector to original linear programming problem\n fun: float\n optimal objective value for original problem\n slack : 1D array\n The (non-negative) slack in the upper bound constraints, that is,\n ``b_ub - A_ub @ x``\n con : 1D array\n The (nominally zero) residuals of the equality constraints, that is,\n ``b - A_eq @ x``\n lb : 1D array\n The lower bound constraints on the original variables\n ub: 1D array\n The upper bound constraints on the original variables\n \"\"\"\n # note that all the inputs are the ORIGINAL, unmodified versions\n # no rows, columns have been removed\n # the only exception is bounds; it has been modified\n # we need these modified values to undo the variable substitutions\n # in retrospect, perhaps this could have been simplified if the \"undo\"\n # variable also contained information for undoing variable substitutions\n\n n_x = len(c)\n\n # we don't have to undo variable substitutions for fixed variables that\n # were removed from the problem\n no_adjust = set()\n\n # if there were variables removed from the problem, add them back into the\n # solution vector\n if len(undo) > 0:\n no_adjust = set(undo[0])\n x = x.tolist()\n for i, val in zip(undo[0], undo[1]):\n x.insert(i, val)\n x = np.array(x)\n\n # now undo variable substitutions\n # if \"complete\", problem was solved in presolve; don't do anything here\n if not complete and bounds is not None: # bounds are never none, probably\n n_unbounded = 0\n for i, b in enumerate(bounds):\n if i in no_adjust:\n continue\n lb, ub = b\n if lb is None and ub is None:\n n_unbounded += 1\n x[i] = x[i] - x[n_x + n_unbounded - 1]\n else:\n if lb is None:\n x[i] = ub - x[i]\n else:\n x[i] += lb\n\n n_x = len(c)\n x = x[:n_x] # all the rest of the variables were artificial\n fun = x.dot(c)\n slack = b_ub - A_ub.dot(x) # report slack for ORIGINAL UB constraints\n # report residuals of ORIGINAL EQ constraints\n con = b_eq - A_eq.dot(x)\n\n # Patch for bug #8664. 
Detecting this sort of issue earlier\n # (via abnormalities in the indicators) would be better.\n bounds = np.array(bounds) # again, this should have been the standard form\n lb = bounds[:, 0]\n ub = bounds[:, 1]\n lb[np.equal(lb, None)] = -np.inf\n ub[np.equal(ub, None)] = np.inf\n\n return x, fun, slack, con, lb, ub\n\n\ndef _check_result(x, fun, status, slack, con, lb, ub, tol, message):\n \"\"\"\n Check the validity of the provided solution.\n\n A valid (optimal) solution satisfies all bounds, all slack variables are\n negative and all equality constraint residuals are strictly non-zero.\n Further, the lower-bounds, upper-bounds, slack and residuals contain\n no nan values.\n\n Parameters\n ----------\n x : 1D array\n Solution vector to original linear programming problem\n fun: float\n optimal objective value for original problem\n status : int\n An integer representing the exit status of the optimization::\n\n 0 : Optimization terminated successfully\n 1 : Iteration limit reached\n 2 : Problem appears to be infeasible\n 3 : Problem appears to be unbounded\n 4 : Serious numerical difficulties encountered\n\n slack : 1D array\n The (non-negative) slack in the upper bound constraints, that is,\n ``b_ub - A_ub @ x``\n con : 1D array\n The (nominally zero) residuals of the equality constraints, that is,\n ``b - A_eq @ x``\n lb : 1D array\n The lower bound constraints on the original variables\n ub: 1D array\n The upper bound constraints on the original variables\n message : str\n A string descriptor of the exit status of the optimization.\n tol : float\n Termination tolerance; see [1]_ Section 4.5.\n\n Returns\n -------\n status : int\n An integer representing the exit status of the optimization::\n\n 0 : Optimization terminated successfully\n 1 : Iteration limit reached\n 2 : Problem appears to be infeasible\n 3 : Problem appears to be unbounded\n 4 : Serious numerical difficulties encountered\n\n message : str\n A string descriptor of the exit status of the optimization.\n \"\"\"\n # Somewhat arbitrary, but status 5 is very unusual\n tol = np.sqrt(tol) * 10\n\n contains_nans = (\n np.isnan(x).any()\n or np.isnan(fun)\n or np.isnan(slack).any()\n or np.isnan(con).any()\n )\n\n if contains_nans:\n is_feasible = False\n else:\n invalid_bounds = (x < lb - tol).any() or (x > ub + tol).any()\n invalid_slack = status != 3 and (slack < -tol).any()\n invalid_con = status != 3 and (np.abs(con) > tol).any()\n is_feasible = not (invalid_bounds or invalid_slack or invalid_con)\n\n if status == 0 and not is_feasible:\n status = 4\n message = (\"The solution does not satisfy the constraints within the \"\n \"required tolerance of \" + \"{:.2E}\".format(tol) + \", yet \"\n \"no errors were raised and there is no certificate of \"\n \"infeasibility or unboundedness. This is known to occur \"\n \"if the `presolve` option is False and the problem is \"\n \"infeasible. This can also occur due to the limited \"\n \"accuracy of the `interior-point` method. Check whether \"\n \"the slack and constraint residuals are acceptable; \"\n \"if not, consider enabling presolve, reducing option \"\n \"`tol`, and/or using method `revised simplex`. \"\n \"If you encounter this message under different \"\n \"circumstances, please submit a bug report.\")\n elif status == 0 and contains_nans:\n status = 4\n message = (\"Numerical difficulties were encountered but no errors \"\n \"were raised. This is known to occur if the 'presolve' \"\n \"option is False, 'sparse' is True, and A_eq includes \"\n \"redundant rows. 
If you encounter this under different \"\n \"circumstances, please submit a bug report. Otherwise, \"\n \"remove linearly dependent equations from your equality \"\n \"constraints or enable presolve.\")\n elif status == 2 and is_feasible:\n # Occurs if the simplex method exits after phase one with a very\n # nearly basic feasible solution. Postsolving can make the solution\n # basic, however, this solution is NOT optimal\n raise ValueError(message)\n\n return status, message\n\n\ndef _postprocess(x, c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None,\n complete=False, undo=[], status=0, message=\"\", tol=1e-8,\n iteration=None, disp=False):\n \"\"\"\n Given solution x to presolved, standard form linear program x, add\n fixed variables back into the problem and undo the variable substitutions\n to get solution to original linear program. Also, calculate the objective\n function value, slack in original upper bound constraints, and residuals\n in original equality constraints.\n\n Parameters\n ----------\n x : 1D array\n Solution vector to the standard-form problem.\n c : 1D array\n Original coefficients of the linear objective function to be minimized.\n A_ub : 2D array, optional\n 2D array such that ``A_ub @ x`` gives the values of the upper-bound\n inequality constraints at ``x``.\n b_ub : 1D array, optional\n 1D array of values representing the upper-bound of each inequality\n constraint (row) in ``A_ub``.\n A_eq : 2D array, optional\n 2D array such that ``A_eq @ x`` gives the values of the equality\n constraints at ``x``.\n b_eq : 1D array, optional\n 1D array of values representing the RHS of each equality constraint\n (row) in ``A_eq``.\n bounds : sequence of tuples\n Bounds, as modified in presolve\n complete : bool\n Whether the solution is was determined in presolve (``True`` if so)\n undo: list of tuples\n (`index`, `value`) pairs that record the original index and fixed value\n for each variable removed from the problem\n status : int\n An integer representing the exit status of the optimization::\n\n 0 : Optimization terminated successfully\n 1 : Iteration limit reached\n 2 : Problem appears to be infeasible\n 3 : Problem appears to be unbounded\n 4 : Serious numerical difficulties encountered\n\n message : str\n A string descriptor of the exit status of the optimization.\n tol : float\n Termination tolerance; see [1]_ Section 4.5.\n\n Returns\n -------\n x : 1D array\n Solution vector to original linear programming problem\n fun: float\n optimal objective value for original problem\n slack : 1D array\n The (non-negative) slack in the upper bound constraints, that is,\n ``b_ub - A_ub @ x``\n con : 1D array\n The (nominally zero) residuals of the equality constraints, that is,\n ``b - A_eq @ x``\n status : int\n An integer representing the exit status of the optimization::\n\n 0 : Optimization terminated successfully\n 1 : Iteration limit reached\n 2 : Problem appears to be infeasible\n 3 : Problem appears to be unbounded\n 4 : Serious numerical difficulties encountered\n\n message : str\n A string descriptor of the exit status of the optimization.\n\n \"\"\"\n\n x, fun, slack, con, lb, ub = _postsolve(\n x, c, A_ub, b_ub, A_eq, b_eq,\n bounds, complete, undo, tol\n )\n\n status, message = _check_result(\n x, fun, status, slack, con,\n lb, ub, tol, message\n )\n\n if disp:\n _display_summary(message, status, fun, iteration)\n\n return x, fun, slack, con, status, message\n"
] | [
[
"numpy.sum",
"numpy.any",
"scipy.optimize._remove_redundancy._remove_redundancy_sparse",
"numpy.isfinite",
"numpy.allclose",
"scipy.optimize._remove_redundancy._remove_redundancy",
"numpy.logical_and",
"numpy.abs",
"numpy.logical_not",
"numpy.linalg.matrix_rank",
"scipy.sparse.hstack",
"numpy.isnan",
"numpy.nonzero",
"numpy.zeros",
"numpy.equal",
"numpy.count_nonzero",
"numpy.arange",
"scipy.sparse.coo_matrix",
"numpy.hstack",
"numpy.all",
"scipy.optimize._remove_redundancy._remove_redundancy_dense",
"scipy.sparse.diags",
"scipy.sparse.lil_matrix",
"numpy.array",
"scipy.sparse.vstack",
"scipy.sparse.issparse",
"numpy.isinf",
"numpy.isreal",
"numpy.sqrt",
"numpy.concatenate"
]
] |
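Editorial aside (not part of the dataset row above): the `apis` list for this row comes from SciPy's linprog presolve/postsolve utilities. A minimal, hypothetical sketch of the zero-row and fixed-variable checks that file performs, on made-up dense arrays and assuming only `numpy` is available:

```python
# Illustrative sketch only: dense analogue of two presolve checks from the
# file listed above. All array values here are invented for demonstration.
import numpy as np

tol = 1e-9
A_eq = np.array([[1.0, 2.0, 0.0],
                 [0.0, 0.0, 0.0]])   # second row is all zeros
b_eq = np.array([3.0, 0.0])

# A zero row is trivially infeasible unless its RHS is also (approximately) zero.
zero_row = np.sum(A_eq != 0, axis=1) == 0
if np.any(np.logical_and(zero_row, np.abs(b_eq) > tol)):
    print("trivially infeasible")
else:
    # Otherwise the zero rows carry no information and can be dropped.
    A_eq = A_eq[np.logical_not(zero_row), :]
    b_eq = b_eq[np.logical_not(zero_row)]

# Variables whose lower and upper bounds coincide are fixed and can be removed.
lb = np.array([0.0, 1.0, -np.inf])
ub = np.array([0.0, 5.0, np.inf])
fixed = np.abs(lb - ub) < tol        # here only x[0] is fixed (to 0.0)
print(A_eq, b_eq, np.nonzero(fixed)[0])
```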
Francis777/agents | [
"24e878a697be418307cfbff69724d86be767719d"
] | [
"tf_agents/networks/nest_map_test.py"
] | [
"# coding=utf-8\n# Copyright 2020 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for tf_agents.networks.nest_map.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom absl import flags\nimport tensorflow.compat.v2 as tf\n\nfrom tf_agents.keras_layers import inner_reshape\nfrom tf_agents.networks import nest_map\nfrom tf_agents.networks import sequential\nfrom tf_agents.policies import policy_saver\nfrom tf_agents.policies import tf_policy\nfrom tf_agents.specs import tensor_spec\nfrom tf_agents.trajectories import policy_step\nfrom tf_agents.trajectories import time_step as ts\nfrom tf_agents.utils import common\nfrom tf_agents.utils import nest_utils\nfrom tf_agents.utils import test_utils\n\nFLAGS = flags.FLAGS\n\n\nclass MyPolicy(tf_policy.TFPolicy):\n\n def __init__(self, time_step_spec, net):\n super(MyPolicy, self).__init__(\n time_step_spec,\n action_spec=tf.TensorSpec((None,), tf.float32))\n self._net = net\n\n def _action(self, time_step, policy_state=(), seed=None):\n out, _ = self._net(time_step.observation)\n out = tf.math.add(*tf.nest.flatten(out))\n return policy_step.PolicyStep(out, (), ())\n\n\nclass NestFlattenTest(test_utils.TestCase):\n\n def testNestFlatten(self):\n layer = nest_map.NestFlatten()\n outputs = layer({'a': 1, 'b': 2})\n self.assertEqual(self.evaluate(outputs), [1, 2])\n\n\nclass NestMapTest(test_utils.TestCase):\n\n def setUp(self):\n if not common.has_eager_been_enabled():\n self.skipTest('Only supported in TF2.x.')\n super(NestMapTest, self).setUp()\n\n def testCreateAndCall(self):\n net = sequential.Sequential([\n nest_map.NestMap(\n {'inp1': tf.keras.layers.Dense(8),\n 'inp2': sequential.Sequential([\n tf.keras.layers.Conv2D(2, 3),\n # Convert 3 inner dimensions to [8] for RNN.\n inner_reshape.InnerReshape([None] * 3, [8]),\n ]),\n 'inp3': tf.keras.layers.LSTM(\n 8, return_state=True, return_sequences=True)}),\n nest_map.NestFlatten(),\n tf.keras.layers.Add()])\n self.assertEqual(\n net.state_spec,\n ({\n 'inp1': (),\n 'inp2': (),\n 'inp3': (2 * (tf.TensorSpec(shape=(8,), dtype=tf.float32),),),\n },))\n output_spec = net.create_variables(\n {\n 'inp1': tf.TensorSpec(shape=(3,), dtype=tf.float32),\n 'inp2': tf.TensorSpec(shape=(4, 4, 2,), dtype=tf.float32),\n 'inp3': tf.TensorSpec(shape=(3,), dtype=tf.float32),\n })\n self.assertEqual(output_spec, tf.TensorSpec(shape=(8,), dtype=tf.float32))\n\n inputs = {\n 'inp1': tf.ones((8, 10, 3), dtype=tf.float32),\n 'inp2': tf.ones((8, 10, 4, 4, 2), dtype=tf.float32),\n 'inp3': tf.ones((8, 10, 3), dtype=tf.float32)\n }\n output, next_state = net(inputs)\n self.assertEqual(output.shape, tf.TensorShape([8, 10, 8]))\n self.assertEqual(\n tf.nest.map_structure(lambda t: t.shape, next_state),\n ({\n 'inp1': (),\n 'inp2': (),\n 'inp3': (2 * (tf.TensorShape([8, 8]),),),\n },))\n\n # Test passing in a state.\n output, next_state = net(inputs, next_state)\n self.assertEqual(output.shape, 
tf.TensorShape([8, 10, 8]))\n self.assertEqual(\n tf.nest.map_structure(lambda t: t.shape, next_state),\n ({\n 'inp1': (),\n 'inp2': (),\n 'inp3': (2 * (tf.TensorShape([8, 8]),),),\n },))\n\n def testNestedNest(self):\n # layer structure: {'a': {'b': .}}\n net = nest_map.NestMap(\n {'a': nest_map.NestMap(\n {'b': tf.keras.layers.Dense(8)})})\n net.create_variables({'a': {'b': tf.TensorSpec((1,), dtype=tf.float32)}})\n\n def testNestedNestWithNestedState(self):\n # layer structure: (., {'a': {'b': .}})\n net = nest_map.NestMap(\n (tf.keras.layers.Dense(7),\n {'a': nest_map.NestMap(\n {'b': tf.keras.layers.LSTM(\n 8, return_state=True, return_sequences=True)})}))\n # TODO(b/177337002): remove the forced tuple wrapping the LSTM\n # state once we make a generic KerasWrapper network and clean up\n # Sequential and NestMap to use that instead of singleton Sequential.\n out, state = net(\n (tf.ones((1, 2)), {'a': {'b': tf.ones((1, 2))}}),\n network_state=((), {'a': {'b': ((tf.ones((1, 8)), tf.ones((1, 8))),)}}))\n nest_utils.assert_matching_dtypes_and_inner_shapes(\n out,\n (\n tf.TensorSpec(dtype=tf.float32, shape=(7,)),\n {'a': {'b': tf.TensorSpec(dtype=tf.float32, shape=(8,))}}\n ),\n caller=self, tensors_name='out', specs_name='out_expected')\n nest_utils.assert_matching_dtypes_and_inner_shapes(\n state,\n (\n (),\n {'a': {'b': ((tf.TensorSpec(dtype=tf.float32, shape=(8,)),\n tf.TensorSpec(dtype=tf.float32, shape=(8,))),)}}\n ),\n caller=self, tensors_name='state', specs_name='state_expected')\n\n def testIncompatibleStructureInputs(self):\n with self.assertRaisesRegex(\n TypeError,\n r'`nested_layers` and `input_spec` do not have matching structures'):\n nest_map.NestMap(\n [tf.keras.layers.Dense(8)],\n input_spec={'ick': tf.TensorSpec(8, tf.float32)})\n\n with self.assertRaisesRegex(\n TypeError,\n r'`self.nested_layers` and `inputs` do not have matching structures'):\n net = nest_map.NestMap([tf.keras.layers.Dense(8)])\n net.create_variables({'ick': tf.TensorSpec((1,), dtype=tf.float32)})\n\n with self.assertRaisesRegex(\n TypeError,\n r'`self.nested_layers` and `inputs` do not have matching structures'):\n net = nest_map.NestMap([tf.keras.layers.Dense(8)])\n net({'ick': tf.constant([[1.0]])})\n\n with self.assertRaisesRegex(\n ValueError,\n r'`network_state` and `state_spec` do not have matching structures'):\n net = nest_map.NestMap(\n tf.keras.layers.LSTM(8, return_state=True, return_sequences=True))\n net(tf.ones((1, 2)), network_state=(tf.ones((1, 1)), ()))\n\n def testPolicySaverCompatibility(self):\n observation_spec = {\n 'a': tf.TensorSpec(4, tf.float32),\n 'b': tf.TensorSpec(3, tf.float32)\n }\n time_step_tensor_spec = ts.time_step_spec(observation_spec)\n net = nest_map.NestMap(\n {'a': tf.keras.layers.LSTM(8, return_state=True, return_sequences=True),\n 'b': tf.keras.layers.Dense(8)})\n net.create_variables(observation_spec)\n policy = MyPolicy(time_step_tensor_spec, net)\n\n sample = tensor_spec.sample_spec_nest(\n time_step_tensor_spec, outer_dims=(5,))\n\n step = policy.action(sample)\n self.assertEqual(step.action.shape.as_list(), [5, 8])\n\n train_step = common.create_variable('train_step')\n saver = policy_saver.PolicySaver(policy, train_step=train_step)\n self.initialize_v1_variables()\n\n with self.cached_session():\n saver.save(os.path.join(FLAGS.test_tmpdir, 'nest_map_model'))\n\n\nif __name__ == '__main__':\n test_utils.main()\n"
] | [
[
"tensorflow.compat.v2.keras.layers.LSTM",
"tensorflow.compat.v2.keras.layers.Conv2D",
"tensorflow.compat.v2.nest.flatten",
"tensorflow.compat.v2.keras.layers.Dense",
"tensorflow.compat.v2.constant",
"tensorflow.compat.v2.keras.layers.Add",
"tensorflow.compat.v2.TensorShape",
"tensorflow.compat.v2.ones",
"tensorflow.compat.v2.TensorSpec",
"tensorflow.compat.v2.nest.map_structure"
]
] |
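Editorial aside (not part of the dataset row above): the nest_map test file builds networks that apply a different Keras layer to each leaf of a nested observation. A reduced, hypothetical sketch of that idea using only `tf.nest.map_structure` (one of the listed APIs) on an invented dict of inputs, assuming TF 2.x with eager execution:

```python
# Illustrative sketch only: apply one layer per leaf of a nested structure.
# Layer sizes and input shapes are made up for demonstration.
import tensorflow.compat.v2 as tf

layers = {'inp1': tf.keras.layers.Dense(8),
          'inp2': tf.keras.layers.Dense(4)}
inputs = {'inp1': tf.ones((2, 3)),
          'inp2': tf.ones((2, 5))}

# map_structure pairs up leaves with matching keys and applies the function.
outputs = tf.nest.map_structure(lambda layer, x: layer(x), layers, inputs)
print(tf.nest.map_structure(lambda t: t.shape, outputs))
# {'inp1': TensorShape([2, 8]), 'inp2': TensorShape([2, 4])}
```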
ozen/pytorch-lightning | [
"3b0b402d30fa19e0fef7d150c30ff4bb14a64230"
] | [
"pytorch_lightning/accelerators/ddp_spawn_backend.py"
] | [
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License\nimport os\nimport re\n\nimport torch\nimport torch.multiprocessing as mp\nimport torch.distributed as torch_distrib\nimport torch.distributed as dist\n\nfrom pytorch_lightning import _logger as log\nfrom pytorch_lightning.accelerators.base_backend import Accelerator\nfrom pytorch_lightning.utilities import AMPType\nfrom pytorch_lightning.utilities.cloud_io import atomic_save, load as pl_load\nfrom pytorch_lightning.utilities.distributed import rank_zero_only, rank_zero_warn\nfrom pytorch_lightning.utilities.seed import seed_everything\nfrom pytorch_lightning.distributed.dist import LightningDistributed\nfrom pytorch_lightning.utilities.distributed import find_free_network_port\n\n\ntry:\n from hydra.core.hydra_config import HydraConfig\n from hydra.utils import get_original_cwd, to_absolute_path\nexcept ImportError:\n HYDRA_AVAILABLE = False\nelse:\n HYDRA_AVAILABLE = True\n\n\nclass DDPSpawnBackend(Accelerator):\n\n def __init__(self, trainer, nprocs, cluster_environment=None):\n super().__init__(trainer, cluster_environment)\n self.mp_queue = None\n self.nprocs = nprocs\n self.dist = LightningDistributed()\n\n def setup(self, model):\n os.environ['MASTER_PORT'] = os.environ.get('MASTER_PORT', str(find_free_network_port()))\n\n # pass in a state q\n smp = mp.get_context('spawn')\n self.mp_queue = smp.SimpleQueue()\n\n self.trainer.model = model\n\n def train(self):\n model = self.trainer.model\n\n # train in children process\n mp.spawn(self.ddp_train, nprocs=self.nprocs, args=(self.mp_queue, model,))\n\n # restore main state with best weights\n best_path = self.mp_queue.get()\n results = self.mp_queue.get()\n last_path = self.mp_queue.get()\n\n # recover the weights of the processes trained in the children\n self.__recover_child_process_weights(model, best_path, last_path)\n return results\n\n def ddp_train(self, process_idx, mp_queue, model, is_master=False, proc_offset=0):\n \"\"\"\n Entry point for ddp\n\n Args:\n process_idx:\n mp_queue: multiprocessing queue\n model:\n\n Returns:\n\n \"\"\"\n seed = os.environ.get(\"PL_GLOBAL_SEED\")\n if seed is not None:\n seed_everything(int(seed))\n\n # offset the process id if requested\n process_idx = process_idx + proc_offset\n\n # show progressbar only on progress_rank 0\n if (self.trainer.node_rank != 0 or process_idx != 0) and self.trainer.progress_bar_callback is not None:\n self.trainer.progress_bar_callback.disable()\n\n # determine which process we are and world size\n self.set_world_ranks(process_idx)\n\n # set warning rank\n rank_zero_only.rank = self.trainer.global_rank\n\n # set up server using proc 0's ip address\n # try to init for 20 times at max in case ports are taken\n # where to store ip_table\n model.trainer = self.trainer\n self.init_ddp_connection(\n self.trainer.global_rank,\n self.trainer.world_size,\n self.trainer.is_slurm_managing_tasks\n )\n\n # call setup after the ddp process has connected\n 
self.trainer.call_setup_hook(model)\n\n # on world_size=0 let everyone know training is starting\n if self.trainer.is_global_zero and not torch.distributed.is_initialized():\n log.info('-' * 100)\n log.info(f'distributed_backend={self.trainer.distributed_backend}')\n log.info(f'All DDP processes registered. Starting ddp with {self.trainer.world_size} processes')\n log.info('-' * 100)\n\n # call sync_bn before .cuda(), configure_apex and configure_ddp\n if self.trainer.sync_batchnorm:\n model = model.configure_sync_batchnorm(model)\n\n # move the model to the correct device\n self.model_to_device(model, process_idx, is_master)\n\n # CHOOSE OPTIMIZER\n # allow for lr schedulers as well\n self.setup_optimizers(model)\n\n # set model properties before going into wrapper\n self.trainer.model_connector.copy_trainer_model_properties(model)\n\n # 16-bit\n model = self.trainer.precision_connector.connect(model)\n\n # device ids change depending on the DDP setup\n device_ids = self.get_device_ids()\n\n # allow user to configure ddp\n model = model.configure_ddp(model, device_ids)\n\n # set up training routine\n self.trainer.train_loop.setup_training(model)\n\n # train or test\n results = self.train_or_test()\n\n # get original model\n model = self.trainer.get_model()\n\n # persist info in ddp_spawn\n self.transfer_distrib_spawn_state_on_fit_end(model, mp_queue, results)\n\n # clean up memory\n torch.cuda.empty_cache()\n\n def set_world_ranks(self, process_idx):\n self.trainer.local_rank = process_idx\n self.trainer.global_rank = self.trainer.node_rank * self.trainer.num_processes + process_idx\n self.trainer.world_size = self.trainer.num_nodes * self.trainer.num_processes\n\n def model_to_device(self, model, process_idx, is_master):\n gpu_idx = process_idx\n self.trainer.root_gpu = gpu_idx\n torch.cuda.set_device(self.trainer.root_gpu)\n model.cuda(self.trainer.root_gpu)\n\n def get_device_ids(self):\n device_ids = [self.trainer.root_gpu]\n return device_ids\n\n def training_step(self, args):\n if self.trainer.amp_backend == AMPType.NATIVE:\n with torch.cuda.amp.autocast():\n output = self.trainer.model(*args)\n else:\n output = self.trainer.model(*args)\n return output\n\n def validation_step(self, args):\n output = self.training_step(args)\n return output\n\n def test_step(self, args):\n output = self.training_step(args)\n return output\n\n def barrier(self, name: str = None):\n if torch_distrib.is_initialized():\n torch_distrib.barrier()\n\n def early_stopping_should_stop(self, pl_module):\n stop = torch.tensor(int(self.trainer.should_stop), device=pl_module.device)\n dist.all_reduce(stop, op=dist.reduce_op.SUM)\n dist.barrier()\n should_stop = stop == self.trainer.world_size\n return should_stop\n\n def broadcast(self, obj, src=0):\n return self.dist.broadcast(obj)\n\n def __recover_child_process_weights(self, model, best_path, last_path):\n # transfer back the best path to the trainer\n if self.trainer.checkpoint_callback:\n self.trainer.checkpoint_callback.best_model_path = best_path\n # todo, pass also best score\n\n # load last weights\n if last_path is not None and not self.trainer.testing:\n ckpt = pl_load(last_path, map_location=lambda storage, loc: storage)\n model.load_state_dict(ckpt)\n\n self.trainer.model = model\n\n def transfer_distrib_spawn_state_on_fit_end(self, model, mp_queue, results):\n best_model_path = None\n if self.trainer.checkpoint_callback is not None:\n best_model_path = self.trainer.checkpoint_callback.best_model_path\n\n if self.trainer.global_rank == 0 and mp_queue 
is not None:\n rank_zero_warn('cleaning up ddp environment...')\n # todo, pass complete checkpoint as state dictionary\n mp_queue.put(best_model_path)\n mp_queue.put(results)\n\n # save the last weights\n last_path = None\n if not self.trainer.testing and best_model_path is not None and len(best_model_path) > 0:\n last_path = re.sub('.ckpt', '.tmp_end.ckpt', best_model_path)\n atomic_save(model.state_dict(), last_path)\n mp_queue.put(last_path)\n"
] | [
[
"torch.cuda.empty_cache",
"torch.multiprocessing.spawn",
"torch.distributed.is_initialized",
"torch.distributed.barrier",
"torch.multiprocessing.get_context",
"torch.cuda.amp.autocast",
"torch.distributed.all_reduce",
"torch.cuda.set_device"
]
] |
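Editorial aside (not part of the dataset row above): `DDPSpawnBackend` launches training in child processes and passes results back through a multiprocessing queue. Stripped of Lightning and GPU specifics, the spawn-and-queue pattern looks roughly like the hypothetical sketch below; the worker body and queue payload are invented for illustration:

```python
# Illustrative sketch only: spawn child processes and recover a result in the
# parent via a SimpleQueue, mirroring the setup()/train() pattern above.
import torch.multiprocessing as mp

def worker(process_idx, mp_queue):
    # Each child can report back through the shared queue; in the backend
    # above this carries the best checkpoint path and training results.
    if process_idx == 0:
        mp_queue.put({'rank': process_idx, 'status': 'done'})

def main():
    smp = mp.get_context('spawn')
    mp_queue = smp.SimpleQueue()
    # Run the workers in child processes (join=True blocks until they exit).
    mp.spawn(worker, nprocs=2, args=(mp_queue,))
    print(mp_queue.get())

if __name__ == '__main__':
    main()
```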
xta0/Python-Playground | [
"513ebd2ad7f0a8c69f2f04b4f7524b31e76fa5bc"
] | [
"dl/pytorch/rnn/char-lstm.py"
] | [
"import numpy as np\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\n# open text file and read in data as `text`\nwith open('data/anna.txt', 'r') as f:\n text = f.read()\n\n# print(text[:100])\n\n# encode the text and map each character to an integer and vice versa\n\n# we create two dictionaries:\n# 1. int2char, which maps integers to characters\n# 2. char2int, which maps characters to unique integers\n# text = text[:100]\nchars = tuple(set(text)) #(1', 'v', 'H', '.', 'i', 'E', 'a', 'r', 'C', 'p',...)\nint2char = dict(enumerate(chars))\nchar2int = {ch: ii for ii, ch in int2char.items()}\n\n# encode the text\nencoded = np.array([char2int[ch] for ch in text])\nprint(encoded[:100])\n\ndef one_hot_encode(arr, n_labels):\n # Initialize the the encoded array\n # arr is a multi-dim array\n one_hot = np.zeros((np.multiply(*arr.shape), n_labels), dtype=np.float32)\n # Fill the appropriate elements with ones\n one_hot[np.arange(one_hot.shape[0]), arr.flatten()] = 1.\n # Finally reshape it to get back to the original array\n one_hot = one_hot.reshape((*arr.shape, n_labels))\n return one_hot\n\n# check that the function works as expected\ntest_seq = np.array([[3, 5, 1]])\none_hot = np.zeros((np.multiply(*test_seq.shape), 8), dtype=np.float32)\n\n# one_hot = one_hot_encode(test_seq, 8)\n\nprint(one_hot)\n\ndef get_batches(arr, batch_size, seq_length):\n '''Create a generator that returns batches of size\n batch_size x seq_length from arr.\n \n Arguments\n ---------\n arr: Array you want to make batches from\n batch_size: Batch size, the number of sequences per batch\n seq_length: Number of encoded chars in a sequence\n '''\n total = batch_size * seq_length\n n_batches = len(arr) // total\n arr = arr[:n_batches * total]\n arr = arr.reshape(batch_size, -1)\n for n in range(0, arr.shape[1], seq_length):\n # The features\n x = arr[:,n:n+seq_length]\n # The targets, shifted by one\n y = np.zeros_like(x)\n try:\n y[:, :-1], y[:, -1] = x[:, 1:], arr[:, n+seq_length]\n except IndexError:\n y[:, :-1], y[:, -1] = x[:, 1:], arr[:, 0]\n yield x, y\n\nbatches = get_batches(encoded, 8, 50)\nx, y = next(batches)\n# printing out the first 10 items in a sequence\nprint('x\\n', x[:10, :10])\nprint('\\ny\\n', y[:10, :10])\n\n# check if GPU is available\ntrain_on_gpu = torch.cuda.is_available()\nif(train_on_gpu):\n print('Training on GPU!')\nelse: \n print('No GPU available, training on CPU; consider making n_epochs very small.')\n \nclass CharRNN(nn.Module):\n \n def __init__(self, tokens, n_hidden=256, n_layers=2,\n drop_prob=0.5, lr=0.001):\n super().__init__()\n self.drop_prob = drop_prob\n self.n_layers = n_layers\n self.n_hidden = n_hidden\n self.lr = lr\n \n # creating character dictionaries\n self.chars = tokens\n self.int2char = dict(enumerate(self.chars))\n self.char2int = {ch: ii for ii, ch in self.int2char.items()}\n \n ## TODO: define the layers of the model\n self.lstm = nn.LSTM(len(tokens), n_hidden, n_layers, dropout=drop_prob, batch_first=True)\n self.dropout = nn.Dropout(drop_prob)\n self.fc = nn.Linear (n_hidden, len(tokens))\n \n def forward(self, x, hidden):\n ''' Forward pass through the network. \n These inputs are x, and the hidden/cell state `hidden`. 
'''\n \n ## TODO: Get the outputs and the new hidden state from the lstm\n x, hidden = self.lstm(x,hidden)\n x = self.dropout(x)\n x = x.contiguous().view(-1, n_hidden)\n x = self.fc(x)\n \n # return the final output and the hidden state\n return x, hidden\n \n \n def init_hidden(self, batch_size):\n ''' Initializes hidden state '''\n # Create two new tensors with sizes n_layers x batch_size x n_hidden,\n # initialized to zero, for hidden state and cell state of LSTM\n weight = next(self.parameters()).data\n \n if (train_on_gpu):\n hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda(),\n weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda())\n else:\n hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_(),\n weight.new(self.n_layers, batch_size, self.n_hidden).zero_())\n \n return hidden\n\ndef train(net, data, epochs=10, batch_size=10, seq_length=50, lr=0.001, clip=5, val_frac=0.1, print_every=10):\n ''' Training a network \n \n Arguments\n ---------\n \n net: CharRNN network\n data: text data to train the network\n epochs: Number of epochs to train\n batch_size: Number of mini-sequences per mini-batch, aka batch size\n seq_length: Number of character steps per mini-batch\n lr: learning rate\n clip: gradient clipping\n val_frac: Fraction of data to hold out for validation\n print_every: Number of steps for printing training and validation loss\n \n '''\n net.train()\n \n opt = torch.optim.Adam(net.parameters(), lr=lr)\n criterion = nn.CrossEntropyLoss()\n \n # create training and validation data\n val_idx = int(len(data)*(1-val_frac))\n data, val_data = data[:val_idx], data[val_idx:]\n \n if(train_on_gpu):\n net.cuda()\n \n counter = 0\n n_chars = len(net.chars)\n for e in range(epochs):\n # initialize hidden state\n h = net.init_hidden(batch_size)\n \n for x, y in get_batches(data, batch_size, seq_length):\n counter += 1\n \n # One-hot encode our data and make them Torch tensors\n x = one_hot_encode(x, n_chars)\n inputs, targets = torch.from_numpy(x), torch.from_numpy(y)\n \n if(train_on_gpu):\n inputs, targets = inputs.cuda(), targets.cuda()\n\n # Creating new variables for the hidden state, otherwise\n # we'd backprop through the entire training history\n h = tuple([each.data for each in h])\n\n # zero accumulated gradients\n net.zero_grad()\n \n # get the output from the model\n output, h = net(inputs, h)\n \n # calculate the loss and perform backprop\n loss = criterion(output, targets.view(batch_size*seq_length))\n loss.backward()\n # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.\n nn.utils.clip_grad_norm_(net.parameters(), clip)\n opt.step()\n \n # loss stats\n if counter % print_every == 0:\n # Get validation loss\n val_h = net.init_hidden(batch_size)\n val_losses = []\n net.eval()\n for x, y in get_batches(val_data, batch_size, seq_length):\n # One-hot encode our data and make them Torch tensors\n x = one_hot_encode(x, n_chars)\n x, y = torch.from_numpy(x), torch.from_numpy(y)\n \n # Creating new variables for the hidden state, otherwise\n # we'd backprop through the entire training history\n val_h = tuple([each.data for each in val_h])\n \n inputs, targets = x, y\n if(train_on_gpu):\n inputs, targets = inputs.cuda(), targets.cuda()\n\n output, val_h = net(inputs, val_h)\n val_loss = criterion(output, targets.view(batch_size*seq_length))\n \n val_losses.append(val_loss.item())\n \n net.train() # reset to train mode after iterationg through validation data\n \n print(\"Epoch: 
{}/{}...\".format(e+1, epochs),\n \"Step: {}...\".format(counter),\n \"Loss: {:.4f}...\".format(loss.item()),\n \"Val Loss: {:.4f}\".format(np.mean(val_losses)))\n \nn_hidden=512\nn_layers=2\n\nnet = CharRNN(chars, n_hidden, n_layers)\nprint(net)\n\nbatch_size = 128\nseq_length = 100\nn_epochs = 20 # start smaller if you are just testing initial behavior\n\n# train the model\ntrain(net, encoded, epochs=n_epochs, batch_size=batch_size, seq_length=seq_length, lr=0.001, print_every=10)\n\n# change the name, for saving multiple files\nmodel_name = 'rnn_20_epoch.net'\n\ncheckpoint = {'n_hidden': net.n_hidden,\n 'n_layers': net.n_layers,\n 'state_dict': net.state_dict(),\n 'tokens': net.chars}\n\nwith open(model_name, 'wb') as f:\n torch.save(checkpoint, f)\n \n## Making Predictions\n\ndef predict(net, char, h=None, top_k=None):\n ''' Given a character, predict the next character.\n Returns the predicted character and the hidden state.\n '''\n \n # tensor inputs\n x = np.array([[net.char2int[char]]])\n x = one_hot_encode(x, len(net.chars))\n inputs = torch.from_numpy(x)\n \n if(train_on_gpu):\n inputs = inputs.cuda()\n \n # detach hidden state from history\n h = tuple([each.data for each in h])\n # get the output of the model\n out, h = net(inputs, h)\n\n # get the character probabilities\n p = F.softmax(out, dim=1).data\n if(train_on_gpu):\n p = p.cpu() # move to cpu\n \n # get top characters\n if top_k is None:\n top_ch = np.arange(len(net.chars))\n else:\n p, top_ch = p.topk(top_k)\n top_ch = top_ch.numpy().squeeze()\n \n # select the likely next character with some element of randomness\n p = p.numpy().squeeze()\n char = np.random.choice(top_ch, p=p/p.sum())\n \n # return the encoded value of the predicted char and the hidden state\n return net.int2char[char], h\n \ndef sample(net, size, prime='The', top_k=None):\n #prime is the arg that we want to start our model with\n \n if(train_on_gpu):\n net.cuda()\n else:\n net.cpu()\n \n net.eval() # eval mode\n \n # First off, run through the prime characters\n chars = [ch for ch in prime]\n h = net.init_hidden(1)\n for ch in prime:\n char, h = predict(net, ch, h, top_k=top_k)\n\n chars.append(char)\n \n # Now pass in the previous character and get a new one\n for ii in range(size):\n char, h = predict(net, chars[-1], h, top_k=top_k)\n chars.append(char)\n\n return ''.join(chars)\n\nprint(sample(net, 1000, prime='Anna', top_k=5))\n\n## Loading a checkpoint\nwith open('rnn_20_epoch.net', 'rb') as f:\n checkpoint = torch.load(f)\n \nloaded = CharRNN(checkpoint['tokens'], n_hidden=checkpoint['n_hidden'], n_layers=checkpoint['n_layers'])\nloaded.load_state_dict(checkpoint['state_dict'])\n\n# Sample using a loaded model\nprint(sample(loaded, 2000, top_k=5, prime=\"And Levin said\"))"
] | [
[
"numpy.zeros_like",
"numpy.multiply",
"torch.load",
"torch.nn.functional.softmax",
"torch.save",
"torch.nn.CrossEntropyLoss",
"numpy.arange",
"torch.cuda.is_available",
"torch.from_numpy",
"numpy.array",
"torch.nn.Dropout",
"numpy.mean"
]
] |
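Editorial aside (not part of the dataset row above): in the char-LSTM script, the call that exercises `one_hot_encode` on the test sequence is commented out, so the printed `one_hot` array is all zeros. A self-contained sketch of the intended encoding step, on a tiny made-up sequence of integer-encoded characters:

```python
# Illustrative sketch only: the one-hot encoding helper from the script above.
import numpy as np

def one_hot_encode(arr, n_labels):
    # Flatten, scatter ones at the encoded indices, then restore the shape.
    one_hot = np.zeros((arr.size, n_labels), dtype=np.float32)
    one_hot[np.arange(one_hot.shape[0]), arr.flatten()] = 1.0
    return one_hot.reshape((*arr.shape, n_labels))

test_seq = np.array([[3, 5, 1]])
print(one_hot_encode(test_seq, 8))
# [[[0. 0. 0. 1. 0. 0. 0. 0.]
#   [0. 0. 0. 0. 0. 1. 0. 0.]
#   [0. 1. 0. 0. 0. 0. 0. 0.]]]
```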
diana-hep/shredtypes | [
"bb7c17eea849f8934c449c3fa260af54b3532736"
] | [
"oamap/schema.py"
] | [
"#!/usr/bin/env python\n\n# Copyright (c) 2017, DIANA-HEP\n# All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# \n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n# \n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n# \n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport bisect\nimport codecs\nimport copy\nimport fnmatch\nimport json\nimport numbers\nimport re\nimport sys\nfrom types import ModuleType\n\nimport numpy\n\nimport oamap.generator\nimport oamap.inference\nimport oamap.backend.packing\nimport oamap.extension.common\nimport oamap.proxy\nimport oamap.util\nfrom oamap.util import OrderedDict\n\nif sys.version_info[0] > 2:\n basestring = str\n unicode = str\n\n# Common extensions\nfrom oamap.extension.common import ByteString\nfrom oamap.extension.common import UTF8String\n\n# The \"PLURTP\" type system: Primitives, Lists, Unions, Records, Tuples, and Pointers\n\nclass Schema(object):\n _identifier = re.compile(\"[a-zA-Z][a-zA-Z_0-9]*\") # forbid starting with underscore in field names\n _baddelimiter = re.compile(\"[a-zA-Z_0-9]\") # could be confused with field names or integers\n\n def __init__(self, *args, **kwds):\n raise TypeError(\"Kind cannot be instantiated directly\")\n\n @property\n def nullable(self):\n return self._nullable\n\n @nullable.setter\n def nullable(self, value):\n if value is not True and value is not False:\n raise TypeError(\"nullable must be True or False, not {0}\".format(repr(value)))\n self._nullable = value\n\n @property\n def mask(self):\n return self._mask\n\n @mask.setter\n def mask(self, value):\n if not (value is None or isinstance(value, basestring)):\n raise TypeError(\"mask must be None or an array name (string), not {0}\".format(repr(value)))\n self._mask = value\n\n @property\n def namespace(self):\n return self._namespace\n\n @namespace.setter\n def namespace(self, value):\n if not isinstance(value, basestring):\n raise TypeError(\"namespace must be a string, not {0}\".format(repr(value)))\n self._namespace = value\n\n @property\n def packing(self):\n return self._packing\n\n @packing.setter\n def packing(self, value):\n if not (value is None or isinstance(value, oamap.backend.packing.PackedSource)):\n raise TypeError(\"packing must be None or a 
PackedSource, not {0}\".format(repr(value)))\n self._packing = value\n\n def _packingcopy(self, source=None):\n if self._packing is None:\n return source\n else:\n return self._packing.anchor(source)\n\n def _packingtojson(self):\n if self._packing is None:\n return None\n else:\n return self._packing.tojson()\n\n @staticmethod\n def _packingfromjson(packing):\n if packing is None:\n return None\n else:\n return oamap.backend.packing.PackedSource.fromjson(packing)\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, value):\n if value is None:\n self._name = value\n return\n if isinstance(value, basestring):\n match = self._identifier.match(value)\n if match is not None and len(match.group(0)) == len(value):\n self._name = value\n return\n raise TypeError(\"name must be None or a string matching /{0}/, not {1}\".format(self._identifier.pattern, repr(value)))\n\n @property\n def doc(self):\n return self._doc\n\n @doc.setter\n def doc(self, value):\n if not (value is None or isinstance(value, basestring)):\n raise TypeError(\"doc must be None or a string, not {0}\".format(repr(value)))\n self._doc = value\n\n @property\n def metadata(self):\n return self._metadata\n\n @metadata.setter\n def metadata(self, value):\n self._metadata = value\n\n def _labels(self):\n labels = []\n self._collectlabels(set(), labels)\n return labels\n \n def _label(self, labels):\n for index, label in enumerate(labels):\n if label is self:\n return \"#{0}\".format(index)\n return None\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def show(self, stream=sys.stdout):\n out = self.__repr__(indent=\"\")\n if stream is None:\n return out\n else:\n stream.write(out)\n stream.write(\"\\n\")\n\n @property\n def hasarraynames(self):\n return self._hasarraynames(set())\n\n def tojsonfile(self, file, explicit=False, *args, **kwds):\n json.dump(self.tojson(explicit=explicit), file, *args, **kwds)\n\n def tojsonstring(self, explicit=False, *args, **kwds):\n return json.dumps(self.tojson(explicit=explicit), *args, **kwds)\n\n def tojson(self, explicit=False):\n return self._tojson(explicit, self._labels(), set())\n\n @staticmethod\n def fromjsonfile(file, *args, **kwds):\n return Schema.fromjson(json.load(file, *args, **kwds))\n\n @staticmethod\n def fromjsonstring(data, *args, **kwds):\n return Schema.fromjson(json.loads(data, *args, **kwds))\n\n @staticmethod\n def fromjson(data):\n if isinstance(data, (basestring, dict)):\n labels = {}\n out = Schema._fromjson(data, labels)\n if not isinstance(out, Schema):\n raise TypeError(\"unresolved label: {0}\".format(repr(out)))\n out._finalizefromjson(labels)\n return out\n else:\n raise TypeError(\"JSON for a Schema must be a string or a dict, not {0}\".format(repr(data)))\n\n @staticmethod\n def _fromjson(data, labels):\n if isinstance(data, basestring) and data.startswith(\"#\"):\n return data\n\n elif isinstance(data, basestring):\n return Primitive._fromjson(data, labels)\n\n elif isinstance(data, dict):\n tpe = data.get(\"type\", \"primitive\")\n if tpe == \"primitive\":\n return Primitive._fromjson(data, labels)\n elif tpe == \"list\":\n return List._fromjson(data, labels)\n elif tpe == \"union\":\n return Union._fromjson(data, labels)\n elif tpe == \"record\":\n return Record._fromjson(data, labels)\n elif tpe == \"tuple\":\n return Tuple._fromjson(data, labels)\n elif tpe == \"pointer\":\n return Pointer._fromjson(data, labels)\n else:\n raise TypeError(\"unrecognized type argument for Schema from JSON: 
{0}\".format(repr(tpe)))\n\n else:\n raise TypeError(\"unrecognized type for Schema from JSON: {0}\".format(repr(data)))\n\n def renamespace(self, nullto=None, **to):\n if nullto is not None:\n to[\"\"] = nullto\n\n def replacement(node):\n node.namespace = to.get(node.namespace, node.namespace)\n return node\n\n return self.replace(replacement)\n\n def replace(self, fcn, *args, **kwds):\n return self._replace(fcn, args, kwds, {})\n\n def deepcopy(self, **replacements):\n return self.replace(lambda x: x, **replacements)\n\n def path(self, path, parents=False, allowtop=True):\n out = None\n for nodes in self._path((), path, (), allowtop, set()):\n if out is None:\n if parents:\n out = nodes\n else:\n out = nodes[0]\n else:\n raise ValueError(\"path {0} matches more than one field in schema\".format(repr(path)))\n\n if out is None:\n raise ValueError(\"path {0} does not match any fields in the schema\".format(repr(path)))\n else:\n return out\n\n def paths(self, *paths, **options):\n parents = options.pop(\"parents\", False)\n allowtop = options.pop(\"allowtop\", True)\n if len(options) > 0:\n raise TypeError(\"unrecognized options: {0}\".format(\", \".join(options)))\n for path in paths:\n for nodes in self._path((), path, (), allowtop, set()):\n if parents:\n yield nodes\n else:\n yield nodes[0]\n\n def _path(self, loc, path, parents, allowtop, memo):\n if allowtop and fnmatch.fnmatchcase(\"/\".join(loc), path):\n yield (self,) + parents\n\n def nodes(self, parents=False, bottomup=True):\n if parents:\n for x in self._nodes((), bottomup, set()):\n yield x\n else:\n for x in self._nodes((), bottomup, set()):\n yield x[0]\n\n def project(self, path):\n return self._keep((), [path], True, {})\n\n def keep(self, *paths):\n return self._keep((), paths, False, {})\n\n def drop(self, *paths):\n return self._drop((), paths, {})\n\n def contains(self, schema):\n return self._contains(schema, set())\n\n def _normalize_extension(self, extension):\n if isinstance(extension, ModuleType):\n recurse = False\n extension = extension.__dict__\n else:\n recurse = True\n\n if isinstance(extension, dict):\n extension = [extension[n] for n in sorted(extension)]\n\n try:\n iter(extension)\n except TypeError:\n raise TypeError(\"extension must be a module containing ExtendedGenerator classes or a dict or list (recursively) containing ExtendedGenerator classes\")\n else:\n out = []\n for x in extension:\n if isinstance(x, type) and issubclass(x, oamap.generator.ExtendedGenerator):\n out.append(x)\n elif recurse:\n out.extend(self._normalize_extension(x))\n return out\n\n def fromdata(self, value, pointer_fromequal=False):\n import oamap.fill\n return self(oamap.fill.fromdata(value, generator=self, pointer_fromequal=pointer_fromequal))\n\n def fromiterdata(self, values, limit=lambda entries, arrayitems, arraybytes: False, pointer_fromequal=False):\n import oamap.fill\n return self(oamap.fill.fromiterdata(values, generator=self, limit=limit, pointer_fromequal=pointer_fromequal))\n\n def __call__(self, arrays, prefix=\"object\", delimiter=\"-\", extension=oamap.extension.common, packing=None):\n return self.generator(prefix=prefix, delimiter=delimiter, extension=self._normalize_extension(extension), packing=packing)(arrays)\n\n def generator(self, prefix=\"object\", delimiter=\"-\", extension=oamap.extension.common, packing=None):\n if self._baddelimiter.match(delimiter) is not None:\n raise ValueError(\"delimiters must not contain /{0}/\".format(self._baddelimiter.pattern))\n cacheidx = [0]\n memo = OrderedDict()\n 
extension = self._normalize_extension(extension)\n if packing is not None:\n packing = packing.copy()\n return self._finalizegenerator(self._generator(prefix, delimiter, cacheidx, memo, set(), extension, packing), cacheidx, memo, extension, packing)\n\n def _get_name(self, prefix, delimiter):\n if self._name is not None:\n return prefix + delimiter + \"N\" + self._name\n else:\n return prefix\n\n def _get_mask(self, prefix, delimiter):\n if self._mask is None:\n return self._get_name(prefix, delimiter) + delimiter + \"M\"\n else:\n return self._mask\n\n def _finalizegenerator(self, out, cacheidx, memo, extension, packing):\n allgenerators = list(memo.values())\n for generator in memo.values():\n if isinstance(generator, oamap.generator.PointerGenerator):\n # only assign pointer targets after all other types have been resolved\n target, prefix, delimiter = generator.target\n if id(target) in memo:\n # the target points elsewhere in the type tree: link to that\n generator._internal = True\n if generator.schema.positions is None:\n generator.positions = generator.positions + delimiter + memo[id(target)].derivedname\n generator.target = memo[id(target)]\n generator.schema.target = generator.target.schema\n else:\n # the target is not in the type tree: resolve it now\n memo2 = OrderedDict() # new memo, but same cacheidx\n generator._internal = False\n generator.target = target._finalizegenerator(target._generator(generator.schema._get_external(prefix, delimiter), delimiter, cacheidx, memo2, set(), extension, packing), cacheidx, memo2, extension, packing)\n generator.schema.target = generator.target.schema\n for generator2 in memo2.values():\n allgenerators.append(generator2)\n\n for generator in allgenerators:\n generator._cachelen = cacheidx[0]\n\n return out\n\n def case(self, obj):\n return obj in self\n\n def cast(self, obj):\n if obj in self:\n return obj\n else:\n raise TypeError(\"object is not a member of {0}\".format(self))\n\n################################################################ Primitives can be any Numpy type\n\nclass Primitive(Schema):\n def __init__(self, dtype, nullable=False, data=None, mask=None, namespace=\"\", packing=None, name=None, doc=None, metadata=None):\n self.dtype = dtype\n self.nullable = nullable\n self.data = data\n self.mask = mask\n self.namespace = namespace\n self.packing = packing\n self.name = name\n self.doc = doc\n self.metadata = metadata\n\n @property\n def dtype(self):\n return self._dtype\n\n @dtype.setter\n def dtype(self, value):\n if not isinstance(value, numpy.dtype):\n value = numpy.dtype(value)\n if value.hasobject:\n raise TypeError(\"dtypes containing objects are not allowed\")\n if value.names is not None:\n for n in value.names:\n if self._identifier.match(n) is None:\n raise TypeError(\"dtype names must be identifier strings; the name {0} is not an identifier (/{1}/)\".format(repr(n), self._identifier.pattern))\n raise NotImplementedError(\"record-array dtypes are not supported yet\")\n if value.subdtype is not None:\n raise NotImplementedError(\"multidimensional dtypes are not supported yet\")\n self._dtype = value\n\n _byteorder_transform = {\"!\": True, \">\": True, \"<\": False, \"|\": False, \"=\": numpy.dtype(\">f8\").isnative}\n\n @staticmethod\n def _dtype2str(dtype, delimiter):\n if dtype.names is not None:\n return delimiter.join(Primitive._dtype2str(dtype[n], delimiter) + delimiter + n for n in dtype.names)\n if dtype.subdtype is not None:\n subdtype, dims = dtype.subdtype\n else:\n subdtype, dims = dtype, ()\n return 
\"D\" + \"\".join(repr(x) + delimiter for x in dims) + (subdtype.kind.upper() if Primitive._byteorder_transform[subdtype.byteorder] else subdtype.kind) + repr(subdtype.itemsize)\n\n @staticmethod\n def _str2dtype(string, delimiter):\n out = []\n for _, dims, _, kind, itemsize, name in re.findall(\"(D(([1-9][0-9]*{0})*)([a-zA-Z])([1-9][0-9]*)({0}[a-zA-Z][a-zA-Z_0-9]*)?)\".format(delimiter), string):\n if dims == \"\":\n dims = ()\n else:\n dims = tuple(int(x) for x in dims[:-len(delimiter)].split(delimiter))\n itemsize = itemsize\n name = name[len(delimiter):]\n if ord(\"A\") <= ord(kind) <= ord(\"Z\"):\n byteorder = \">\"\n else:\n byteorder = \"<\"\n if kind == \"S\":\n descr = (kind + itemsize, dims)\n else:\n descr = (byteorder + kind.lower() + itemsize, dims)\n if name == \"\":\n out.append(descr)\n else:\n out.append((name,) + descr)\n if len(out) == 1:\n return numpy.dtype(out[0])\n else:\n return numpy.dtype(out)\n\n @property\n def data(self):\n return self._data\n\n @data.setter\n def data(self, value):\n if not (value is None or isinstance(value, basestring)):\n raise TypeError(\"data must be None or an array name (string), not {0}\".format(repr(value)))\n self._data = value\n\n def _hasarraynames(self, memo):\n return self._data is not None and (not self._nullable or self._mask is not None)\n\n def __repr__(self, labels=None, shown=None, indent=None):\n eq = \"=\"\n\n if labels is None:\n labels = self._labels()\n shown = set()\n label = self._label(labels)\n\n if label is None or id(self) not in shown:\n shown.add(id(self))\n\n args = [repr(self._dtype)]\n if self._nullable is not False:\n args.append(\"nullable\" + eq + repr(self._nullable))\n if self._data is not None:\n args.append(\"data\" + eq + repr(self._data))\n if self._mask is not None:\n args.append(\"mask\" + eq + repr(self._mask))\n if self._namespace != \"\":\n args.append(\"namespace\" + eq + repr(self._namespace))\n if self._packing is not None:\n args.append(\"packing\" + eq + repr(self._packing))\n if self._name is not None:\n args.append(\"name\" + eq + repr(self._name))\n if self._doc is not None:\n args.append(\"doc\" + eq + repr(self._doc))\n if self._metadata is not None:\n args.append(\"metadata\" + eq + repr(self._metadata))\n\n if label is None:\n return \"Primitive(\" + \", \".join(args) + \")\"\n else:\n return label + \": Primitive(\" + \", \".join(args) + \")\"\n\n else:\n return label\n\n def _collectlabels(self, collection, labels):\n if id(self) not in collection:\n collection.add(id(self))\n else:\n labels.append(self)\n\n def _tojson(self, explicit, labels, shown):\n label = self._label(labels)\n\n if label is None or id(self) not in shown:\n shown.add(id(self))\n if not explicit and self._nullable is False and self._data is None and self._mask is None and self._namespace == \"\" and self._packing is None and self._name is None and self._doc is None and self._metadata is None:\n return str(self._dtype)\n else:\n out = {\"type\": \"primitive\", \"dtype\": self._dtype2str(self._dtype, \"-\")}\n if explicit or self._nullable is not False:\n out[\"nullable\"] = self._nullable\n if explicit or self._data is not None:\n out[\"data\"] = self._data\n if explicit or self._mask is not None:\n out[\"mask\"] = self._mask\n if explicit or self._namespace != \"\":\n out[\"namespace\"] = self._namespace\n if explicit or self._packing is not None:\n out[\"packing\"] = self._packingtojson()\n if explicit or self._name is not None:\n out[\"name\"] = self._name\n if explicit or self._doc is not None:\n 
out[\"doc\"] = self._doc\n if explicit or self._metadata is not None:\n out[\"metadata\"] = oamap.util.python2json(self._metadata)\n if explicit or label is not None:\n out[\"label\"] = label\n return out\n else:\n return label\n\n @staticmethod\n def _fromjson(data, labels):\n if isinstance(data, basestring):\n return Primitive(data)\n else:\n if \"dtype\" not in data:\n raise TypeError(\"Primitive Schema from JSON is missing argument 'dtype'\")\n out = Primitive(Primitive._str2dtype(data[\"dtype\"], \"-\"), nullable=data.get(\"nullable\", False), data=data.get(\"data\", None), mask=data.get(\"mask\", None), namespace=data.get(\"namespace\", \"\"), packing=Schema._packingfromjson(data.get(\"packing\", None)), name=data.get(\"name\", None), doc=data.get(\"doc\", None), metadata=oamap.util.json2python(data.get(\"metadata\", None)))\n if \"label\" in data:\n labels[data[\"label\"]] = out\n return out\n\n def _finalizefromjson(self, labels):\n pass\n\n def copy(self, **replacements):\n if \"dtype\" not in replacements:\n replacements[\"dtype\"] = self._dtype\n if \"nullable\" not in replacements:\n replacements[\"nullable\"] = self._nullable\n if \"data\" not in replacements:\n replacements[\"data\"] = self._data\n if \"mask\" not in replacements:\n replacements[\"mask\"] = self._mask\n if \"namespace\" not in replacements:\n replacements[\"namespace\"] = self._namespace\n if \"packing\" not in replacements:\n replacements[\"packing\"] = self._packing\n if \"name\" not in replacements:\n replacements[\"name\"] = self._name\n if \"doc\" not in replacements:\n replacements[\"doc\"] = self._doc\n if \"metadata\" not in replacements:\n replacements[\"metadata\"] = self._metadata\n return Primitive(**replacements)\n\n def _replace(self, fcn, args, kwds, memo):\n return fcn(Primitive(self._dtype, nullable=self._nullable, data=self._data, mask=self._mask, namespace=self._namespace, packing=self._packingcopy(), name=self._name, doc=self._doc, metadata=copy.deepcopy(self._metadata)), *args, **kwds)\n\n def _nodes(self, loc, bottomup, memo):\n yield (self,) + loc\n\n def _keep(self, loc, paths, project, memo):\n return self.deepcopy()\n\n def _drop(self, loc, paths, memo):\n return self.deepcopy()\n\n def _contains(self, schema, memo):\n return self == schema\n\n def __hash__(self):\n return hash((Primitive, self._dtype, self._nullable, self._data, self._mask, self._namespace, self._packing, self._name, self._doc, oamap.util.python2hashable(self._metadata)))\n\n def __eq__(self, other, memo=None):\n return isinstance(other, Primitive) and self._dtype == other._dtype and self._nullable == other._nullable and self._data == other._data and self._mask == other._mask and self._namespace == other._namespace and self._packing == other._packing and self._name == other._name and self._doc == other._doc and self._metadata == other._metadata\n\n def __contains__(self, value, memo=None):\n if value is None:\n return self.nullable\n\n def recurse(value, dims):\n if dims == ():\n if issubclass(self.dtype.type, (numpy.bool_, numpy.bool)):\n return value is True or value is False\n\n elif issubclass(self.dtype.type, numpy.integer):\n iinfo = numpy.iinfo(self.dtype.type)\n return isinstance(value, (numbers.Integral, numpy.integer)) and iinfo.min <= value <= iinfo.max\n\n elif issubclass(self.dtype.type, numpy.floating):\n return isinstance(value, (numbers.Real, numpy.floating))\n\n elif issubclass(self.dtype.type, numpy.complex):\n return isinstance(value, (numbers.Complex, numpy.complex))\n\n else:\n raise 
TypeError(\"unexpected dtype: {0}\".format(self.dtype))\n\n else:\n try:\n iter(value)\n len(value)\n except TypeError:\n return False\n else:\n return len(value) == dims[0] and all(recurse(x, dims[1:]) for x in value)\n\n if self._dtype.subdtype is None:\n return recurse(value, ())\n else:\n subdtype, dims = self._dtype.subdtype\n return recurse(value, dims)\n\n def _get_data(self, prefix, delimiter):\n if self._data is None:\n return self._get_name(prefix, delimiter) + delimiter + self._dtype2str(self._dtype, delimiter)\n else:\n return self._data\n\n def _generator(self, prefix, delimiter, cacheidx, memo, nesting, extension, packing):\n if id(self) in nesting:\n raise TypeError(\"types may not be defined in terms of themselves:\\n\\n {0}\".format(repr(self)))\n args = []\n\n if self._nullable:\n cls = oamap.generator.MaskedPrimitiveGenerator\n args.append(self._get_mask(prefix, delimiter))\n args.append(cacheidx[0]); cacheidx[0] += 1\n else:\n cls = oamap.generator.PrimitiveGenerator\n\n args.append(self._get_data(prefix, delimiter))\n args.append(cacheidx[0]); cacheidx[0] += 1\n\n args.append(self._dtype)\n args.append(self._namespace)\n args.append(self._packingcopy(packing))\n args.append(self._name)\n args.append(prefix)\n args.append(self.copy(packing=None))\n\n for ext in extension:\n if ext.matches(self):\n args.insert(0, cls)\n cls = ext\n break\n\n memo[id(self)] = cls(*args)\n return memo[id(self)]\n\n################################################################ Lists may have arbitrary length\n\nclass List(Schema):\n def __init__(self, content, nullable=False, starts=None, stops=None, mask=None, namespace=\"\", packing=None, name=None, doc=None, metadata=None):\n self.content = content\n self.nullable = nullable\n self.starts = starts\n self.stops = stops\n self.mask = mask\n self.namespace = namespace\n self.packing = packing\n self.name = name\n self.doc = doc\n self.metadata = metadata\n\n @property\n def content(self):\n return self._content\n\n @content.setter\n def content(self, value):\n if isinstance(value, basestring):\n value = Primitive(value)\n if not isinstance(value, Schema):\n raise TypeError(\"content must be a Schema, not {0}\".format(repr(value)))\n self._content = value\n\n @property\n def starts(self):\n return self._starts\n\n @starts.setter\n def starts(self, value):\n if not (value is None or isinstance(value, basestring)):\n raise TypeError(\"starts must be None or an array name (string), not {0}\".format(repr(value)))\n self._starts = value\n\n @property\n def stops(self):\n return self._stops\n\n @stops.setter\n def stops(self, value):\n if not (value is None or isinstance(value, basestring)):\n raise TypeError(\"stops must be None or an array name (string), not {0}\".format(repr(value)))\n self._stops = value\n\n def _hasarraynames(self, memo):\n if id(self) in memo:\n return True\n else:\n memo.add(id(self))\n return self._starts is not None and self._stops is not None and (not self._nullable or self._mask is not None) and self._content._hasarraynames(memo)\n\n def __repr__(self, labels=None, shown=None, indent=None):\n eq = \"=\" if indent is None else \" = \"\n\n if labels is None:\n labels = self._labels()\n shown = set()\n label = self._label(labels)\n\n if label is None or id(self) not in shown:\n shown.add(id(self))\n\n args = []\n if indent is None:\n args.append(self._content.__repr__(labels, shown, indent))\n if self._nullable is not False:\n args.append(\"nullable\" + eq + repr(self._nullable))\n if self._starts is not None:\n 
args.append(\"starts\" + eq + repr(self._starts))\n if self._stops is not None:\n args.append(\"stops\" + eq + repr(self._stops))\n if self._mask is not None:\n args.append(\"mask\" + eq + repr(self._mask))\n if self._namespace != \"\":\n args.append(\"namespace\" + eq + repr(self._namespace))\n if self._packing is not None:\n args.append(\"packing\" + eq + repr(self._packing))\n if self._name is not None:\n args.append(\"name\" + eq + repr(self._name))\n if self._doc is not None:\n args.append(\"doc\" + eq + repr(self._doc))\n if self._metadata is not None:\n args.append(\"metadata\" + eq + repr(self._metadata))\n\n if indent is None:\n argstr = \", \".join(args)\n else:\n args.append(\"content\" + eq + self._content.__repr__(labels, shown, indent + \" \").lstrip() + \"\\n\" + indent)\n args[0] = \"\\n\" + indent + \" \" + args[0]\n argstr = (\",\" + \"\\n\" + indent + \" \").join(args)\n\n if label is None:\n return \"List(\" + argstr + \")\"\n else:\n return label + \": List(\" + argstr + \")\"\n\n else:\n return label\n\n def _tojson(self, explicit, labels, shown):\n label = self._label(labels)\n\n if label is None or id(self) not in shown:\n shown.add(id(self))\n out = {\"type\": \"list\", \"content\": self._content._tojson(explicit, labels, shown)}\n if explicit or self._nullable is not False:\n out[\"nullable\"] = self._nullable\n if explicit or self._starts is not None:\n out[\"starts\"] = self._starts\n if explicit or self._stops is not None:\n out[\"stops\"] = self._stops\n if explicit or self._mask is not None:\n out[\"mask\"] = self._mask\n if explicit or self._namespace != \"\":\n out[\"namespace\"] = self._namespace\n if explicit or self._packing is not None:\n out[\"packing\"] = self._packingtojson()\n if explicit or self._name is not None:\n out[\"name\"] = self._name\n if explicit or self._doc is not None:\n out[\"doc\"] = self._doc\n if explicit or self._metadata is not None:\n out[\"metadata\"] = oamap.util.python2json(self._metadata)\n if explicit or label is not None:\n out[\"label\"] = label\n return out\n else:\n return label\n\n @staticmethod\n def _fromjson(data, labels):\n if \"content\" not in data:\n raise TypeError(\"List Schema from JSON is missing argument 'content'\")\n out = List.__new__(List)\n out._content = Schema._fromjson(data[\"content\"], labels)\n out.nullable = data.get(\"nullable\", False)\n out.starts = data.get(\"starts\", None)\n out.stops = data.get(\"stops\", None)\n out.mask = data.get(\"mask\", None)\n out.namespace = data.get(\"namespace\", \"\")\n out.packing = Schema._packingfromjson(data.get(\"packing\", None))\n out.name = data.get(\"name\", None)\n out.doc = data.get(\"doc\", None)\n out.metadata = oamap.util.json2python(data.get(\"metadata\", None))\n if \"label\" in data:\n labels[data[\"label\"]] = out\n return out\n\n def _finalizefromjson(self, labels):\n if isinstance(self._content, basestring):\n if self._content not in labels:\n raise TypeError(\"unresolved label: {0}\".format(repr(self._content)))\n self._content = labels[self._content]\n else:\n self._content._finalizefromjson(labels)\n\n def _collectlabels(self, collection, labels):\n if id(self) not in collection:\n collection.add(id(self))\n self._content._collectlabels(collection, labels)\n else:\n labels.append(self)\n\n def copy(self, **replacements):\n if \"content\" not in replacements:\n replacements[\"content\"] = self._content\n if \"nullable\" not in replacements:\n replacements[\"nullable\"] = self._nullable\n if \"starts\" not in replacements:\n 
replacements[\"starts\"] = self._starts\n if \"stops\" not in replacements:\n replacements[\"stops\"] = self._stops\n if \"mask\" not in replacements:\n replacements[\"mask\"] = self._mask\n if \"namespace\" not in replacements:\n replacements[\"namespace\"] = self._namespace\n if \"packing\" not in replacements:\n replacements[\"packing\"] = self._packing\n if \"name\" not in replacements:\n replacements[\"name\"] = self._name\n if \"doc\" not in replacements:\n replacements[\"doc\"] = self._doc\n if \"metadata\" not in replacements:\n replacements[\"metadata\"] = self._metadata\n return List(**replacements)\n\n def _replace(self, fcn, args, kwds, memo):\n return fcn(List(self._content._replace(fcn, args, kwds, memo), nullable=self._nullable, starts=self._starts, stops=self._stops, mask=self._mask, namespace=self._namespace, packing=self._packingcopy(), name=self._name, doc=self._doc, metadata=copy.deepcopy(self._metadata)), *args, **kwds)\n\n def _path(self, loc, path, parents, allowtop, memo):\n nodes = None\n for nodes in Schema._path(self, loc, path, parents, allowtop, memo):\n yield nodes\n if nodes is None:\n for nodes in self._content._path(loc, path, (self,) + parents, allowtop, memo):\n yield nodes\n\n def _nodes(self, loc, bottomup, memo):\n if bottomup:\n for x in self._content._nodes((self,) + loc, bottomup, memo):\n yield x\n yield (self,) + loc\n if not bottomup:\n for x in self._content._nodes((self,) + loc, bottomup, memo):\n yield x\n\n def _keep(self, loc, paths, project, memo):\n content = self.content._keep(loc, paths, project, memo)\n if content is None:\n return None\n else:\n return self.copy(content=content)\n\n def _drop(self, loc, paths, memo):\n content = self.content._drop(loc, paths, memo)\n if content is None:\n return None\n else:\n return self.copy(content=content)\n\n def _contains(self, schema, memo):\n if self == schema:\n return True\n else:\n return self._content._contains(schema, memo)\n\n def __hash__(self):\n return hash((List, self._content, self._nullable, self._starts, self._stops, self._mask, self._namespace, self._packing, self._name, self._doc, oamap.util.python2hashable(self._metadata)))\n\n def __eq__(self, other, memo=None):\n if memo is None:\n memo = {}\n if id(self) in memo:\n return memo[id(self)] == id(other)\n if not (isinstance(other, List) and self._nullable == other._nullable and self._starts == other._starts and self._stops == other._stops and self._mask == other._mask and self._namespace == other._namespace and self._packing == other._packing and self._name == other._name and self._doc == other._doc and self._metadata == other._metadata):\n return False\n memo[id(self)] = id(other)\n return self.content.__eq__(other.content, memo)\n\n def __contains__(self, value, memo=None):\n if memo is None:\n memo = {}\n if value is None:\n return self.nullable\n try:\n iter(value)\n except TypeError:\n return False\n else:\n for x in value:\n memo2 = dict(memo) if len(memo) > 0 else memo\n if not self.content.__contains__(x, memo2):\n return False\n return True\n\n def _get_starts(self, prefix, delimiter):\n if self._starts is None:\n return self._get_name(prefix, delimiter) + delimiter + \"B\"\n else:\n return self._starts\n\n def _get_stops(self, prefix, delimiter):\n if self._stops is None:\n return self._get_name(prefix, delimiter) + delimiter + \"E\"\n else:\n return self._stops\n\n def _get_content(self, prefix, delimiter):\n return self._get_name(prefix, delimiter) + delimiter + \"L\"\n\n def __call__(self, arrays, prefix=\"object\", 
delimiter=\"-\", extension=oamap.extension.common, packing=None, numentries=None):\n generator = self.generator(prefix=prefix, delimiter=delimiter, extension=self._normalize_extension(extension), packing=packing)\n import oamap.generator\n if isinstance(generator, oamap.generator.ListGenerator):\n return generator(arrays, numentries=numentries)\n else:\n return generator(arrays)\n\n def _generator(self, prefix, delimiter, cacheidx, memo, nesting, extension, packing):\n if id(self) in nesting:\n raise TypeError(\"types may not be defined in terms of themselves:\\n\\n {0}\".format(repr(self)))\n args = []\n\n if self._nullable:\n cls = oamap.generator.MaskedListGenerator\n args.append(self._get_mask(prefix, delimiter))\n args.append(cacheidx[0]); cacheidx[0] += 1\n else:\n cls = oamap.generator.ListGenerator\n\n args.append(self._get_starts(prefix, delimiter))\n args.append(cacheidx[0]); cacheidx[0] += 1\n\n args.append(self._get_stops(prefix, delimiter))\n args.append(cacheidx[0]); cacheidx[0] += 1\n\n contentgen = self._content._generator(self._get_content(prefix, delimiter), delimiter, cacheidx, memo, nesting.union(set([id(self)])), extension, packing)\n args.append(contentgen)\n args.append(self._namespace)\n args.append(self._packingcopy(packing))\n args.append(self._name)\n args.append(prefix)\n args.append(self.copy(content=contentgen.schema, packing=None))\n\n for ext in extension:\n if ext.matches(self):\n args.insert(0, cls)\n cls = ext\n break\n\n memo[id(self)] = cls(*args)\n return memo[id(self)]\n\n################################################################ Unions may be one of several types\n\nclass Union(Schema):\n def __init__(self, possibilities, nullable=False, tags=None, offsets=None, mask=None, namespace=\"\", packing=None, name=None, doc=None, metadata=None):\n self.possibilities = possibilities\n self.nullable = nullable\n self.tags = tags\n self.offsets = offsets\n self.mask = mask\n self.namespace = namespace\n self.packing = packing\n self.name = name\n self.doc = doc\n self.metadata = metadata\n\n @property\n def possibilities(self):\n return tuple(self._possibilities)\n\n @possibilities.setter\n def possibilities(self, value):\n self._extend(value, [])\n\n @property\n def tags(self):\n return self._tags\n\n @tags.setter\n def tags(self, value):\n if not (value is None or isinstance(value, basestring)):\n raise TypeError(\"tags must be None or an array name (string), not {0}\".format(repr(value)))\n self._tags = value\n\n @property\n def offsets(self):\n return self._offsets\n\n @offsets.setter\n def offsets(self, value):\n if not (value is None or isinstance(value, basestring)):\n raise TypeError(\"offsets must be None or an array name (string), not {0}\".format(repr(value)))\n self._offsets = value\n\n def _extend(self, possibilities, start):\n trial = []\n try:\n for i, x in enumerate(possibilities):\n if isinstance(x, basestring):\n x = Primitive(x)\n assert isinstance(x, Schema), \"possibilities must be an iterable of Schemas; item at {0} is {1}\".format(i, repr(x))\n trial.append(x)\n except TypeError:\n raise TypeError(\"possibilities must be an iterable of Schemas, not {0}\".format(repr(possibilities)))\n except AssertionError as err:\n raise TypeError(err.message)\n self._possibilities = start + trial\n\n def append(self, possibility):\n if isinstance(possibility, basestring):\n possibility = Primitive(possibility)\n if not isinstance(possibility, Schema):\n raise TypeError(\"possibilities must be Schemas, not {0}\".format(repr(possibility)))\n 
self._possibilities.append(possibility)\n\n def insert(self, index, possibility):\n if isinstance(possibility, basestring):\n possibility = Primitive(possibility)\n if not isinstance(possibility, Schema):\n raise TypeError(\"possibilities must be Schemas, not {0}\".format(repr(possibility)))\n self._possibilities.insert(index, possibility)\n\n def extend(self, possibilities):\n self._extend(possibilities, self._possibilities)\n\n def __getitem__(self, index):\n return self._possibilities[index]\n\n def __setitem__(self, index, value):\n if not isinstance(index, (numbers.Integral, numpy.integer)):\n raise TypeError(\"possibility index must be an integer, not {0}\".format(repr(index)))\n if isinstance(value, basestring):\n value = Primitive(value)\n if not isinstance(value, Schema):\n raise TypeError(\"possibilities must be Schemas, not {0}\".format(repr(value)))\n self._possibilities[index] = value\n\n def _hasarraynames(self, memo):\n if id(self) in memo:\n return True\n else:\n memo.add(id(self))\n return self._tags is not None and self._offsets is not None and (not self._nullable or self._mask is not None) and all(x._hasarraynames(memo) for x in self._possibilities)\n\n def __repr__(self, labels=None, shown=None, indent=None):\n eq = \"=\" if indent is None else \" = \"\n\n if labels is None:\n labels = self._labels()\n shown = set()\n label = self._label(labels)\n\n if label is None or id(self) not in shown:\n shown.add(id(self))\n\n args = []\n if indent is None:\n args.append(\"[\" + \", \".join(x.__repr__(labels, shown, indent) for x in self._possibilities) + \"]\")\n if self._nullable is not False:\n args.append(\"nullable\" + eq + repr(self._nullable))\n if self._tags is not None:\n args.append(\"tags\" + eq + repr(self._tags))\n if self._offsets is not None:\n args.append(\"offsets\" + eq + repr(self._offsets))\n if self._mask is not None:\n args.append(\"mask\" + eq + repr(self._mask))\n if self._namespace != \"\":\n args.append(\"namespace\" + eq + repr(self._namespace))\n if self._packing is not None:\n args.append(\"packing\" + eq + repr(self._packing))\n if self._name is not None:\n args.append(\"name\" + eq + repr(self._name))\n if self._doc is not None:\n args.append(\"doc\" + eq + repr(self._doc))\n if self._metadata is not None:\n args.append(\"metadata\" + eq + repr(self._metadata))\n\n if indent is None:\n argstr = \", \".join(args)\n else:\n args.append(\"possibilities\" + eq + \"[\\n\" + indent + \" \" + (\",\\n\" + indent + \" \").join(x.__repr__(labels, shown, indent + \" \").lstrip() for x in self._possibilities) + \"\\n\" + indent + \" ]\")\n args[0] = \"\\n\" + indent + \" \" + args[0]\n argstr = (\",\" + \"\\n\" + indent + \" \").join(args)\n\n if label is None:\n return \"Union(\" + argstr + \")\"\n else:\n return label + \": Union(\" + argstr + \")\"\n\n else:\n return label\n\n def _tojson(self, explicit, labels, shown):\n label = self._label(labels)\n\n if label is None or id(self) not in shown:\n shown.add(id(self))\n out = {\"type\": \"union\", \"possibilities\": [x._tojson(explicit, labels, shown) for x in self._possibilities]}\n if explicit or self._nullable is not False:\n out[\"nullable\"] = self._nullable\n if explicit or self._tags is not None:\n out[\"tags\"] = self._tags\n if explicit or self._offsets is not None:\n out[\"offsets\"] = self._offsets\n if explicit or self._mask is not None:\n out[\"mask\"] = self._mask\n if explicit or self._namespace != \"\":\n out[\"namespace\"] = self._namespace\n if explicit or self._packing is not None:\n 
out[\"packing\"] = self._packingtojson()\n if explicit or self._name is not None:\n out[\"name\"] = self._name\n if explicit or self._doc is not None:\n out[\"doc\"] = self._doc\n if explicit or self._metadata is not None:\n out[\"metadata\"] = oamap.util.python2json(self._metadata)\n if explicit or label is not None:\n out[\"label\"] = label\n return out\n else:\n return label\n\n @staticmethod\n def _fromjson(data, labels):\n if \"possibilities\" not in data:\n raise TypeError(\"Union Schema from JSON is missing argument 'possibilities'\")\n if not isinstance(data[\"possibilities\"], list):\n raise TypeError(\"argument 'possibilities' for Union Schema from JSON should be a list, not {0}\".format(repr(data[\"possibilities\"])))\n out = Union.__new__(Union)\n out.possibilities = [Schema._fromjson(x, labels) for x in data[\"possibilities\"]]\n out.nullable = data.get(\"nullable\", False)\n out.tags = data.get(\"tags\", None)\n out.offsets = data.get(\"offsets\", None)\n out.mask = data.get(\"mask\", None)\n out.namespace = data.get(\"namespace\", \"\")\n out.packing = Schema._packingfromjson(data.get(\"packing\", None))\n out.name = data.get(\"name\", None)\n out.doc = data.get(\"doc\", None)\n out.metadata = oamap.util.json2python(data.get(\"metadata\", None))\n if \"label\" in data:\n labels[data[\"label\"]] = out\n return out\n\n def _finalizefromjson(self, labels):\n for i in range(len(self._possibilities)):\n if isinstance(self._possibilities[i], basestring):\n if self._possibilities[i] not in labels:\n raise TypeError(\"unresolved label: {0}\".format(repr(self._possibilities[i])))\n self._possibilities[i] = labels[self._possibilities[i]]\n else:\n self._possibilities[i]._finalizefromjson(labels)\n\n def _collectlabels(self, collection, labels):\n if id(self) not in collection:\n collection.add(id(self))\n for possibility in self._possibilities:\n possibility._collectlabels(collection, labels)\n else:\n labels.append(self)\n\n def copy(self, **replacements):\n if \"possibilities\" not in replacements:\n replacements[\"possibilities\"] = self._possibilities\n if \"nullable\" not in replacements:\n replacements[\"nullable\"] = self._nullable\n if \"tags\" not in replacements:\n replacements[\"tags\"] = self._tags\n if \"offsets\" not in replacements:\n replacements[\"offsets\"] = self._offsets\n if \"mask\" not in replacements:\n replacements[\"mask\"] = self._mask\n if \"namespace\" not in replacements:\n replacements[\"namespace\"] = self._namespace\n if \"packing\" not in replacements:\n replacements[\"packing\"] = self._packing\n if \"name\" not in replacements:\n replacements[\"name\"] = self._name\n if \"doc\" not in replacements:\n replacements[\"doc\"] = self._doc\n if \"metadata\" not in replacements:\n replacements[\"metadata\"] = self._metadata\n return Union(**replacements)\n\n def _replace(self, fcn, args, kwds, memo):\n return fcn(Union([x._replace(fcn, args, kwds, memo) for x in self._possibilities], nullable=self._nullable, tags=self._tags, offsets=self._offsets, mask=self._mask, namespace=self._namespace, packing=self._packingcopy(), name=self._name, doc=self._doc, metadata=copy.deepcopy(self._metadata)), *args, **kwds)\n\n def _path(self, loc, path, parents, allowtop, memo):\n nodes = None\n for nodes in Schema._path(self, loc, path, parents, allowtop, memo):\n yield nodes\n if nodes is None:\n for possibility in self._possibilities:\n for nodes in possibility._path(loc, path, (self,) + parents, allowtop, memo):\n yield nodes\n\n def _nodes(self, loc, bottomup, memo):\n 
if bottomup:\n for possibility in self._possibilities:\n for x in possibility._nodes((self,) + loc, bottomup, memo):\n yield x\n yield (self,) + loc\n if not bottomup:\n for possibility in self._possibilities:\n for x in possibility._nodes((self,) + loc, bottomup, memo):\n yield x\n\n def _keep(self, loc, paths, project, memo):\n possibilities = []\n for x in self._possibilities:\n p = self._keep(loc, paths, project, memo)\n if p is None:\n return None\n else:\n possibilities.append(p)\n return self.copy(possibilities)\n\n def _drop(self, loc, paths, memo):\n possibilities = []\n for x in self._possibilities:\n p = self._drop(loc, paths, memo)\n if p is None:\n return None\n else:\n possibilities.append(p)\n return self.copy(possibilities)\n\n def _contains(self, schema, memo):\n if self == schema:\n return True\n else:\n return any(x._contains(schema, memo) for x in self._possibilities)\n\n def __hash__(self):\n return hash((Union, self._possibilities, self._nullable, self._tags, self._offsets, self._mask, self._namespace, self._packing, self._name, self._doc, oamap.util.python2hashable(self._metadata)))\n\n def __eq__(self, other, memo=None):\n if memo is None:\n memo = {}\n if id(self) in memo:\n return memo[id(self)] == id(other)\n if not (isinstance(other, Union) and len(self._possibilities) == len(other._possibilities) and self._nullable == other._nullable and self._tags == other._tags and self._offsets == other._offsets and self._mask == other._mask and self._namespace == other._namespace and self._packing == other._packing and self._name == other._name and self._doc == other._doc and self._metadata == other._metadata):\n return False\n memo[id(self)] = id(other)\n return all(x.__eq__(y, memo) for x, y in zip(self.possibilities, other.possibilities))\n\n def __contains__(self, value, memo=None):\n if memo is None:\n memo = {}\n if value is None:\n return self._nullable or any(x._nullable for x in self._possibilities)\n return any(x.__contains__(value, memo) for x in self.possibilities)\n\n def _get_tags(self, prefix, delimiter):\n if self._tags is None:\n return self._get_name(prefix, delimiter) + delimiter + \"T\"\n else:\n return self._tags\n\n def _get_offsets(self, prefix, delimiter):\n if self._offsets is None:\n return self._get_name(prefix, delimiter) + delimiter + \"O\"\n else:\n return self._offsets\n\n def _get_possibility(self, prefix, delimiter, i):\n return self._get_name(prefix, delimiter) + delimiter + \"U\" + repr(i)\n\n def _generator(self, prefix, delimiter, cacheidx, memo, nesting, extension, packing):\n if id(self) in nesting:\n raise TypeError(\"types may not be defined in terms of themselves:\\n\\n {0}\".format(repr(self)))\n args = []\n\n if self._nullable:\n cls = oamap.generator.MaskedUnionGenerator\n args.append(self._get_mask(prefix, delimiter))\n args.append(cacheidx[0]); cacheidx[0] += 1\n else:\n cls = oamap.generator.UnionGenerator\n\n args.append(self._get_tags(prefix, delimiter))\n args.append(cacheidx[0]); cacheidx[0] += 1\n\n args.append(self._get_offsets(prefix, delimiter))\n args.append(cacheidx[0]); cacheidx[0] += 1\n\n possibilitiesgen = [x._generator(self._get_possibility(prefix, delimiter, i), delimiter, cacheidx, memo, nesting.union(set([id(self)])), extension, packing) for i, x in enumerate(self._possibilities)]\n args.append(possibilitiesgen)\n args.append(self._namespace)\n args.append(self._packingcopy(packing))\n args.append(self._name)\n args.append(prefix)\n args.append(self.copy(possibilities=[x.schema for x in possibilitiesgen], 
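\n# A brief sketch of Union membership via the __contains__ defined above (comments only):\n#\n# from oamap.schema import Union\n# u = Union([\"int64\", \"float64\"])\n# 3 in u         # True  (matches the int64 possibility)\n# 3.14 in u      # True  (matches the float64 possibility)\n# \"hello\" in u   # False (no possibility accepts a string)\n# None in u      # False unless the Union or one of its possibilities is nullable\n#\n# Note: _keep/_drop above call self._keep/self._drop inside the loop over possibilities and\n# then pass the collected list positionally to copy(); x._keep(...)/x._drop(...) and\n# copy(possibilities=possibilities) appear to be what was intended.\n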
packing=None))\n\n for ext in extension:\n if ext.matches(self):\n args.insert(0, cls)\n cls = ext\n break\n\n memo[id(self)] = cls(*args)\n return memo[id(self)]\n\n################################################################ Records contain fields of known types\n\nclass Record(Schema):\n def __init__(self, fields, nullable=False, mask=None, namespace=\"\", packing=None, name=None, doc=None, metadata=None):\n self.fields = fields\n self.nullable = nullable\n self.mask = mask\n self.namespace = namespace\n self.packing = packing\n self.name = name\n self.doc = doc\n self.metadata = metadata\n\n @property\n def fields(self):\n return dict(self._fields)\n\n @fields.setter\n def fields(self, value):\n self._extend(value, [])\n\n def keys(self):\n return self._fields.keys()\n\n def values(self):\n return self._fields.values()\n\n def items(self):\n return self._fields.items()\n \n def _extend(self, fields, start):\n trial = []\n try:\n for n, x in fields.items():\n assert isinstance(n, basestring), \"fields must be a dict from identifier strings to Schemas; the key {0} is not a string\".format(repr(n))\n matches = self._identifier.match(n)\n assert matches is not None and len(matches.group(0)) == len(n), \"fields must be a dict from identifier strings to Schemas; the key {0} is not an identifier (/{1}/)\".format(repr(n), self._identifier.pattern)\n if isinstance(x, basestring):\n x = Primitive(x)\n assert isinstance(x, Schema), \"fields must be a dict from identifier strings to Schemas; the value at key {0} is {1}\".format(repr(n), repr(x))\n trial.append((n, x))\n except AttributeError:\n raise TypeError(\"fields must be a dict from strings to Schemas; {0} is not a dict\".format(repr(fields)))\n except AssertionError as err:\n raise TypeError(err.message)\n self._fields = OrderedDict(start + trial)\n\n def __getitem__(self, index):\n return self._fields[index]\n\n def __setitem__(self, index, value):\n if not isinstance(index, basestring):\n raise TypeError(\"field keys must be strings, not {0}\".format(repr(index)))\n if isinstance(value, basestring):\n value = Primitive(value)\n if not isinstance(value, Schema):\n raise TypeError(\"field values must be Schemas, not {0}\".format(repr(value)))\n self._fields[index] = value\n\n def __delitem__(self, index):\n del self._fields[index]\n\n def _hasarraynames(self, memo):\n if id(self) in memo:\n return True\n else:\n memo.add(id(self))\n return (not self._nullable or self._mask is not None) and all(x._hasarraynames(memo) for x in self._fields.values())\n\n def __repr__(self, labels=None, shown=None, indent=None):\n eq = \"=\" if indent is None else \" = \"\n\n if labels is None:\n labels = self._labels()\n shown = set()\n label = self._label(labels)\n\n if label is None or id(self) not in shown:\n shown.add(id(self))\n\n args = []\n if indent is None:\n args.append(\"{\" + \", \".join(\"{0}: {1}\".format(repr(n), x.__repr__(labels, shown, indent)) for n, x in self._fields.items()) + \"}\")\n if self._nullable is not False:\n args.append(\"nullable\" + eq + repr(self._nullable))\n if self._mask is not None:\n args.append(\"mask\" + eq + repr(self._mask))\n if self._namespace != \"\":\n args.append(\"namespace\" + eq + repr(self._namespace))\n if self._packing is not None:\n args.append(\"packing\" + eq + repr(self._packing))\n if self._name is not None:\n args.append(\"name\" + eq + repr(self._name))\n if self._doc is not None:\n args.append(\"doc\" + eq + repr(self._doc))\n if self._metadata is not None:\n args.append(\"metadata\" + eq + 
repr(self._metadata))\n\n if indent is None:\n argstr = \", \".join(args)\n else:\n args.append(\"fields\" + eq + \"{\\n\" + indent + \" \" + (\",\\n\" + indent + \" \").join(\"{0}: {1}\".format(repr(n), x.__repr__(labels, shown, indent + \" \").lstrip()) for n, x in self._fields.items()) + \"\\n\" + indent + \" }\")\n args[0] = \"\\n\" + indent + \" \" + args[0]\n argstr = (\",\" + \"\\n\" + indent + \" \").join(args)\n\n if label is None:\n return \"Record(\" + argstr + \")\"\n else:\n return label + \": Record(\" + argstr + \")\"\n\n else:\n return label\n\n def _tojson(self, explicit, labels, shown):\n label = self._label(labels)\n\n if label is None or id(self) not in shown:\n shown.add(id(self))\n out = {\"type\": \"record\", \"fields\": [[n, x._tojson(explicit, labels, shown)] for n, x in self._fields.items()]}\n if explicit or self._nullable is not False:\n out[\"nullable\"] = self._nullable\n if explicit or self._mask is not None:\n out[\"mask\"] = self._mask\n if explicit or self._namespace != \"\":\n out[\"namespace\"] = self._namespace\n if explicit or self._packing is not None:\n out[\"packing\"] = self._packingtojson()\n if explicit or self._name is not None:\n out[\"name\"] = self._name\n if explicit or self._doc is not None:\n out[\"doc\"] = self._doc\n if explicit or self._metadata is not None:\n out[\"metadata\"] = oamap.util.python2json(self._metadata)\n if explicit or label is not None:\n out[\"label\"] = label\n return out\n else:\n return label\n\n @staticmethod\n def _fromjson(data, labels):\n if \"fields\" not in data:\n raise TypeError(\"Record Schema from JSON is missing argument 'fields'\")\n out = Record.__new__(Record)\n if isinstance(data[\"fields\"], list) and all(len(x) == 2 and isinstance(x[0], basestring) for x in data[\"fields\"]):\n out._fields = OrderedDict((n, Schema._fromjson(x, labels)) for n, x in data[\"fields\"])\n elif isinstance(data[\"fields\"], dict) and all(isinstance(x, basestring) for x in data[\"fields\"]):\n out._fields = OrderedDict((n, Schema._fromjson(data[\"fields\"][n], labels)) for n in sorted(data[\"fields\"]))\n else:\n raise TypeError(\"argument 'fields' for Record Schema from JSON should be a list or dict of key-value pairs (in which the keys are strings), not {0}\".format(repr(data[\"fields\"])))\n out.nullable = data.get(\"nullable\", False)\n out.mask = data.get(\"mask\", None)\n out.namespace = data.get(\"namespace\", \"\")\n out.packing = Schema._packingfromjson(data.get(\"packing\", None))\n out.name = data.get(\"name\", None)\n out.doc = data.get(\"doc\", None)\n out.metadata = oamap.util.json2python(data.get(\"metadata\", None))\n if \"label\" in data:\n labels[data[\"label\"]] = out\n return out\n\n def _finalizefromjson(self, labels):\n for n in list(self._fields.keys()):\n if isinstance(self._fields[n], basestring):\n if self._fields[n] not in labels:\n raise TypeError(\"unresolved label: {0}\".format(repr(self._fields[n])))\n self._fields[n] = labels[self._fields[n]]\n else:\n self._fields[n]._finalizefromjson(labels)\n\n def _collectlabels(self, collection, labels):\n if id(self) not in collection:\n collection.add(id(self))\n for field in self._fields.values():\n field._collectlabels(collection, labels)\n else:\n labels.append(self)\n\n def copy(self, **replacements):\n if \"fields\" not in replacements:\n replacements[\"fields\"] = self._fields\n if \"nullable\" not in replacements:\n replacements[\"nullable\"] = self._nullable\n if \"mask\" not in replacements:\n replacements[\"mask\"] = self._mask\n if 
\"namespace\" not in replacements:\n replacements[\"namespace\"] = self._namespace\n if \"packing\" not in replacements:\n replacements[\"packing\"] = self._packing\n if \"name\" not in replacements:\n replacements[\"name\"] = self._name\n if \"doc\" not in replacements:\n replacements[\"doc\"] = self._doc\n if \"metadata\" not in replacements:\n replacements[\"metadata\"] = self._metadata\n return Record(**replacements)\n\n def _replace(self, fcn, args, kwds, memo):\n return fcn(Record(OrderedDict((n, x._replace(fcn, args, kwds, memo)) for n, x in self._fields.items()), nullable=self._nullable, mask=self._mask, namespace=self._namespace, packing=self._packingcopy(), name=self._name, doc=self._doc, metadata=copy.deepcopy(self._metadata)), *args, **kwds)\n\n def _path(self, loc, path, parents, allowtop, memo):\n nodes = None\n for nodes in Schema._path(self, loc, path, parents, allowtop, memo):\n yield nodes\n if nodes is None:\n for n, x in self._fields.items():\n for nodes in x._path(loc + (n,), path, (self,) + parents, True, memo):\n yield nodes\n\n def _nodes(self, loc, bottomup, memo):\n if bottomup:\n for field in self._fields.values():\n for x in field._nodes((self,) + loc, bottomup, memo):\n yield x\n yield (self,) + loc\n if not bottomup:\n for field in self._fields.values():\n for x in field._nodes((self,) + loc, bottomup, memo):\n yield x\n\n def _keep(self, loc, paths, project, memo):\n fields = OrderedDict()\n for n, x in self._fields.items():\n if any(fnmatch.fnmatchcase(\"/\".join(loc + (n,)), p) for p in paths):\n fields[n] = x\n elif any(fnmatch.fnmatchcase(\"/\".join(loc + (n,)), \"/\".join(p.split(\"/\")[:len(loc) + 1])) for p in paths):\n f = x._keep(loc + (n,), paths, project, memo)\n if f is not None:\n fields[n] = f\n if len(fields) == 0:\n return None\n elif project and len(fields) == 1:\n out, = fields.values()\n return out\n else:\n return self.copy(fields=fields)\n\n def _drop(self, loc, paths, memo):\n fields = OrderedDict()\n for n, x in self._fields.items():\n if not any(fnmatch.fnmatchcase(\"/\".join(loc + (n,)), p) for p in paths):\n f = x._drop(loc + (n,), paths, memo)\n if f is not None:\n fields[n] = f\n if len(fields) == 0:\n return None\n else:\n return self.copy(fields=fields)\n\n def _contains(self, schema, memo):\n if self == schema:\n return True\n else:\n return any(x._contains(schema, memo) for x in self._fields.values())\n\n def __hash__(self):\n return hash((Record, tuple(self._fields.items()), self._nullable, self._mask, self._namespace, self._packing, self._name, self._doc, oamap.util.python2hashable(self._metadata)))\n\n def __eq__(self, other, memo=None):\n if memo is None:\n memo = {}\n if id(self) in memo:\n return memo[id(self)] == id(other)\n if not (isinstance(other, Record) and set(self._fields) == set(other._fields) and self._nullable == other._nullable and self._mask == other._mask and self._namespace == other._namespace and self._packing == other._packing and self._name == other._name and self._doc == other._doc and self._metadata == other._metadata):\n return False\n memo[id(self)] = id(other)\n return all(self._fields[n].__eq__(other._fields[n], memo) for n in self._fields)\n\n def __contains__(self, value, memo=None):\n if memo is None:\n memo = {}\n if value is None:\n return self.nullable\n if isinstance(value, dict):\n return all(n in value and x.__contains__(value[n], memo) for n, x in self._fields.items())\n elif isinstance(value, tuple) and hasattr(value, \"_fields\"):\n return all(n in value._fields and 
x.__contains__(getattr(value, n), memo) for n, x in self._fields.items())\n elif isinstance(value, (list, tuple)):\n return False\n else:\n return all(hasattr(value, n) and x.__contains__(getattr(value, n), memo) for n, x in self._fields.items())\n\n def _get_field(self, prefix, delimiter, n):\n return self._get_name(prefix, delimiter) + delimiter + \"F\" + n\n\n def _generator(self, prefix, delimiter, cacheidx, memo, nesting, extension, packing):\n if len(self._fields) == 0:\n raise TypeError(\"Record has no fields\")\n if id(self) in nesting:\n raise TypeError(\"types may not be defined in terms of themselves:\\n\\n {0}\".format(repr(self)))\n args = []\n\n if self._nullable:\n cls = oamap.generator.MaskedRecordGenerator\n args.append(self._get_mask(prefix, delimiter))\n args.append(cacheidx[0]); cacheidx[0] += 1\n else:\n cls = oamap.generator.RecordGenerator\n\n fieldsgen = OrderedDict([(n, self._fields[n]._generator(self._get_field(prefix, delimiter, n), delimiter, cacheidx, memo, nesting.union(set([id(self)])), extension, packing)) for n in sorted(self._fields)])\n args.append(fieldsgen)\n args.append(self._namespace)\n args.append(self._packingcopy(packing))\n args.append(self._name)\n args.append(prefix)\n args.append(self.copy(fields=OrderedDict((n, x.schema) for n, x in fieldsgen.items()), packing=None))\n\n for ext in extension:\n if ext.matches(self):\n args.insert(0, cls)\n cls = ext\n break\n\n memo[id(self)] = cls(*args)\n return memo[id(self)]\n\n################################################################ Tuples are like records but with an order instead of field names\n\nclass Tuple(Schema):\n def __init__(self, types, nullable=False, mask=None, namespace=\"\", packing=None, name=None, doc=None, metadata=None):\n self.types = types\n self.nullable = nullable\n self.mask = mask\n self.namespace = namespace\n self.packing = packing\n self.name = name\n self.doc = doc\n self.metadata = metadata\n\n @property\n def types(self):\n return tuple(self._types)\n\n @types.setter\n def types(self, value):\n self._extend(value, [])\n\n def _extend(self, types, start):\n trial = []\n try:\n for i, x in enumerate(types):\n if isinstance(x, basestring):\n x = Primitive(x)\n assert isinstance(x, Schema), \"types must be an iterable of Schemas; item at {0} is {1}\".format(i, repr(x))\n trial.append(x)\n except TypeError:\n raise TypeError(\"types must be an iterable of Schemas, not {0}\".format(repr(types)))\n except AssertionError as err:\n raise TypeError(err.message)\n self._types = start + trial\n\n def append(self, item):\n if isinstance(item, basestring):\n item = Primitive(item)\n if not isinstance(item, Schema):\n raise TypeError(\"types must be Schemas, not {0}\".format(repr(item)))\n self._types.append(item)\n\n def insert(self, index, item):\n if isinstance(item, basestring):\n item = Primitive(item)\n if not isinstance(item, Schema):\n raise TypeError(\"types must be Schemas, not {0}\".format(repr(item)))\n self._types.insert(index, item)\n\n def extend(self, types):\n self._extend(types, self._types)\n\n def __getitem__(self, index):\n return self._types[index]\n\n def __setitem__(self, index, value):\n if not isinstance(index, (numbers.Integral, numpy.integer)):\n raise TypeError(\"types index must be an integer, not {0}\".format(repr(index)))\n if isinstance(value, basestring):\n value = Primitive(value)\n if not isinstance(item, Schema):\n raise TypeError(\"types must be Schemas, not {0}\".format(repr(value)))\n self._types[index] = value\n\n def _hasarraynames(self, 
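\n# A short sketch of Record construction and membership, based on the fields setter,\n# __getitem__ and __contains__ above (field keys must match the identifier pattern):\n#\n# from oamap.schema import Record\n# rec = Record(dict(x=\"int64\", y=\"float64\"))\n# rec[\"x\"]                    # Primitive(dtype('int64'))\n# {\"x\": 1, \"y\": 2.5} in rec   # True\n# {\"x\": 1} in rec             # False: the 'y' field is missing\n# Record({\"not an id\": \"f8\"}) # rejected: keys must be identifier strings\n#\n# Note: the except-AssertionError handlers in _extend use err.message, which exists only on\n# Python 2; on Python 3 str(err) would be needed instead.\n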
memo):\n if id(self) in memo:\n return True\n else:\n memo.add(id(self))\n return (not self._nullable or self._mask is not None) and all(x._hasarraynames(memo) for x in self._types)\n\n def __repr__(self, labels=None, shown=None, indent=None):\n eq = \"=\" if indent is None else \" = \"\n\n if labels is None:\n labels = self._labels()\n shown = set()\n label = self._label(labels)\n\n if label is None or id(self) not in shown:\n shown.add(id(self))\n\n args = []\n if indent is None:\n args.append(\"[\" + \", \".join(x.__repr__(labels, shown) for x in self._types) + \"]\")\n if self._nullable is not False:\n args.append(\"nullable\" + eq + repr(self._nullable))\n if self._mask is not None:\n args.append(\"mask\" + eq + repr(self._mask))\n if self._namespace != \"\":\n args.append(\"namespace\" + eq + repr(self._namespace))\n if self._packing is not None:\n args.append(\"packing\" + eq + repr(self._packing))\n if self._name is not None:\n args.append(\"name\" + eq + repr(self._name))\n if self._doc is not None:\n args.append(\"doc\" + eq + repr(self._doc))\n if self._metadata is not None:\n args.append(\"metadata\" + eq + repr(self._metadata))\n\n if indent is None:\n argstr = \", \".join(args)\n else:\n args.append(\"types\" + eq + \"[\\n\" + indent + \" \" + (\",\\n\" + indent + \" \").join(x.__repr__(labels, shown, indent + \" \").lstrip() for x in self._types) + \"\\n\" + indent + \" ]\")\n args[0] = \"\\n\" + indent + \" \" + args[0]\n argstr = (\",\" + \"\\n\" + indent + \" \").join(args)\n\n if label is None:\n return \"Tuple(\" + argstr + \")\"\n else:\n return label + \": Tuple(\" + argstr + \")\"\n\n return label\n\n def _tojson(self, explicit, labels, shown):\n label = self._label(labels)\n\n if label is None or id(self) not in shown:\n shown.add(id(self))\n out = {\"type\": \"tuple\", \"types\": [x._tojson(explicit, labels, shown) for x in self._types]}\n if explicit or self._nullable is not False:\n out[\"nullable\"] = self._nullable\n if explicit or self._mask is not None:\n out[\"mask\"] = self._mask\n if explicit or self._namespace != \"\":\n out[\"namespace\"] = self._namespace\n if explicit or self._packing is not None:\n out[\"packing\"] = self._packingtojson()\n if explicit or self._name is not None:\n out[\"name\"] = self._name\n if explicit or self._doc is not None:\n out[\"doc\"] = self._doc\n if explicit or self._metadata is not None:\n out[\"metadata\"] = oamap.util.python2json(self._metadata)\n if explicit or label is not None:\n out[\"label\"] = label\n return out\n else:\n return label\n\n @staticmethod\n def _fromjson(data, labels):\n if \"types\" not in data:\n raise TypeError(\"Tuple Schema from JSON is missing argument 'types'\")\n if not isinstance(data[\"types\"], list):\n raise TypeError(\"argument 'types' for Tuple Schema from JSON should be a list, not {0}\".format(repr(data[\"types\"])))\n out = Tuple.__new__(Tuple)\n out._types = [Schema._fromjson(x, labels) for x in data[\"types\"]]\n out.nullable = data.get(\"nullable\", False)\n out.mask = data.get(\"mask\", None)\n out.namespace = data.get(\"namespace\", \"\")\n out.packing = Schema._packingfromjson(data.get(\"packing\", None))\n out.name = data.get(\"name\", None)\n out.doc = data.get(\"doc\", None)\n out.metadata = oamap.util.json2python(data.get(\"metadata\", None))\n if \"label\" in data:\n labels[data[\"label\"]] = out\n return out\n\n def _finalizefromjson(self, labels):\n for i in range(len(self._types)):\n if isinstance(self._types[i], basestring):\n if self._types[i] not in labels:\n raise 
TypeError(\"unresolved label: {0}\".format(repr(self._types[i])))\n self._types[i] = labels[self._types[i]]\n else:\n self._types[i]._finalizefromjson(labels)\n\n def _collectlabels(self, collection, labels):\n if id(self) not in collection:\n collection.add(id(self))\n for item in self._types:\n item._collectlabels(collection, labels)\n else:\n labels.append(self)\n\n def copy(self, **replacements):\n if \"types\" not in replacements:\n replacements[\"types\"] = self._types\n if \"nullable\" not in replacements:\n replacements[\"nullable\"] = self._nullable\n if \"mask\" not in replacements:\n replacements[\"mask\"] = self._mask\n if \"namespace\" not in replacements:\n replacements[\"namespace\"] = self._namespace\n if \"packing\" not in replacements:\n replacements[\"packing\"] = self._packing\n if \"name\" not in replacements:\n replacements[\"name\"] = self._name\n if \"doc\" not in replacements:\n replacements[\"doc\"] = self._doc\n if \"metadata\" not in replacements:\n replacements[\"metadata\"] = self._metadata\n return Tuple(**replacements)\n\n def _replace(self, fcn, args, kwds, memo):\n return fcn(Tuple([x._replace(fcn, args, kwds, memo) for x in self._types], nullable=self._nullable, mask=self._mask, namespace=self._namespace, packing=self._packingcopy(), name=self._name, doc=self._doc, metadata=copy.deepcopy(self._metadata)), *args, **kwds)\n\n def _path(self, loc, path, parents, allowtop, memo):\n nodes = None\n for nodes in Schema._path(self, loc, path, parents, allowtop, memo):\n yield nodes\n if nodes is None:\n for i, x in enumerate(self._types):\n for nodes in x._path(loc + (str(i),), path, (self,) + parents, True, memo):\n yield nodes\n\n def _nodes(self, loc, bottomup, memo):\n if bottomup:\n for field in self._types:\n for x in field._nodes((self,) + loc, bottomup, memo):\n yield x\n yield (self,) + loc\n if not bottomup:\n for field in self._types:\n for x in field._nodes((self,) + loc, bottomup, memo):\n yield x\n\n def _keep(self, loc, paths, project, memo):\n types = []\n for i, x in enumerate(self._types):\n n = str(i)\n if any(fnmatch.fnmatchcase(\"/\".join(loc + (n,)), p) for p in paths):\n types.append(x)\n elif any(fnmatch.fnmatchcase(\"/\".join(loc + (n,)), \"/\".join(p.split(\"/\")[:len(loc) + 1])) for p in paths):\n f = x._keep(loc + (n,), paths, project, memo)\n if f is not None:\n types.append(f)\n if len(types) == 0:\n return None\n elif project and len(fields) == 1:\n out, = fields.values()\n return out\n else:\n return self.copy(types=types)\n\n def _drop(self, loc, paths, memo):\n types = []\n for i, x in enumerate(self._types):\n n = str(i)\n if not any(fnmatch.fnmatchcase(\"/\".join(loc + (n,)), p) for p in paths):\n f = x._drop(loc + (n,), paths, memo)\n if f is not None:\n types.append(f)\n if len(types) == 0:\n return None\n else:\n return self.copy(types=types)\n\n def _contains(self, schema, memo):\n if self == schema:\n return True\n else:\n return any(x._contains(schema, memo) for x in self._types)\n\n def __hash__(self):\n return hash((Tuple, self._types, self._nullable, self._mask, self._namespace, self._packing, self._name, self._doc, oamap.util.python2hashable(self._metadata)))\n\n def __eq__(self, other, memo=None):\n if memo is None:\n memo = {}\n if id(self) in memo:\n return memo[id(self)] == id(other)\n if not (isinstance(other, Tuple) and len(self._types) == len(other._types) and self._nullable == other._nullable and self._mask == other._mask and self._namespace == other._namespace and self._packing == other._packing and 
self._name == other._name and self._doc == other._doc and self._metadata == other._metadata):\n return False\n memo[id(self)] = id(other)\n return all(x.__eq__(y, memo) for x, y in zip(self._types, other._types))\n\n def __contains__(self, value, memo=None):\n if memo is None:\n memo = {}\n if value is None:\n return self.nullable\n if isinstance(value, tuple) and len(value) == len(self._types):\n return all(x.__contains__(v, memo) for v, x in zip(value, self._types))\n else:\n return False\n\n def _get_field(self, prefix, delimiter, i):\n return self._get_name(prefix, delimiter) + delimiter + \"F\" + repr(i)\n\n def _generator(self, prefix, delimiter, cacheidx, memo, nesting, extension, packing):\n if len(self._types) == 0:\n raise TypeError(\"Tuple has no types\")\n if id(self) in nesting:\n raise TypeError(\"types may not be defined in terms of themselves:\\n\\n {0}\".format(repr(self)))\n args = []\n\n if self._nullable:\n cls = oamap.generator.MaskedTupleGenerator\n args.append(self._get_mask(prefix, delimiter))\n args.append(cacheidx[0]); cacheidx[0] += 1\n else:\n cls = oamap.generator.TupleGenerator\n\n typesgen = [x._generator(self._get_field(prefix, delimiter, i), delimiter, cacheidx, memo, nesting.union(set([id(self)])), extension, packing) for i, x in enumerate(self._types)]\n args.append(typesgen)\n args.append(self._namespace)\n args.append(self._packingcopy(packing))\n args.append(self._name)\n args.append(prefix)\n args.append(self.copy(types=[x.schema for x in typesgen], packing=None))\n\n for ext in extension:\n if ext.matches(self):\n args.insert(0, cls)\n cls = ext\n break\n\n memo[id(self)] = cls(*args)\n return memo[id(self)]\n\n################################################################ Pointers redirect to the contents of other types\n\nclass Pointer(Schema):\n def __init__(self, target, nullable=False, positions=None, mask=None, namespace=\"\", packing=None, name=None, doc=None, metadata=None):\n self.target = target\n self.nullable = nullable\n self.positions = positions\n self.mask = mask\n self.namespace = namespace\n self.packing = packing\n self.name = name\n self.doc = doc\n self.metadata = metadata\n\n @property\n def target(self):\n return self._target\n\n @target.setter\n def target(self, value):\n if isinstance(value, basestring):\n value = Primitive(value)\n if not (value is None or isinstance(value, Schema)):\n raise TypeError(\"target must be None or a Schema, not {0}\".format(repr(value)))\n if value is self:\n raise TypeError(\"Pointer may not point directly at itself (it would never resolve to a value)\")\n self._target = value\n\n @property\n def positions(self):\n return self._positions\n\n @positions.setter\n def positions(self, value):\n if not (value is None or isinstance(value, basestring)):\n raise TypeError(\"positions must be None or an array name (string), not {0}\".format(repr(value)))\n self._positions = value\n\n def _hasarraynames(self, memo):\n if id(self) in memo:\n return True\n else:\n memo.add(id(self))\n return self._positions is not None and (not self._nullable or self._mask is not None) and self._target._hasarraynames(memo)\n\n def __repr__(self, labels=None, shown=None, indent=None):\n eq = \"=\" if indent is None else \" = \"\n\n if labels is None:\n labels = self._labels()\n shown = set()\n label = self._label(labels)\n\n if label is None or id(self) not in shown:\n shown.add(id(self))\n\n args = []\n if indent is None:\n if self._target is None:\n args.append(repr(None))\n else:\n 
args.append(self._target.__repr__(labels, shown, indent))\n if self._nullable is not False:\n args.append(\"nullable\" + eq + repr(self._nullable))\n if self._positions is not None:\n args.append(\"positions\" + eq + repr(self._positions))\n if self._mask is not None:\n args.append(\"mask\" + eq + repr(self._mask))\n if self._namespace != \"\":\n args.append(\"namespace\" + eq + repr(self._namespace))\n if self._packing is not None:\n args.append(\"packing\" + eq + repr(self._packing))\n if self._name is not None:\n args.append(\"name\" + eq + repr(self._name))\n if self._doc is not None:\n args.append(\"doc\" + eq + repr(self._doc))\n if self._metadata is not None:\n args.append(\"metadata\" + eq + repr(self._metadata))\n\n if indent is None:\n argstr = \", \".join(args)\n else:\n if self._target is None:\n args.append(\"target\" + eq + repr(None) + \"\\n\" + indent)\n else:\n args.append(\"target\" + eq + self._target.__repr__(labels, shown, indent + \" \").lstrip() + \"\\n\" + indent)\n args[0] = \"\\n\" + indent + \" \" + args[0]\n argstr = (\",\" + \"\\n\" + indent + \" \").join(args)\n \n if label is None:\n return \"Pointer(\" + argstr + \")\"\n else:\n return label + \": Pointer(\" + argstr + \")\"\n\n else:\n return label\n\n def _tojson(self, explicit, labels, shown):\n label = self._label(labels)\n\n if label is None or id(self) not in shown:\n shown.add(id(self))\n if self._target is None:\n raise TypeError(\"pointer target is still None; must be resolved before it can be stored\")\n out = {\"type\": \"pointer\", \"target\": self._target._tojson(explicit, labels, shown)}\n if explicit or self._nullable is not False:\n out[\"nullable\"] = self._nullable\n if explicit or self._positions is not None:\n out[\"positions\"] = self._positions\n if explicit or self._mask is not None:\n out[\"mask\"] = self._mask\n if explicit or self._namespace != \"\":\n out[\"namespace\"] = self._namespace\n if explicit or self._packing is not None:\n out[\"packing\"] = self._packingtojson()\n if explicit or self._name is not None:\n out[\"name\"] = self._name\n if explicit or self._doc is not None:\n out[\"doc\"] = self._doc\n if explicit or self._metadata is not None:\n out[\"metadata\"] = oamap.util.python2json(self._metadata)\n if explicit or label is not None:\n out[\"label\"] = label\n return out\n else:\n return label\n\n @staticmethod\n def _fromjson(data, labels):\n if \"target\" not in data:\n raise TypeError(\"Pointer Schema from JSON is missing argument 'target'\")\n out = Pointer.__new__(Pointer)\n out._target = Schema._fromjson(data[\"target\"], labels)\n out.nullable = data.get(\"nullable\", False)\n out.positions = data.get(\"positions\", None)\n out.mask = data.get(\"mask\", None)\n out.namespace = data.get(\"namespace\", \"\")\n out.packing = Schema._packingfromjson(data.get(\"packing\", None))\n out.name = data.get(\"name\", None)\n out.doc = data.get(\"doc\", None)\n out.metadata = oamap.util.json2python(data.get(\"metadata\", None))\n if \"label\" in data:\n labels[data[\"label\"]] = out\n return out\n\n def _finalizefromjson(self, labels):\n if isinstance(self._target, basestring):\n if self._target not in labels:\n raise TypeError(\"unresolved label: {0}\".format(repr(self._target)))\n self._target = labels[self._target]\n else:\n self._target._finalizefromjson(labels)\n\n def _collectlabels(self, collection, labels):\n if id(self) not in collection:\n collection.add(id(self))\n if self._target is not None:\n self._target._collectlabels(collection, labels)\n else:\n 
labels.append(self)\n\n def copy(self, **replacements):\n if \"target\" not in replacements:\n replacements[\"target\"] = self._target\n if \"nullable\" not in replacements:\n replacements[\"nullable\"] = self._nullable\n if \"positions\" not in replacements:\n replacements[\"positions\"] = self._positions\n if \"mask\" not in replacements:\n replacements[\"mask\"] = self._mask\n if \"namespace\" not in replacements:\n replacements[\"namespace\"] = self._namespace\n if \"packing\" not in replacements:\n replacements[\"packing\"] = self._packing\n if \"name\" not in replacements:\n replacements[\"name\"] = self._name\n if \"doc\" not in replacements:\n replacements[\"doc\"] = self._doc\n if \"metadata\" not in replacements:\n replacements[\"metadata\"] = self._metadata\n return Pointer(**replacements)\n\n def _replace(self, fcn, args, kwds, memo):\n if id(self) in memo:\n return fcn(memo[id(self)], *args, **kwds)\n memo[id(self)] = Pointer(None, nullable=self._nullable, positions=self._positions, mask=self._mask, namespace=self._namespace, packing=self._packingcopy(), name=self._name, doc=self._doc, metadata=copy.deepcopy(self._metadata))\n memo[id(self)]._target = self._target._replace(fcn, args, kwds, memo)\n return fcn(memo[id(self)], *args, **kwds)\n\n def _path(self, loc, path, parents, allowtop, memo):\n nodes = None\n for nodes in Schema._path(self, loc, path, parents, allowtop, memo):\n yield nodes\n if nodes is None:\n if id(self) not in memo:\n memo.add(id(self))\n for nodes in self._target._path(loc, path, (self,) + parents, allowtop, memo):\n yield nodes\n\n def _nodes(self, loc, bottomup, memo):\n if id(self) not in memo:\n memo.add(id(self))\n if bottomup:\n for x in self._target._nodes((self,) + loc, bottomup, memo):\n yield x\n yield (self,) + loc\n if not bottomup:\n for x in self._target._nodes((self,) + loc, bottomup, memo):\n yield x\n\n def _keep(self, loc, paths, project, memo):\n if id(self) in memo:\n return memo[id(self)]\n memo[id(self)] = self.copy(target=None)\n target = self._target._keep(loc, paths, project, memo)\n if target is None:\n return None\n else:\n memo[id(self)]._target = target\n return memo[id(self)]\n\n def _drop(self, loc, paths, memo):\n if id(self) in memo:\n return memo[id(self)]\n memo[id(self)] = self.copy(target=None)\n target = self._target._drop(loc, paths, memo)\n if target is None:\n return None\n else:\n memo[id(self)]._target = target\n return memo[id(self)]\n\n def _contains(self, schema, memo):\n if id(self) in memo:\n return False\n memo.add(id(self))\n if self == schema:\n return True\n else:\n return self._target._contains(schema, memo)\n\n def __hash__(self):\n return hash((Pointer, self._target, self._nullable, self._positions, self._mask, self._namespace, self._packing, self._name, self._doc, oamap.util.python2hashable(self._metadata)))\n\n def __eq__(self, other, memo=None):\n if memo is None:\n memo = {}\n if id(self) in memo:\n return memo[id(self)] == id(other)\n if not (isinstance(other, Pointer) and self._nullable == other._nullable and self._positions == other._positions and self._mask == other._mask and self._namespace == other._namespace and self._packing == other._packing and self._name == other._name and self._doc == other._doc and self._metadata == other._metadata):\n return False\n memo[id(self)] = id(other)\n return self.target.__eq__(other.target, memo)\n\n def __contains__(self, value, memo=None):\n if memo is None:\n memo = {}\n if id(value) in memo:\n return memo[id(value)] == id(self)\n memo[id(value)] = 
id(self)\n if value is None:\n return self._nullable\n return self.target.__contains__(value, memo)\n\n def _get_positions(self, prefix, delimiter):\n if self._positions is None:\n return self._get_name(prefix, delimiter) + delimiter + \"P\"\n else:\n return self._positions\n\n def _get_external(self, prefix, delimiter):\n return self._get_name(prefix, delimiter) + delimiter + \"X\"\n\n def _generator(self, prefix, delimiter, cacheidx, memo, nesting, extension, packing):\n if self._target is None:\n raise TypeError(\"when creating a Pointer type from a Pointer schema, target must be set to a value other than None\")\n args = []\n\n if self._nullable:\n cls = oamap.generator.MaskedPointerGenerator\n args.append(self._get_mask(prefix, delimiter))\n args.append(cacheidx[0]); cacheidx[0] += 1\n else:\n cls = oamap.generator.PointerGenerator\n\n args.append(self._get_positions(prefix, delimiter))\n args.append(cacheidx[0]); cacheidx[0] += 1\n\n args.append((self._target, prefix, delimiter)) # placeholder! see _finalizegenerator!\n args.append(self._namespace)\n args.append(self._packingcopy(packing))\n args.append(self._name)\n args.append(prefix)\n args.append(self.copy(packing=None))\n\n for ext in extension:\n if ext.matches(self):\n args.insert(0, cls)\n cls = ext\n break\n\n memo[id(self)] = cls(*args)\n return memo[id(self)]\n"
] | [
[
"numpy.dtype",
"numpy.iinfo"
]
] |
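The code cell above ends with the `Tuple` and `Pointer` portion of oamap's schema module: both classes carry the same bookkeeping (`nullable`/`mask`, `namespace`, `packing`, `name`, `doc`, `metadata`), JSON round-tripping with label resolution for recursive schemas, `copy`/`_replace`, path and keep/drop projection helpers, and memoized `__eq__`/`__contains__` so cyclic schemas terminate. A minimal usage sketch follows; the `oamap.schema` import path and the `Primitive("f8", ...)` constructor spelling are assumptions, since only the class bodies appear in this cell.

```python
# Illustrative sketch only -- exercises the surface visible in the excerpt
# (Tuple/Pointer constructors, copy(), ==, `in`, repr). The import path and
# Primitive's constructor are assumptions inferred from how the excerpt uses them.
from oamap.schema import Primitive, Tuple, Pointer

# a pair of floats, with a nullable second component
pair = Tuple([Primitive("f8"), Primitive("f8", nullable=True)],
             name="Pair", doc="x/y coordinate")

# a Pointer redirects to the contents of another schema; `positions` names
# the integer array that resolves each reference at runtime
ref = Pointer(pair, positions="pair-positions")

print(repr(pair))             # __repr__ renders nested types, optionally labelled
assert pair == pair.copy()    # structural equality, memoized to survive cycles
assert (1.0, 2.0) in pair     # __contains__ checks a Python value against the schema
assert (1.0, None) in pair    # None is accepted where nullable=True
```

The memo dictionaries threaded through `__eq__`, `__contains__`, `_keep`, and `_drop` are what keep these operations from recursing forever once a `Pointer` eventually refers back into its own containing schema.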
domoritz/solas | [
"23878fed9efbf14781791dafec26705c6762cfd1"
] | [
"tests/test_maintainence.py"
] | [
"# Copyright 2019-2020 The Solas Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom .context import solas\nimport pytest\nimport pandas as pd\nfrom solas.vis.Vis import Vis\n\n\ndef test_metadata_subsequent_display(global_var):\n df = pytest.car_df\n df._ipython_display_()\n assert df._metadata_fresh == True, \"Failed to maintain metadata after display df\"\n df._ipython_display_()\n assert df._metadata_fresh == True, \"Failed to maintain metadata after display df\"\n\n\ndef test_metadata_subsequent_vis(global_var):\n df = pytest.car_df\n df._ipython_display_()\n assert df._metadata_fresh == True, \"Failed to maintain metadata after display df\"\n vis = Vis([\"Acceleration\", \"Horsepower\"], df)\n assert df._metadata_fresh == True, \"Failed to maintain metadata after display df\"\n\n\ndef test_metadata_inplace_operation(global_var):\n df = pytest.car_df\n df._ipython_display_()\n assert df._metadata_fresh == True, \"Failed to maintain metadata after display df\"\n df.dropna(inplace=True)\n assert df._metadata_fresh == False, \"Failed to expire metadata after in-place Pandas operation\"\n\n\ndef test_metadata_new_df_operation(global_var):\n df = pytest.car_df\n df._ipython_display_()\n assert df._metadata_fresh == True, \"Failed to maintain metadata after display df\"\n df[[\"MilesPerGal\", \"Acceleration\"]]\n assert df._metadata_fresh == True, \"Failed to maintain metadata after display df\"\n df2 = df[[\"MilesPerGal\", \"Acceleration\"]]\n assert not hasattr(df2, \"_metadata_fresh\")\n\n\ndef test_metadata_column_group_reset_df(global_var):\n df = pd.read_csv(\"solas/data/car.csv\")\n assert not hasattr(df, \"_metadata_fresh\")\n df[\"Year\"] = pd.to_datetime(df[\"Year\"], format=\"%Y\")\n assert hasattr(df, \"_metadata_fresh\")\n result = df.groupby(\"Cylinders\").mean()\n assert not hasattr(result, \"_metadata_fresh\")\n # Note that this should trigger two compute metadata (one for df, and one for an intermediate df.reset_index used to feed inside created Vis)\n result._ipython_display_()\n assert result._metadata_fresh == True, \"Failed to maintain metadata after display df\"\n\n colgroup_recs = result.recommendation[\"Column Groups\"]\n assert len(colgroup_recs) == 5\n for rec in colgroup_recs:\n assert rec.mark == \"bar\", \"Column Group not displaying bar charts\"\n\n\ndef test_recs_inplace_operation(global_var):\n df = pytest.college_df\n df._ipython_display_()\n assert df._recs_fresh == True, \"Failed to maintain recommendation after display df\"\n assert len(df.recommendation[\"Occurrence\"]) == 6\n df.drop(columns=[\"Name\"], inplace=True)\n assert \"Name\" not in df.columns, \"Failed to perform `drop` operation in-place\"\n assert df._recs_fresh == False, \"Failed to maintain recommendation after in-place Pandas operation\"\n df._ipython_display_()\n assert len(df.recommendation[\"Occurrence\"]) == 5\n assert df._recs_fresh == True, \"Failed to maintain recommendation after display df\"\n\n\ndef test_intent_cleared_after_vis_data():\n df = pd.read_csv(\n 
\"https://github.com/lux/solas-datasets/blob/master/data/real_estate_tutorial.csv?raw=true\"\n )\n df[\"Month\"] = pd.to_datetime(df[\"Month\"], format=\"%m\")\n df[\"Year\"] = pd.to_datetime(df[\"Year\"], format=\"%Y\")\n df.intent = [\n solas.Clause(\"Year\"),\n solas.Clause(\"PctForeclosured\"),\n solas.Clause(\"City=Crofton\"),\n ]\n df._ipython_display_()\n\n vis = df.recommendation[\"Similarity\"][0]\n visdata = vis.data\n visdata.data_type[\"PctForeclosured\"] = \"quantitative\"\n # otherwise because of the small size of the dataframe, the cardinality of PctForeclosured is less than 20\n # and thereby this attribute will be considered as nominal\n visdata._ipython_display_()\n all_column_vis = visdata.current_vis[0]\n assert all_column_vis.get_attr_by_channel(\"x\")[0].attribute == \"Year\"\n assert all_column_vis.get_attr_by_channel(\"y\")[0].attribute == \"PctForeclosured\"\n"
] | [
[
"pandas.read_csv",
"pandas.to_datetime"
]
] |
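The Solas test module in this row relies on a shared `global_var` fixture that the repository's conftest (not included in this cell) uses to attach `pytest.car_df` and `pytest.college_df` to the pytest namespace before the assertions on `_metadata_fresh` and `_recs_fresh` run. A hedged sketch of what such a fixture could look like follows; the fixture body and the college dataset path are assumptions for illustration, not the project's actual conftest.

```python
# Hypothetical conftest.py sketch -- shows where `global_var`, `pytest.car_df`
# and `pytest.college_df` used by the tests above might come from. The car.csv
# path matches the test file; the college.csv path is a placeholder.
import pandas as pd
import pytest


@pytest.fixture
def global_var():
    # Attach shared dataframes to the pytest module namespace, as the tests expect.
    # Function scope re-reads the CSVs so in-place mutations in one test
    # (e.g. drop/dropna with inplace=True) do not leak into the next.
    pytest.car_df = pd.read_csv("solas/data/car.csv")
    pytest.college_df = pd.read_csv("solas/data/college.csv")  # placeholder path
```

Each test then triggers `_ipython_display_()` to force metadata/recommendation computation and checks that the freshness flags survive read-only operations but are invalidated by in-place pandas calls such as `dropna(inplace=True)` or `drop(columns=..., inplace=True)`.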
yetyetanotherusername/zarr-python | [
"e3cdd1a50e1047304be2c91a017da6362f8df533"
] | [
"zarr/core.py"
] | [
"import binascii\nimport hashlib\nimport itertools\nimport math\nimport operator\nimport re\nfrom functools import reduce\n\nimport numpy as np\nfrom numcodecs.compat import ensure_bytes, ensure_ndarray\n\nfrom zarr.attrs import Attributes\nfrom zarr.codecs import AsType, get_codec\nfrom zarr.errors import ArrayNotFoundError, ReadOnlyError\nfrom zarr.indexing import (BasicIndexer, CoordinateIndexer, MaskIndexer,\n OIndex, OrthogonalIndexer, VIndex, check_fields,\n check_no_multi_fields, ensure_tuple,\n err_too_many_indices, is_contiguous_selection,\n is_scalar, pop_fields)\nfrom zarr.meta import decode_array_metadata, encode_array_metadata\nfrom zarr.storage import array_meta_key, attrs_key, getsize, listdir\nfrom zarr.util import (InfoReporter, check_array_shape, human_readable_size,\n is_total_slice, nolock, normalize_chunks,\n normalize_resize_args, normalize_shape,\n normalize_storage_path)\n\n\n# noinspection PyUnresolvedReferences\nclass Array(object):\n \"\"\"Instantiate an array from an initialized store.\n\n Parameters\n ----------\n store : MutableMapping\n Array store, already initialized.\n path : string, optional\n Storage path.\n read_only : bool, optional\n True if array should be protected against modification.\n chunk_store : MutableMapping, optional\n Separate storage for chunks. If not provided, `store` will be used\n for storage of both chunks and metadata.\n synchronizer : object, optional\n Array synchronizer.\n cache_metadata : bool, optional\n If True (default), array configuration metadata will be cached for the\n lifetime of the object. If False, array metadata will be reloaded\n prior to all data access and modification operations (may incur\n overhead depending on storage and data access pattern).\n cache_attrs : bool, optional\n If True (default), user attributes will be cached for attribute read\n operations. 
If False, user attributes are reloaded from the store prior\n to all attribute read operations.\n\n Attributes\n ----------\n store\n path\n name\n read_only\n chunk_store\n shape\n chunks\n dtype\n compression\n compression_opts\n fill_value\n order\n synchronizer\n filters\n attrs\n size\n itemsize\n nbytes\n nbytes_stored\n cdata_shape\n nchunks\n nchunks_initialized\n is_view\n info\n vindex\n oindex\n\n Methods\n -------\n __getitem__\n __setitem__\n get_basic_selection\n set_basic_selection\n get_orthogonal_selection\n set_orthogonal_selection\n get_mask_selection\n set_mask_selection\n get_coordinate_selection\n set_coordinate_selection\n digest\n hexdigest\n resize\n append\n view\n astype\n\n \"\"\"\n\n def __init__(self, store, path=None, read_only=False, chunk_store=None,\n synchronizer=None, cache_metadata=True, cache_attrs=True):\n # N.B., expect at this point store is fully initialized with all\n # configuration metadata fully specified and normalized\n\n self._store = store\n self._chunk_store = chunk_store\n self._path = normalize_storage_path(path)\n if self._path:\n self._key_prefix = self._path + '/'\n else:\n self._key_prefix = ''\n self._read_only = bool(read_only)\n self._synchronizer = synchronizer\n self._cache_metadata = cache_metadata\n self._is_view = False\n\n # initialize metadata\n self._load_metadata()\n\n # initialize attributes\n akey = self._key_prefix + attrs_key\n self._attrs = Attributes(store, key=akey, read_only=read_only,\n synchronizer=synchronizer, cache=cache_attrs)\n\n # initialize info reporter\n self._info_reporter = InfoReporter(self)\n\n # initialize indexing helpers\n self._oindex = OIndex(self)\n self._vindex = VIndex(self)\n\n def _load_metadata(self):\n \"\"\"(Re)load metadata from store.\"\"\"\n if self._synchronizer is None:\n self._load_metadata_nosync()\n else:\n mkey = self._key_prefix + array_meta_key\n with self._synchronizer[mkey]:\n self._load_metadata_nosync()\n\n def _load_metadata_nosync(self):\n try:\n mkey = self._key_prefix + array_meta_key\n meta_bytes = self._store[mkey]\n except KeyError:\n raise ArrayNotFoundError(self._path)\n else:\n\n # decode and store metadata as instance members\n meta = decode_array_metadata(meta_bytes)\n self._meta = meta\n self._shape = meta['shape']\n self._chunks = meta['chunks']\n self._dtype = meta['dtype']\n self._fill_value = meta['fill_value']\n self._order = meta['order']\n\n # setup compressor\n config = meta['compressor']\n if config is None:\n self._compressor = None\n else:\n self._compressor = get_codec(config)\n\n # setup filters\n filters = meta['filters']\n if filters:\n filters = [get_codec(config) for config in filters]\n self._filters = filters\n\n def _refresh_metadata(self):\n if not self._cache_metadata:\n self._load_metadata()\n\n def _refresh_metadata_nosync(self):\n if not self._cache_metadata and not self._is_view:\n self._load_metadata_nosync()\n\n def _flush_metadata_nosync(self):\n if self._is_view:\n raise PermissionError('operation not permitted for views')\n\n if self._compressor:\n compressor_config = self._compressor.get_config()\n else:\n compressor_config = None\n if self._filters:\n filters_config = [f.get_config() for f in self._filters]\n else:\n filters_config = None\n meta = dict(shape=self._shape, chunks=self._chunks, dtype=self._dtype,\n compressor=compressor_config, fill_value=self._fill_value,\n order=self._order, filters=filters_config)\n mkey = self._key_prefix + array_meta_key\n self._store[mkey] = encode_array_metadata(meta)\n\n @property\n def 
store(self):\n \"\"\"A MutableMapping providing the underlying storage for the array.\"\"\"\n return self._store\n\n @property\n def path(self):\n \"\"\"Storage path.\"\"\"\n return self._path\n\n @property\n def name(self):\n \"\"\"Array name following h5py convention.\"\"\"\n if self.path:\n # follow h5py convention: add leading slash\n name = self.path\n if name[0] != '/':\n name = '/' + name\n return name\n return None\n\n @property\n def basename(self):\n \"\"\"Final component of name.\"\"\"\n if self.name is not None:\n return self.name.split('/')[-1]\n return None\n\n @property\n def read_only(self):\n \"\"\"A boolean, True if modification operations are not permitted.\"\"\"\n return self._read_only\n\n @read_only.setter\n def read_only(self, value):\n self._read_only = bool(value)\n\n @property\n def chunk_store(self):\n \"\"\"A MutableMapping providing the underlying storage for array chunks.\"\"\"\n if self._chunk_store is None:\n return self._store\n else:\n return self._chunk_store\n\n @property\n def shape(self):\n \"\"\"A tuple of integers describing the length of each dimension of\n the array.\"\"\"\n # N.B., shape may change if array is resized, hence need to refresh\n # metadata\n self._refresh_metadata()\n return self._shape\n\n @shape.setter\n def shape(self, value):\n self.resize(value)\n\n @property\n def chunks(self):\n \"\"\"A tuple of integers describing the length of each dimension of a\n chunk of the array.\"\"\"\n return self._chunks\n\n @property\n def dtype(self):\n \"\"\"The NumPy data type.\"\"\"\n return self._dtype\n\n @property\n def compressor(self):\n \"\"\"Primary compression codec.\"\"\"\n return self._compressor\n\n @property\n def fill_value(self):\n \"\"\"A value used for uninitialized portions of the array.\"\"\"\n return self._fill_value\n\n @property\n def order(self):\n \"\"\"A string indicating the order in which bytes are arranged within\n chunks of the array.\"\"\"\n return self._order\n\n @property\n def filters(self):\n \"\"\"One or more codecs used to transform data prior to compression.\"\"\"\n return self._filters\n\n @property\n def synchronizer(self):\n \"\"\"Object used to synchronize write access to the array.\"\"\"\n return self._synchronizer\n\n @property\n def attrs(self):\n \"\"\"A MutableMapping containing user-defined attributes. Note that\n attribute values must be JSON serializable.\"\"\"\n return self._attrs\n\n @property\n def ndim(self):\n \"\"\"Number of dimensions.\"\"\"\n return len(self.shape)\n\n @property\n def _size(self):\n return reduce(operator.mul, self._shape, 1)\n\n @property\n def size(self):\n \"\"\"The total number of elements in the array.\"\"\"\n # N.B., this property depends on shape, and shape may change if array\n # is resized, hence need to refresh metadata\n self._refresh_metadata()\n return self._size\n\n @property\n def itemsize(self):\n \"\"\"The size in bytes of each item in the array.\"\"\"\n return self.dtype.itemsize\n\n @property\n def _nbytes(self):\n return self._size * self.itemsize\n\n @property\n def nbytes(self):\n \"\"\"The total number of bytes that would be required to store the\n array without compression.\"\"\"\n # N.B., this property depends on shape, and shape may change if array\n # is resized, hence need to refresh metadata\n self._refresh_metadata()\n return self._nbytes\n\n @property\n def nbytes_stored(self):\n \"\"\"The total number of stored bytes of data for the array. 
This\n includes storage required for configuration metadata and user\n attributes.\"\"\"\n m = getsize(self._store, self._path)\n if self._chunk_store is None:\n return m\n else:\n n = getsize(self._chunk_store, self._path)\n if m < 0 or n < 0:\n return -1\n else:\n return m + n\n\n @property\n def _cdata_shape(self):\n if self._shape == ():\n return 1,\n else:\n return tuple(math.ceil(s / c)\n for s, c in zip(self._shape, self._chunks))\n\n @property\n def cdata_shape(self):\n \"\"\"A tuple of integers describing the number of chunks along each\n dimension of the array.\"\"\"\n self._refresh_metadata()\n return self._cdata_shape\n\n @property\n def _nchunks(self):\n return reduce(operator.mul, self._cdata_shape, 1)\n\n @property\n def nchunks(self):\n \"\"\"Total number of chunks.\"\"\"\n self._refresh_metadata()\n return self._nchunks\n\n @property\n def nchunks_initialized(self):\n \"\"\"The number of chunks that have been initialized with some data.\"\"\"\n\n # key pattern for chunk keys\n prog = re.compile(r'\\.'.join([r'\\d+'] * min(1, self.ndim)))\n\n # count chunk keys\n return sum(1 for k in listdir(self.chunk_store, self._path) if prog.match(k))\n\n # backwards compability\n initialized = nchunks_initialized\n\n @property\n def is_view(self):\n \"\"\"A boolean, True if this array is a view on another array.\"\"\"\n return self._is_view\n\n @property\n def oindex(self):\n \"\"\"Shortcut for orthogonal (outer) indexing, see :func:`get_orthogonal_selection` and\n :func:`set_orthogonal_selection` for documentation and examples.\"\"\"\n return self._oindex\n\n @property\n def vindex(self):\n \"\"\"Shortcut for vectorized (inner) indexing, see :func:`get_coordinate_selection`,\n :func:`set_coordinate_selection`, :func:`get_mask_selection` and\n :func:`set_mask_selection` for documentation and examples.\"\"\"\n return self._vindex\n\n def __eq__(self, other):\n return (\n isinstance(other, Array) and\n self.store == other.store and\n self.read_only == other.read_only and\n self.path == other.path and\n not self._is_view\n # N.B., no need to compare other properties, should be covered by\n # store comparison\n )\n\n def __array__(self, *args):\n a = self[...]\n if args:\n a = a.astype(args[0])\n return a\n\n def __iter__(self):\n if len(self.shape) == 0:\n # Same error as numpy\n raise TypeError(\"iteration over a 0-d array\")\n # Avoid repeatedly decompressing chunks by iterating over the chunks\n # in the first dimension.\n chunk_size = self.chunks[0]\n for j in range(self.shape[0]):\n if j % chunk_size == 0:\n chunk = self[j: j + chunk_size]\n yield chunk[j % chunk_size]\n\n def __len__(self):\n if self.shape:\n return self.shape[0]\n else:\n # 0-dimensional array, same error message as numpy\n raise TypeError('len() of unsized object')\n\n def __getitem__(self, selection):\n \"\"\"Retrieve data for an item or region of the array.\n\n Parameters\n ----------\n selection : tuple\n An integer index or slice or tuple of int/slice objects specifying the\n requested item or region for each dimension of the array.\n\n Returns\n -------\n out : ndarray\n A NumPy array containing the data for the requested region.\n\n Examples\n --------\n Setup a 1-dimensional array::\n\n >>> import zarr\n >>> import numpy as np\n >>> z = zarr.array(np.arange(100))\n\n Retrieve a single item::\n\n >>> z[5]\n 5\n\n Retrieve a region via slicing::\n\n >>> z[:5]\n array([0, 1, 2, 3, 4])\n >>> z[-5:]\n array([95, 96, 97, 98, 99])\n >>> z[5:10]\n array([5, 6, 7, 8, 9])\n >>> z[5:10:2]\n array([5, 7, 9])\n >>> 
z[::2]\n array([ 0, 2, 4, ..., 94, 96, 98])\n\n Load the entire array into memory::\n\n >>> z[...]\n array([ 0, 1, 2, ..., 97, 98, 99])\n\n Setup a 2-dimensional array::\n\n >>> z = zarr.array(np.arange(100).reshape(10, 10))\n\n Retrieve an item::\n\n >>> z[2, 2]\n 22\n\n Retrieve a region via slicing::\n\n >>> z[1:3, 1:3]\n array([[11, 12],\n [21, 22]])\n >>> z[1:3, :]\n array([[10, 11, 12, 13, 14, 15, 16, 17, 18, 19],\n [20, 21, 22, 23, 24, 25, 26, 27, 28, 29]])\n >>> z[:, 1:3]\n array([[ 1, 2],\n [11, 12],\n [21, 22],\n [31, 32],\n [41, 42],\n [51, 52],\n [61, 62],\n [71, 72],\n [81, 82],\n [91, 92]])\n >>> z[0:5:2, 0:5:2]\n array([[ 0, 2, 4],\n [20, 22, 24],\n [40, 42, 44]])\n >>> z[::2, ::2]\n array([[ 0, 2, 4, 6, 8],\n [20, 22, 24, 26, 28],\n [40, 42, 44, 46, 48],\n [60, 62, 64, 66, 68],\n [80, 82, 84, 86, 88]])\n\n Load the entire array into memory::\n\n >>> z[...]\n array([[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9],\n [10, 11, 12, 13, 14, 15, 16, 17, 18, 19],\n [20, 21, 22, 23, 24, 25, 26, 27, 28, 29],\n [30, 31, 32, 33, 34, 35, 36, 37, 38, 39],\n [40, 41, 42, 43, 44, 45, 46, 47, 48, 49],\n [50, 51, 52, 53, 54, 55, 56, 57, 58, 59],\n [60, 61, 62, 63, 64, 65, 66, 67, 68, 69],\n [70, 71, 72, 73, 74, 75, 76, 77, 78, 79],\n [80, 81, 82, 83, 84, 85, 86, 87, 88, 89],\n [90, 91, 92, 93, 94, 95, 96, 97, 98, 99]])\n\n For arrays with a structured dtype, specific fields can be retrieved, e.g.::\n\n >>> a = np.array([(b'aaa', 1, 4.2),\n ... (b'bbb', 2, 8.4),\n ... (b'ccc', 3, 12.6)],\n ... dtype=[('foo', 'S3'), ('bar', 'i4'), ('baz', 'f8')])\n >>> z = zarr.array(a)\n >>> z['foo']\n array([b'aaa', b'bbb', b'ccc'],\n dtype='|S3')\n\n Notes\n -----\n Slices with step > 1 are supported, but slices with negative step are not.\n\n Currently the implementation for __getitem__ is provided by\n :func:`get_basic_selection`. For advanced (\"fancy\") indexing, see the methods\n listed under See Also.\n\n See Also\n --------\n get_basic_selection, set_basic_selection, get_mask_selection, set_mask_selection,\n get_coordinate_selection, set_coordinate_selection, get_orthogonal_selection,\n set_orthogonal_selection, vindex, oindex, __setitem__\n\n \"\"\"\n\n fields, selection = pop_fields(selection)\n return self.get_basic_selection(selection, fields=fields)\n\n def get_basic_selection(self, selection=Ellipsis, out=None, fields=None):\n \"\"\"Retrieve data for an item or region of the array.\n\n Parameters\n ----------\n selection : tuple\n A tuple specifying the requested item or region for each dimension of the\n array. 
May be any combination of int and/or slice for multidimensional arrays.\n out : ndarray, optional\n If given, load the selected data directly into this array.\n fields : str or sequence of str, optional\n For arrays with a structured dtype, one or more fields can be specified to\n extract data for.\n\n Returns\n -------\n out : ndarray\n A NumPy array containing the data for the requested region.\n\n Examples\n --------\n Setup a 1-dimensional array::\n\n >>> import zarr\n >>> import numpy as np\n >>> z = zarr.array(np.arange(100))\n\n Retrieve a single item::\n\n >>> z.get_basic_selection(5)\n 5\n\n Retrieve a region via slicing::\n\n >>> z.get_basic_selection(slice(5))\n array([0, 1, 2, 3, 4])\n >>> z.get_basic_selection(slice(-5, None))\n array([95, 96, 97, 98, 99])\n >>> z.get_basic_selection(slice(5, 10))\n array([5, 6, 7, 8, 9])\n >>> z.get_basic_selection(slice(5, 10, 2))\n array([5, 7, 9])\n >>> z.get_basic_selection(slice(None, None, 2))\n array([ 0, 2, 4, ..., 94, 96, 98])\n\n Setup a 2-dimensional array::\n\n >>> z = zarr.array(np.arange(100).reshape(10, 10))\n\n Retrieve an item::\n\n >>> z.get_basic_selection((2, 2))\n 22\n\n Retrieve a region via slicing::\n\n >>> z.get_basic_selection((slice(1, 3), slice(1, 3)))\n array([[11, 12],\n [21, 22]])\n >>> z.get_basic_selection((slice(1, 3), slice(None)))\n array([[10, 11, 12, 13, 14, 15, 16, 17, 18, 19],\n [20, 21, 22, 23, 24, 25, 26, 27, 28, 29]])\n >>> z.get_basic_selection((slice(None), slice(1, 3)))\n array([[ 1, 2],\n [11, 12],\n [21, 22],\n [31, 32],\n [41, 42],\n [51, 52],\n [61, 62],\n [71, 72],\n [81, 82],\n [91, 92]])\n >>> z.get_basic_selection((slice(0, 5, 2), slice(0, 5, 2)))\n array([[ 0, 2, 4],\n [20, 22, 24],\n [40, 42, 44]])\n >>> z.get_basic_selection((slice(None, None, 2), slice(None, None, 2)))\n array([[ 0, 2, 4, 6, 8],\n [20, 22, 24, 26, 28],\n [40, 42, 44, 46, 48],\n [60, 62, 64, 66, 68],\n [80, 82, 84, 86, 88]])\n\n For arrays with a structured dtype, specific fields can be retrieved, e.g.::\n\n >>> a = np.array([(b'aaa', 1, 4.2),\n ... (b'bbb', 2, 8.4),\n ... (b'ccc', 3, 12.6)],\n ... dtype=[('foo', 'S3'), ('bar', 'i4'), ('baz', 'f8')])\n >>> z = zarr.array(a)\n >>> z.get_basic_selection(slice(2), fields='foo')\n array([b'aaa', b'bbb'],\n dtype='|S3')\n\n Notes\n -----\n Slices with step > 1 are supported, but slices with negative step are not.\n\n Currently this method provides the implementation for accessing data via the\n square bracket notation (__getitem__). 
See :func:`__getitem__` for examples\n using the alternative notation.\n\n See Also\n --------\n set_basic_selection, get_mask_selection, set_mask_selection,\n get_coordinate_selection, set_coordinate_selection, get_orthogonal_selection,\n set_orthogonal_selection, vindex, oindex, __getitem__, __setitem__\n\n \"\"\"\n\n # refresh metadata\n if not self._cache_metadata:\n self._load_metadata()\n\n # check args\n check_fields(fields, self._dtype)\n\n # handle zero-dimensional arrays\n if self._shape == ():\n return self._get_basic_selection_zd(selection=selection, out=out,\n fields=fields)\n else:\n return self._get_basic_selection_nd(selection=selection, out=out,\n fields=fields)\n\n def _get_basic_selection_zd(self, selection, out=None, fields=None):\n # special case basic selection for zero-dimensional array\n\n # check selection is valid\n selection = ensure_tuple(selection)\n if selection not in ((), (Ellipsis,)):\n err_too_many_indices(selection, ())\n\n try:\n # obtain encoded data for chunk\n ckey = self._chunk_key((0,))\n cdata = self.chunk_store[ckey]\n\n except KeyError:\n # chunk not initialized\n chunk = np.zeros((), dtype=self._dtype)\n if self._fill_value is not None:\n chunk.fill(self._fill_value)\n\n else:\n chunk = self._decode_chunk(cdata)\n\n # handle fields\n if fields:\n chunk = chunk[fields]\n\n # handle selection of the scalar value via empty tuple\n if out is None:\n out = chunk[selection]\n else:\n out[selection] = chunk[selection]\n\n return out\n\n def _get_basic_selection_nd(self, selection, out=None, fields=None):\n # implementation of basic selection for array with at least one dimension\n\n # setup indexer\n indexer = BasicIndexer(selection, self)\n\n return self._get_selection(indexer=indexer, out=out, fields=fields)\n\n def get_orthogonal_selection(self, selection, out=None, fields=None):\n \"\"\"Retrieve data by making a selection for each dimension of the array. For\n example, if an array has 2 dimensions, allows selecting specific rows and/or\n columns. The selection for each dimension can be either an integer (indexing a\n single item), a slice, an array of integers, or a Boolean array where True\n values indicate a selection.\n\n Parameters\n ----------\n selection : tuple\n A selection for each dimension of the array. 
May be any combination of int,\n slice, integer array or Boolean array.\n out : ndarray, optional\n If given, load the selected data directly into this array.\n fields : str or sequence of str, optional\n For arrays with a structured dtype, one or more fields can be specified to\n extract data for.\n\n Returns\n -------\n out : ndarray\n A NumPy array containing the data for the requested selection.\n\n Examples\n --------\n Setup a 2-dimensional array::\n\n >>> import zarr\n >>> import numpy as np\n >>> z = zarr.array(np.arange(100).reshape(10, 10))\n\n Retrieve rows and columns via any combination of int, slice, integer array and/or\n Boolean array::\n\n >>> z.get_orthogonal_selection(([1, 4], slice(None)))\n array([[10, 11, 12, 13, 14, 15, 16, 17, 18, 19],\n [40, 41, 42, 43, 44, 45, 46, 47, 48, 49]])\n >>> z.get_orthogonal_selection((slice(None), [1, 4]))\n array([[ 1, 4],\n [11, 14],\n [21, 24],\n [31, 34],\n [41, 44],\n [51, 54],\n [61, 64],\n [71, 74],\n [81, 84],\n [91, 94]])\n >>> z.get_orthogonal_selection(([1, 4], [1, 4]))\n array([[11, 14],\n [41, 44]])\n >>> sel = np.zeros(z.shape[0], dtype=bool)\n >>> sel[1] = True\n >>> sel[4] = True\n >>> z.get_orthogonal_selection((sel, sel))\n array([[11, 14],\n [41, 44]])\n\n For convenience, the orthogonal selection functionality is also available via the\n `oindex` property, e.g.::\n\n >>> z.oindex[[1, 4], :]\n array([[10, 11, 12, 13, 14, 15, 16, 17, 18, 19],\n [40, 41, 42, 43, 44, 45, 46, 47, 48, 49]])\n >>> z.oindex[:, [1, 4]]\n array([[ 1, 4],\n [11, 14],\n [21, 24],\n [31, 34],\n [41, 44],\n [51, 54],\n [61, 64],\n [71, 74],\n [81, 84],\n [91, 94]])\n >>> z.oindex[[1, 4], [1, 4]]\n array([[11, 14],\n [41, 44]])\n >>> sel = np.zeros(z.shape[0], dtype=bool)\n >>> sel[1] = True\n >>> sel[4] = True\n >>> z.oindex[sel, sel]\n array([[11, 14],\n [41, 44]])\n\n Notes\n -----\n Orthogonal indexing is also known as outer indexing.\n\n Slices with step > 1 are supported, but slices with negative step are not.\n\n See Also\n --------\n get_basic_selection, set_basic_selection, get_mask_selection, set_mask_selection,\n get_coordinate_selection, set_coordinate_selection, set_orthogonal_selection,\n vindex, oindex, __getitem__, __setitem__\n\n \"\"\"\n\n # refresh metadata\n if not self._cache_metadata:\n self._load_metadata()\n\n # check args\n check_fields(fields, self._dtype)\n\n # setup indexer\n indexer = OrthogonalIndexer(selection, self)\n\n return self._get_selection(indexer=indexer, out=out, fields=fields)\n\n def get_coordinate_selection(self, selection, out=None, fields=None):\n \"\"\"Retrieve a selection of individual items, by providing the indices\n (coordinates) for each selected item.\n\n Parameters\n ----------\n selection : tuple\n An integer (coordinate) array for each dimension of the array.\n out : ndarray, optional\n If given, load the selected data directly into this array.\n fields : str or sequence of str, optional\n For arrays with a structured dtype, one or more fields can be specified to\n extract data for.\n\n Returns\n -------\n out : ndarray\n A NumPy array containing the data for the requested selection.\n\n Examples\n --------\n Setup a 2-dimensional array::\n\n >>> import zarr\n >>> import numpy as np\n >>> z = zarr.array(np.arange(100).reshape(10, 10))\n\n Retrieve items by specifying their coordinates::\n\n >>> z.get_coordinate_selection(([1, 4], [1, 4]))\n array([11, 44])\n\n For convenience, the coordinate selection functionality is also available via the\n `vindex` property, e.g.::\n\n >>> z.vindex[[1, 4], 
[1, 4]]\n array([11, 44])\n\n Notes\n -----\n Coordinate indexing is also known as point selection, and is a form of vectorized\n or inner indexing.\n\n Slices are not supported. Coordinate arrays must be provided for all dimensions\n of the array.\n\n Coordinate arrays may be multidimensional, in which case the output array will\n also be multidimensional. Coordinate arrays are broadcast against each other\n before being applied. The shape of the output will be the same as the shape of\n each coordinate array after broadcasting.\n\n See Also\n --------\n get_basic_selection, set_basic_selection, get_mask_selection, set_mask_selection,\n get_orthogonal_selection, set_orthogonal_selection, set_coordinate_selection,\n vindex, oindex, __getitem__, __setitem__\n\n \"\"\"\n\n # refresh metadata\n if not self._cache_metadata:\n self._load_metadata()\n\n # check args\n check_fields(fields, self._dtype)\n\n # setup indexer\n indexer = CoordinateIndexer(selection, self)\n\n # handle output - need to flatten\n if out is not None:\n out = out.reshape(-1)\n\n out = self._get_selection(indexer=indexer, out=out, fields=fields)\n\n # restore shape\n out = out.reshape(indexer.sel_shape)\n\n return out\n\n def get_mask_selection(self, selection, out=None, fields=None):\n \"\"\"Retrieve a selection of individual items, by providing a Boolean array of the\n same shape as the array against which the selection is being made, where True\n values indicate a selected item.\n\n Parameters\n ----------\n selection : ndarray, bool\n A Boolean array of the same shape as the array against which the selection is\n being made.\n out : ndarray, optional\n If given, load the selected data directly into this array.\n fields : str or sequence of str, optional\n For arrays with a structured dtype, one or more fields can be specified to\n extract data for.\n\n Returns\n -------\n out : ndarray\n A NumPy array containing the data for the requested selection.\n\n Examples\n --------\n Setup a 2-dimensional array::\n\n >>> import zarr\n >>> import numpy as np\n >>> z = zarr.array(np.arange(100).reshape(10, 10))\n\n Retrieve items by specifying a maks::\n\n >>> sel = np.zeros_like(z, dtype=bool)\n >>> sel[1, 1] = True\n >>> sel[4, 4] = True\n >>> z.get_mask_selection(sel)\n array([11, 44])\n\n For convenience, the mask selection functionality is also available via the\n `vindex` property, e.g.::\n\n >>> z.vindex[sel]\n array([11, 44])\n\n Notes\n -----\n Mask indexing is a form of vectorized or inner indexing, and is equivalent to\n coordinate indexing. Internally the mask array is converted to coordinate\n arrays by calling `np.nonzero`.\n\n See Also\n --------\n get_basic_selection, set_basic_selection, set_mask_selection,\n get_orthogonal_selection, set_orthogonal_selection, get_coordinate_selection,\n set_coordinate_selection, vindex, oindex, __getitem__, __setitem__\n\n \"\"\"\n\n # refresh metadata\n if not self._cache_metadata:\n self._load_metadata()\n\n # check args\n check_fields(fields, self._dtype)\n\n # setup indexer\n indexer = MaskIndexer(selection, self)\n\n return self._get_selection(indexer=indexer, out=out, fields=fields)\n\n def _get_selection(self, indexer, out=None, fields=None):\n\n # We iterate over all chunks which overlap the selection and thus contain data\n # that needs to be extracted. 
Each chunk is processed in turn, extracting the\n # necessary data and storing into the correct location in the output array.\n\n # N.B., it is an important optimisation that we only visit chunks which overlap\n # the selection. This minimises the number of iterations in the main for loop.\n\n # check fields are sensible\n out_dtype = check_fields(fields, self._dtype)\n\n # determine output shape\n out_shape = indexer.shape\n\n # setup output array\n if out is None:\n out = np.empty(out_shape, dtype=out_dtype, order=self._order)\n else:\n check_array_shape('out', out, out_shape)\n\n # iterate over chunks\n for chunk_coords, chunk_selection, out_selection in indexer:\n\n # load chunk selection into output array\n self._chunk_getitem(chunk_coords, chunk_selection, out, out_selection,\n drop_axes=indexer.drop_axes, fields=fields)\n\n if out.shape:\n return out\n else:\n return out[()]\n\n def __setitem__(self, selection, value):\n \"\"\"Modify data for an item or region of the array.\n\n Parameters\n ----------\n selection : tuple\n An integer index or slice or tuple of int/slice specifying the requested\n region for each dimension of the array.\n value : scalar or array-like\n Value to be stored into the array.\n\n Examples\n --------\n Setup a 1-dimensional array::\n\n >>> import zarr\n >>> z = zarr.zeros(100, dtype=int)\n\n Set all array elements to the same scalar value::\n\n >>> z[...] = 42\n >>> z[...]\n array([42, 42, 42, ..., 42, 42, 42])\n\n Set a portion of the array::\n\n >>> z[:10] = np.arange(10)\n >>> z[-10:] = np.arange(10)[::-1]\n >>> z[...]\n array([ 0, 1, 2, ..., 2, 1, 0])\n\n Setup a 2-dimensional array::\n\n >>> z = zarr.zeros((5, 5), dtype=int)\n\n Set all array elements to the same scalar value::\n\n >>> z[...] = 42\n\n Set a portion of the array::\n\n >>> z[0, :] = np.arange(z.shape[1])\n >>> z[:, 0] = np.arange(z.shape[0])\n >>> z[...]\n array([[ 0, 1, 2, 3, 4],\n [ 1, 42, 42, 42, 42],\n [ 2, 42, 42, 42, 42],\n [ 3, 42, 42, 42, 42],\n [ 4, 42, 42, 42, 42]])\n\n For arrays with a structured dtype, specific fields can be modified, e.g.::\n\n >>> a = np.array([(b'aaa', 1, 4.2),\n ... (b'bbb', 2, 8.4),\n ... (b'ccc', 3, 12.6)],\n ... dtype=[('foo', 'S3'), ('bar', 'i4'), ('baz', 'f8')])\n >>> z = zarr.array(a)\n >>> z['foo'] = b'zzz'\n >>> z[...]\n array([(b'zzz', 1, 4.2), (b'zzz', 2, 8.4), (b'zzz', 3, 12.6)],\n dtype=[('foo', 'S3'), ('bar', '<i4'), ('baz', '<f8')])\n\n Notes\n -----\n Slices with step > 1 are supported, but slices with negative step are not.\n\n Currently the implementation for __setitem__ is provided by\n :func:`set_basic_selection`, which means that only integers and slices are\n supported within the selection. 
For advanced (\"fancy\") indexing, see the\n methods listed under See Also.\n\n See Also\n --------\n get_basic_selection, set_basic_selection, get_mask_selection, set_mask_selection,\n get_coordinate_selection, set_coordinate_selection, get_orthogonal_selection,\n set_orthogonal_selection, vindex, oindex, __getitem__\n\n \"\"\"\n\n fields, selection = pop_fields(selection)\n self.set_basic_selection(selection, value, fields=fields)\n\n def set_basic_selection(self, selection, value, fields=None):\n \"\"\"Modify data for an item or region of the array.\n\n Parameters\n ----------\n selection : tuple\n An integer index or slice or tuple of int/slice specifying the requested\n region for each dimension of the array.\n value : scalar or array-like\n Value to be stored into the array.\n fields : str or sequence of str, optional\n For arrays with a structured dtype, one or more fields can be specified to set\n data for.\n\n Examples\n --------\n Setup a 1-dimensional array::\n\n >>> import zarr\n >>> import numpy as np\n >>> z = zarr.zeros(100, dtype=int)\n\n Set all array elements to the same scalar value::\n\n >>> z.set_basic_selection(..., 42)\n >>> z[...]\n array([42, 42, 42, ..., 42, 42, 42])\n\n Set a portion of the array::\n\n >>> z.set_basic_selection(slice(10), np.arange(10))\n >>> z.set_basic_selection(slice(-10, None), np.arange(10)[::-1])\n >>> z[...]\n array([ 0, 1, 2, ..., 2, 1, 0])\n\n Setup a 2-dimensional array::\n\n >>> z = zarr.zeros((5, 5), dtype=int)\n\n Set all array elements to the same scalar value::\n\n >>> z.set_basic_selection(..., 42)\n\n Set a portion of the array::\n\n >>> z.set_basic_selection((0, slice(None)), np.arange(z.shape[1]))\n >>> z.set_basic_selection((slice(None), 0), np.arange(z.shape[0]))\n >>> z[...]\n array([[ 0, 1, 2, 3, 4],\n [ 1, 42, 42, 42, 42],\n [ 2, 42, 42, 42, 42],\n [ 3, 42, 42, 42, 42],\n [ 4, 42, 42, 42, 42]])\n\n For arrays with a structured dtype, the `fields` parameter can be used to set\n data for a specific field, e.g.::\n\n >>> a = np.array([(b'aaa', 1, 4.2),\n ... (b'bbb', 2, 8.4),\n ... (b'ccc', 3, 12.6)],\n ... dtype=[('foo', 'S3'), ('bar', 'i4'), ('baz', 'f8')])\n >>> z = zarr.array(a)\n >>> z.set_basic_selection(slice(0, 2), b'zzz', fields='foo')\n >>> z[:]\n array([(b'zzz', 1, 4.2), (b'zzz', 2, 8.4), (b'ccc', 3, 12.6)],\n dtype=[('foo', 'S3'), ('bar', '<i4'), ('baz', '<f8')])\n\n Notes\n -----\n This method provides the underlying implementation for modifying data via square\n bracket notation, see :func:`__setitem__` for equivalent examples using the\n alternative notation.\n\n See Also\n --------\n get_basic_selection, get_mask_selection, set_mask_selection,\n get_coordinate_selection, set_coordinate_selection, get_orthogonal_selection,\n set_orthogonal_selection, vindex, oindex, __getitem__, __setitem__\n\n \"\"\"\n\n # guard conditions\n if self._read_only:\n raise ReadOnlyError()\n\n # refresh metadata\n if not self._cache_metadata:\n self._load_metadata_nosync()\n\n # handle zero-dimensional arrays\n if self._shape == ():\n return self._set_basic_selection_zd(selection, value, fields=fields)\n else:\n return self._set_basic_selection_nd(selection, value, fields=fields)\n\n def set_orthogonal_selection(self, selection, value, fields=None):\n \"\"\"Modify data via a selection for each dimension of the array.\n\n Parameters\n ----------\n selection : tuple\n A selection for each dimension of the array. 
May be any combination of int,\n slice, integer array or Boolean array.\n value : scalar or array-like\n Value to be stored into the array.\n fields : str or sequence of str, optional\n For arrays with a structured dtype, one or more fields can be specified to set\n data for.\n\n Examples\n --------\n Setup a 2-dimensional array::\n\n >>> import zarr\n >>> import numpy as np\n >>> z = zarr.zeros((5, 5), dtype=int)\n\n Set data for a selection of rows::\n\n >>> z.set_orthogonal_selection(([1, 4], slice(None)), 1)\n >>> z[...]\n array([[0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1]])\n\n Set data for a selection of columns::\n\n >>> z.set_orthogonal_selection((slice(None), [1, 4]), 2)\n >>> z[...]\n array([[0, 2, 0, 0, 2],\n [1, 2, 1, 1, 2],\n [0, 2, 0, 0, 2],\n [0, 2, 0, 0, 2],\n [1, 2, 1, 1, 2]])\n\n Set data for a selection of rows and columns::\n\n >>> z.set_orthogonal_selection(([1, 4], [1, 4]), 3)\n >>> z[...]\n array([[0, 2, 0, 0, 2],\n [1, 3, 1, 1, 3],\n [0, 2, 0, 0, 2],\n [0, 2, 0, 0, 2],\n [1, 3, 1, 1, 3]])\n\n For convenience, this functionality is also available via the `oindex` property.\n E.g.::\n\n >>> z.oindex[[1, 4], [1, 4]] = 4\n >>> z[...]\n array([[0, 2, 0, 0, 2],\n [1, 4, 1, 1, 4],\n [0, 2, 0, 0, 2],\n [0, 2, 0, 0, 2],\n [1, 4, 1, 1, 4]])\n\n Notes\n -----\n Orthogonal indexing is also known as outer indexing.\n\n Slices with step > 1 are supported, but slices with negative step are not.\n\n See Also\n --------\n get_basic_selection, set_basic_selection, get_mask_selection, set_mask_selection,\n get_coordinate_selection, set_coordinate_selection, get_orthogonal_selection,\n vindex, oindex, __getitem__, __setitem__\n\n \"\"\"\n\n # guard conditions\n if self._read_only:\n raise ReadOnlyError()\n\n # refresh metadata\n if not self._cache_metadata:\n self._load_metadata_nosync()\n\n # setup indexer\n indexer = OrthogonalIndexer(selection, self)\n\n self._set_selection(indexer, value, fields=fields)\n\n def set_coordinate_selection(self, selection, value, fields=None):\n \"\"\"Modify a selection of individual items, by providing the indices (coordinates)\n for each item to be modified.\n\n Parameters\n ----------\n selection : tuple\n An integer (coordinate) array for each dimension of the array.\n value : scalar or array-like\n Value to be stored into the array.\n fields : str or sequence of str, optional\n For arrays with a structured dtype, one or more fields can be specified to set\n data for.\n\n Examples\n --------\n Setup a 2-dimensional array::\n\n >>> import zarr\n >>> import numpy as np\n >>> z = zarr.zeros((5, 5), dtype=int)\n\n Set data for a selection of items::\n\n >>> z.set_coordinate_selection(([1, 4], [1, 4]), 1)\n >>> z[...]\n array([[0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1]])\n\n For convenience, this functionality is also available via the `vindex` property.\n E.g.::\n\n >>> z.vindex[[1, 4], [1, 4]] = 2\n >>> z[...]\n array([[0, 0, 0, 0, 0],\n [0, 2, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 2]])\n\n Notes\n -----\n Coordinate indexing is also known as point selection, and is a form of vectorized\n or inner indexing.\n\n Slices are not supported. 
Coordinate arrays must be provided for all dimensions\n of the array.\n\n See Also\n --------\n get_basic_selection, set_basic_selection, get_mask_selection, set_mask_selection,\n get_orthogonal_selection, set_orthogonal_selection, get_coordinate_selection,\n vindex, oindex, __getitem__, __setitem__\n\n \"\"\"\n\n # guard conditions\n if self._read_only:\n raise ReadOnlyError()\n\n # refresh metadata\n if not self._cache_metadata:\n self._load_metadata_nosync()\n\n # setup indexer\n indexer = CoordinateIndexer(selection, self)\n\n # handle value - need to flatten\n if not is_scalar(value, self._dtype):\n value = np.asanyarray(value)\n if hasattr(value, 'shape') and len(value.shape) > 1:\n value = value.reshape(-1)\n\n self._set_selection(indexer, value, fields=fields)\n\n def set_mask_selection(self, selection, value, fields=None):\n \"\"\"Modify a selection of individual items, by providing a Boolean array of the\n same shape as the array against which the selection is being made, where True\n values indicate a selected item.\n\n Parameters\n ----------\n selection : ndarray, bool\n A Boolean array of the same shape as the array against which the selection is\n being made.\n value : scalar or array-like\n Value to be stored into the array.\n fields : str or sequence of str, optional\n For arrays with a structured dtype, one or more fields can be specified to set\n data for.\n\n Examples\n --------\n Setup a 2-dimensional array::\n\n >>> import zarr\n >>> import numpy as np\n >>> z = zarr.zeros((5, 5), dtype=int)\n\n Set data for a selection of items::\n\n >>> sel = np.zeros_like(z, dtype=bool)\n >>> sel[1, 1] = True\n >>> sel[4, 4] = True\n >>> z.set_mask_selection(sel, 1)\n >>> z[...]\n array([[0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1]])\n\n For convenience, this functionality is also available via the `vindex` property.\n E.g.::\n\n >>> z.vindex[sel] = 2\n >>> z[...]\n array([[0, 0, 0, 0, 0],\n [0, 2, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 2]])\n\n Notes\n -----\n Mask indexing is a form of vectorized or inner indexing, and is equivalent to\n coordinate indexing. 
Internally the mask array is converted to coordinate\n arrays by calling `np.nonzero`.\n\n See Also\n --------\n get_basic_selection, set_basic_selection, get_mask_selection,\n get_orthogonal_selection, set_orthogonal_selection, get_coordinate_selection,\n set_coordinate_selection, vindex, oindex, __getitem__, __setitem__\n\n \"\"\"\n\n # guard conditions\n if self._read_only:\n raise ReadOnlyError()\n\n # refresh metadata\n if not self._cache_metadata:\n self._load_metadata_nosync()\n\n # setup indexer\n indexer = MaskIndexer(selection, self)\n\n self._set_selection(indexer, value, fields=fields)\n\n def _set_basic_selection_zd(self, selection, value, fields=None):\n # special case __setitem__ for zero-dimensional array\n\n # check selection is valid\n selection = ensure_tuple(selection)\n if selection not in ((), (Ellipsis,)):\n err_too_many_indices(selection, self._shape)\n\n # check fields\n check_fields(fields, self._dtype)\n fields = check_no_multi_fields(fields)\n\n # obtain key for chunk\n ckey = self._chunk_key((0,))\n\n # setup chunk\n try:\n # obtain compressed data for chunk\n cdata = self.chunk_store[ckey]\n\n except KeyError:\n # chunk not initialized\n chunk = np.zeros((), dtype=self._dtype)\n if self._fill_value is not None:\n chunk.fill(self._fill_value)\n\n else:\n # decode chunk\n chunk = self._decode_chunk(cdata).copy()\n\n # set value\n if fields:\n chunk[fields][selection] = value\n else:\n chunk[selection] = value\n\n # encode and store\n cdata = self._encode_chunk(chunk)\n self.chunk_store[ckey] = cdata\n\n def _set_basic_selection_nd(self, selection, value, fields=None):\n # implementation of __setitem__ for array with at least one dimension\n\n # setup indexer\n indexer = BasicIndexer(selection, self)\n\n self._set_selection(indexer, value, fields=fields)\n\n def _set_selection(self, indexer, value, fields=None):\n\n # We iterate over all chunks which overlap the selection and thus contain data\n # that needs to be replaced. Each chunk is processed in turn, extracting the\n # necessary data from the value array and storing into the chunk array.\n\n # N.B., it is an important optimisation that we only visit chunks which overlap\n # the selection. 
This minimises the number of iterations in the main for loop.\n\n # check fields are sensible\n check_fields(fields, self._dtype)\n fields = check_no_multi_fields(fields)\n\n # determine indices of chunks overlapping the selection\n sel_shape = indexer.shape\n\n # check value shape\n if sel_shape == ():\n # setting a single item\n pass\n elif is_scalar(value, self._dtype):\n # setting a scalar value\n pass\n else:\n if not hasattr(value, 'shape'):\n value = np.asanyarray(value)\n check_array_shape('value', value, sel_shape)\n\n # iterate over chunks in range\n for chunk_coords, chunk_selection, out_selection in indexer:\n\n # extract data to store\n if sel_shape == ():\n chunk_value = value\n elif is_scalar(value, self._dtype):\n chunk_value = value\n else:\n chunk_value = value[out_selection]\n # handle missing singleton dimensions\n if indexer.drop_axes:\n item = [slice(None)] * self.ndim\n for a in indexer.drop_axes:\n item[a] = np.newaxis\n item = tuple(item)\n chunk_value = chunk_value[item]\n\n # put data\n self._chunk_setitem(chunk_coords, chunk_selection, chunk_value, fields=fields)\n\n def _chunk_getitem(self, chunk_coords, chunk_selection, out, out_selection,\n drop_axes=None, fields=None):\n \"\"\"Obtain part or whole of a chunk.\n\n Parameters\n ----------\n chunk_coords : tuple of ints\n Indices of the chunk.\n chunk_selection : selection\n Location of region within the chunk to extract.\n out : ndarray\n Array to store result in.\n out_selection : selection\n Location of region within output array to store results in.\n drop_axes : tuple of ints\n Axes to squeeze out of the chunk.\n fields\n TODO\n\n \"\"\"\n\n assert len(chunk_coords) == len(self._cdata_shape)\n\n out_is_ndarray = True\n try:\n out = ensure_ndarray(out)\n except TypeError:\n out_is_ndarray = False\n\n # obtain key for chunk\n ckey = self._chunk_key(chunk_coords)\n\n try:\n # obtain compressed data for chunk\n cdata = self.chunk_store[ckey]\n\n except KeyError:\n # chunk not initialized\n if self._fill_value is not None:\n if fields:\n fill_value = self._fill_value[fields]\n else:\n fill_value = self._fill_value\n out[out_selection] = fill_value\n\n else:\n\n if (out_is_ndarray and\n not fields and\n is_contiguous_selection(out_selection) and\n is_total_slice(chunk_selection, self._chunks) and\n not self._filters and\n self._dtype != object):\n\n dest = out[out_selection]\n write_direct = (\n dest.flags.writeable and (\n (self._order == 'C' and dest.flags.c_contiguous) or\n (self._order == 'F' and dest.flags.f_contiguous)\n )\n )\n\n if write_direct:\n\n # optimization: we want the whole chunk, and the destination is\n # contiguous, so we can decompress directly from the chunk\n # into the destination array\n\n if self._compressor:\n self._compressor.decode(cdata, dest)\n else:\n chunk = ensure_ndarray(cdata).view(self._dtype)\n chunk = chunk.reshape(self._chunks, order=self._order)\n np.copyto(dest, chunk)\n return\n\n # decode chunk\n chunk = self._decode_chunk(cdata)\n\n # select data from chunk\n if fields:\n chunk = chunk[fields]\n tmp = chunk[chunk_selection]\n if drop_axes:\n tmp = np.squeeze(tmp, axis=drop_axes)\n\n # store selected data in output\n out[out_selection] = tmp\n\n def _chunk_setitem(self, chunk_coords, chunk_selection, value, fields=None):\n \"\"\"Replace part or whole of a chunk.\n\n Parameters\n ----------\n chunk_coords : tuple of ints\n Indices of the chunk.\n chunk_selection : tuple of slices\n Location of region within the chunk.\n value : scalar or ndarray\n Value to set.\n\n 
\"\"\"\n\n if self._synchronizer is None:\n # no synchronization\n lock = nolock\n else:\n # synchronize on the chunk\n ckey = self._chunk_key(chunk_coords)\n lock = self._synchronizer[ckey]\n\n with lock:\n self._chunk_setitem_nosync(chunk_coords, chunk_selection, value,\n fields=fields)\n\n def _chunk_setitem_nosync(self, chunk_coords, chunk_selection, value, fields=None):\n\n # obtain key for chunk storage\n ckey = self._chunk_key(chunk_coords)\n\n if is_total_slice(chunk_selection, self._chunks) and not fields:\n # totally replace chunk\n\n # optimization: we are completely replacing the chunk, so no need\n # to access the existing chunk data\n\n if is_scalar(value, self._dtype):\n\n # setup array filled with value\n chunk = np.empty(self._chunks, dtype=self._dtype, order=self._order)\n chunk.fill(value)\n\n else:\n\n # ensure array is contiguous\n chunk = value.astype(self._dtype, order=self._order, copy=False)\n\n else:\n # partially replace the contents of this chunk\n\n try:\n\n # obtain compressed data for chunk\n cdata = self.chunk_store[ckey]\n\n except KeyError:\n\n # chunk not initialized\n if self._fill_value is not None:\n chunk = np.empty(self._chunks, dtype=self._dtype, order=self._order)\n chunk.fill(self._fill_value)\n elif self._dtype == object:\n chunk = np.empty(self._chunks, dtype=self._dtype, order=self._order)\n else:\n # N.B., use zeros here so any region beyond the array has consistent\n # and compressible data\n chunk = np.zeros(self._chunks, dtype=self._dtype, order=self._order)\n\n else:\n\n # decode chunk\n chunk = self._decode_chunk(cdata)\n if not chunk.flags.writeable:\n chunk = chunk.copy(order='K')\n\n # modify\n if fields:\n # N.B., currently multi-field assignment is not supported in numpy, so\n # this only works for a single field\n chunk[fields][chunk_selection] = value\n else:\n chunk[chunk_selection] = value\n\n # encode chunk\n cdata = self._encode_chunk(chunk)\n\n # store\n self.chunk_store[ckey] = cdata\n\n def _chunk_key(self, chunk_coords):\n return self._key_prefix + '.'.join(map(str, chunk_coords))\n\n def _decode_chunk(self, cdata):\n\n # decompress\n if self._compressor:\n chunk = self._compressor.decode(cdata)\n else:\n chunk = cdata\n\n # apply filters\n if self._filters:\n for f in reversed(self._filters):\n chunk = f.decode(chunk)\n\n # view as numpy array with correct dtype\n chunk = ensure_ndarray(chunk)\n # special case object dtype, because incorrect handling can lead to\n # segfaults and other bad things happening\n if self._dtype != object:\n chunk = chunk.view(self._dtype)\n elif chunk.dtype != object:\n # If we end up here, someone must have hacked around with the filters.\n # We cannot deal with object arrays unless there is an object\n # codec in the filter chain, i.e., a filter that converts from object\n # array to something else during encoding, and converts back to object\n # array during decoding.\n raise RuntimeError('cannot read object array without object codec')\n\n # ensure correct chunk shape\n chunk = chunk.reshape(-1, order='A')\n chunk = chunk.reshape(self._chunks, order=self._order)\n\n return chunk\n\n def _encode_chunk(self, chunk):\n\n # apply filters\n if self._filters:\n for f in self._filters:\n chunk = f.encode(chunk)\n\n # check object encoding\n if ensure_ndarray(chunk).dtype == object:\n raise RuntimeError('cannot write object array without object codec')\n\n # compress\n if self._compressor:\n cdata = self._compressor.encode(chunk)\n else:\n cdata = chunk\n\n # ensure in-memory data is immutable and 
easy to compare\n if isinstance(self.chunk_store, dict):\n cdata = ensure_bytes(cdata)\n\n return cdata\n\n def __repr__(self):\n t = type(self)\n r = '<{}.{}'.format(t.__module__, t.__name__)\n if self.name:\n r += ' %r' % self.name\n r += ' %s' % str(self.shape)\n r += ' %s' % self.dtype\n if self._read_only:\n r += ' read-only'\n r += '>'\n return r\n\n @property\n def info(self):\n \"\"\"Report some diagnostic information about the array.\n\n Examples\n --------\n >>> import zarr\n >>> z = zarr.zeros(1000000, chunks=100000, dtype='i4')\n >>> z.info\n Type : zarr.core.Array\n Data type : int32\n Shape : (1000000,)\n Chunk shape : (100000,)\n Order : C\n Read-only : False\n Compressor : Blosc(cname='lz4', clevel=5, shuffle=SHUFFLE, blocksize=0)\n Store type : builtins.dict\n No. bytes : 4000000 (3.8M)\n No. bytes stored : ...\n Storage ratio : ...\n Chunks initialized : 0/10\n\n \"\"\"\n return self._info_reporter\n\n def info_items(self):\n return self._synchronized_op(self._info_items_nosync)\n\n def _info_items_nosync(self):\n\n def typestr(o):\n return '{}.{}'.format(type(o).__module__, type(o).__name__)\n\n def bytestr(n):\n if n > 2**10:\n return '{} ({})'.format(n, human_readable_size(n))\n else:\n return str(n)\n\n items = []\n\n # basic info\n if self.name is not None:\n items += [('Name', self.name)]\n items += [\n ('Type', typestr(self)),\n ('Data type', '%s' % self.dtype),\n ('Shape', str(self.shape)),\n ('Chunk shape', str(self.chunks)),\n ('Order', self.order),\n ('Read-only', str(self.read_only)),\n ]\n\n # filters\n if self.filters:\n for i, f in enumerate(self.filters):\n items += [('Filter [%s]' % i, repr(f))]\n\n # compressor\n items += [('Compressor', repr(self.compressor))]\n\n # synchronizer\n if self._synchronizer is not None:\n items += [('Synchronizer type', typestr(self._synchronizer))]\n\n # storage info\n items += [('Store type', typestr(self._store))]\n if self._chunk_store is not None:\n items += [('Chunk store type', typestr(self._chunk_store))]\n items += [('No. bytes', bytestr(self.nbytes))]\n if self.nbytes_stored > 0:\n items += [\n ('No. bytes stored', bytestr(self.nbytes_stored)),\n ('Storage ratio', '%.1f' % (self.nbytes / self.nbytes_stored)),\n ]\n items += [\n ('Chunks initialized', '{}/{}'.format(self.nchunks_initialized, self.nchunks))\n ]\n\n return items\n\n def digest(self, hashname=\"sha1\"):\n \"\"\"\n Compute a checksum for the data. Default uses sha1 for speed.\n\n Examples\n --------\n >>> import binascii\n >>> import zarr\n >>> z = zarr.empty(shape=(10000, 10000), chunks=(1000, 1000))\n >>> binascii.hexlify(z.digest())\n b'041f90bc7a571452af4f850a8ca2c6cddfa8a1ac'\n >>> z = zarr.zeros(shape=(10000, 10000), chunks=(1000, 1000))\n >>> binascii.hexlify(z.digest())\n b'7162d416d26a68063b66ed1f30e0a866e4abed60'\n >>> z = zarr.zeros(shape=(10000, 10000), dtype=\"u1\", chunks=(1000, 1000))\n >>> binascii.hexlify(z.digest())\n b'cb387af37410ae5a3222e893cf3373e4e4f22816'\n \"\"\"\n\n h = hashlib.new(hashname)\n\n for i in itertools.product(*[range(s) for s in self.cdata_shape]):\n h.update(self.chunk_store.get(self._chunk_key(i), b\"\"))\n\n h.update(self.store.get(self._key_prefix + array_meta_key, b\"\"))\n\n h.update(self.store.get(self.attrs.key, b\"\"))\n\n checksum = h.digest()\n\n return checksum\n\n def hexdigest(self, hashname=\"sha1\"):\n \"\"\"\n Compute a checksum for the data. 
Default uses sha1 for speed.\n\n Examples\n --------\n >>> import zarr\n >>> z = zarr.empty(shape=(10000, 10000), chunks=(1000, 1000))\n >>> z.hexdigest()\n '041f90bc7a571452af4f850a8ca2c6cddfa8a1ac'\n >>> z = zarr.zeros(shape=(10000, 10000), chunks=(1000, 1000))\n >>> z.hexdigest()\n '7162d416d26a68063b66ed1f30e0a866e4abed60'\n >>> z = zarr.zeros(shape=(10000, 10000), dtype=\"u1\", chunks=(1000, 1000))\n >>> z.hexdigest()\n 'cb387af37410ae5a3222e893cf3373e4e4f22816'\n \"\"\"\n\n checksum = binascii.hexlify(self.digest(hashname=hashname))\n\n # This is a bytes object on Python 3 and we want a str.\n if type(checksum) is not str:\n checksum = checksum.decode('utf8')\n\n return checksum\n\n def __getstate__(self):\n return (self._store, self._path, self._read_only, self._chunk_store,\n self._synchronizer, self._cache_metadata, self._attrs.cache)\n\n def __setstate__(self, state):\n self.__init__(*state)\n\n def _synchronized_op(self, f, *args, **kwargs):\n\n if self._synchronizer is None:\n # no synchronization\n lock = nolock\n\n else:\n # synchronize on the array\n mkey = self._key_prefix + array_meta_key\n lock = self._synchronizer[mkey]\n\n with lock:\n self._refresh_metadata_nosync()\n result = f(*args, **kwargs)\n\n return result\n\n def _write_op(self, f, *args, **kwargs):\n\n # guard condition\n if self._read_only:\n raise ReadOnlyError()\n\n return self._synchronized_op(f, *args, **kwargs)\n\n def resize(self, *args):\n \"\"\"Change the shape of the array by growing or shrinking one or more\n dimensions.\n\n Examples\n --------\n >>> import zarr\n >>> z = zarr.zeros(shape=(10000, 10000), chunks=(1000, 1000))\n >>> z.shape\n (10000, 10000)\n >>> z.resize(20000, 10000)\n >>> z.shape\n (20000, 10000)\n >>> z.resize(30000, 1000)\n >>> z.shape\n (30000, 1000)\n\n Notes\n -----\n When resizing an array, the data are not rearranged in any way.\n\n If one or more dimensions are shrunk, any chunks falling outside the\n new array shape will be deleted from the underlying store.\n\n \"\"\"\n\n return self._write_op(self._resize_nosync, *args)\n\n def _resize_nosync(self, *args):\n\n # normalize new shape argument\n old_shape = self._shape\n new_shape = normalize_resize_args(old_shape, *args)\n old_cdata_shape = self._cdata_shape\n\n # update metadata\n self._shape = new_shape\n self._flush_metadata_nosync()\n\n # determine the new number and arrangement of chunks\n chunks = self._chunks\n new_cdata_shape = tuple(math.ceil(s / c)\n for s, c in zip(new_shape, chunks))\n\n # remove any chunks not within range\n chunk_store = self.chunk_store\n for cidx in itertools.product(*[range(n) for n in old_cdata_shape]):\n if all(i < c for i, c in zip(cidx, new_cdata_shape)):\n pass # keep the chunk\n else:\n key = self._chunk_key(cidx)\n try:\n del chunk_store[key]\n except KeyError:\n # chunk not initialized\n pass\n\n def append(self, data, axis=0):\n \"\"\"Append `data` to `axis`.\n\n Parameters\n ----------\n data : array_like\n Data to be appended.\n axis : int\n Axis along which to append.\n\n Returns\n -------\n new_shape : tuple\n\n Notes\n -----\n The size of all dimensions other than `axis` must match between this\n array and `data`.\n\n Examples\n --------\n >>> import numpy as np\n >>> import zarr\n >>> a = np.arange(10000000, dtype='i4').reshape(10000, 1000)\n >>> z = zarr.array(a, chunks=(1000, 100))\n >>> z.shape\n (10000, 1000)\n >>> z.append(a)\n (20000, 1000)\n >>> z.append(np.vstack([a, a]), axis=1)\n (20000, 2000)\n >>> z.shape\n (20000, 2000)\n\n \"\"\"\n return 
self._write_op(self._append_nosync, data, axis=axis)\n\n def _append_nosync(self, data, axis=0):\n\n # ensure data is array-like\n if not hasattr(data, 'shape'):\n data = np.asanyarray(data)\n\n # ensure shapes are compatible for non-append dimensions\n self_shape_preserved = tuple(s for i, s in enumerate(self._shape)\n if i != axis)\n data_shape_preserved = tuple(s for i, s in enumerate(data.shape)\n if i != axis)\n if self_shape_preserved != data_shape_preserved:\n raise ValueError('shape of data to append is not compatible with the array; '\n 'all dimensions must match except for the dimension being '\n 'appended')\n\n # remember old shape\n old_shape = self._shape\n\n # determine new shape\n new_shape = tuple(\n self._shape[i] if i != axis else self._shape[i] + data.shape[i]\n for i in range(len(self._shape))\n )\n\n # resize\n self._resize_nosync(new_shape)\n\n # store data\n # noinspection PyTypeChecker\n append_selection = tuple(\n slice(None) if i != axis else slice(old_shape[i], new_shape[i])\n for i in range(len(self._shape))\n )\n self[append_selection] = data\n\n return new_shape\n\n def view(self, shape=None, chunks=None, dtype=None,\n fill_value=None, filters=None, read_only=None,\n synchronizer=None):\n \"\"\"Return an array sharing the same data.\n\n Parameters\n ----------\n shape : int or tuple of ints\n Array shape.\n chunks : int or tuple of ints, optional\n Chunk shape.\n dtype : string or dtype, optional\n NumPy dtype.\n fill_value : object\n Default value to use for uninitialized portions of the array.\n filters : sequence, optional\n Sequence of filters to use to encode chunk data prior to\n compression.\n read_only : bool, optional\n True if array should be protected against modification.\n synchronizer : object, optional\n Array synchronizer.\n\n Notes\n -----\n WARNING: This is an experimental feature and should be used with care.\n There are plenty of ways to generate errors and/or cause data\n corruption.\n\n Examples\n --------\n\n Bypass filters:\n\n >>> import zarr\n >>> import numpy as np\n >>> np.random.seed(42)\n >>> labels = ['female', 'male']\n >>> data = np.random.choice(labels, size=10000)\n >>> filters = [zarr.Categorize(labels=labels,\n ... dtype=data.dtype,\n ... 
astype='u1')]\n >>> a = zarr.array(data, chunks=1000, filters=filters)\n >>> a[:]\n array(['female', 'male', 'female', ..., 'male', 'male', 'female'],\n dtype='<U6')\n >>> v = a.view(dtype='u1', filters=[])\n >>> v.is_view\n True\n >>> v[:]\n array([1, 2, 1, ..., 2, 2, 1], dtype=uint8)\n\n Views can be used to modify data:\n\n >>> x = v[:]\n >>> x.sort()\n >>> v[:] = x\n >>> v[:]\n array([1, 1, 1, ..., 2, 2, 2], dtype=uint8)\n >>> a[:]\n array(['female', 'female', 'female', ..., 'male', 'male', 'male'],\n dtype='<U6')\n\n View as a different dtype with the same item size:\n\n >>> data = np.random.randint(0, 2, size=10000, dtype='u1')\n >>> a = zarr.array(data, chunks=1000)\n >>> a[:]\n array([0, 0, 1, ..., 1, 0, 0], dtype=uint8)\n >>> v = a.view(dtype=bool)\n >>> v[:]\n array([False, False, True, ..., True, False, False])\n >>> np.all(a[:].view(dtype=bool) == v[:])\n True\n\n An array can be viewed with a dtype with a different item size, however\n some care is needed to adjust the shape and chunk shape so that chunk\n data is interpreted correctly:\n\n >>> data = np.arange(10000, dtype='u2')\n >>> a = zarr.array(data, chunks=1000)\n >>> a[:10]\n array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=uint16)\n >>> v = a.view(dtype='u1', shape=20000, chunks=2000)\n >>> v[:10]\n array([0, 0, 1, 0, 2, 0, 3, 0, 4, 0], dtype=uint8)\n >>> np.all(a[:].view('u1') == v[:])\n True\n\n Change fill value for uninitialized chunks:\n\n >>> a = zarr.full(10000, chunks=1000, fill_value=-1, dtype='i1')\n >>> a[:]\n array([-1, -1, -1, ..., -1, -1, -1], dtype=int8)\n >>> v = a.view(fill_value=42)\n >>> v[:]\n array([42, 42, 42, ..., 42, 42, 42], dtype=int8)\n\n Note that resizing or appending to views is not permitted:\n\n >>> a = zarr.empty(10000)\n >>> v = a.view()\n >>> try:\n ... v.resize(20000)\n ... except PermissionError as e:\n ... print(e)\n operation not permitted for views\n\n \"\"\"\n\n store = self._store\n chunk_store = self._chunk_store\n path = self._path\n if read_only is None:\n read_only = self._read_only\n if synchronizer is None:\n synchronizer = self._synchronizer\n a = Array(store=store, path=path, chunk_store=chunk_store, read_only=read_only,\n synchronizer=synchronizer, cache_metadata=True)\n a._is_view = True\n\n # allow override of some properties\n if dtype is None:\n dtype = self._dtype\n else:\n dtype = np.dtype(dtype)\n a._dtype = dtype\n if shape is None:\n shape = self._shape\n else:\n shape = normalize_shape(shape)\n a._shape = shape\n if chunks is not None:\n chunks = normalize_chunks(chunks, shape, dtype.itemsize)\n a._chunks = chunks\n if fill_value is not None:\n a._fill_value = fill_value\n if filters is not None:\n a._filters = filters\n\n return a\n\n def astype(self, dtype):\n \"\"\"Returns a view that does on the fly type conversion of the underlying data.\n\n Parameters\n ----------\n dtype : string or dtype\n NumPy dtype.\n\n Notes\n -----\n This method returns a new Array object which is a view on the same\n underlying chunk data. Modifying any data via the view is currently\n not permitted and will result in an error. 
This is an experimental\n feature and its behavior is subject to change in the future.\n\n See Also\n --------\n Array.view\n\n Examples\n --------\n\n >>> import zarr\n >>> import numpy as np\n >>> data = np.arange(100, dtype=np.uint8)\n >>> a = zarr.array(data, chunks=10)\n >>> a[:]\n array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,\n 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,\n 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,\n 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,\n 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,\n 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,\n 96, 97, 98, 99], dtype=uint8)\n >>> v = a.astype(np.float32)\n >>> v.is_view\n True\n >>> v[:]\n array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,\n 10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,\n 20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,\n 30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,\n 40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,\n 50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,\n 60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,\n 70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,\n 80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,\n 90., 91., 92., 93., 94., 95., 96., 97., 98., 99.],\n dtype=float32)\n \"\"\"\n\n dtype = np.dtype(dtype)\n\n filters = []\n if self._filters:\n filters.extend(self._filters)\n filters.insert(0, AsType(encode_dtype=self._dtype, decode_dtype=dtype))\n\n return self.view(filters=filters, dtype=dtype, read_only=True)\n"
] | [
[
"numpy.empty",
"numpy.squeeze",
"numpy.zeros",
"numpy.dtype",
"numpy.asanyarray",
"numpy.copyto"
]
] |
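The NumPy calls listed for this record sit on the hot path of the chunk read/write methods shown above; for example, `_chunk_setitem_nosync` allocates a fresh chunk with `np.empty` and fills it when an entire chunk is replaced by a scalar. Below is a minimal standalone sketch of that "total slice" fast path. It is not the zarr API: the chunk shape, dtype and order are assumptions chosen for illustration.

import numpy as np

# Standalone sketch (not the zarr API): the "total slice" scalar branch of
# _chunk_setitem_nosync above. Chunk shape, dtype and order are assumptions.
chunks = (1000, 100)
dtype = np.dtype('i4')

def fill_whole_chunk(value, order='C'):
    # np.empty skips zero-initialisation because fill() overwrites every
    # element anyway, mirroring the optimisation in the record's code.
    chunk = np.empty(chunks, dtype=dtype, order=order)
    chunk.fill(value)
    return chunk

print(fill_whole_chunk(42)[0, :5])  # -> [42 42 42 42 42]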
san99tiago/ML_BASICS | [
"ebd51827f7dd427c848b5c8e1d4bfd017d2fb56f"
] | [
"00_LIBRARIES/00_NUMPY/03_numpy_manipulations.py"
] | [
"# NUMPY MANIPULATIONS OF ARRAYS\n# Santiago Garcia Arango\n# -------------------------------------------------------------------------\nimport numpy as np\n\nmy_array = np.arange(1, 11) # [1,2,..,8,9,10]\nprint(\"my_array=\\n\", my_array, \"\\n\")\n\n\n# -----------------CHECKING CONDITIONS IN ARRAY ITEMS----------------------\n# FIRST WAY...\n# This is how we show boolean result of a desired condition of an array\nboolean_array = my_array > 5\nprint(\"my_array > 5 --> \", boolean_array, \"\\n\")\n# We can take advantage of the boolean_array, by calling the main array...\n# ...\"evaluated\" in the True statements of the boolean_array.\n# This will give us only the original array where the conditions are True\nprint(\"my_array[boolean_array] = \", my_array[boolean_array], \"\\n\")\n\n# SECOND WAY...\n# This previous two step process is usually done in one step!!!\n# Remark: This is the most common way to to this!!!\nprint(\"my_array[my_array>5] = \", my_array[my_array > 5], \"\\n\")\n\n\n# -----------------------CREATE MATRICES EASIER----------------------------\n# Example: create this matrix:\n# 1 1 1 1 1\n# 1 0 0 0 1\n# 1 0 9 0 1\n# 1 0 0 0 1\n# 1 1 1 1 1\n\ncool_matrix = np.ones((5, 5))\ncool_matrix[1:4, 1:4] = 0\ncool_matrix[2, 2] = 9\nprint(\"cool_matrix:\\n\", cool_matrix, \"\\n\")\n"
] | [
[
"numpy.arange",
"numpy.ones"
]
] |
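The tutorial file above demonstrates one-step boolean-mask selection on a 1-D array and slice assignment on a 2-D array as two separate topics. As a small follow-on sketch (not part of the original file), the same one-step mask pattern applied to the 5x5 matrix it builds:

import numpy as np

# Follow-on sketch, not part of the original tutorial file.
cool_matrix = np.ones((5, 5))
cool_matrix[1:4, 1:4] = 0
cool_matrix[2, 2] = 9

# One-step boolean-mask selection, as in the 1-D my_array example above:
# picks out the sixteen border 1s plus the central 9, in row-major order.
print(cool_matrix[cool_matrix > 0])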
onyalcin/PyMO | [
"1d49620096b7f81b6db0cd4ed427cd7496bd5f99"
] | [
"pymo/viz_tools.py"
] | [
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport IPython\nimport os\n\ndef save_fig(fig_id, tight_layout=True):\n if tight_layout:\n plt.tight_layout()\n plt.savefig(fig_id + '.png', format='png', dpi=300)\n \n \ndef draw_stickfigure(mocap_track, frame, data=None, joints=None, draw_names=False, ax=None, figsize=(8,8)):\n if ax is None:\n fig = plt.figure(figsize=figsize)\n ax = fig.add_subplot(111)\n \n if joints is None:\n joints_to_draw = mocap_track.skeleton.keys()\n else:\n joints_to_draw = joints\n \n if data is None:\n df = mocap_track.values\n else:\n df = data\n \n for joint in joints_to_draw:\n ax.scatter(x=df['%s_Xposition'%joint][frame], \n y=df['%s_Yposition'%joint][frame], \n alpha=0.6, c='b', marker='o')\n\n parent_x = df['%s_Xposition'%joint][frame]\n parent_y = df['%s_Yposition'%joint][frame]\n \n children_to_draw = [c for c in mocap_track.skeleton[joint]['children'] if c in joints_to_draw]\n \n for c in children_to_draw:\n child_x = df['%s_Xposition'%c][frame]\n child_y = df['%s_Yposition'%c][frame]\n ax.plot([parent_x, child_x], [parent_y, child_y], 'k-', lw=2)\n \n if draw_names:\n ax.annotate(joint, \n (df['%s_Xposition'%joint][frame] + 0.1, \n df['%s_Yposition'%joint][frame] + 0.1))\n\n return ax\n\ndef draw_stickfigure3d(mocap_track, frame, data=None, joints=None, draw_names=False, ax=None, figsize=(8,8)):\n from mpl_toolkits.mplot3d import Axes3D\n \n if ax is None:\n fig = plt.figure(figsize=figsize)\n ax = fig.add_subplot(111, projection='3d') \n \n if joints is None:\n joints_to_draw = mocap_track.skeleton.keys()\n else:\n joints_to_draw = joints\n \n if data is None:\n df = mocap_track.values\n else:\n df = data\n \n for joint in joints_to_draw:\n parent_x = df['%s_Xposition'%joint][frame]\n parent_y = df['%s_Zposition'%joint][frame]\n parent_z = df['%s_Yposition'%joint][frame]\n # ^ In mocaps, Y is the up-right axis \n\n ax.scatter(xs=parent_x, \n ys=parent_y, \n zs=parent_z, \n alpha=0.6, c='b', marker='o')\n\n \n children_to_draw = [c for c in mocap_track.skeleton[joint]['children'] if c in joints_to_draw]\n \n for c in children_to_draw:\n child_x = df['%s_Xposition'%c][frame]\n child_y = df['%s_Zposition'%c][frame]\n child_z = df['%s_Yposition'%c][frame]\n # ^ In mocaps, Y is the up-right axis\n\n ax.plot([parent_x, child_x], [parent_y, child_y], [parent_z, child_z], 'k-', lw=2, c='black')\n \n if draw_names:\n ax.text(x=parent_x + 0.1, \n y=parent_y + 0.1,\n z=parent_z + 0.1,\n s=joint,\n color='rgba(0,0,0,0.9)')\n\n return ax\n\n\ndef sketch_move(mocap_track, data=None, ax=None, figsize=(16,8)):\n if ax is None:\n fig = plt.figure(figsize=figsize)\n ax = fig.add_subplot(111)\n \n if data is None:\n data = mocap_track.values\n\n for frame in range(0, data.shape[0], 4):\n# draw_stickfigure(mocap_track, f, data=data, ax=ax)\n \n for joint in mocap_track.skeleton.keys():\n children_to_draw = [c for c in mocap_track.skeleton[joint]['children']]\n \n parent_x = data['%s_Xposition'%joint][frame]\n parent_y = data['%s_Yposition'%joint][frame]\n \n frame_alpha = frame/data.shape[0]\n \n for c in children_to_draw:\n child_x = data['%s_Xposition'%c][frame]\n child_y = data['%s_Yposition'%c][frame]\n \n ax.plot([parent_x, child_x], [parent_y, child_y], '-', lw=1, color='gray', alpha=frame_alpha)\n\n\n\ndef viz_cnn_filter(feature_to_viz, mocap_track, data, gap=25):\n fig = plt.figure(figsize=(16,4))\n ax = plt.subplot2grid((1,8),(0,0))\n ax.imshow(feature_to_viz.T, aspect='auto', interpolation='nearest')\n \n ax = 
plt.subplot2grid((1,8),(0,1), colspan=7)\n for frame in range(feature_to_viz.shape[0]):\n frame_alpha = 0.2#frame/data.shape[0] * 2 + 0.2\n\n for joint_i, joint in enumerate(mocap_track.skeleton.keys()):\n children_to_draw = [c for c in mocap_track.skeleton[joint]['children']]\n\n parent_x = data['%s_Xposition'%joint][frame] + frame * gap\n parent_y = data['%s_Yposition'%joint][frame] \n\n ax.scatter(x=parent_x, \n y=parent_y, \n alpha=0.6,\n cmap='RdBu',\n c=feature_to_viz[frame][joint_i] * 10000,\n marker='o',\n s = abs(feature_to_viz[frame][joint_i] * 10000))\n plt.axis('off')\n for c in children_to_draw:\n child_x = data['%s_Xposition'%c][frame] + frame * gap\n child_y = data['%s_Yposition'%c][frame] \n\n ax.plot([parent_x, child_x], [parent_y, child_y], '-', lw=1, color='gray', alpha=frame_alpha)\n\n \ndef print_skel(X):\n stack = [X.root_name]\n tab=0\n while stack:\n joint = stack.pop()\n tab = len(stack)\n print('%s- %s (%s)'%('| '*tab, joint, X.skeleton[joint]['parent']))\n for c in X.skeleton[joint]['children']:\n stack.append(c)\n\n\ndef nb_play_mocap_fromurl(mocap, mf, frame_time=1/30, scale=1, base_url='http://titan:8385'):\n if mf == 'bvh':\n bw = BVHWriter()\n with open('test.bvh', 'w') as ofile:\n bw.write(mocap, ofile)\n \n filepath = '../notebooks/test.bvh'\n elif mf == 'pos':\n c = list(mocap.values.columns)\n\n for cc in c:\n if 'rotation' in cc:\n c.remove(cc)\n mocap.values.to_csv('test.csv', index=False, columns=c)\n \n filepath = '../notebooks/test.csv'\n else:\n return\n \n url = '%s/mocapplayer/player.html?data_url=%s&scale=%f&cz=200&order=xzyi&frame_time=%f'%(base_url, filepath, scale, frame_time)\n iframe = '<iframe src=' + url + ' width=\"100%\" height=500></iframe>'\n link = '<a href=%s target=\"_blank\">New Window</a>'%url\n return IPython.display.HTML(iframe+link)\n\ndef nb_play_mocap(mocap, mf, meta=None, frame_time=1/30, scale=1, camera_z=500, base_url=None):\n data_template = 'var dataBuffer = `$$DATA$$`;'\n data_template += 'var metadata = $$META$$;'\n data_template += 'start(dataBuffer, metadata, $$CZ$$, $$SCALE$$, $$FRAMETIME$$);'\n dir_path = os.path.dirname(os.path.realpath(__file__))\n\n\n if base_url is None:\n base_url = os.path.join(dir_path, 'mocapplayer/playBuffer.html')\n \n # print(dir_path)\n\n if mf == 'bvh':\n pass\n elif mf == 'pos':\n cols = list(mocap.values.columns)\n for c in cols:\n if 'rotation' in c:\n cols.remove(c)\n \n data_csv = mocap.values.to_csv(index=False, columns=cols)\n\n if meta is not None:\n lines = [','.join(item) for item in meta.astype('str')]\n meta_csv = '[' + ','.join('[%s]'%l for l in lines) +']' \n else:\n meta_csv = '[]'\n \n data_assigned = data_template.replace('$$DATA$$', data_csv)\n data_assigned = data_assigned.replace('$$META$$', meta_csv)\n data_assigned = data_assigned.replace('$$CZ$$', str(camera_z))\n data_assigned = data_assigned.replace('$$SCALE$$', str(scale))\n data_assigned = data_assigned.replace('$$FRAMETIME$$', str(frame_time))\n\n else:\n return\n \n \n\n with open(os.path.join(dir_path, 'mocapplayer/data.js'), 'w') as oFile:\n oFile.write(data_assigned)\n\n url = '%s?&cz=200&order=xzyi&frame_time=%f&scale=%f'%(base_url, frame_time, scale)\n iframe = '<iframe frameborder=\"0\" src=' + url + ' width=\"100%\" height=500></iframe>'\n link = '<a href=%s target=\"_blank\">New Window</a>'%url\n return IPython.display.HTML(iframe+link)\n"
] | [
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplot2grid"
]
] |
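`draw_stickfigure` in the record above walks the skeleton dictionary, scatters each joint and draws a line from the joint to every child. The following toy sketch isolates that parent-to-child loop so the pattern is visible on its own; the skeleton, joint names and coordinates are made up for illustration, whereas the real function reads them from `mocap_track.skeleton` and the `df['<joint>_Xposition']` / `df['<joint>_Yposition']` columns.

import matplotlib
matplotlib.use('Agg')  # headless backend so the sketch runs without a display
import matplotlib.pyplot as plt

# Hypothetical skeleton and 2-D joint positions, standing in for
# mocap_track.skeleton and the per-frame position columns.
skeleton = {
    'Hips':    {'children': ['Spine', 'LeftLeg']},
    'Spine':   {'children': ['Head']},
    'Head':    {'children': []},
    'LeftLeg': {'children': []},
}
xy = {'Hips': (0.0, 1.0), 'Spine': (0.0, 1.5), 'Head': (0.0, 1.8), 'LeftLeg': (-0.2, 0.2)}

fig, ax = plt.subplots(figsize=(4, 4))
for joint, info in skeleton.items():
    px, py = xy[joint]
    ax.scatter(px, py, alpha=0.6, c='b', marker='o')   # joint marker
    for child in info['children']:
        cx, cy = xy[child]
        ax.plot([px, cx], [py, cy], 'k-', lw=2)        # bone to child
plt.savefig('stick_sketch.png', dpi=150)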
SunLoveSheep/Sem-LSD | [
"8c085217c372588fbb9ca37c5aef32d66270560f"
] | [
"src/_analyze_semantic_line.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport _init_paths\n# /workspace/tangyang.sy/pytorch_CV/pytorch_CenterNet/src\n\ntry:\n from utils_ctdet.lineNMS import do_nms_line, do_acl_line, do_acl_line_v1\nexcept ImportError:\n from utils.lineNMS import do_nms_line, do_acl_line, do_acl_line_v1\n\nfrom collections import OrderedDict\nimport numpy as np\nimport os\nimport xml.etree.ElementTree as ET\n\nLABEL_MAP_KAIST = [\n 'building', 'ground_building', 'wall', 'ground_wall',\n 'grass', 'fence', 'ground_fence', 'pole', 'curb', 'sign', 'tree', 'window', 'door', 'bridge'\n]\n\nLABEL2SKIP = [\n 'sign', 'window', 'tree',\n]\n\nnms_func = {\n 'acl': do_acl_line,\n 'acl_v1': do_acl_line_v1,\n 'iou': do_nms_line,\n}\n\nMETRIC = 'acl'\nMETRIC_EVAL = 'acl'\nIF_CORR_FN = True\nMETRIC_THRESH = 0.9\nDEFAULT_DATASET = 'OBJLINE_KAIST'\n\n# mAP metrics:\nIF_mAP = True\nmAP_score = 0.5\nLV_IOU = [0.5, 0.75, 0.9] if not IF_mAP else [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]\n# LV_IOU = [0.5, 0.75, 0.9] if not IF_mAP else [0.5, 0.9]\nmAP_SUB = ['building', 'pole', 'curb']\n\nLV_ACL = LV_IOU\nLV_SCORE = [0.25, 0.5, 0.6, 0.7, 0.8, 0.9]\nROC_score = 0.25\nif mAP_score not in LV_SCORE:\n LV_SCORE.append(mAP_score)\n\nroot = '/workspace/tangyang.sy/pytorch_CV/test_imgs/KAIST_5seqs_20200214/'\n\nMODEL = 'best'\n# EXP_ID = 'kaist_5seqs_ResNet18_GradMagLoss_NegOnly_20200224_ep{}_vis0.25'.format(MODEL)\nEXP_ID = 'kaist_5seqs_ResNet18_20200224_ep{}_vis0.25'.format(MODEL)\n\nSRC_GT = root + 'Annotations/'\n\nDATASET_MAP = {\n 'KAIST': LABEL_MAP_KAIST,\n}\n\nDATASET = 'KAIST'\nLABEL_MAP = DATASET_MAP[DATASET]\nLABEL_MAP.append('lineseg') # For LSD and AFM results.\n\nSRC_PRED = root + 'Preds_{}/'.format(EXP_ID)\nPTH_EXT = '' if not IF_CORR_FN else '_corrFN'\nRECORD_PTH = root + 'summary{}/'.format(PTH_EXT)\nif not os.path.exists(RECORD_PTH):\n os.makedirs(RECORD_PTH)\nif len(LABEL2SKIP) == 0:\n RECORD_TXT = RECORD_PTH + 'res_id-{}_metric-{}-{}.txt'.format(EXP_ID, METRIC, METRIC_THRESH)\n RECORD_perCate_TXT = RECORD_PTH + 'res_perCate_id-{}_metric-{}-{}.txt'.format(EXP_ID, METRIC, METRIC_THRESH)\nelse:\n RECORD_TXT = RECORD_PTH + 'res_id-{}_metric-{}-{}-skip{}.txt'.format(EXP_ID, METRIC, METRIC_THRESH, len(LABEL2SKIP))\n RECORD_perCate_TXT = RECORD_PTH + 'res_perCate_id-{}_metric-{}-{}-skip{}.txt'.format(EXP_ID, METRIC, METRIC_THRESH,\n len(LABEL2SKIP))\n\nlst_gt = [x for x in sorted(os.listdir(SRC_GT)) if '.xml' in x]\nlst_pred = [x for x in sorted(os.listdir(SRC_PRED)) if '.xml' in x]\n\nPI = 3.14159265359\n\n\nclass LineSeg:\n x_left = 0 # x_left\n y_left = 0 # y_left\n x_right = 0 # x_right\n y_right = 0 # y_right\n cate = 'null'\n score = 0.0\n direction = None\n angle = 0.0 # angle against positive x-axis\n length = 0.0\n\n def __init__(self, x_left_in, y_left_in, x_right_in, y_right_in,\n cate_in, score_in, direct_in):\n self.x_left = x_left_in\n self.y_left = y_left_in\n self.x_right = x_right_in\n self.y_right = y_right_in\n self.cate = cate_in\n self.score = score_in\n self.direction = direct_in\n self.angle = self._cal_ang()\n\n def _cal_ang(self):\n ang_res = 0.\n return ang_res\n\n\ndef _fast_reject(line1, line2):\n # Fast reject:\n if line1.x_right < line2.x_left or line1.x_left > line2.x_right:\n return True\n line1_ymin = min(line1.y_left, line1.y_right)\n line1_ymax = max(line1.y_left, line1.y_right)\n line2_ymin = min(line2.y_left, line2.y_right)\n line2_ymax = max(line2.y_left, line2.y_right)\n if line1_ymin > 
line2_ymax or line1_ymax < line2_ymin:\n return True\n return False\n\n\ndef _cal_iou_line(line1, line2):\n # if _fast_reject(line1, line2):\n # return 0.\n\n # if line1.direction != line2.direction:\n # return 0.\n\n line1_xmin = min(line1.x_left, line1.x_right)\n line1_ymin = min(line1.y_left, line1.y_right)\n line1_xmax = max(line1.x_left, line1.x_right)\n line1_ymax = max(line1.y_left, line1.y_right)\n\n line2_xmin = min(line2.x_left, line2.x_right)\n line2_ymin = min(line2.y_left, line2.y_right)\n line2_xmax = max(line2.x_left, line2.x_right)\n line2_ymax = max(line2.y_left, line2.y_right)\n\n inter_xmin = max(line1_xmin, line2_xmin)\n inter_ymin = max(line1_ymin, line2_ymin)\n inter_xmax = min(line1_xmax, line2_xmax)\n inter_ymax = min(line1_ymax, line2_ymax)\n inter_x = max(0, inter_xmax - inter_xmin)\n inter_y = max(0, inter_ymax - inter_ymin)\n area_inter = inter_x * inter_y\n\n union_xmin = min(line1_xmin, line2_xmin)\n union_ymin = min(line1_ymin, line2_ymin)\n union_xmax = max(line1_xmax, line2_xmax)\n union_ymax = max(line1_ymax, line2_ymax)\n\n union_x = 1. if union_xmax == union_xmin else union_xmax - union_xmin\n union_y = 1. if union_ymax == union_ymin else union_ymax - union_ymin\n area_union = union_x * union_y\n\n iou = area_inter / area_union\n\n return iou\n\n\n# line1 should be ground truth, if available\ndef _cal_acl_line(line1, line2):\n # if _fast_reject(line1, line2):\n # return 0.\n # if line1.direction != line2.direction:\n # return 0.\n\n sum1_x = line1.x_left + line1.x_right\n sum1_y = line1.y_left + line1.y_right\n c1_x = sum1_x / 2\n c1_y = sum1_y / 2\n l1_wr = np.sqrt(sum1_x * sum1_x + sum1_y * sum1_y)\n l1_x = line1.x_right - line1.x_left\n l1_y = line1.y_right - line1.y_left\n l1 = np.sqrt(l1_x * l1_x + l1_y * l1_y)\n alpha1 = (line1.y_right - line1.y_left) / (line1.x_right - line1.x_left) if \\\n line1.x_right != line1.x_left else (line1.y_right - line1.y_left) / 1.0\n alpha1 = np.abs(np.arctan(alpha1) * 180 / PI)\n\n sum2_x = line2.x_left + line2.x_right\n sum2_y = line2.y_left + line2.y_right\n c2_x = sum2_x / 2\n c2_y = sum2_y / 2\n l2_wr = np.sqrt(sum2_x * sum2_x + sum2_y * sum2_y)\n l2_x = line2.x_right - line2.x_left\n l2_y = line2.y_right - line2.y_left\n l2 = np.sqrt(l2_x * l2_x + l2_y * l2_y)\n alpha2 = (line2.y_right - line2.y_left) / (line2.x_right - line2.x_left) if \\\n line2.x_right != line2.x_left else (line2.y_right - line2.y_left) / 1.0\n alpha2 = np.abs(np.arctan(alpha2) * 180 / PI)\n\n sim_a = max(0, 1 - (np.abs(alpha1 - alpha2) * 0.0111111111111))\n sim_c = max(0, 1 - (np.sqrt((c2_x - c1_x) * (c2_x - c1_x) + (c2_y - c1_y) * (c2_y - c1_y)))\n / (l1 * 0.5))\n sim_l = max(0, 1 - np.abs(l1 - l2) / l1)\n sim_l_wr = max(0, 1 - np.abs(l1_wr - l2_wr) / l1_wr)\n #print(\"sim l: {:.3f} | sim l wr: {:.3f} | line1: {},{},{},{} | line2: {},{},{},{}\".format(\n # sim_l, sim_l_wr,\n # line1.x_left, line1.y_left, line1.x_right, line1.y_right,\n # line2.x_left, line2.y_left, line1.x_right, line2.y_right\n #))\n acl = sim_a * sim_c * sim_l\n\n return acl\n\n\ndef _gaussian_radius(height, width, min_overlap=0.7):\n # a3 = 4 * min_overlap\n # b3 = -2 * min_overlap * (height + width)\n # c3 = (min_overlap - 1) * width * height\n # sq3 = np.sqrt(b3 ** 2 - 4 * a3 * c3)\n # r3 = (b3 + sq3) / 2\n a1 = 1\n b1 = (height + width)\n c1 = width * height * (1 - min_overlap) / (1 + min_overlap)\n sq1 = np.sqrt(b1 ** 2 - 4 * a1 * c1)\n r1 = (b1 + sq1) / 2\n return r1\n\n\ndef _cal_acl_line_v1(line1, line2):\n # if _fast_reject(line1, line2):\n # return 0.\n\n # 
Vectorize to calculate cosine angle\n v_x1 = line1.x_right - line1.x_left\n v_y1 = line1.y_right - line1.y_left\n v_x2 = line2.x_right - line2.x_left\n v_y2 = line2.y_right - line2.y_left\n l1 = np.sqrt(v_x1 * v_x1 + v_y1 * v_y1)\n l2 = np.sqrt(v_x2 * v_x2 + v_y2 * v_y2)\n cos_a = (v_x1 * v_x2 + v_y1 * v_y2) / (l1 * l2)\n\n # Gaussian distribution to get score of center point\n radius = _gaussian_radius(v_y1, v_x1)\n sigma = (2 * radius - 1)/6\n sum1_x = line1.x_left + line1.x_right\n sum1_y = line1.y_left + line1.y_right\n c1_x = sum1_x / 2\n c1_y = sum1_y / 2\n sum2_x = line2.x_left + line2.x_right\n sum2_y = line2.y_left + line2.y_right\n c2_x = sum2_x / 2\n c2_y = sum2_y / 2\n d_x = c2_x - c1_x\n d_y = c2_y - c1_y\n c_score = np.exp(-(d_x * d_x + d_y * d_y) / (2 * sigma * sigma))\n\n sim_a = cos_a\n sim_c = c_score\n sim_l = max(0, 1 - np.abs(l1 - l2) / l1)\n\n return sim_a * sim_c * sim_l\n\n\nmetric_func = {\n 'acl': _cal_acl_line,\n 'acl_v1': _cal_acl_line_v1,\n 'iou': _cal_iou_line,\n}\n\n\ndef reverse_direct(lines):\n lines_out = lines.copy()\n for line in lines_out:\n line.direction = 1 - line.direction\n y_tmp = line.y_left\n line.y_left = line.y_right\n line.y_right = y_tmp\n return lines_out\n\n\ndef get_lines_from_xml(xml_file, if_gt=False):\n tree = ET.parse(xml_file)\n root = tree.getroot()\n\n lst_lines = list()\n for obj in root.findall('object'):\n objline = obj.find('bndbox')\n\n name = obj.find('name').text\n if name is None:\n continue\n name = name.lower()\n score = obj.find('score').text if not if_gt else 1.0\n try:\n direct_str = obj.find('direction').text\n direction = 1.0 if direct_str == 'lt2rb' else 0.0\n except AttributeError:\n direction = 2.0\n\n # The followings are actually already converted to x_left, y_left, x_right, and y_right\n xmin = int(float(objline.find('xmin').text))\n ymin = int(float(objline.find('ymin').text))\n xmax = int(float(objline.find('xmax').text))\n ymax = int(float(objline.find('ymax').text))\n if direction == 1.0:\n x_left = min(xmin, xmax)\n y_left = min(ymin, ymax)\n x_right = max(xmin, xmax)\n y_right = max(ymin, ymax)\n elif direction == 0.0:\n x_left = min(xmin, xmax)\n y_left = max(ymin, ymax)\n x_right = max(xmin, xmax)\n y_right = min(ymin, ymax)\n else: # direction == 2.0, bbox\n x_left = xmin\n y_left = ymin\n x_right = xmax\n y_right = ymax\n\n # if not if_gt:\n # direction = obj.find('direction').text\n # else:\n # direct_str = 'lb2rt' if x_left < x_right and y_left < y_right else 'lt2rb'\n # direction = 1.0 if direct_str == 'lt2rb' else 0.0\n\n # line = [xmin, ymin, xmax, ymax, name, score, direction]\n line = LineSeg(\n x_left_in=int(x_left),\n y_left_in=int(y_left),\n x_right_in=int(x_right),\n y_right_in=int(y_right),\n cate_in=name,\n score_in=float(score),\n direct_in=float(direction)\n )\n lst_lines.append(line)\n\n return lst_lines\n\n\n# given ground truth objects and predicted objects.\n# given 3M/5M mask and levels of IoU and confidence to test\n# return resultant number of precision / recall objects\n# TP (True Positive): predict the box with correct category\n# TN (True Negative): predict the box with wrong category\n# FP (False Positive): predict a box when there is no box\n# FN (False Negative): predict no box when there is a box\n# Output dictionary format:\n# IoU_thres_1 -\n# Score_thres_1 - TP, FP, FN, TN\n# Score_thres_2 - TP, FP, FN, TN\n# ...\n# Score_thres_N - TP, FP, FN, TN\n# IoU_thres_2 -\n# Score_thres_1 - TP, FP, FN, TN\n# Score_thres_2 - TP, FP, FN, TN\n# ...\n# Score_thres_N - TP, FP, FN, 
TN\n# ...\n# IoU_thres_M -\n# Score_thres_1 - TP, FP, FN, TN\n# Score_thres_2 - TP, FP, FN, TN\n# ...\n# Score_thres_N - TP, FP, FN, TN\ndef cal_metric_res(\n lines_gt_in,\n lines_pred_in,\n metric='acl'\n):\n # initiate output dict\n dict_res = OrderedDict((key_iou, 0) for key_iou in LV_IOU)\n for key_iou in dict_res.keys():\n dict_res[key_iou] = OrderedDict((key_score, 0) for key_score in LV_SCORE)\n for key_score in dict_res[key_iou].keys():\n dict_res[key_iou][key_score] = {\n 'TP': 0,\n 'TN': 0,\n 'FP': 0,\n 'FN': 0,\n }\n\n # initiate output dict with Cate level:\n dict_res_perCate = OrderedDict((key, 0) for key in LABEL_MAP if key not in LABEL2SKIP)\n for key_cate in dict_res_perCate.keys():\n dict_res_perCate[key_cate] = OrderedDict((key_iou, 0) for key_iou in LV_IOU)\n for key_iou in dict_res_perCate[key_cate].keys():\n dict_res_perCate[key_cate][key_iou] = OrderedDict((key_score, 0) for key_score in LV_SCORE)\n for key_score in dict_res_perCate[key_cate][key_iou].keys():\n dict_res_perCate[key_cate][key_iou][key_score] = {\n 'TP': 0,\n 'TN': 0,\n 'FP': 0,\n 'FN': 0,\n }\n\n # ------ loop through ground truth bboxes to find FN (missed bboxes) ------\n for line in lines_gt_in:\n # Each gt should be counted as TP or FN, this is to record if it has been recorded\n if_recorded = 0\n\n # Reivse some typo in prediction cates\n line.cate = 'ground_building' if line.cate == 'ground building' else line.cate\n\n if line.cate in LABEL2SKIP or line.cate not in LABEL_MAP:\n # print(\"Found invalid line type: {}, skip.\".format(line.cate))\n continue\n # compare bbox with all ground truth bboxes\n # to find the one with largest IoU:\n max_iou = 0\n if_cate_right = False\n score_at_max_iou = 0.\n for line_pred in lines_pred_in:\n # modified to support no-semantic line segments from LSD:\n if (line_pred.cate not in LABEL_MAP and line_pred.cate != 'lineseg')\\\n or (line_pred.cate in LABEL2SKIP):\n continue\n\n if line.cate in LABEL_MAP_OBJ_KAIST and line_pred.cate in LABEL_MAP_OBJ_KAIST:\n iou = _cal_iou_line(line, line_pred)\n else:\n iou = metric_func[metric](line, line_pred)\n # print(iou)\n # iou = _cal_acl_line(line, line_pred) if metric == 'acl' else _cal_iou_line(line, line_pred)\n # iou_test = _cal_acl_line_v1(line, line_pred)\n # print(iou_test)\n if iou > max_iou:\n max_iou = iou\n score_at_max_iou = line_pred.score\n if line.cate == line_pred.cate:\n if_cate_right = True\n else:\n if_cate_right = False if line_pred.cate != 'lineseg' else True\n\n # arrange output according to levels of IoU and levels of score to check:\n for lv_iou in LV_IOU:\n # if IoU larger than current IoU threshold:\n if max_iou > lv_iou:\n for lv_score in LV_SCORE:\n # if score larger than current score threshold:\n if score_at_max_iou > lv_score:\n if if_cate_right:\n dict_res[lv_iou][lv_score]['TP'] += 1\n dict_res_perCate[line.cate][lv_iou][lv_score]['TP'] += 1\n else:\n # FP increase by 1, since this pred has no gt (diff cate)\n dict_res[lv_iou][lv_score]['FP'] += 1\n dict_res_perCate[line.cate][lv_iou][lv_score]['FP'] += 1\n\n # FN should also be increased by 1, since this gt has no pred (diff cate)\n dict_res[lv_iou][lv_score]['FN'] += 1\n dict_res_perCate[line.cate][lv_iou][lv_score]['FN'] += 1\n else:\n dict_res[lv_iou][lv_score]['FN'] += 1\n dict_res_perCate[line.cate][lv_iou][lv_score]['FN'] += 1\n else:\n for lv_score in LV_SCORE:\n dict_res[lv_iou][lv_score]['FN'] += 1\n dict_res_perCate[line.cate][lv_iou][lv_score]['FN'] += 1\n # 
-----------------------------------------------------------------------------\n\n # ------ loop through predicted bboxes to find FP (extra bboxes) ------\n for line in lines_pred_in:\n # modified to support no-semantic line segments from LSD:\n if (line.cate not in LABEL_MAP and line.cate != 'lineseg')\\\n or (line.cate in LABEL2SKIP):\n continue\n\n # compare bbox with all ground truth bboxes\n # to find the one with largest IoU:\n max_iou = 0\n if_cate_right = False\n for line_gt in lines_gt_in:\n if line_gt.cate in LABEL2SKIP:\n continue\n iou = metric_func[metric](line_gt, line)\n # iou = _cal_acl_line(line_gt, line) if metric == 'acl' else _cal_iou_line(line_gt, line)\n if iou > max_iou:\n max_iou = iou\n if line.cate == line_gt.cate:\n if_cate_right = True\n else:\n if_cate_right = False\n\n # arrange output according to levels of IoU and levels of score to check:\n score_at_max_iou = line.score\n for lv_iou in LV_IOU:\n # if IoU smaller than current IoU threshold:\n if max_iou < lv_iou:\n for lv_score in LV_SCORE:\n if score_at_max_iou > lv_score:\n dict_res[lv_iou][lv_score]['FP'] += 1\n if line.cate != 'lineseg':\n dict_res_perCate[line.cate][lv_iou][lv_score]['FP'] += 1\n # ---------------------------------------------------------------------\n\n return dict_res, dict_res_perCate\n\n\ndef cal_metric_res_AngLen(\n lines_gt_in,\n lines_pred_in,\n metric='acl'\n):\n # initiate output dict\n dict_res = OrderedDict((key_iou, 0) for key_iou in LV_IOU)\n for key_iou in dict_res.keys():\n dict_res[key_iou] = OrderedDict((key_score, 0) for key_score in LV_SCORE)\n for key_score in dict_res[key_iou].keys():\n dict_res[key_iou][key_score] = {\n 'TP': 0,\n 'TN': 0,\n 'FP': 0,\n 'FN': 0,\n }\n\n # initiate output dict with Cate level:\n dict_res_perCate = OrderedDict((key, 0) for key in LABEL_MAP if key not in LABEL2SKIP)\n for key_cate in dict_res_perCate.keys():\n dict_res_perCate[key_cate] = OrderedDict((key_iou, 0) for key_iou in LV_IOU)\n for key_iou in dict_res_perCate[key_cate].keys():\n dict_res_perCate[key_cate][key_iou] = OrderedDict((key_score, 0) for key_score in LV_SCORE)\n for key_score in dict_res_perCate[key_cate][key_iou].keys():\n dict_res_perCate[key_cate][key_iou][key_score] = {\n 'TP': 0,\n 'TN': 0,\n 'FP': 0,\n 'FN': 0,\n }\n\n # ------ loop through ground truth bboxes to find FN (missed bboxes) ------\n for line in lines_gt_in:\n # Each gt should be counted as TP or FN, this is to record if it has been recorded\n if_recorded = 0\n\n # Reivse some typo in prediction cates\n line.cate = 'ground_building' if line.cate == 'ground building' else line.cate\n\n if line.cate in LABEL2SKIP or line.cate not in LABEL_MAP:\n print(\"Found invalid line type: {}, skip.\".format(line.cate))\n continue\n # compare bbox with all ground truth bboxes\n # to find the one with largest IoU:\n max_iou = 0\n if_cate_right = False\n score_at_max_iou = 0.\n for line_pred in lines_pred_in:\n # modified to support no-semantic line segments from LSD:\n if (line_pred.cate not in LABEL_MAP and line_pred.cate != 'lineseg')\\\n or (line_pred.cate in LABEL2SKIP):\n continue\n\n if line.cate in LABEL_MAP_OBJ_KAIST and line_pred.cate in LABEL_MAP_OBJ_KAIST:\n iou = _cal_iou_line(line, line_pred)\n else:\n iou = metric_func[metric](line, line_pred)\n # print(iou)\n # iou = _cal_acl_line(line, line_pred) if metric == 'acl' else _cal_iou_line(line, line_pred)\n # iou_test = _cal_acl_line_v1(line, line_pred)\n # print(iou_test)\n if iou > max_iou:\n max_iou = iou\n score_at_max_iou = line_pred.score\n if 
line.cate == line_pred.cate:\n if_cate_right = True\n else:\n if_cate_right = False if line_pred.cate != 'lineseg' else True\n\n # arrange output according to levels of IoU and levels of score to check:\n for lv_iou in LV_IOU:\n # if IoU larger than current IoU threshold:\n if max_iou > lv_iou:\n for lv_score in LV_SCORE:\n # if score larger than current score threshold:\n if score_at_max_iou > lv_score:\n if if_cate_right:\n dict_res[lv_iou][lv_score]['TP'] += 1\n dict_res_perCate[line.cate][lv_iou][lv_score]['TP'] += 1\n else:\n # FP increase by 1, since this pred has no gt (diff cate)\n dict_res[lv_iou][lv_score]['FP'] += 1\n dict_res_perCate[line.cate][lv_iou][lv_score]['FP'] += 1\n\n # FN should also be increased by 1, since this gt has no pred (diff cate)\n dict_res[lv_iou][lv_score]['FN'] += 1\n dict_res_perCate[line.cate][lv_iou][lv_score]['FN'] += 1\n else:\n dict_res[lv_iou][lv_score]['FN'] += 1\n dict_res_perCate[line.cate][lv_iou][lv_score]['FN'] += 1\n else:\n for lv_score in LV_SCORE:\n dict_res[lv_iou][lv_score]['FN'] += 1\n dict_res_perCate[line.cate][lv_iou][lv_score]['FN'] += 1\n # -----------------------------------------------------------------------------\n\n # ------ loop through predicted bboxes to find FP (extra bboxes) ------\n for line in lines_pred_in:\n # modified to support no-semantic line segments from LSD:\n if (line.cate not in LABEL_MAP and line.cate != 'lineseg')\\\n or (line.cate in LABEL2SKIP):\n continue\n\n # compare bbox with all ground truth bboxes\n # to find the one with largest IoU:\n max_iou = 0\n if_cate_right = False\n for line_gt in lines_gt_in:\n if line_gt.cate in LABEL2SKIP:\n continue\n iou = metric_func[metric](line_gt, line)\n # iou = _cal_acl_line(line_gt, line) if metric == 'acl' else _cal_iou_line(line_gt, line)\n if iou > max_iou:\n max_iou = iou\n if line.cate == line_gt.cate:\n if_cate_right = True\n else:\n if_cate_right = False\n\n # arrange output according to levels of IoU and levels of score to check:\n score_at_max_iou = line.score\n for lv_iou in LV_IOU:\n # if IoU smaller than current IoU threshold:\n if max_iou < lv_iou:\n for lv_score in LV_SCORE:\n if score_at_max_iou > lv_score:\n dict_res[lv_iou][lv_score]['FP'] += 1\n if line.cate != 'lineseg':\n dict_res_perCate[line.cate][lv_iou][lv_score]['FP'] += 1\n # ---------------------------------------------------------------------\n\n return dict_res, dict_res_perCate\n\n\ndef cal_mAP(dict_res_perCate_in):\n mAP = 0.\n mAP3 = 0.\n cnt_nonzero = 0\n for cate, sub_dict in dict_res_perCate_in.items():\n print(\"cate: \", cate)\n cate_AP = 0.\n cate_AP3 = 0.\n if_has_gt = False # to check if there exist gt of this category\n for lv_iou in LV_IOU:\n TP = dict_res_perCate_in[cate][lv_iou][mAP_score]['TP']\n FP = dict_res_perCate_in[cate][lv_iou][mAP_score]['FP']\n FN = dict_res_perCate_in[cate][lv_iou][mAP_score]['FN']\n AP = TP / (TP + FP) if TP + FP > 0 else 0\n cate_AP += AP\n if cate in mAP_SUB:\n cate_AP3 += AP\n if_has_gt = True if TP + FN > 0 else False\n\n print(\"lv IoU: {} | TP: {} | FP: {} | FN : {} | AP: {} |\".format(\n lv_iou, TP, FP, FN, AP\n ))\n\n cate_mAP = cate_AP/len(LV_IOU)\n cate_mAP3 = cate_AP3/len(LV_IOU)\n print(\"Category {} | AP {}\".format(cate, cate_mAP))\n mAP += cate_mAP\n mAP3 += cate_mAP3\n if if_has_gt:\n cnt_nonzero += 1\n # print(\"... 
category {} has gt...\".format(cate))\n\n print(\"# of None zero AP categories: \", cnt_nonzero)\n mAP = mAP / cnt_nonzero\n mAP3 = mAP3 / len(mAP_SUB)\n print(\"mAP @ score {} : \".format(mAP_score), mAP)\n print(\"mAP{} @ score {} : \".format(len(mAP_SUB), mAP_score), mAP3)\n return mAP, mAP3\n\n\ndef cal_mAP_mAR_F1(dict_res_perCate_in):\n mAP = 0.\n mAP3 = 0.\n mAR = 0.\n mAR3 = 0.\n mF1 = 0.\n mF13 = 0.\n cnt_nonzero = 0\n for cate, sub_dict in dict_res_perCate_in.items():\n print(\"cate: \", cate)\n cate_AP = 0.\n cate_AP3 = 0.\n cate_AR = 0.\n cate_AR3 = 0.\n cate_F1 = 0.\n cate_F13 = 0.\n if_has_gt = False # to check if there exist gt of this category\n for lv_iou in LV_IOU:\n TP = dict_res_perCate_in[cate][lv_iou][mAP_score]['TP']\n FP = dict_res_perCate_in[cate][lv_iou][mAP_score]['FP']\n FN = dict_res_perCate_in[cate][lv_iou][mAP_score]['FN']\n AP = TP / (TP + FP) if TP + FP > 0 else 0\n AR = TP / (TP + FN) if TP + FN > 0 else 0\n F1 = AP * AR / (AP + AR) if AP > 0 and AR > 0 else 0\n cate_AP += AP\n cate_AR += AR\n cate_F1 += F1\n if cate in mAP_SUB:\n cate_AP3 += AP\n cate_AR3 += AR\n cate_F13 += F1\n if_has_gt = True if TP + FN > 0 else False\n\n print(\"lv IoU: {} | TP: {} | FP: {} | FN : {} | AP: {} |\".format(\n lv_iou, TP, FP, FN, AP\n ))\n\n cate_mAP = cate_AP / len(LV_IOU)\n cate_mAP3 = cate_AP3 / len(LV_IOU)\n cate_mAR = cate_AR / len(LV_IOU)\n cate_mAR3 = cate_AR3 / len(LV_IOU)\n cate_F1 = cate_F1 / len(LV_IOU)\n cate_F13 = cate_F13 / len(LV_IOU)\n print(\"Category {} | AP {} | AR {} | F1 {}\".format(cate, cate_mAP, cate_mAR, cate_F1))\n mAP += cate_mAP\n mAP3 += cate_mAP3\n mAR += cate_mAR\n mAR3 += cate_mAR3\n mF1 += cate_F1\n mF13 += cate_F13\n if if_has_gt:\n cnt_nonzero += 1\n # print(\"... category {} has gt...\".format(cate))\n\n print(\"# of None zero AP categories: \", cnt_nonzero)\n mAP = mAP / cnt_nonzero\n mAP3 = mAP3 / len(mAP_SUB)\n mAR = mAR / cnt_nonzero\n mAR3 = mAR3 / len(mAP_SUB)\n mF1 = mF1 / cnt_nonzero\n mF13 = mF13 / len(mAP_SUB)\n print(\"mAP @ score {} : \".format(mAP_score), mAP)\n print(\"mAP{} @ score {} : \".format(len(mAP_SUB), mAP_score), mAP3)\n print(\"mAR @ score {} : \".format(mAP_score), mAR)\n print(\"mAR{} @ score {} : \".format(len(mAP_SUB), mAP_score), mAR3)\n print(\"F1 @ score {} : \".format(mAP_score), mF1)\n print(\"F1{} @ score {} : \".format(len(mAP_SUB), mAP_score), mF13)\n return mAP, mAP3\n\n\ndef performLineNMS(lineSegs_in):\n lst_line = [None] * len(lineSegs_in)\n for l, lineseg in enumerate(lineSegs_in):\n line = [lineseg.x_left, lineseg.y_left, lineseg.x_right, lineseg.y_right,\n lineseg.cate, lineseg.score, lineseg.direction]\n lst_line[l] = line\n dict_line_out = nms_func[METRIC](lst_line, thres_in=METRIC_THRESH)\n # dict_line_out = do_acl_line(lst_line, thres_in=METRIC_THRESH) if METRIC == 'acl' else \\\n # do_nms_line(lst_line, thres_in=METRIC_THRESH)\n\n lineSegs_out = []\n for cate, lines in dict_line_out.items():\n if lines is None:\n continue\n for line in lines:\n lineSeg_res = LineSeg(\n x_left_in=int(line[0]),\n y_left_in=int(line[1]),\n x_right_in=int(line[2]),\n y_right_in=int(line[3]),\n cate_in=cate,\n score_in=float(line[4]),\n direct_in=float(line[5])\n )\n lineSegs_out.append(lineSeg_res)\n\n return lineSegs_out\n\n\ndef eval():\n # initiate total result dict\n dict_total = OrderedDict((key_iou, 0) for key_iou in LV_IOU)\n for key_iou in dict_total.keys():\n dict_total[key_iou] = OrderedDict((key_score, 0) for key_score in LV_SCORE)\n for key_score in dict_total[key_iou].keys():\n 
dict_total[key_iou][key_score] = {\n 'TP': 0,\n 'TN': 0,\n 'FP': 0,\n 'FN': 0,\n }\n # initiate total result dict with Cate level:\n dict_total_perCate = OrderedDict((key, 0) for key in LABEL_MAP if key not in LABEL2SKIP)\n for key_cate in dict_total_perCate.keys():\n dict_total_perCate[key_cate] = OrderedDict((key_iou, 0) for key_iou in LV_IOU)\n for key_iou in dict_total_perCate[key_cate].keys():\n dict_total_perCate[key_cate][key_iou] = OrderedDict((key_score, 0) for key_score in LV_SCORE)\n for key_score in dict_total_perCate[key_cate][key_iou].keys():\n dict_total_perCate[key_cate][key_iou][key_score] = {\n 'TP': 0,\n 'TN': 0,\n 'FP': 0,\n 'FN': 0,\n }\n\n cnt_file = 0\n cnt_gt = 0\n for file in lst_gt:\n if 'TZKJ' in SRC_PRED:\n lines_pred = get_lines_from_xml(SRC_PRED + file)\n else:\n if file not in lst_pred:\n tmpfile = file.replace('val_semantic_line', '')\n if tmpfile not in lst_pred:\n print(\"{} not found in prediction! Empty prediction...\".format(file))\n lines_pred = list()\n else:\n lines_pred = get_lines_from_xml(SRC_PRED + tmpfile)\n else:\n lines_pred = get_lines_from_xml(SRC_PRED + file)\n\n lines_pred = performLineNMS(lines_pred) # Line ACL\n lines_gt = get_lines_from_xml(SRC_GT + file, if_gt=True)\n cnt_gt += len(lines_gt)\n\n # metric_res, metric_res_perCate = cal_metric_res(lines_gt, lines_pred, metric=METRIC)\n metric_res, metric_res_perCate = cal_metric_res(lines_gt, lines_pred, metric=METRIC_EVAL)\n\n # update total results:\n for key_iou in dict_total.keys():\n for key_score in dict_total[key_iou].keys():\n dict_total[key_iou][key_score]['TP'] += metric_res[key_iou][key_score]['TP']\n dict_total[key_iou][key_score]['TN'] += metric_res[key_iou][key_score]['TN']\n dict_total[key_iou][key_score]['FP'] += metric_res[key_iou][key_score]['FP']\n dict_total[key_iou][key_score]['FN'] += metric_res[key_iou][key_score]['FN']\n\n for key_cate in LABEL_MAP:\n if key_cate in LABEL2SKIP:\n continue\n for key_iou in dict_total.keys():\n for key_score in dict_total[key_iou].keys():\n dict_total_perCate[key_cate][key_iou][key_score]['TP'] += \\\n metric_res_perCate[key_cate][key_iou][key_score]['TP']\n dict_total_perCate[key_cate][key_iou][key_score]['TN'] += \\\n metric_res_perCate[key_cate][key_iou][key_score]['TN']\n dict_total_perCate[key_cate][key_iou][key_score]['FP'] += \\\n metric_res_perCate[key_cate][key_iou][key_score]['FP']\n dict_total_perCate[key_cate][key_iou][key_score]['FN'] += \\\n metric_res_perCate[key_cate][key_iou][key_score]['FN']\n\n cnt_file += 1\n if cnt_file % 100 == 0:\n print(\"Checked {} images out of total {}...\".format(cnt_file, len(lst_pred)))\n\n # Calculate mAP:\n print(\"Calculating mAP...\")\n # mAP, mAP3 = cal_mAP(dict_res_perCate_in=dict_total_perCate)\n mAP, mAP3 = cal_mAP_mAR_F1(dict_res_perCate_in=dict_total_perCate)\n\n print(\"DONE, printing summary...\")\n\n def cal_recall_precision(tp, tn, fp, fn):\n try:\n rec = float(tp) / float(tp + fn)\n except ZeroDivisionError:\n rec = float(tp) / (float(tp + fn) + 1e-6)\n try:\n prec = float(tp) / float(tp + fp)\n except:\n prec = float(tp) / (float(tp + fp) + 1e-6)\n return rec, prec\n\n print(\"For ROC curve:\")\n txt4roc = \"\"\n for key_iou in dict_total.keys():\n TP = dict_total[key_iou][ROC_score]['TP']\n TN = dict_total[key_iou][ROC_score]['TN']\n FP = dict_total[key_iou][ROC_score]['FP']\n FN = dict_total[key_iou][ROC_score]['FN']\n recall, precision = cal_recall_precision(TP, TN, FP, FN)\n txt4roc += \"{}|{}|\".format(recall, precision)\n print(txt4roc)\n\n print(\"In total {} test 
images:\".format(len(lst_gt)))\n for key_iou in dict_total.keys():\n print(\"IoU: {}\".format(key_iou))\n for key_score in dict_total[key_iou].keys():\n print(\"\\tConf: {}\".format(key_score))\n\n TP = dict_total[key_iou][key_score]['TP']\n TN = dict_total[key_iou][key_score]['TN']\n FP = dict_total[key_iou][key_score]['FP']\n FN = dict_total[key_iou][key_score]['FN']\n num_obj = TP + TN + FN\n print(\"\\tTotal gt objects: {}\".format(num_obj))\n print(\"\\tTP: {}\\t TN: {}\\t FP: {}\\t FN:{}\".format(TP, TN, FP, FN))\n recall, precision = cal_recall_precision(TP, TN, FP, FN)\n print(\"\\tRecall: {:.4f}\\t Precision: {:.4f}\".format(recall, precision))\n\n with open(RECORD_TXT, 'w+') as res:\n res.write(\"mAP@score{}: {} | mAP{}@score{}: {}\\n\".format(mAP_score, mAP,\n len(mAP_SUB), mAP_score, mAP3))\n res.write(\"IoU\\tConfidence\\tTP\\tTN\\tFP\\tFN\\tRecall\\tPrecision\\n\")\n for key_iou in dict_total.keys():\n res.write(\"{}\".format(key_iou))\n pre = '\\t'\n for key_score in dict_total[key_iou].keys():\n res.write(\"{}{}\".format(pre, key_score))\n pre = '\\t'\n\n TP = dict_total[key_iou][key_score]['TP']\n TN = dict_total[key_iou][key_score]['TN']\n FP = dict_total[key_iou][key_score]['FP']\n FN = dict_total[key_iou][key_score]['FN']\n res.write(\"\\t{}\\t{}\\t{}\\t{}\".format(TP, TN, FP, FN))\n recall, precision = cal_recall_precision(TP, TN, FP, FN)\n res.write(\"\\t{:.4f}\\t{:.4f}\\n\".format(recall, precision))\n\n with open(RECORD_perCate_TXT, 'w+') as res:\n res.write(\"Category\\tIoU\\tConfidence\\tTP\\tTN\\tFP\\tFN\\tRecall\\tPrecision\\n\")\n for key_cate in dict_total_perCate.keys():\n res.write(\"{}\".format(key_cate))\n for key_iou in dict_total_perCate[key_cate].keys():\n pre = '\\t\\t'\n res.write(\"{}{}\".format(pre, key_iou))\n pre = '\\t'\n for key_score in dict_total_perCate[key_cate][key_iou].keys():\n res.write(\"{}{}\".format(pre, key_score))\n pre = '\\t\\t\\t'\n\n TP = dict_total_perCate[key_cate][key_iou][key_score]['TP']\n TN = dict_total_perCate[key_cate][key_iou][key_score]['TN']\n FP = dict_total_perCate[key_cate][key_iou][key_score]['FP']\n FN = dict_total_perCate[key_cate][key_iou][key_score]['FN']\n res.write(\"\\t{}\\t{}\\t{}\\t{}\".format(TP, TN, FP, FN))\n recall, precision = cal_recall_precision(TP, TN, FP, FN)\n res.write(\"\\t{:.4f}\\t{:.4f}\\n\".format(recall, precision))\n\n\nif __name__ == '__main__':\n eval()\n"
] | [
[
"numpy.sqrt",
"numpy.arctan",
"numpy.abs",
"numpy.exp"
]
] |
jidebingfeng/segmatch | [
"c662324d23b9e049fbb49b52cda7895d1a4d2798"
] | [
"segmatch/python/display_matched_segments.py"
] | [
"from __future__ import print_function\n\nimport numpy as np\n\n## LOAD DATA ##\n###############\nprint(\"Loading segments.\")\nfrom import_export import load_segments\nsegments, ids = load_segments(folder=\"/tmp/segment_matcher/\")\nfrom import_export import load_matches\nmatches = load_matches(folder=\"/tmp/segment_matcher/\")\n\nvisualize=True\nprint(\"q<Enter> to quit\")\nfor match in matches:\n if visualize:\n segment1 = segments[match[0]]\n segment2 = segments[match[1]]\n\n from matplotlib import pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n X = segment1[:,0]\n Y = segment1[:,1]\n Z = segment1[:,2]\n fig = plt.figure(1)\n plt.cla()\n ax = fig.add_subplot(1, 2, 1, projection='3d')\n ax.scatter(X, Y, Z)\n plt.pause(0.05)\n\n X = segment2[:,0]\n Y = segment2[:,1]\n Z = segment2[:,2]\n ax = fig.add_subplot(1, 2, 2, projection='3d')\n ax.scatter(X, Y, Z)\n plt.pause(0.05)\n\n ## Get User Input ##\n ####################\n keys = raw_input(\">\")\n if keys == 'q':\n visualize = False\n\nplt.close(\"all\")\n"
] | [
[
"matplotlib.pyplot.figure",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.cla",
"matplotlib.pyplot.close"
]
] |
TingquanGao/Paddle | [
"311b3b44fc7d51d4d66d90ab8a3fc0d42231afda"
] | [
"python/paddle/dataset/movielens.py"
] | [
"# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nMovielens 1-M dataset.\n\nMovielens 1-M dataset contains 1 million ratings from 6000 users on 4000\nmovies, which was collected by GroupLens Research. This module will download\nMovielens 1-M dataset from\nhttp://files.grouplens.org/datasets/movielens/ml-1m.zip and parse training\nset and test set into paddle reader creators.\n\n\"\"\"\n\nfrom __future__ import print_function\n\nimport numpy as np\nimport zipfile\nimport paddle.dataset.common\nimport paddle.utils.deprecated as deprecated\nimport re\nimport random\nimport functools\nimport six\nimport paddle.compat as cpt\n\n__all__ = [\n 'train', 'test', 'get_movie_title_dict', 'max_movie_id', 'max_user_id',\n 'age_table', 'movie_categories', 'max_job_id', 'user_info', 'movie_info'\n]\n\nage_table = [1, 18, 25, 35, 45, 50, 56]\n\n#URL = 'http://files.grouplens.org/datasets/movielens/ml-1m.zip'\nURL = 'https://dataset.bj.bcebos.com/movielens%2Fml-1m.zip'\nMD5 = 'c4d9eecfca2ab87c1945afe126590906'\n\n\nclass MovieInfo(object):\n \"\"\"\n Movie id, title and categories information are stored in MovieInfo.\n \"\"\"\n\n def __init__(self, index, categories, title):\n self.index = int(index)\n self.categories = categories\n self.title = title\n\n def value(self):\n \"\"\"\n Get information from a movie.\n \"\"\"\n return [\n self.index, [CATEGORIES_DICT[c] for c in self.categories],\n [MOVIE_TITLE_DICT[w.lower()] for w in self.title.split()]\n ]\n\n def __str__(self):\n return \"<MovieInfo id(%d), title(%s), categories(%s)>\" % (\n self.index, self.title, self.categories)\n\n def __repr__(self):\n return self.__str__()\n\n\nclass UserInfo(object):\n \"\"\"\n User id, gender, age, and job information are stored in UserInfo.\n \"\"\"\n\n def __init__(self, index, gender, age, job_id):\n self.index = int(index)\n self.is_male = gender == 'M'\n self.age = age_table.index(int(age))\n self.job_id = int(job_id)\n\n def value(self):\n \"\"\"\n Get information from a user.\n \"\"\"\n return [self.index, 0 if self.is_male else 1, self.age, self.job_id]\n\n def __str__(self):\n return \"<UserInfo id(%d), gender(%s), age(%d), job(%d)>\" % (\n self.index, \"M\"\n if self.is_male else \"F\", age_table[self.age], self.job_id)\n\n def __repr__(self):\n return str(self)\n\n\nMOVIE_INFO = None\nMOVIE_TITLE_DICT = None\nCATEGORIES_DICT = None\nUSER_INFO = None\n\n\ndef __initialize_meta_info__():\n fn = paddle.dataset.common.download(URL, \"movielens\", MD5)\n global MOVIE_INFO\n if MOVIE_INFO is None:\n pattern = re.compile(r'^(.*)\\((\\d+)\\)$')\n with zipfile.ZipFile(file=fn) as package:\n for info in package.infolist():\n assert isinstance(info, zipfile.ZipInfo)\n MOVIE_INFO = dict()\n title_word_set = set()\n categories_set = set()\n with package.open('ml-1m/movies.dat') as movie_file:\n for i, line in enumerate(movie_file):\n line = cpt.to_text(line, encoding='latin')\n movie_id, title, categories = line.strip().split('::')\n categories = 
categories.split('|')\n for c in categories:\n categories_set.add(c)\n title = pattern.match(title).group(1)\n MOVIE_INFO[int(movie_id)] = MovieInfo(\n index=movie_id, categories=categories, title=title)\n for w in title.split():\n title_word_set.add(w.lower())\n\n global MOVIE_TITLE_DICT\n MOVIE_TITLE_DICT = dict()\n for i, w in enumerate(title_word_set):\n MOVIE_TITLE_DICT[w] = i\n\n global CATEGORIES_DICT\n CATEGORIES_DICT = dict()\n for i, c in enumerate(categories_set):\n CATEGORIES_DICT[c] = i\n\n global USER_INFO\n USER_INFO = dict()\n with package.open('ml-1m/users.dat') as user_file:\n for line in user_file:\n line = cpt.to_text(line, encoding='latin')\n uid, gender, age, job, _ = line.strip().split(\"::\")\n USER_INFO[int(uid)] = UserInfo(\n index=uid, gender=gender, age=age, job_id=job)\n return fn\n\n\ndef __reader__(rand_seed=0, test_ratio=0.1, is_test=False):\n fn = __initialize_meta_info__()\n np.random.seed(rand_seed)\n with zipfile.ZipFile(file=fn) as package:\n with package.open('ml-1m/ratings.dat') as rating:\n for line in rating:\n line = cpt.to_text(line, encoding='latin')\n if (np.random.random() < test_ratio) == is_test:\n uid, mov_id, rating, _ = line.strip().split(\"::\")\n uid = int(uid)\n mov_id = int(mov_id)\n rating = float(rating) * 2 - 5.0\n\n mov = MOVIE_INFO[mov_id]\n usr = USER_INFO[uid]\n yield usr.value() + mov.value() + [[rating]]\n\n\n@deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.text.datasets.Movielens\",\n reason=\"Please use new dataset API which supports paddle.io.DataLoader\")\ndef __reader_creator__(**kwargs):\n return lambda: __reader__(**kwargs)\n\n\ntrain = functools.partial(__reader_creator__, is_test=False)\ntest = functools.partial(__reader_creator__, is_test=True)\n\n\n@deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.text.datasets.Movielens\",\n reason=\"Please use new dataset API which supports paddle.io.DataLoader\")\ndef get_movie_title_dict():\n \"\"\"\n Get movie title dictionary.\n \"\"\"\n __initialize_meta_info__()\n return MOVIE_TITLE_DICT\n\n\ndef __max_index_info__(a, b):\n if a.index > b.index:\n return a\n else:\n return b\n\n\n@deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.text.datasets.Movielens\",\n reason=\"Please use new dataset API which supports paddle.io.DataLoader\")\ndef max_movie_id():\n \"\"\"\n Get the maximum value of movie id.\n \"\"\"\n __initialize_meta_info__()\n return six.moves.reduce(__max_index_info__, list(MOVIE_INFO.values())).index\n\n\n@deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.text.datasets.Movielens\",\n reason=\"Please use new dataset API which supports paddle.io.DataLoader\")\ndef max_user_id():\n \"\"\"\n Get the maximum value of user id.\n \"\"\"\n __initialize_meta_info__()\n return six.moves.reduce(__max_index_info__, list(USER_INFO.values())).index\n\n\ndef __max_job_id_impl__(a, b):\n if a.job_id > b.job_id:\n return a\n else:\n return b\n\n\n@deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.text.datasets.Movielens\",\n reason=\"Please use new dataset API which supports paddle.io.DataLoader\")\ndef max_job_id():\n \"\"\"\n Get the maximum value of job id.\n \"\"\"\n __initialize_meta_info__()\n return six.moves.reduce(__max_job_id_impl__,\n list(USER_INFO.values())).job_id\n\n\n@deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.text.datasets.Movielens\",\n reason=\"Please use new dataset API which supports paddle.io.DataLoader\")\ndef movie_categories():\n \"\"\"\n Get movie categories dictionary.\n \"\"\"\n __initialize_meta_info__()\n return 
CATEGORIES_DICT\n\n\n@deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.text.datasets.Movielens\",\n reason=\"Please use new dataset API which supports paddle.io.DataLoader\")\ndef user_info():\n \"\"\"\n Get user info dictionary.\n \"\"\"\n __initialize_meta_info__()\n return USER_INFO\n\n\n@deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.text.datasets.Movielens\",\n reason=\"Please use new dataset API which supports paddle.io.DataLoader\")\ndef movie_info():\n \"\"\"\n Get movie info dictionary.\n \"\"\"\n __initialize_meta_info__()\n return MOVIE_INFO\n\n\ndef unittest():\n for train_count, _ in enumerate(train()()):\n pass\n for test_count, _ in enumerate(test()()):\n pass\n\n print(train_count, test_count)\n\n\n@deprecated(\n since=\"2.0.0\",\n update_to=\"paddle.text.datasets.Movielens\",\n reason=\"Please use new dataset API which supports paddle.io.DataLoader\")\ndef fetch():\n paddle.dataset.common.download(URL, \"movielens\", MD5)\n\n\nif __name__ == '__main__':\n unittest()\n"
] | [
[
"numpy.random.random",
"numpy.random.seed"
]
] |
mcvenkat/Python-Programs | [
"2ff66bbd5b07c8e093b11360e1dcac06740a5024"
] | [
"ARIMA1- Air Passengers.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Mar 29 11:20:51 2020\r\n\r\n@author: 766810\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom matplotlib import pyplot as plt\r\nfrom statsmodels.tsa.stattools import adfuller\r\nfrom statsmodels.tsa.seasonal import seasonal_decompose\r\nfrom statsmodels.tsa.arima_model import ARIMA\r\nfrom pandas.plotting import register_matplotlib_converters\r\nregister_matplotlib_converters()\r\n\r\ndf = pd.read_csv('airpassengers.csv', parse_dates = ['year'], index_col = ['year'])\r\ndf.head()\r\nplt.xlabel('Date')\r\nplt.ylabel('Number of air passengers')\r\nplt.plot(df)\r\n\r\n#Rolling Statistics\r\nrolling_mean = df.rolling(window = 12).mean()\r\nrolling_std = df.rolling(window = 12).std()\r\nplt.plot(df, color = 'blue', label = 'Original')\r\nplt.plot(rolling_mean, color = 'red', label = 'Rolling Mean')\r\nplt.plot(rolling_std, color = 'black', label = 'Rolling Std')\r\nplt.legend(loc = 'best')\r\nplt.title('Rolling Mean & Rolling Standard Deviation')\r\nplt.show()\r\n\r\n\r\n#ADF Statistics\r\nresult = adfuller(df['Passengers'])\r\nprint('ADF Statistic: {}'.format(result[0]))\r\nprint('p-value: {}'.format(result[1]))\r\nprint('Critical Values:')\r\nfor key, value in result[4].items():\r\n print('\\t{}: {}'.format(key, value))\r\n \r\ndf_log = np.log(df)\r\nplt.plot(df_log)\r\n\r\n\r\ndef get_stationarity(timeseries):\r\n \r\n # rolling statistics\r\n rolling_mean = timeseries.rolling(window=12).mean()\r\n rolling_std = timeseries.rolling(window=12).std()\r\n \r\n # rolling statistics plot\r\n original = plt.plot(timeseries, color='blue', label='Original')\r\n mean = plt.plot(rolling_mean, color='red', label='Rolling Mean')\r\n std = plt.plot(rolling_std, color='black', label='Rolling Std')\r\n plt.legend(loc='best')\r\n plt.title('Rolling Mean & Standard Deviation')\r\n plt.show(block=False)\r\n \r\n # Dickey–Fuller test:\r\n result = adfuller(timeseries['Passengers'])\r\n print('ADF Statistic: {}'.format(result[0]))\r\n print('p-value: {}'.format(result[1]))\r\n print('Critical Values:')\r\n for key, value in result[4].items():\r\n print('\\t{}: {}'.format(key, value))\r\n\r\n\r\nrolling_mean = df_log.rolling(window=12).mean()\r\ndf_log_minus_mean = df_log - rolling_mean\r\ndf_log_minus_mean.dropna(inplace=True)\r\nget_stationarity(df_log_minus_mean)\r\n\r\n\r\nrolling_mean_exp_decay = df_log.ewm(halflife=12, min_periods=0, adjust=True).mean()\r\ndf_log_exp_decay = df_log - rolling_mean_exp_decay\r\ndf_log_exp_decay.dropna(inplace=True)\r\nget_stationarity(df_log_exp_decay)\r\n\r\ndf_log_shift = df_log - df_log.shift()\r\ndf_log_shift.dropna(inplace=True)\r\nget_stationarity(df_log_shift)\r\n\r\n\r\ndecomposition = seasonal_decompose(df_log) \r\nmodel = ARIMA(df_log, order=(2,1,2))\r\nresults = model.fit(disp=-1)\r\nplt.plot(df_log_shift)\r\nplt.plot(results.fittedvalues, color='red')\r\n\r\n\r\npredictions_ARIMA_diff = pd.Series(results.fittedvalues, copy=True)\r\npredictions_ARIMA_diff_cumsum = predictions_ARIMA_diff.cumsum()\r\npredictions_ARIMA_log = pd.Series(df_log['Passengers'].iloc[0], index=df_log.index)\r\npredictions_ARIMA_log = predictions_ARIMA_log.add(predictions_ARIMA_diff_cumsum, fill_value=0)\r\npredictions_ARIMA = np.exp(predictions_ARIMA_log)\r\nplt.plot(df)\r\nplt.plot(predictions_ARIMA)\r\n\r\nresults.plot_predict(1,264)\r\n"
] | [
[
"matplotlib.pyplot.legend",
"pandas.Series",
"pandas.read_csv",
"numpy.exp",
"pandas.plotting.register_matplotlib_converters",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"numpy.log",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel"
]
] |
amongstar/https-github.com-tflearn-tflearn | [
"af57b1759c0d251313c5bcde8cbb7274bf4b08c3"
] | [
"examples/nlp/cnn_sentence_classification.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nSimple example using convolutional neural network to classify IMDB\nsentiment dataset.\n\nReferences:\n - Andrew L. Maas, Raymond E. Daly, Peter T. Pham, Dan Huang, Andrew Y. Ng,\n and Christopher Potts. (2011). Learning Word Vectors for Sentiment\n Analysis. The 49th Annual Meeting of the Association for Computational\n Linguistics (ACL 2011).\n - Kim Y. Convolutional Neural Networks for Sentence Classification[C]. \n Empirical Methods in Natural Language Processing, 2014.\n\nLinks:\n - http://ai.stanford.edu/~amaas/data/sentiment/\n - http://emnlp2014.org/papers/pdf/EMNLP2014181.pdf\n\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nimport tensorflow as tf\nimport tflearn\nfrom tflearn.layers.core import input_data, dropout, fully_connected\nfrom tflearn.layers.conv import conv_1d, global_max_pool\nfrom tflearn.layers.merge_ops import merge\nfrom tflearn.layers.estimator import regression\nfrom tflearn.data_utils import to_categorical, pad_sequences\nfrom tflearn.datasets import imdb\n\n# IMDB Dataset loading\ntrain, test, _ = imdb.load_data(path='imdb.pkl', n_words=10000,\n valid_portion=0.1)\ntrainX, trainY = train\ntestX, testY = test\n\n# Data preprocessing\n# Sequence padding\ntrainX = pad_sequences(trainX, maxlen=100, value=0.)\ntestX = pad_sequences(testX, maxlen=100, value=0.)\n# Converting labels to binary vectors\ntrainY = to_categorical(trainY, nb_classes=2)\ntestY = to_categorical(testY, nb_classes=2)\n\n# Building convolutional network\nnetwork = input_data(shape=[None, 100], name='input')\nnetwork = tflearn.embedding(network, input_dim=10000, output_dim=128)\nbranch1 = conv_1d(network, 128, 3, padding='valid', activation='relu', regularizer=\"L2\")\nbranch2 = conv_1d(network, 128, 4, padding='valid', activation='relu', regularizer=\"L2\")\nbranch3 = conv_1d(network, 128, 5, padding='valid', activation='relu', regularizer=\"L2\")\nnetwork = merge([branch1, branch2, branch3], mode='concat', axis=1)\nnetwork = tf.expand_dims(network, 2)\nnetwork = global_max_pool(network)\nnetwork = dropout(network, 0.5)\nnetwork = fully_connected(network, 2, activation='softmax')\nnetwork = regression(network, optimizer='adam', learning_rate=0.001,\n loss='categorical_crossentropy', name='target')\n# Training\nmodel = tflearn.DNN(network, tensorboard_verbose=0)\nmodel.fit(trainX, trainY, n_epoch = 5, shuffle=True, validation_set=(testX, testY), show_metric=True, batch_size=32)\n"
] | [
[
"tensorflow.expand_dims"
]
] |
Vivoe/DeepSM | [
"bc35f2bfc3758199466079ec54de1d5297374921"
] | [
"bin/evaluate_step_placement.py"
] | [
"import os\nimport argparse\n\nimport numpy as np\nfrom sklearn import metrics\n\nimport matplotlib.pyplot as plt\n\nimport torch\nimport torch.utils.data as datautils\n\nfrom deepSM import StepPlacement\nfrom deepSM import SMDUtils\nfrom deepSM import post_processing\nfrom deepSM import utils\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('placement_model', type=str)\nparser.add_argument('dataset_name', type=str)\nparser.add_argument('--n_batches', type=int, default=2000)\nparser.add_argument('--chunk_size', type=int, default=100)\nparser.add_argument('--batch_size', type=int, default=128)\n\nargs = parser.parse_args()\n\nprint(\"Testing model\", args.placement_model)\nprint(\"Datset name:\", args.dataset_name)\n\ntest_dataset = SMDUtils.get_dataset_from_file(\n args.dataset_name + '_test',\n 'placement',\n chunk_size=args.chunk_size)\n\ntest_loader = datautils.DataLoader(\n test_dataset,\n num_workers = 4,\n batch_size = args.batch_size)\n\nmodel = StepPlacement.RegularizedRecurrentStepPlacementModel()\nmodel.load_state_dict(torch.load(args.placement_model))\nmodel.cuda()\n\noutputs, labels = model.predict(test_loader, max_batches=args.n_batches)\n\npmodel_str = args.placement_model.split('/')[-1][:-3]\ntorch.save(outputs, f'outputs_{args.dataset_name}_{pmodel_str}.torch')\n\ndef evaluate(outputs, labels):\n\n def zscore(x):\n return (x - x.mean()) / np.std(x)\n\n preds = zscore(outputs) > 1.5\n acc = metrics.accuracy_score(labels, preds)\n print(\"Accuracy:\", acc)\n\n print(\"Percent positive:\", preds.mean())\n\n roc = metrics.roc_auc_score(labels, outputs)\n print(\"ROC-AUC:\", roc)\n\n precision, recall, thresh = metrics.precision_recall_curve(labels, outputs)\n\n prauc = metrics.auc(recall, precision)\n print(\"PR-AUC:\", prauc)\n\n f1 = metrics.f1_score(labels, preds)\n print(\"F1 score:\", f1)\n\nprint(\"Smoothed preds results:\")\nsmoothed_outputs = post_processing.smooth_outputs(outputs)\nevaluate(smoothed_outputs, labels)\n\nprint(\"Naive preds results:\")\nevaluate(outputs, labels)\n\n\nutils.notify(\"DONE\")\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.load",
"sklearn.metrics.auc",
"torch.save",
"numpy.std",
"sklearn.metrics.roc_auc_score",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.f1_score",
"sklearn.metrics.precision_recall_curve"
]
] |
craiggua/NaturalLanguageGen | [
"de7c83ed08eded17528f8e55c07a969e0f409e8a"
] | [
"code/LanguageGenChars_Predict.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nPurpose: Character level Natural Language Generation (NLG). This file loads a\r\n previously trained character NLG model from LanguageGenChars_train.py, \r\n and predicts subsequent chars.\r\n\r\nTo run: \r\n 1) Set constants below to be the same as the languagegenchars_train.py file\r\n 2) At Anaconda command prompt enter\r\n >> python languagegenchars_predict.py\r\n\r\n\"\"\"\r\n\r\n# ---\r\n# Libs\r\n\r\nimport numpy as np\r\nimport os\r\nfrom datetime import datetime, timedelta\r\n\r\nimport re\r\n\r\nimport tensorflow as tf\r\nfrom tensorflow.keras.models import Sequential\r\nfrom tensorflow import keras\r\nfrom tensorflow.keras.layers import LSTM, Dense, Dropout\r\nfrom tensorflow.keras import utils as keras_utils\r\n\r\n\r\n# ---\r\n# Constants\r\n\r\n# Set CURR_DIR to the subdir with this PY file. Everything else is relative to this subdir.\r\nCURR_DIR = \"C:\\\\NaturalLanguageGen\\\\code\"\r\n\r\n# Predictions reuses the previously cleaned file.\r\nINPUT_FILE = '..\\\\data\\\\Complete_Shakespeare_cleaned.txt'\r\n\r\nMODEL_WEIGHTS_FILE = \"..\\\\Saved_Model\\\\training_GenChars\\\\cp_Epoch_{epoch:02d}_Loss_{loss:.3f}.ckpt\"\r\nMODEL_WEIGHTS_DIR = os.path.dirname(MODEL_WEIGHTS_FILE)\r\n\r\n# The constants below MUST be the SAME as the model trained in LanguageGenChars_training.py.\r\nSEQ_LEN = 100\r\n#BATCH_SIZE = 256\r\nUNITS = 128\r\n\r\nNUM_CHARS_PREDICT = 200\r\n\r\n# ---\r\n# Funcs\r\n\r\ndef clean_text(text):\r\n \"\"\"\r\n Purpose: Pass a string, this func will remove everything and only leave \r\n A-Z, a-z and sentence endings. It will also remove brackets [] and \r\n everything between those brackets like [_Exit._], [_Exeunt._], etc.\r\n \"\"\"\r\n\r\n # Remove brackets and the text within the brackets. \r\n text = \"\".join(re.split(\"\\(|\\)|\\[|\\]\", text)[::2])\r\n\r\n # Remove quotes and replace with no space. \r\n text = re.sub(r\"[\\'\\\"\\‘\\’\\`\\ʹ]\", \"\", text) \r\n \r\n # Keep only a-z and sentence endings, everything else gets a space. \r\n new_string = re.sub(\"[^a-zA-Z.?!;]\", \" \", text).strip()\r\n \r\n # Remove consective spaces and leave only one space.\r\n new_string = re.sub(\" +\", \" \", new_string)\r\n \r\n new_string = new_string.lower()\r\n \r\n return(new_string)\r\n\r\n\r\n# ---\r\n# Main\r\n\r\nstart_time = datetime.now()\r\n\r\nos.chdir(CURR_DIR)\r\n\r\n# Load the previously cleaned file.\r\nwith open(INPUT_FILE, 'r', encoding='utf-8') as file:\r\n text = file.read()\r\n\r\n# Load less data for optional model evaluations below. \r\ntext = text[0:int(len(text)/4)]\r\n\r\n# NOTE: No need to clean here since the previously cleaned TXT file from \r\n# the training file is reused here. Specified above as INPUT_FILE.\r\n\r\n\r\n# NN's and other ML algorithms work with numeric data vs. text. Here set() \r\n# gets unique characters. Next, each unique character is assigned an integer\r\n# in the order in which the characters were sorted. \r\nchars = sorted(list(set(text)))\r\nchar_num_map = dict((c, i) for i, c in enumerate(chars))\r\n\r\ninput_char_len = len(text)\r\nvocab_len = len(chars)\r\n\r\nprint(\"Total number of characters overall:\", input_char_len)\r\nprint(\"Total unique characters:\", vocab_len)\r\n\r\n# Do a 1-time conversion to convert each char to it's integer representation.\r\nint_text = []\r\nint_text = [char_num_map[char] for char in text]\r\n\r\nx_seq_num = []\r\ny_pred_num = []\r\n\r\n# x_seq_num is a list of lists. The inner list is a sequence of SEQ_LEN. 
For \r\n# each input sequence, save a corresponding integer in y_pred_num[] to be \r\n# predicted for that sequence.\r\nfor i in range(0, input_char_len - SEQ_LEN, 1):\r\n \r\n # Define an input sequence of integers. \r\n x_seq_num.append(int_text[i:i + SEQ_LEN])\r\n\r\n # Holds 1 predicted integer associated with 1 x_seq_num sequence above. \r\n y_pred_num.append(int_text[i + SEQ_LEN])\r\n \r\n \r\nnum_sequences = len(x_seq_num)\r\nprint(\"\\nNumber of sequences:\", num_sequences)\r\n\r\n# Numpy reshape will reshape x_seq_num to have samples, sequence length and \r\n# input dimensions. This input is expected by our NN. \r\nX = np.reshape(x_seq_num, (num_sequences, SEQ_LEN, 1))\r\n\r\n# Normalize the integers to be within a range of zero to one. When a NN is fit \r\n# on scaled data that uses a small range of values (like zero to 1) the \r\n# network can be more effective learning the output. \r\nX = X/float(vocab_len) \r\n \r\n# One-hot encode the char numbers to be predicted. \r\ny = keras_utils.to_categorical(y_pred_num)\r\n\r\n# Define the model. \r\n# Note, If no activation function is chosen it defaults to activation = 'tanh', \r\n# however added this param to be explicit. See model.get_config() output below \r\n# for details.\r\n# Simple Model. \r\n# Must be the SAME as the model trained in LanguageGenChars_train.py.\r\nmodel = Sequential()\r\nmodel.add(LSTM(UNITS, activation='tanh', input_shape=(X.shape[1], X.shape[2]), return_sequences = False, name = \"layer_1\"))\r\nmodel.add(Dropout(0.2, name = \"layer_2\"))\r\nmodel.add(Dense(y.shape[1], activation='softmax', name = \"layer_3\"))\r\n\r\n\r\n'''\r\n# Deeper model. 100 sequence length, 3 LSTM layers, each having 700 units, trained \r\n# across 100 epochs. Takes 45 mins PER epoch on P3.2xlarge EC2 instance, very costly!\r\nmodel = Sequential()\r\nmodel.add(LSTM(UNITS, activation='tanh', input_shape=(X.shape[1], X.shape[2]), return_sequences=True))\r\nmodel.add(Dropout(0.2))\r\nmodel.add(LSTM(UNITS, return_sequences=True))\r\nmodel.add(Dropout(0.2))\r\nmodel.add(LSTM(UNITS))\r\nmodel.add(Dropout(0.2))\r\nmodel.add(Dense(y.shape[1], activation='softmax'))\r\n'''\r\n\r\nmodel.summary()\r\n\r\n# Compile the model above. \r\nmodel.compile(loss = 'categorical_crossentropy', \r\n optimizer='adam',\r\n #optimizer = RMSprop(learning_rate=0.01), # Maybe try a different optimizer and learning rate. \r\n metrics = ['accuracy'])\r\n\r\nprint(\"\\nModel Config:\\n\", model.get_config() )\r\n\r\n# Optional - Evaluate the Untrained model. \r\nprint(\"\\nEvaluating the untrained model...\")\r\nloss, acc = model.evaluate(X, y, verbose=2)\r\nprint(\"\\nUntrained model accuracy: {:5.2f}%\".format(100 * acc))\r\n\r\nmodel_weights = tf.train.latest_checkpoint(MODEL_WEIGHTS_DIR) \r\nprint(\"\\nLoading best model weight file: %s\" % model_weights)\r\nmodel.load_weights(model_weights)\r\n\r\n# Required - Re-evaluate the trained model to get it going before making predictions.\r\nprint(\"\\nEvaluating the trained model...\")\r\nloss, acc = model.evaluate(X, y, verbose=2)\r\nprint(\"\\nTrained model accuracy: {:5.2f}%\".format(100 * acc))\r\n\r\n\r\n# Make a prediction. \r\n\r\nnum_to_char = dict((i, c) for i, c in enumerate(chars))\r\n\r\n# x_seq_num is a list of lists. The inner list is numbers of SEQ_LEN long.\r\n# Get a random starting point in the inner list for 1 numeric sequence to make \r\n# a prediction below. 
\r\nstart = np.random.randint(0, len(x_seq_num) - 1)\r\nsequence = x_seq_num[start]\r\n\r\nprint(\"\\n-----\\nTry predict method 1\\n-----\")\r\nprint(\"Random Seed:\")\r\nprint(\"\\\"\", ''.join([num_to_char[value] for value in sequence]), \"\\\"\")\r\nprint(\"\\nNLG chars:\")\r\n\r\ngen_text = \"\"\r\n\r\nfor i in range(NUM_CHARS_PREDICT):\r\n \r\n # Reshape to samples, sequence length and input dimensions.\r\n x = np.reshape(sequence, (1, len(sequence), 1))\r\n \r\n # If the training file normalized the numbers between 0 to 1 then add that here.\r\n x = x / float(vocab_len) \r\n \r\n prediction = model.predict(x, verbose=0)\r\n \r\n # Prediction is for all chars. The total chars is in input_char_len above. \r\n # Need to get the highest prediction with argmax. Next, convert that \r\n # prediction index location to the predicted char. \r\n index = np.argmax(prediction)\r\n pred_char = num_to_char[index]\r\n \r\n # Save the generated text to print below. \r\n gen_text = gen_text + pred_char\r\n\r\n # Add the argmax predicted index location to our sequence then truncate the\r\n # beginning of the sequence list by 1 so that the sequence list remains \r\n # SEQ_LEN long. \r\n sequence.append(index)\r\n sequence = sequence[1:len(sequence)]\r\n\r\nprint(gen_text)\r\n\r\n\r\nstart = np.random.randint(0, len(x_seq_num) - 1)\r\nsequence = x_seq_num[start]\r\n\r\nprint(\"\\n-----\\nTry predict method 2\\n-----\")\r\nprint(\"Random Seed:\")\r\nprint(\"\\\"\", ''.join([num_to_char[value] for value in sequence]), \"\\\"\")\r\nprint(\"\\nNLG chars:\")\r\n\r\ngen_text = \"\"\r\n\r\nfor i in range(NUM_CHARS_PREDICT):\r\n x = np.reshape(sequence, (1, len(sequence), 1))\r\n \r\n # If the training file normalized the numbers between 0 to 1 then add that here.\r\n x = x / float(vocab_len) \r\n \r\n prediction = model.predict(x, verbose=0)\r\n \r\n # Predictions for each char in the vocabulary. With this prediction method, \r\n # get a random prediction with random.choice(). Next, convert that \r\n # prediction index location to the predicted char. \r\n X = prediction[0] \r\n index = np.random.choice(len(X), p=X)\r\n \r\n pred_char = num_to_char[index]\r\n \r\n # Save the generated text to print below. \r\n gen_text = gen_text + pred_char\r\n\r\n # Add the argmax predicted index location to our sequence then truncate the\r\n # beginning of the sequence list by 1 so that the sequence list remains \r\n # SEQ_LEN long. \r\n sequence.append(index)\r\n sequence = sequence[1:len(sequence)]\r\n\r\nprint(gen_text)\r\n\r\n\r\n# Print stats about the run.\r\nend_time = datetime.now()\r\nelapsed_time = end_time - start_time\r\ntime_diff_mins = elapsed_time / timedelta(minutes=1)\r\nprint(\"\\nTotal runtime %.1f minutes or %.1f hours.\" % (time_diff_mins, time_diff_mins / 60))\r\n\r\n"
] | [
[
"tensorflow.keras.models.Sequential",
"tensorflow.keras.utils.to_categorical",
"tensorflow.keras.layers.Dropout",
"numpy.reshape",
"numpy.argmax",
"tensorflow.train.latest_checkpoint",
"tensorflow.keras.layers.LSTM",
"tensorflow.keras.layers.Dense"
]
] |
stylekilla/syncmrt | [
"816bb57d80d6595719b8b9d7f027f4f17d0a6c0a"
] | [
"QsWidgets/QsMpl/tools.py"
] | [
"from matplotlib.backend_tools import ToolBase, ToolToggleBase, Cursors\nfrom PyQt5.QtCore import QObject, pyqtSignal\nimport logging\n\n\nclass ToolPickPoint(ToolToggleBase,QObject):\n\t\"\"\" Marker selection tool. \"\"\"\n\t# Tool options for matplotlib.\n\tdescription = 'Pick a point on the image'\n\timage = 'pick.png'\n\tcursor = Cursors.SELECT_REGION\n\tradio_group = 'default'\n\t# Qt5 signals.\n\tnewPoint = pyqtSignal(object,float,float)\n\n\tdef __init__(self, *args):\n\t\tToolToggleBase.__init__(self, *args)\n\t\tQObject.__init__(self)\n\t\tself._idPress = None\n\n\tdef enable(self, event):\n\t\t\"\"\"Connect press/release events and lock the canvas\"\"\"\n\t\tself.figure.canvas.widgetlock(self)\n\t\t# Add marker on button release.\n\t\tself._idPress = self.figure.canvas.mpl_connect('button_release_event', self.newMarker)\n\n\tdef disable(self,*args):\n\t\t\"\"\"Release the canvas and disconnect press/release events\"\"\"\n\t\tself.figure.canvas.widgetlock.release(self)\n\t\tself.figure.canvas.mpl_disconnect(self._idPress)\n\n\tdef trigger(self, sender, event, data=None):\n\t\t# What happens when it is triggered?\n\t\tToolToggleBase.trigger(self, sender, event, data)\n\n\tdef newMarker(self, event):\n\t\t# Need to emit axis plus location.\n\t\t# Store the data.\n\t\tif (event.button == 1):\n\t\t\tself.newPoint.emit(event.inaxes,event.xdata,event.ydata)\n\nclass ToolPickIso(ToolToggleBase,QObject):\n\t\"\"\" Marker selection tool. \"\"\"\n\t# Tool options for matplotlib.\n\tdescription = 'Pick the isocenter to treat'\n\timage = 'pickIso.png'\n\tcursor = Cursors.SELECT_REGION\n\tradio_group = 'default'\n\t# Qt5 signals.\n\tnewIsocenter = pyqtSignal(object,float,float)\n\n\tdef __init__(self, *args):\n\t\tToolToggleBase.__init__(self, *args)\n\t\tQObject.__init__(self)\n\t\tself._idPress = None\n\n\tdef enable(self, event):\n\t\t\"\"\"Connect press/release events and lock the canvas\"\"\"\n\t\tself.figure.canvas.widgetlock(self)\n\t\t# Add marker on button release.\n\t\tself._idPress = self.figure.canvas.mpl_connect('button_release_event', self.newIso)\n\n\tdef disable(self,*args):\n\t\t\"\"\"Release the canvas and disconnect press/release events\"\"\"\n\t\tself.figure.canvas.widgetlock.release(self)\n\t\tself.figure.canvas.mpl_disconnect(self._idPress)\n\n\tdef trigger(self, sender, event, data=None):\n\t\t# What happens when it is triggered?\n\t\tToolToggleBase.trigger(self, sender, event, data)\n\n\tdef newIso(self, event):\n\t\t# Need to emit axis plus location.\n\t\t# Store the data.\n\t\tif (event.button == 1):\n\t\t\tself.newIsocenter.emit(event.inaxes,event.xdata,event.ydata)\n\nclass ToolClearPoints(ToolBase,QObject):\n\t\"\"\" Clear markers tool. \"\"\"\n\tdescription = 'Clear the points in the images'\n\timage = 'clear.svg'\n\tradio_group = 'default'\n\tdefault_toggled = False\n\t# Qt5 signals.\n\tclearPoints = pyqtSignal()\n\n\tdef __init__(self, *args):\n\t\tToolToggleBase.__init__(self, *args)\n\t\tQObject.__init__(self)\n\t\tself._button_pressed = None\n\t\tself._xypress = None\n\t\tself._idPress = None\n\t\tself._idRelease = None\n\n\tdef trigger(self, sender, event, data=None):\n\t\tself.clearPoints.emit()"
] | [
[
"matplotlib.backend_tools.ToolToggleBase.__init__",
"matplotlib.backend_tools.ToolToggleBase.trigger"
]
] |
zhangjq933/HowtoSim_Script | [
"90fb8cca87d47d2c45b8ff5d07a35e8a6c846685"
] | [
"aggregate_CDF.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Oct 19 19:24:49 2019\r\n@author: mlin\r\n\r\n\"\"\"\r\nimport matplotlib.pyplot as plt\r\nfrom collections import OrderedDict\r\nimport numpy as np\r\nimport copy\r\n\r\nclass ffd(): \r\n def __init__(self, ffd_file, incident_Power_W=1):\r\n self.incident_Power_W=incident_Power_W\r\n \r\n with open(ffd_file) as f:\r\n self.theta=[int(i) for i in f.readline().split()] \r\n self.phi=[int(i) for i in f.readline().split()]\r\n f.readline()\r\n self.frequency=float(f.readline().split()[1])\r\n \r\n theta_range=np.linspace(*self.theta)\r\n phi_range= np.linspace(*self.phi)\r\n \r\n self._dtheta=theta_range[1]-theta_range[0]\r\n self._dphi=phi_range[1]-phi_range[0]\r\n self._theta=np.array([i for i in theta_range for j in phi_range]) \r\n \r\n EF=np.loadtxt(ffd_file, skiprows=4)\r\n \r\n Etheta=np.vectorize(complex)(EF[:,0], EF[:,1])\r\n Ephi=np.vectorize(complex)(EF[:,2], EF[:,3])\r\n self._EF=np.column_stack((Etheta, Ephi)) \r\n self._calculate()\r\n \r\n def __eq__(self, other):\r\n if self.theta!=other.theta:\r\n return False \r\n if self.phi!=other.phi:\r\n return False \r\n if self.frequency!=other.frequency:\r\n return False \r\n return True\r\n \r\n def __add__(self, other):\r\n if self==other:\r\n x=copy.deepcopy(self)\r\n x._EF+=other._EF\r\n x.incident_Power_W+=other.incident_Power_W\r\n x._calculate() \r\n return x\r\n \r\n def _calculate(self):\r\n pd=np.sum(np.power(np.absolute(self._EF), 2),1)/377/2\r\n self.U=max(pd)\r\n self.cell_area=np.radians(self._dtheta)*np.radians(self._dphi)*np.sin(np.radians(self._theta))\r\n #self.radiated_power=sum(self.cell_area*pd)\r\n #uniform_power=self.radiated_power/sum(self.cell_area)\r\n #self.peak_directivity=self.U/uniform_power\r\n \r\n self.realized_gain=10*np.log10(pd/(self.incident_Power_W/4/np.pi))\r\n self.peak_realized_gain=max(self.realized_gain)\r\n\r\n def compare(self, other):\r\n x=np.abs(self._EF)\r\n dx=np.abs(other._EF-self._EF)\r\n return np.amax(dx/x) \r\n \r\n def __call__(self, mag, phase):\r\n x=copy.deepcopy(self)\r\n x._EF=np.sqrt(mag)*np.exp(1j*np.radians(phase))*self._EF\r\n x.incident_Power_W=mag\r\n x._calculate()\r\n return x \r\n \r\n def getCDF(self):\r\n x, y=[], []\r\n accumulated_area=0\r\n for gain, area in sorted(zip(self.realized_gain, self.cell_area)):\r\n x.append(gain)\r\n accumulated_area+=area\r\n y.append(accumulated_area)\r\n return x, y/y[-1]\r\n \r\n def plotRealizedGain(self):\r\n plt.figure(figsize=(8, 4))\r\n size=(self.theta[2], self.phi[2])\r\n gain_map=self.realized_gain.reshape(size)\r\n plt.title('Map of Realized Gain(dB)')\r\n plt.xlabel('Phi (degree)')\r\n plt.ylabel('Theta (degree)')\r\n maxV=np.max(gain_map)\r\n [row, col] = np.where(gain_map==maxV)\r\n plt.plot(col, row, 'w*')\r\n plt.annotate(round(maxV,3), (col+3, row+3), color='white')\r\n plt.imshow(gain_map, cmap='jet')\r\n plt.colorbar()\r\n CS=plt.contour(gain_map) \r\n plt.clabel(CS, inline=1, fontsize=10)\r\n \r\nclass aggregatebeam():\r\n def __init__(self, *args):\r\n self.args=args\r\n self.max_gain=np.copy(args[0].realized_gain)\r\n self.beam_occupy=0*np.copy(self.max_gain)\r\n \r\n for beamid, i in enumerate(self.args[1:], 1):\r\n for n in range(len(self.max_gain)):\r\n if i.realized_gain[n]>self.max_gain[n]:\r\n self.beam_occupy[n]=beamid\r\n self.max_gain[n]=i.realized_gain[n]\r\n\r\n self.map_size=(args[0].theta[2], args[0].phi[2])\r\n\r\n \r\n def plotCDF(self):\r\n x, y=[], []\r\n accumulated_area=0\r\n for gain, area in sorted(zip(self.max_gain, 
self.args[0].cell_area)):\r\n x.append(gain)\r\n accumulated_area+=area\r\n y.append(accumulated_area)\r\n \r\n plt.figure()\r\n plt.title('Cumulative Distribution Function') \r\n plt.xlabel('Realized Gain (dB)')\r\n plt.ylabel('CDF')\r\n plt.grid(True)\r\n plt.plot(x, y/y[-1])\r\n plt.show()\r\n return (x, y/y[-1])\r\n\r\n \r\n def plotGainMap(self):\r\n gain_map=self.max_gain.reshape(self.map_size)\r\n \r\n plt.figure(figsize=(8, 4))\r\n plt.title('Gain Map(dB)')\r\n plt.xlabel('Phi (degree)')\r\n plt.ylabel('Theta (degree)')\r\n maxV=np.max(gain_map)\r\n [row, col] = np.where(gain_map==maxV)\r\n plt.plot(col, row, 'w*')\r\n plt.annotate(round(maxV,3), (col+3, row+3), color='white')\r\n plt.imshow(gain_map, cmap='jet')\r\n plt.colorbar()\r\n CS=plt.contour(gain_map)\r\n plt.clabel(CS, inline=1, fontsize=10) \r\n\r\n \r\n def plotBeamMap(self):\r\n beam_map=self.beam_occupy.reshape(self.map_size)\r\n \r\n plt.figure(figsize=(8, 4))\r\n plt.title('Beam Map') \r\n plt.xlabel('Phi (degree)')\r\n plt.ylabel('Theta (degree)') \r\n plt.imshow(beam_map, cmap='rainbow')\r\n plt.colorbar() \r\n plt.contour(beam_map)\r\n \r\ndef plotCDFtable(table, png=None):\r\n '''table={'A':(gain , cdf), 'B':(gain, cdf), }'''\r\n \r\n plt.figure()\r\n plt.title('Cumulative Distribution Function') \r\n plt.xlabel('Realized Gain (dB)')\r\n plt.ylabel('CDF')\r\n plt.grid(True)\r\n for i in table:\r\n plt.plot(*table[i], label=i)\r\n plt.legend()\r\n if png:\r\n plt.savefig(png) \r\n plt.show()\r\n\r\n \r\n \r\n#%%\r\npath='D:\\OneDrive - ANSYS, Inc/Workshop/2019/2019_Q4_5G_Array_Modula_Analysis/28000000000/'\r\nx1=ffd(path+'4x2_array1_Module_0_Bump_h1.ffd')\r\nx2=ffd(path+'4x2_array1_Module_0_Bump_h2.ffd')\r\nx3=ffd(path+'4x2_array1_Module_0_Bump_h3.ffd')\r\nx4=ffd(path+'4x2_array1_Module_0_Bump_h4.ffd')\r\n\r\n\r\n#%%\r\n\r\nbeam0=x1(1,0) +x2(1,0) +x3(1,0) +x4(1,0)\r\n#beam0.plotRealizedGain()\r\nbeam1=x1(1,0) +x2(1,75) +x3(1,150) +x4(1,225)\r\n#beam1.plotRealizedGain()\r\nbeam2=x1(1,0) +x2(1,150) +x3(1,300) +x4(1,450)\r\n#beam2.plotRealizedGain()\r\n\r\ntable=OrderedDict()\r\nz0=aggregatebeam(beam0, beam1, beam2)\r\ntable['z0']=z0.plotCDF()\r\nz1=aggregatebeam(beam0, beam1)\r\ntable['z1']=z1.plotCDF()\r\nz2=aggregatebeam(beam1, beam2)\r\ntable['z2']=z2.plotCDF()\r\nz3=aggregatebeam(beam0, beam2)\r\ntable['z3']=z3.plotCDF()\r\nplotCDFtable(table, 'd:/demo/aaa.png')\r\n"
] | [
[
"numpy.vectorize",
"numpy.copy",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.ylabel",
"numpy.amax",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"numpy.abs",
"matplotlib.pyplot.title",
"matplotlib.pyplot.contour",
"numpy.log10",
"numpy.absolute",
"numpy.where",
"numpy.linspace",
"numpy.sqrt",
"matplotlib.pyplot.clabel",
"numpy.column_stack",
"numpy.max",
"matplotlib.pyplot.colorbar",
"numpy.radians",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.show",
"numpy.array",
"matplotlib.pyplot.xlabel",
"numpy.loadtxt"
]
] |
akac0297/PETLAB | [
"950cc153ce230d12d752ad0d11111e7fc22d9e7d"
] | [
"Radiomics/Radiomics MPE images.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\nimport SimpleITK as sitk\nimport pandas as pd\nimport numpy as np\nimport radiomics\n\ndef radiomics_analysis(image_filepath, mask_filepath,img_label):\n img = sitk.ReadImage(image_filepath)\n mask = sitk.ReadImage(mask_filepath)\n \n #Z-score normalisation for MRI\n if img_label==(\"B50T_CAD_ADC_3000\" or \"B50_800 ADC\" or \"MPE\"):\n z_norm=False\n else:\n z_norm=True\n \n if z_norm==True:\n img_arr=sitk.GetArrayFromImage(img)\n img_mean=np.mean(img_arr)\n img_std=np.std(img_arr)\n img=sitk.Cast(img,sitk.sitkInt16)\n img=(img-img_mean)/img_std\n elif z_norm==False:\n img_arr=sitk.GetArrayFromImage(img)\n img=sitk.Cast(img,sitk.sitkInt16)\n\n #Grey-level discretisation for MRI \n img_arr = sitk.GetArrayFromImage(img) \n mask=sitk.Resample(mask,img)\n \n bin_number=512\n min_arr=np.min(img_arr)\n max_arr=np.max(img_arr)\n img_arr[img_arr!=np.max(img_arr)]=np.floor(bin_number*(img_arr[img_arr!=np.max(img_arr)]-min_arr)/(max_arr-min_arr))+1\n img_arr[img_arr==np.max(img_arr)]=bin_number\n \n new_img_arr = img_arr\n new_img=sitk.GetImageFromArray(new_img_arr)\n new_img.CopyInformation(img)\n img=new_img\n \n extractor = radiomics.firstorder.RadiomicsFirstOrder(img, mask)\n dict1 = extractor.execute()\n extractor_2 = radiomics.shape.RadiomicsShape(img, mask)\n dict2 = extractor_2.execute()\n extractor_3 = radiomics.glcm.RadiomicsGLCM(img, mask)\n dict3 = extractor_3.execute()\n extractor_4 = radiomics.glszm.RadiomicsGLSZM(img, mask)\n dict4 = extractor_4.execute()\n extractor_5 = radiomics.glrlm.RadiomicsGLRLM(img, mask)\n dict5 = extractor_5.execute()\n extractor_6 = radiomics.ngtdm.RadiomicsNGTDM(img, mask)\n dict6 = extractor_6.execute()\n extractor_7 = radiomics.gldm.RadiomicsGLDM(img, mask)\n dict7 = extractor_7.execute()\n \n dict1.update(dict2)\n dict1.update(dict3)\n dict1.update(dict4)\n dict1.update(dict5)\n dict1.update(dict6)\n dict1.update(dict7)\n new_img_label=img_label\n if img_label==\"B50T_CAD_ADC_3000\":\n new_img_label=\"B50T_CAD_ADC_3000 no norm\"\n if img_label==\"B50_800 ADC\":\n new_img_label=\"B50_800 ADC no norm\"\n dict1.update({'image label': new_img_label})\n\n return(dict1)\n\nmask_list=['new_seg_003_2_mri.nii.gz', 'new_seg_004_4_mri.nii.gz', 'new_seg_004_5_mri.nii.gz', 'new_seg_004_6_mri.nii.gz', 'new_seg_005_4_mri.nii.gz', 'new_seg_005_5_mri.nii.gz', 'new_seg_005_6_mri.nii.gz', 'new_seg_006_4_mri.nii.gz', 'new_seg_006_5_mri.nii.gz', 'new_seg_006_6_mri.nii.gz', 'new_seg_007_4_mri.nii.gz', 'new_seg_007_5_mri.nii.gz', 'new_seg_007_6_mri.nii.gz', 'new_seg_008_4_mri.nii.gz', 'new_seg_008_5_mri.nii.gz', 'new_seg_008_6_mri.nii.gz', 'new_seg_009_6_mri.nii.gz', 'new_seg_009_7_mri.nii.gz', 'new_seg_009_8_mri.nii.gz', 'new_seg_010_4_mri.nii.gz', 'new_seg_010_5_mri.nii.gz', 'new_seg_010_6_mri.nii.gz', 'new_seg_012_4_mri.nii.gz', 'new_seg_012_5_mri.nii.gz', 'new_seg_012_6_mri.nii.gz', 'new_seg_013_4_mri.nii.gz', 'new_seg_013_5_mri.nii.gz', 'new_seg_013_6_mri.nii.gz', 'new_seg_014_4_mri.nii.gz', 'new_seg_014_5_mri.nii.gz', 'new_seg_014_6_mri.nii.gz', 'new_seg_015_4_mri.nii.gz', 'new_seg_015_5_mri.nii.gz', 'new_seg_015_6_mri.nii.gz', 'new_seg_016_3_mri.nii.gz', 'new_seg_016_4_mri.nii.gz', 'new_seg_016_5_mri.nii.gz', 'new_seg_017_3_mri.nii.gz', 'new_seg_018_4_mri.nii.gz', 'new_seg_018_5_mri.nii.gz', 'new_seg_018_6_mri.nii.gz', 'new_seg_019_3_mri.nii.gz', 'new_seg_019_4_mri.nii.gz', 'new_seg_019_5_mri.nii.gz', 'new_seg_021_2_mri.nii.gz', 'new_seg_021_3_mri.nii.gz', 'new_seg_021_4_mri.nii.gz', 'new_seg_023_2_mri.nii.gz', 
'new_seg_023_3_mri.nii.gz', 'new_seg_023_4_mri.nii.gz', 'new_seg_024_3_mri.nii.gz', 'new_seg_024_4_mri.nii.gz', 'new_seg_024_5_mri.nii.gz']\nMPE_list=['max_img_WES_003_2.nii.gz', 'MPE_sub_WES_004_4.nii.gz', 'MPE_sub_WES_004_5.nii.gz', 'MPE_sub_WES_004_6.nii.gz', 'MPE_sub_WES_005_4.nii.gz', 'MPE_sub_WES_005_5.nii.gz', 'MPE_sub_WES_005_6.nii.gz', 'max_img_WES_006_4.nii.gz', 'max_img_WES_006_5.nii.gz', 'max_img_WES_006_6.nii.gz', 'max_img_WES_007_4.nii.gz', 'max_img_WES_007_5.nii.gz', 'max_img_WES_007_6.nii.gz', 'MPE_sub_WES_008_4.nii.gz', 'MPE_sub_WES_008_5.nii.gz', 'MPE_sub_WES_008_6.nii.gz', 'MPE_sub_WES_009_6.nii.gz', 'MPE_sub_WES_009_7.nii.gz', 'MPE_sub_WES_009_8.nii.gz', 'MPE_sub_WES_010_4.nii.gz', 'MPE_sub_WES_010_5.nii.gz', 'MPE_sub_WES_010_6.nii.gz', 'MPE_sub_WES_012_4.nii.gz', 'MPE_sub_WES_012_5.nii.gz', 'MPE_sub_WES_012_6.nii.gz', 'max_img_WES_013_4.nii.gz', 'max_img_WES_013_5.nii.gz', 'max_img_WES_013_6.nii.gz', 'max_img_WES_014_4.nii.gz', 'max_img_WES_014_5.nii.gz', 'max_img_WES_014_6.nii.gz', 'max_img_WES_015_4.nii.gz', 'max_img_WES_015_5.nii.gz', 'max_img_WES_015_6.nii.gz', 'max_img_WES_016_3.nii.gz', 'max_img_WES_016_4.nii.gz', 'max_img_WES_016_5.nii.gz', 'max_img_WES_017_3.nii.gz', 'max_img_WES_018_4.nii.gz', 'max_img_WES_018_5.nii.gz', 'max_img_WES_018_6.nii.gz', 'max_img_WES_019_3.nii.gz', 'max_img_WES_019_4.nii.gz', 'max_img_WES_019_5.nii.gz', 'max_img_WES_021_2.nii.gz', 'max_img_WES_021_3.nii.gz', 'max_img_WES_021_4.nii.gz', 'max_img_WES_023_2.nii.gz', 'max_img_WES_023_3.nii.gz', 'max_img_WES_023_4.nii.gz', 'max_img_WES_024_3.nii.gz', 'max_img_WES_024_4.nii.gz', 'max_img_WES_024_5.nii.gz']\nsphere_list=['image_sphere_WES_003_2.nii.gz' 'image_sphere_WES_004_4.nii.gz', 'image_sphere_WES_004_5.nii.gz', 'image_sphere_WES_004_6.nii.gz', 'image_sphere_WES_005_4.nii.gz', 'image_sphere_WES_005_5.nii.gz', 'image_sphere_WES_005_6.nii.gz', 'image_sphere_WES_006_4.nii.gz', 'image_sphere_WES_006_5.nii.gz', 'image_sphere_WES_006_6.nii.gz', 'image_sphere_WES_007_4.nii.gz', 'image_sphere_WES_007_5.nii.gz', 'image_sphere_WES_007_6.nii.gz', 'image_sphere_WES_008_4.nii.gz', 'image_sphere_WES_008_5.nii.gz', 'image_sphere_WES_008_6.nii.gz', 'image_sphere_WES_009_6.nii.gz', 'image_sphere_WES_009_7.nii.gz', 'image_sphere_WES_009_8.nii.gz', 'image_sphere_WES_010_4.nii.gz', 'image_sphere_WES_010_5.nii.gz', 'image_sphere_WES_010_6.nii.gz', 'image_sphere_WES_012_4.nii.gz', 'image_sphere_WES_012_5.nii.gz', 'image_sphere_WES_012_6.nii.gz', 'image_sphere_WES_013_4.nii.gz', 'image_sphere_WES_013_5.nii.gz', 'image_sphere_WES_013_6.nii.gz', 'image_sphere_WES_014_4.nii.gz', 'image_sphere_WES_014_5.nii.gz', 'image_sphere_WES_014_6.nii.gz', 'image_sphere_WES_015_4.nii.gz', 'image_sphere_WES_015_5.nii.gz', 'image_sphere_WES_015_6.nii.gz', 'image_sphere_WES_016_3.nii.gz', 'image_sphere_WES_016_4.nii.gz', 'image_sphere_WES_016_5.nii.gz', 'image_sphere_WES_017_3.nii.gz', 'image_sphere_WES_018_4.nii.gz', 'image_sphere_WES_018_5.nii.gz', 'image_sphere_WES_018_6.nii.gz', 'image_sphere_WES_019_3.nii.gz', 'image_sphere_WES_019_4.nii.gz', 'image_sphere_WES_019_5.nii.gz', 'image_sphere_WES_021_2.nii.gz', 'image_sphere_WES_021_3.nii.gz', 'image_sphere_WES_021_4.nii.gz', 'image_sphere_WES_023_2.nii.gz', 'image_sphere_WES_023_3.nii.gz', 'image_sphere_WES_023_4.nii.gz', 'image_sphere_WES_024_3.nii.gz', 'image_sphere_WES_024_4.nii.gz', 'image_sphere_WES_024_5.nii.gz']\n\ndf=pd.DataFrame()\nfor image in range(0,10):\n dict1=radiomics_analysis(image_filepath=MPE_list[image], 
mask_filepath=mask_list[image],img_label=\"MPE\")\n dict1.update({'Patient': str(mask_list[image][9:11])})\n dict1.update({'Timepoint': str(mask_list[image][12:13])})\n df=df.append(dict1,ignore_index=True)\ndf.to_csv(\"./df_MPE_tumour_0_to_9.csv\")\n\ndf=pd.DataFrame()\nfor image in range(0,10):\n dict1=radiomics_analysis(image_filepath=MPE_list[image], mask_filepath=sphere_list[image],img_label=\"MPE\")\n dict1.update({'Patient':str(sphere_list[image][18:20])})\n dict1.update({'Timepoint': str(sphere_list[image][21:22])})\n df=df.append(dict1,ignore_index=True)\ndf.to_csv(\"./df_MPE_sphere_0_to_9.csv\")\n\ndf=pd.DataFrame()\nfor image in range(10,20):\n dict1=radiomics_analysis(image_filepath=MPE_list[image], mask_filepath=mask_list[image],img_label=\"MPE\")\n dict1.update({'Patient': str(mask_list[image][9:11])})\n dict1.update({'Timepoint': str(mask_list[image][12:13])})\n df=df.append(dict1,ignore_index=True)\ndf.to_csv(\"./df_MPE_tumour_10_to_19.csv\")\n\ndf=pd.DataFrame()\nfor image in range(10,20):\n dict1=radiomics_analysis(image_filepath=MPE_list[image], mask_filepath=sphere_list[image],img_label=\"MPE\")\n dict1.update({'Patient':str(sphere_list[image][18:20])})\n dict1.update({'Timepoint': str(sphere_list[image][21:22])})\n df=df.append(dict1,ignore_index=True)\ndf.to_csv(\"./df_MPE_sphere_10_to_19.csv\")\n\ndf=pd.DataFrame()\nfor image in range(20,30):\n dict1=radiomics_analysis(image_filepath=MPE_list[image], mask_filepath=mask_list[image],img_label=\"MPE\")\n dict1.update({'Patient': str(mask_list[image][9:11])})\n dict1.update({'Timepoint': str(mask_list[image][12:13])})\n df=df.append(dict1,ignore_index=True)\ndf.to_csv(\"./df_MPE_tumour_20_to_29.csv\")\n\ndf=pd.DataFrame()\nfor image in range(20,30):\n dict1=radiomics_analysis(image_filepath=MPE_list[image], mask_filepath=sphere_list[image],img_label=\"MPE\")\n dict1.update({'Patient':str(sphere_list[image][18:20])})\n dict1.update({'Timepoint': str(sphere_list[image][21:22])})\n df=df.append(dict1,ignore_index=True)\ndf.to_csv(\"./df_MPE_sphere_20_to_29.csv\")\n\ndf=pd.DataFrame()\nfor image in range(30,40):\n dict1=radiomics_analysis(image_filepath=MPE_list[image], mask_filepath=mask_list[image],img_label=\"MPE\")\n dict1.update({'Patient': str(mask_list[image][9:11])})\n dict1.update({'Timepoint': str(mask_list[image][12:13])})\n df=df.append(dict1,ignore_index=True)\ndf.to_csv(\"./df_MPE_tumour_30_to_39.csv\")\n\ndf=pd.DataFrame()\nfor image in range(30,40):\n dict1=radiomics_analysis(image_filepath=MPE_list[image], mask_filepath=sphere_list[image],img_label=\"MPE\")\n dict1.update({'Patient':str(sphere_list[image][18:20])})\n dict1.update({'Timepoint': str(sphere_list[image][21:22])})\n df=df.append(dict1,ignore_index=True)\ndf.to_csv(\"./df_MPE_sphere_30_to_39.csv\")\n\ndf=pd.DataFrame()\nfor image in range(40,53):\n dict1=radiomics_analysis(image_filepath=MPE_list[image], mask_filepath=mask_list[image],img_label=\"MPE\")\n dict1.update({'Patient': str(mask_list[image][9:11])})\n dict1.update({'Timepoint': str(mask_list[image][12:13])})\n df=df.append(dict1,ignore_index=True)\ndf.to_csv(\"./df_MPE_tumour_40_to_53.csv\")\n\ndf=pd.DataFrame()\nfor image in range(40,53):\n dict1=radiomics_analysis(image_filepath=MPE_list[image], mask_filepath=sphere_list[image],img_label=\"MPE\")\n dict1.update({'Patient':str(sphere_list[image][18:20])})\n dict1.update({'Timepoint': str(sphere_list[image][21:22])})\n 
df=df.append(dict1,ignore_index=True)\ndf.to_csv(\"./df_MPE_sphere_40_to_53.csv\")\n\ndf1=pd.read_csv(\"./df_MPE_tumour_0_to_9.csv\")\ndf2=pd.read_csv(\"./df_MPE_tumour_10_to_19.csv\")\ndf3=pd.read_csv(\"./df_MPE_tumour_20_to_29.csv\")\ndf4=pd.read_csv(\"./df_MPE_tumour_30_to_39.csv\")\ndf5=pd.read_csv(\"./df_MPE_tumour_40_to_53.csv\")\ndf=df1.append(df2)\ndf=df.append(df3)\ndf=df.append(df4)\ndf=df.append(df5)\ndf.to_csv(\"./df_MPE_tumours.csv\")\n\ndf1=pd.read_csv(\"./df_MPE_sphere_0_to_9.csv\")\ndf2=pd.read_csv(\"./df_MPE_sphere_10_to_19.csv\")\ndf3=pd.read_csv(\"./df_MPE_sphere_20_to_29.csv\")\ndf4=pd.read_csv(\"./df_MPE_sphere_30_to_39.csv\")\ndf5=pd.read_csv(\"./df_MPE_sphere_40_to_53.csv\")\ndf=df1.append(df2)\ndf=df.append(df3)\ndf=df.append(df4)\ndf=df.append(df5)\ndf.to_csv(\"./df_MPE_spheres.csv\")\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame",
"numpy.max",
"numpy.min",
"numpy.std",
"numpy.mean"
]
] |
uri-granta/GPflow | [
"94b432847cb82c7627a57987f5c5ddd7fc400414"
] | [
"tests/gpflow/posteriors/test_bo_integration.py"
] | [
"# Copyright 2022 The GPflow Contributors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Callable, DefaultDict, Dict, Iterator, Mapping, Set, Tuple, Type, TypeVar\n\nimport numpy as np\nimport pytest\nimport tensorflow as tf\nfrom _pytest.fixtures import SubRequest\n\nimport gpflow\nfrom gpflow.base import RegressionData\nfrom gpflow.config import default_float\nfrom gpflow.inducing_variables import InducingPoints, InducingVariables\nfrom gpflow.kernels import Kernel, Matern52\nfrom gpflow.likelihoods import Exponential, Likelihood\nfrom gpflow.models import GPR, SGPR, SVGP, VGP, GPModel, training_loss_closure\nfrom gpflow.models.vgp import update_vgp_data\nfrom gpflow.posteriors import AbstractPosterior, PrecomputeCacheType\n\n_CreateModel = Callable[[RegressionData], GPModel]\n_C = TypeVar(\"_C\", bound=_CreateModel)\n\n_MULTI_OUTPUT = \"multi_output\"\n_MODEL_FACTORIES: Dict[_CreateModel, Mapping[str, Any]] = {}\n\n# This exists to make it easy to disable tf.function, for debugging.\n_COMPILE = True\n_MAXITER = 10\n_DEFAULT_ATOL = 1e-10\n_DEFAULT_RTOL = 1e-7\n\n\[email protected](name=\"register_posterior_bo_integration_test\")\ndef _register_posterior_bo_integration_test(\n request: SubRequest,\n tested_posteriors: DefaultDict[str, Set[Type[AbstractPosterior]]],\n) -> Callable[[AbstractPosterior], None]:\n def _register_posterior(posterior: AbstractPosterior) -> None:\n tested_posteriors[request.function.__name__].add(posterior.__class__)\n\n return _register_posterior\n\n\ndef model_factory(\n *flags: str, atol: float = _DEFAULT_ATOL, rtol: float = _DEFAULT_RTOL\n) -> Callable[[_C], _C]:\n \"\"\" Decorator for adding a function to the `_MODEL_FACTORIES` list. 
\"\"\"\n\n properties = {\n \"atol\": atol,\n \"rtol\": rtol,\n **{flag: True for flag in flags},\n }\n\n def register(create_model: _C) -> _C:\n _MODEL_FACTORIES[create_model] = properties\n return create_model\n\n return register\n\n\ndef create_kernel() -> Kernel:\n return Matern52()\n\n\ndef create_likelihood() -> Likelihood:\n return Exponential()\n\n\ndef create_inducing_points(data: RegressionData) -> InducingPoints:\n n_features = data[0].shape[1]\n n_inducing_points = 5\n rng = np.random.default_rng(20220208)\n Z = tf.constant(rng.random((n_inducing_points, n_features)))\n return InducingPoints(Z)\n\n\ndef create_q(\n inducing_variable: InducingVariables, *, row_scale: int = 1, column_scale: int = 1\n) -> Tuple[bool, tf.Tensor, tf.Tensor]:\n n_inducing_points = inducing_variable.num_inducing\n rng = np.random.default_rng(20220133)\n q_diag = True\n q_mu = tf.constant(rng.random((row_scale * n_inducing_points, column_scale)))\n q_sqrt = tf.constant(rng.random((row_scale * n_inducing_points, column_scale))) ** 2\n return q_diag, q_mu, q_sqrt\n\n\n@model_factory(rtol=1e-3)\ndef create_gpr(data: RegressionData) -> GPR:\n return GPR(data=data, kernel=create_kernel())\n\n\n@model_factory(rtol=1e-4)\ndef create_sgpr(data: RegressionData) -> SGPR:\n return SGPR(data=data, kernel=create_kernel(), inducing_variable=create_inducing_points(data))\n\n\n@model_factory(rtol=5e-3)\ndef create_vgp(data: RegressionData) -> VGP:\n return VGP(data=data, kernel=create_kernel(), likelihood=create_likelihood())\n\n\n@model_factory()\ndef create_svgp__independent_single_output(data: RegressionData) -> SVGP:\n inducing_variable = create_inducing_points(data)\n q_diag, q_mu, q_sqrt = create_q(inducing_variable)\n return SVGP(\n kernel=create_kernel(),\n likelihood=create_likelihood(),\n inducing_variable=inducing_variable,\n q_diag=q_diag,\n q_mu=q_mu,\n q_sqrt=q_sqrt,\n )\n\n\n@model_factory(_MULTI_OUTPUT)\ndef create_svgp__fully_correlated_multi_output(data: RegressionData) -> SVGP:\n n_outputs = data[1].shape[1]\n kernel = gpflow.kernels.SharedIndependent(create_kernel(), output_dim=n_outputs)\n inducing_variable = create_inducing_points(data)\n q_diag, q_mu, q_sqrt = create_q(inducing_variable, row_scale=n_outputs)\n return SVGP(\n kernel=kernel,\n likelihood=create_likelihood(),\n inducing_variable=inducing_variable,\n q_diag=q_diag,\n q_mu=q_mu,\n q_sqrt=q_sqrt,\n )\n\n\n@model_factory(_MULTI_OUTPUT)\ndef create_svgp__independent_multi_output(data: RegressionData) -> SVGP:\n n_outputs = data[1].shape[1]\n kernel = gpflow.kernels.SharedIndependent(create_kernel(), output_dim=n_outputs)\n inducing_variable = gpflow.inducing_variables.SharedIndependentInducingVariables(\n create_inducing_points(data)\n )\n q_diag, q_mu, q_sqrt = create_q(inducing_variable, column_scale=n_outputs)\n return SVGP(\n kernel=kernel,\n likelihood=create_likelihood(),\n inducing_variable=inducing_variable,\n q_diag=q_diag,\n q_mu=q_mu,\n q_sqrt=q_sqrt,\n )\n\n\n@model_factory(_MULTI_OUTPUT)\ndef create_svgp__fallback_independent_latent_posterior(data: RegressionData) -> SVGP:\n n_outputs = data[1].shape[1]\n rng = np.random.default_rng(20220131)\n kernel = gpflow.kernels.LinearCoregionalization(\n [create_kernel()],\n W=tf.constant(rng.standard_normal((n_outputs, 1))),\n )\n inducing_variable = gpflow.inducing_variables.FallbackSeparateIndependentInducingVariables(\n [create_inducing_points(data)]\n )\n q_diag, q_mu, q_sqrt = create_q(inducing_variable)\n return SVGP(\n kernel=kernel,\n likelihood=create_likelihood(),\n 
inducing_variable=inducing_variable,\n q_diag=q_diag,\n q_mu=q_mu,\n q_sqrt=q_sqrt,\n )\n\n\n@model_factory(_MULTI_OUTPUT)\ndef create_svgp__linear_coregionalization(data: RegressionData) -> SVGP:\n n_outputs = data[1].shape[1]\n rng = np.random.default_rng(20220131)\n kernel = gpflow.kernels.LinearCoregionalization(\n [create_kernel()], W=tf.constant(rng.standard_normal((n_outputs, 1)))\n )\n inducing_variable = gpflow.inducing_variables.SharedIndependentInducingVariables(\n create_inducing_points(data)\n )\n q_diag, q_mu, q_sqrt = create_q(inducing_variable)\n return SVGP(\n kernel=kernel,\n likelihood=create_likelihood(),\n inducing_variable=inducing_variable,\n q_diag=q_diag,\n q_mu=q_mu,\n q_sqrt=q_sqrt,\n )\n\n\n@pytest.fixture(params=_MODEL_FACTORIES)\ndef _create_model(request: SubRequest) -> _CreateModel:\n return request.param\n\n\n@pytest.fixture\ndef _multi_output(_create_model: _CreateModel) -> bool:\n return _MULTI_OUTPUT in _MODEL_FACTORIES[_create_model]\n\n\n@pytest.fixture\ndef _rtol(_create_model: _CreateModel) -> float:\n return _MODEL_FACTORIES[_create_model][\"rtol\"]\n\n\n@pytest.fixture\ndef _atol(_create_model: _CreateModel) -> float:\n return _MODEL_FACTORIES[_create_model][\"atol\"]\n\n\n@pytest.fixture\ndef _f_minimum(_multi_output: bool) -> tf.Tensor:\n return (\n tf.constant(\n [\n [0.2, 0.4],\n [0.4, 0.6],\n [0.6, 0.8],\n ],\n dtype=default_float(),\n )\n if _multi_output\n else tf.constant([[0.3, 0.5]], dtype=default_float())\n )\n\n\n@pytest.fixture\ndef _f(_f_minimum: tf.Tensor) -> Callable[[tf.Tensor], tf.Tensor]:\n def f(X: tf.Tensor) -> tf.Tensor:\n err = X[:, None, :] - _f_minimum[None, :, :]\n err_sq = err ** 2\n return tf.reduce_sum(err_sq, axis=-1)\n\n return f\n\n\n@pytest.fixture\ndef _data(\n _f: Callable[[tf.Tensor], tf.Tensor], _f_minimum: tf.Tensor\n) -> Tuple[tf.Variable, tf.Variable]:\n n_initial_data = 3\n n_outputs, n_features = _f_minimum.shape\n\n rng = np.random.default_rng(20220126)\n X = tf.Variable(\n rng.random((n_initial_data, n_features)),\n shape=[None, n_features],\n dtype=default_float(),\n trainable=False,\n )\n Y = tf.Variable(\n _f(X),\n shape=[None, n_outputs],\n dtype=default_float(),\n trainable=False,\n )\n\n return X, Y\n\n\n@pytest.fixture\ndef _extend_data(\n _data: Tuple[tf.Variable, tf.Variable], _f: Callable[[tf.Tensor], tf.Tensor]\n) -> Callable[[GPModel], Iterator[int]]:\n n_iterations = 3\n rng = np.random.default_rng(20220127)\n X, Y = _data\n n_features = X.shape[1]\n\n def iterate(model: GPModel) -> Iterator[int]:\n for i in range(n_iterations):\n X_new = tf.constant(rng.random((1, n_features)))\n Y_new = _f(X_new)\n X_i = tf.concat([X, X_new], axis=0)\n Y_i = tf.concat([Y, Y_new], axis=0)\n\n if isinstance(model, VGP):\n update_vgp_data(model, (X_i, Y_i))\n else:\n X.assign(X_i)\n Y.assign(Y_i)\n yield i\n\n return iterate\n\n\n@pytest.fixture\ndef _X_new(_data: Tuple[tf.Variable, tf.Variable]) -> tf.Tensor:\n rng = np.random.default_rng(20220128)\n X, _Y = _data\n n_features = X.shape[1]\n return tf.constant(rng.random((3, n_features)))\n\n\n@pytest.fixture\ndef _optimize(_data: Tuple[tf.Variable, tf.Variable]) -> Callable[[GPModel], None]:\n def optimize(model: GPModel) -> None:\n gpflow.optimizers.Scipy().minimize(\n training_loss_closure(model, _data, compile=_COMPILE),\n variables=model.trainable_variables,\n options=dict(maxiter=_MAXITER),\n method=\"BFGS\",\n compile=_COMPILE,\n )\n\n return optimize\n\n\ndef test_posterior_bo_integration__predict_f(\n register_posterior_bo_integration_test:
Callable[[AbstractPosterior], None],\n _create_model: _CreateModel,\n _data: Tuple[tf.Variable, tf.Variable],\n _extend_data: Callable[[GPModel], Iterator[int]],\n _X_new: tf.Tensor,\n _rtol: float,\n _atol: float,\n) -> None:\n \"\"\"\n Check that data added incrementally is correctly reflected in `predict_f`.\n \"\"\"\n _X, Y = _data\n n_rows_new = _X_new.shape[0]\n n_outputs = Y.shape[1]\n\n model = _create_model(_data)\n posterior = model.posterior(PrecomputeCacheType.VARIABLE)\n register_posterior_bo_integration_test(posterior)\n predict_f = posterior.predict_f\n if _COMPILE:\n predict_f = tf.function(predict_f)\n\n for _ in _extend_data(model):\n posterior.update_cache()\n compiled_mean, compiled_var = predict_f(_X_new)\n\n np.testing.assert_equal((n_rows_new, n_outputs), compiled_mean.shape)\n np.testing.assert_equal((n_rows_new, n_outputs), compiled_var.shape)\n\n eager_model = _create_model(_data)\n eager_mean, eager_var = eager_model.predict_f(_X_new)\n\n np.testing.assert_allclose(eager_mean, compiled_mean, rtol=_rtol, atol=_atol)\n np.testing.assert_allclose(eager_var, compiled_var, rtol=_rtol, atol=_atol)\n\n\ndef test_posterior_bo_integration__optimization(\n register_posterior_bo_integration_test: Callable[[AbstractPosterior], None],\n _create_model: _CreateModel,\n _data: Tuple[tf.Variable, tf.Variable],\n _extend_data: Callable[[GPModel], Iterator[int]],\n _X_new: tf.Tensor,\n _optimize: Callable[[GPModel], None],\n _rtol: float,\n _atol: float,\n) -> None:\n \"\"\"\n Check that data added incrementally is considered when optimizing a model.\n \"\"\"\n _X, Y = _data\n n_rows_new = _X_new.shape[0]\n n_outputs = Y.shape[1]\n\n model = _create_model(_data)\n posterior = model.posterior(PrecomputeCacheType.VARIABLE)\n register_posterior_bo_integration_test(posterior)\n predict_f = posterior.predict_f\n if _COMPILE:\n predict_f = tf.function(predict_f)\n\n # Add all the data first, and then `optimize`, so that both models are optimized the same number\n # of times and with the same data, so they converge to the same result.\n\n for _ in _extend_data(model):\n pass\n\n _optimize(model)\n posterior.update_cache()\n compiled_mean, compiled_var = predict_f(_X_new)\n\n np.testing.assert_equal((n_rows_new, n_outputs), compiled_mean.shape)\n np.testing.assert_equal((n_rows_new, n_outputs), compiled_var.shape)\n\n eager_model = _create_model(_data)\n _optimize(eager_model)\n eager_mean, eager_var = eager_model.predict_f(_X_new)\n\n np.testing.assert_allclose(eager_mean, compiled_mean, rtol=_rtol, atol=_atol)\n np.testing.assert_allclose(eager_var, compiled_var, rtol=_rtol, atol=_atol)\n"
] | [
[
"numpy.random.default_rng",
"numpy.testing.assert_equal",
"tensorflow.function",
"tensorflow.concat",
"numpy.testing.assert_allclose",
"tensorflow.reduce_sum"
]
] |
tirkarthi/odin-ai | [
"7900bef82ad8801d0c73880330d5b24d9ff7cd06"
] | [
"odin/ml/plda.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\"\nauthor: 'Omid Sadjadi, Timothee Kheyrkhah'\nemail: '[email protected]'\n\"\"\"\nimport time\nimport warnings\nfrom numbers import Number\n\nimport numpy as np\nfrom scipy.linalg import cholesky, eigh, inv, solve, svd\nfrom six import string_types\n\nfrom odin.backend import calc_white_mat, length_norm\nfrom odin.ml.base import BaseEstimator, Evaluable, TransformerMixin\nfrom odin.ml.scoring import (VectorNormalizer, compute_class_avg,\n compute_within_cov)\nfrom odin.utils import unique\n\n\ndef logdet(A):\n u = cholesky(A)\n y = 2 * np.log(np.diag(u)).sum()\n return y\n\nclass PLDA(BaseEstimator, TransformerMixin, Evaluable):\n r\"\"\" Probabilistic LDA\n\n Parameters\n ----------\n n_phi : int\n number of dimension for the latent space\n\n centering : bool (default: True)\n mean normalization the data before EM\n\n wccn : bool (default: True)\n within class covariance normalization before EM\n\n unit_length : bool (default: True)\n normalize vector length of each sample to 1 before EM\n\n n_iter : {integer, 'auto'}\n if 'auto', keep iterating until no more improvement (i.e. reduction in `sigma` value)\n compared to the `improve_threshold`\n\n improve_threshold : scalar\n Only used in case `n_iter='auto'`\n\n labels : {list of string, or None} (default: None)\n labels information for `evaluate` method\n\n seed : int\n random seed for reproducibility\n\n verbose : int (default: 0)\n verbose level, 0 for turning off all logging activities,\n 1 for basics notification, 2 for fitting progress.\n if `2`, compute log-likelihood during fitting EM, this will\n significantly slows down the process, only suggested for debugging\n\n Attributes\n ----------\n Sigma_ : [feat_dim, feat_dim]\n Phi_ : [feat_dim, n_phi]\n Sb_ : [feat_dim, feat_dim]\n St_ : [feat_dim, feat_dim]\n Lambda : []\n Uk : []\n Q_hat : []\n X_model_ : [num_class, feat_dim]\n class-dependence feature vectors\n \"\"\"\n\n def __init__(self, n_phi=None,\n centering=True, wccn=True, unit_length=True,\n n_iter='auto', improve_threshold=1e-1,\n labels=None, dtype='float64', random_state=None,\n verbose=0):\n super(PLDA, self).__init__()\n # ====== check n_phi ====== #\n if n_phi is not None:\n n_phi = int(n_phi)\n self.n_phi_ = n_phi\n # ====== check num_iter ====== #\n if isinstance(n_iter, string_types):\n n_iter = n_iter.lower()\n assert n_iter == 'auto', 'Invalid `n_iter` value: %s' % n_iter\n elif isinstance(n_iter, Number):\n assert n_iter > 0, \"`n_iter` must greater than 0, but given: %d\" % n_iter\n self.n_iter_ = n_iter\n self.improve_threshold_ = float(improve_threshold)\n # ====== other ====== #\n self.feat_dim_ = None\n self._labels = labels\n self.verbose_ = int(verbose)\n # for normalization\n self._normalizer = VectorNormalizer(\n centering=centering, wccn=wccn, unit_length=unit_length,\n lda=False, concat=False)\n self._dtype = np.dtype(dtype)\n # ====== check random state ====== #\n if random_state is None:\n self._rand_state = np.random.RandomState(None)\n elif isinstance(random_state, Number):\n self._rand_state = np.random.RandomState(seed=random_state)\n elif isinstance(random_state, np.random.RandomState):\n self._rand_state = random_state\n else:\n raise ValueError(\"Invalid argument for `random_state`: %s\" % str(random_state))\n # Attributes\n self.Sigma_ = None\n self.Phi_ = None\n self.Sb_ = None\n self.St_ = None\n\n # ==================== properties ==================== #\n @property\n def dtype(self):\n return self._dtype\n\n @property\n def feat_dim(self):\n return 
self.feat_dim_\n\n @property\n def normalizer(self):\n return self._normalizer\n\n @property\n def labels(self):\n return self._labels\n\n @property\n def num_classes(self):\n return len(self._labels)\n\n @property\n def is_fitted(self):\n if not hasattr(self, 'Lambda_') or \\\n not hasattr(self, 'Uk_') or \\\n not hasattr(self, 'Q_hat_') or \\\n not hasattr(self, 'X_model_'):\n return False\n return True\n\n # ==================== Pickling ==================== #\n def __getstate__(self):\n if not self.is_fitted:\n raise RuntimeError(\"The PLDA have not been fitted, nothing to pickle!\")\n return (self.n_phi_, self.n_iter_, self.feat_dim_, self._labels, self.verbose_,\n self._normalizer, self._dtype, self._rand_state,\n self.Sigma_, self.Phi_, self.Sb_, self.St_,\n self.Lambda_, self.Uk_, self.Q_hat_, self.X_model_)\n\n def __setstate__(self, states):\n (self.n_phi_, self.n_iter_, self.feat_dim_, self._labels, self.verbose_,\n self._normalizer, self._dtype, self._rand_state,\n self.Sigma_, self.Phi_, self.Sb_, self.St_,\n self.Lambda_, self.Uk_, self.Q_hat_, self.X_model_) = states\n\n # ==================== helpers ==================== #\n def initialize(self, X, labels):\n feat_dim = X.shape[1]\n if self.feat_dim is None or self._num_classes is None:\n self.feat_dim_ = int(feat_dim)\n if self._labels is None:\n self._labels = labels\n if self.feat_dim <= self.n_phi_:\n raise RuntimeError(\"`feat_dim=%d` must be greater than `n_phi=%d`\" %\n (self.feat_dim, self.n_phi_))\n # ====== initialize ====== #\n # covariance matrix of the residual term\n # self.Sigma_ = 1. / self.feat_dim * np.eye(self.feat_dim, dtype=self.dtype)\n self.Sigma_ = (1. / self.feat_dim * np.eye(self.feat_dim) +\n self._rand_state.randn(self.feat_dim, self.feat_dim)\n ).astype(self.dtype)\n # self.Sigma_ = np.cov(X.T).astype(self.dtype)\n # self.Sigma_ = (np.cov(X.T) +\n # self._rand_state.randn(self.feat_dim, self.feat_dim)\n # ).astype(self.dtype)\n # self.Sigma_ = 100 * self._rand_state.randn(\n # self.feat_dim, self.feat_dim).astype(self.dtype)\n # factor loading matrix (Eignevoice matrix) [feat_dim, n_phi]\n # self.Phi_ = np.r_[np.eye(self.n_phi_),\n # np.zeros((self.feat_dim - self.n_phi_, self.n_phi_))]\n # self.Phi_ = self._rand_state.randn(self.feat_dim, self.n_phi_).astype(self.dtype)\n self.Phi_ = self.normalizer.transform(\n self._rand_state.randn(self.n_phi_, self.feat_dim)\n ).T.astype(self.dtype)\n self.Sb_ = np.zeros((self.feat_dim, self.feat_dim), dtype=self.dtype)\n self.St_ = np.zeros((self.feat_dim, self.feat_dim), dtype=self.dtype)\n # ====== validate the dimension ====== #\n if self.feat_dim != feat_dim:\n raise ValueError(\"Mismatch the input feature dimension, %d != %d\" %\n (self.feat_dim, feat_dim))\n if self.num_classes != len(labels):\n raise ValueError(\"Mismatch the number of output classes, %d != %d\" %\n (self.num_classes, len(labels)))\n\n # ==================== sklearn ==================== #\n def _update_caches(self):\n # ====== update cached matrices for scoring ====== #\n iSt = inv(self.St_) # [feat_dim, feat_dim]\n iS = inv(self.St_ - np.dot(np.dot(self.Sb_, iSt), self.Sb_))\n Q = iSt - iS # [feat_dim, feat_dim]\n P = np.dot(np.dot(iSt, self.Sb_), iS) # [feat_dim, feat_dim]\n U, s, V = svd(P, full_matrices=False)\n self.Lambda_ = np.diag(s[:self.n_phi_]) # [n_phi, n_phi]\n self.Uk_ = U[:, :self.n_phi_] # [feat_dim, n_phi]\n self.Q_hat_ = np.dot(np.dot(self.Uk_.T, Q), self.Uk_) # [n_phi, n_phi]\n\n def fit_maximum_likelihood(self, X, y):\n # ====== preprocessing ====== #\n if 
isinstance(X, (tuple, list)):\n X = np.asarray(X)\n elif \"odin.fuel\" in str(type(X)):\n X = X[:]\n if isinstance(y, (tuple, list)):\n y = np.asarray(y)\n # ====== normalizing and initializing ====== #\n X = self.normalizer.fit(X, y).transform(X)\n classes = np.unique(y)\n self.initialize(X, labels=classes)\n # ====== ml ====== #\n Sw = compute_within_cov(X, y, classes)\n self.St_ = np.cov(X.T)\n self.Sb_ = self.St_ - Sw\n # ====== the default class_avg ====== #\n self._update_caches()\n model_vecs = compute_class_avg(X, y, classes=classes)\n self.X_model_ = np.dot(model_vecs, self.Uk_)\n return self\n\n def fit(self, X, y):\n \"\"\"\n Parameters\n ----------\n X : [num_samples, feat_dim]\n y : [num_samples]\n \"\"\"\n # ====== preprocessing ====== #\n if isinstance(X, (tuple, list)):\n X = np.asarray(X)\n elif \"odin.fuel\" in str(type(X)):\n X = X[:]\n if isinstance(y, (tuple, list)):\n y = np.asarray(y)\n assert X.shape[0] == y.shape[0], \\\n \"Number of samples mismatch in `X` and `y`, %d != %d\" % \\\n (X.shape[0], y.shape[0])\n # ====== normalize and initialize ====== #\n y_counts = np.bincount(y) # sessions per speaker\n classes = np.unique(y)\n X = self.normalizer.fit(X, y).transform(X)\n self.initialize(X, labels=classes)\n # ====== Initializing ====== #\n F = np.zeros((self.num_classes, self.feat_dim))\n for clz in np.unique(y):\n # Speaker indices\n F[clz, :] = X[y == clz, :].sum(axis=0)\n if self.verbose_ > 0:\n print('Re-estimating the Eigenvoice subspace with {} factors ...'.format(self.n_phi_))\n X_sqr = np.dot(X.T, X)\n # ====== iteration ====== #\n iter = 0\n last_llk_value = None\n while True:\n e_time = time.time()\n # expectation\n Ey, Eyy = self.expectation_plda(F, y_counts)\n e_time = time.time() - e_time\n # maximization\n m_time = time.time()\n self.maximization_plda(X, X_sqr, F, Ey, Eyy)\n m_time = time.time() - m_time\n # log-likelihood\n llk = 'None'\n llk_value = None\n if self.verbose_ > 1 or isinstance(self.n_iter_, string_types):\n llk_value = self.compute_llk(X)\n llk = '%.2f' % llk_value\n if self.verbose_ > 0:\n print('#iter:%-3d \\t [llk = %s] \\t [E-step = %.2f s] [M-step = %.2f s]' %\n (iter + 1, llk, e_time, m_time))\n # check breaking condition\n iter += 1\n if isinstance(self.n_iter_, Number):\n if iter >= self.n_iter_:\n break\n elif iter > 2 and last_llk_value is not None:\n if llk_value - last_llk_value < self.improve_threshold_:\n break\n last_llk_value = llk_value\n # ====== Update the eigenvoice space ====== #\n self.Sb_ = self.Phi_.dot(self.Phi_.T)\n self.St_ = self.Sb_ + self.Sigma_\n # ====== the default class_avg ====== #\n self._update_caches()\n model_vecs = compute_class_avg(X, y, classes=classes)\n self.X_model_ = np.dot(model_vecs, self.Uk_)\n\n def expectation_plda(self, F, cls_counts):\n \"\"\"\n Parameters\n ----------\n F : [num_classes, feat_dim]\n cls_count : [num_classes]\n \"\"\"\n # computes the posterior mean and covariance of the factors\n num_classes = F.shape[0]\n Eyy = np.zeros(shape=(self.n_phi_, self.n_phi_))\n Ey_clz = np.zeros(shape=(num_classes, self.n_phi_))\n # initialize common terms to save computations\n uniqFreqs = unique(cls_counts, keep_order=True)\n n_uniq = len(uniqFreqs)\n invTerms = np.empty(shape=(n_uniq, self.n_phi_, self.n_phi_))\n PhiT_invS = solve(self.Sigma_.T, self.Phi_).T # [n_phi, feat_dim]\n PhiT_invS_Phi = np.dot(PhiT_invS, self.Phi_) # [n_phi, n_phi]\n I = np.eye(self.n_phi_)\n\n for ix in range(n_uniq):\n nPhiT_invS_Phi = uniqFreqs[ix] * PhiT_invS_Phi\n invTerms[ix] = inv(I + 
nPhiT_invS_Phi)\n\n for clz in range(num_classes):\n num_samples = cls_counts[clz]\n PhiT_invS_y = np.dot(PhiT_invS, F[clz, :])\n idx = np.flatnonzero(uniqFreqs == num_samples)[0]\n Cyy = invTerms[idx]\n Ey_clz[clz, :] = np.dot(Cyy, PhiT_invS_y)\n Eyy += num_samples * Cyy\n\n Eyy += np.dot((Ey_clz * cls_counts[:, None]).T, Ey_clz)\n return Ey_clz, Eyy\n\n def compute_llk(self, X):\n \"\"\"\n Parameters\n ----------\n X : [num_samples, feat_dim]\n \"\"\"\n num_samples = X.shape[0]\n S = np.dot(self.Phi_, self.Phi_.T) + self.Sigma_ # [feat_dim, feat_dim]\n llk = -0.5 * (self.feat_dim * num_samples * np.log(2 * np.pi) +\n num_samples * logdet(S) +\n np.sum(X * solve(S, X.T).T))\n return llk\n\n def maximization_plda(self, X, X_sqr, F, Ey, Eyy):\n \"\"\"\n ML re-estimation of the Eignevoice subspace and the covariance of the\n residual noise (full).\n\n Paremters\n ---------\n X : [num_samples, feat_dim]\n X_cov : [feat_dim, feat_dim]\n F : [num_classes, feat_dim]\n Ey : [num_classes, n_phi]\n Eyy : [n_phi, n_phi]\n \"\"\"\n num_samples = X.shape[0]\n Ey_FT = np.dot(Ey.T, F) # [n_phi, feat_dim]\n self.Phi_ = solve(Eyy.T, Ey_FT).T # [feat_dim, n_phi]\n self.Sigma_ = 1. / num_samples * (X_sqr - np.dot(self.Phi_, Ey_FT))\n\n def transform(self, X):\n if not self.is_fitted:\n raise RuntimeError(\"This model hasn't been fitted!\")\n # ====== check X ====== #\n if isinstance(X, (tuple, list)):\n X = np.asarray(X)\n elif \"odin.fuel\" in str(type(X)):\n X = X[:]\n # ====== transform into latent space ====== #\n X_norm = self.normalizer.transform(X)\n X_project = np.dot(X_norm, self.Uk_) # [num_samples, n_phi]\n return X_project\n # return np.dot(X_project, self.Q_hat_)\n # h = np.dot(X_project, self.Q_hat_) * X_project\n # return h\n\n def predict_log_proba(self, X, X_model=None):\n \"\"\"\n Parameters\n ----------\n X : [num_samples, feat_dim]\n X_model : [num_classes, feat_dim]\n if None, use class average extracted based on fitted data\n\n Return\n ------\n log-probabilities matrix [num_samples, num_classes]\n \"\"\"\n if not self.is_fitted:\n raise RuntimeError(\"This model hasn't been fitted!\")\n # ====== check X_model ====== #\n if X_model is None:\n X_model = self.X_model_\n else:\n # [num_classes, n_phi]\n X_model = np.dot(self.normalizer.transform(X_model), self.Uk_)\n if X_model.shape[0] != self.num_classes:\n warnings.warn(\"The model matrix contains %d classes, but the \"\n \"fitted number of classes is %d\" %\n (X_model.shape[0], self.num_classes))\n # ====== check X ====== #\n if isinstance(X, (tuple, list)):\n X = np.asarray(X)\n elif \"odin.fuel\" in str(type(X)):\n X = X[:]\n # ====== transform the input matrices ====== #\n X = np.dot(self.normalizer.transform(X), self.Uk_) # [num_samples, n_phi]\n # [num_classes, 1]\n score_h1 = np.sum(np.dot(X_model, self.Q_hat_) * X_model, axis=1, keepdims=True)\n # [num_samples, 1]\n score_h2 = np.sum(np.dot(X, self.Q_hat_) * X, axis=1, keepdims=True)\n # [num_samples, num_classes]\n score_h1h2 = 2 * np.dot(X, np.dot(X_model, self.Lambda_).T)\n # [num_samples, num_classes]\n scores = score_h1h2 + score_h1.T + score_h2\n return scores\n"
] | [
[
"numpy.eye",
"numpy.bincount",
"numpy.empty",
"numpy.zeros",
"numpy.diag",
"numpy.dtype",
"scipy.linalg.solve",
"numpy.asarray",
"scipy.linalg.inv",
"numpy.random.RandomState",
"scipy.linalg.cholesky",
"numpy.log",
"numpy.dot",
"numpy.flatnonzero",
"numpy.unique",
"numpy.cov",
"scipy.linalg.svd"
]
] |
ryorda/tensorflow-viennacl | [
"d87a9fbbc5f49ec5ae8eb52c62628f0b1a0bf67f"
] | [
"tensorflow/python/ops/nn_grad_test.py"
] | [
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Python ops defined in nn_grad.py.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import gradient_checker\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import nn_grad\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.platform import test\n\n\nclass Relu6OpTest(test.TestCase):\n def testRelu6GradGrad(self):\n inputs = constant_op.constant([[-2, -1, 1, 3], [5, 7, 8, 9]],\n dtype=dtypes.float32)\n x_init_value = np.array([[-3.5, -1.5, 2, 4], [4.5, 7.5, 8.5, 11]])\n r = nn_ops.relu6(inputs)\n r_g = gradients_impl.gradients(r, inputs)[0]\n with self.test_session():\n error = gradient_checker.compute_gradient_error(\n inputs, inputs.get_shape().as_list(),\n r_g, r_g.get_shape().as_list(),\n x_init_value=x_init_value)\n self.assertLess(error, 1e-4)\n\n\nif __name__ == \"__main__\":\n test.main()\n"
] | [
[
"tensorflow.python.ops.nn_ops.relu6",
"tensorflow.python.ops.gradients_impl.gradients",
"tensorflow.python.platform.test.main",
"numpy.array",
"tensorflow.python.framework.constant_op.constant"
]
] |
arminnh/deep-q-learning | [
"e6ec12cfa2468b86f60a6cb2635f5feb12dcd7a6"
] | [
"src/universe-driving/drive.py"
] | [
"#https://github.com/openai/universe-starter-agent/blob/master/envs.py\n\nimport gym\nimport universe\nimport socketio\nimport eventlet.wsgi\nfrom PIL import Image\nfrom flask import Flask\nfrom io import BytesIO\nfrom TORCH_DQN import DQN\nfrom enum import Enum\nimport torchvision.transforms as T\nimport ast\nimport torch\nfrom env import create_flash_env\n\nclass Moves(Enum):\n LEFT = 0\n RIGHT = 1\n ACCELERATE = 2\n BRAKE = 3\n TURBO = 4\n\n def __str__(self):\n if self == Moves.ACCELERATE:\n return \"up\"\n elif self == Moves.BRAKE:\n return \"down\"\n elif self == Moves.LEFT:\n return \"left\"\n elif self == Moves.RIGHT:\n return \"right\"\n elif self == Moves.TURBO:\n return \"x\"\n\nclass SelfDrivingAgent:\n\n def __init__(self):\n #4 moves\n self.DQN = DQN(len(Moves))\n\n self.state = None\n self.lastScreen = None\n\ndef main():\n # Create env\n env, w, h = create_flash_env('flashgames.DuskDrive-v0')\n _ = env.reset()\n\n agent = SelfDrivingAgent()\n #print(observation_n)\n agent.state = torch.zeros((1,128,200)).numpy()\n agent.lastScreen = torch.zeros((1,128,200)).numpy()\n\n next_state = torch.zeros((1,128,200)).numpy()\n count = 1\n while True:\n action = agent.DQN.act(agent.state)\n\n observation_n, reward_n, done_n, info = env.step(action)\n if \"global/episode_reward\" in info:\n count += 1\n # we have finished an episode\n if count in [100,200,300,400,500,600,700,800,900] or count % 1000 == 0:\n #save\n agent.DQN.save(\"agent_ep{}\".format(count))\n\n #print(\"learning\")\n agent.DQN.remember(agent.state, action, reward_n, next_state, False)\n #print(observation_n)\n next_state = observation_n - agent.lastScreen\n agent.lastScreen = observation_n\n\n agent.state = next_state\n agent.DQN.replay(128)\n\n env.render()\n\nmain()"
] | [
[
"torch.zeros"
]
] |
selfemergence/NEAT-multiprocessing | [
"3dc57c6ec18658253398ae0b361b72f78e3fd0c9"
] | [
"experiments/mario/gym-nes-mario-bros-master/src/run-mario.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# By Lilian Besson (Naereen)\n# https://github.com/Naereen/gym-nes-mario-bros\n# MIT License https://lbesson.mit-license.org/\n#\nfrom __future__ import division, print_function # Python 2 compatibility\n\nimport os\nimport sys\nfrom collections import deque\nfrom time import sleep\n\nPARALLEL_EMULATORS = 1 # XXX Turn down parallel emulators if needed\n# PARALLEL_EMULATORS = 4 # Nb of cores, to have exactly one emulator process by cores\nPARALLEL_EMULATORS = int(os.getenv('N', PARALLEL_EMULATORS))\nif PARALLEL_EMULATORS > 1:\n print(\"WARNING: It's not working with more than one emulator in parallel!\")\n\n\n# FIXME use joblib for something smart?\n# from joblib import Parallel, delayed\n# Parallel(n_jobs=PARALLEL_EMULATORS)(\n# delayed(delayed_play)(XXX)\n# for repeatId in range(PARALLEL_EMULATORS)\n# )\n\n\nimport gym\nfrom gym import wrappers\nimport nesgym\nfrom nesgym.wrappers import CROPPED_WIDTH, CROPPED_HEIGHT\nimport numpy as np\n\nfrom dqn.model import DoubleDQN\nfrom dqn.utils import PiecewiseSchedule\n\n\ndqn_model_name = \"DQN_MarioBros_v1\"\n\n\ndef get_env():\n print(\"Creating gym environment...\") # DEBUG\n env = gym.make('nesgym/MarioBros-v0')\n env = nesgym.wrap_nes_env(env)\n expt_dir = '/tmp/mario/'\n env = wrappers.Monitor(env, os.path.join(expt_dir, \"gym\"), force=True)\n return env\n\n\ndef get_envs(N=1):\n print(\"Creating {} gym environments...\".format(N)) # DEBUG\n envs = []\n for n in range(N):\n print(\"Creating gym environment #{}/{}...\".format(n + 1, N)) # DEBUG\n env = gym.make('nesgym/MarioBros-v0')\n env = nesgym.wrap_nes_env(env)\n expt_dir = '/tmp/mario-{}/'.format(n)\n env = wrappers.Monitor(env, os.path.join(expt_dir, \"gym\"), force=True)\n envs.append(env)\n sleep(1)\n return envs\n\n# Keep a log of the max score seen so far, to plot it as a function of time steps\ndef log_max_seen_score(step, max_seen_score, max_seen_score_csv):\n with open(max_seen_score_csv, 'a') as f:\n f.write(\"\\n{}, {}\".format(step, max_seen_score))\n\n\ndef mario_main(N=1, dqn_model_name=dqn_model_name):\n envs = get_envs(N=N)\n\n # env = envs[0].env.env.env.env\n env = envs[0]\n while hasattr(env, 'env'):\n env = env.env\n env0 = env\n\n last_observations = [ 0 for env in envs ]\n # FIXME finish the support for running emulators in parallel\n for emulatornumber, env in enumerate(envs):\n last_observations[emulatornumber] = env.reset()\n\n try:\n # _emulatornumber = envs[0].env.env.env.env._emulatornumber\n _emulatornumber = env0._emulatornumber\n except:\n _emulatornumber = 0\n dqn_model_name = \"{}-{}\".format(dqn_model_name, _emulatornumber)\n\n max_timesteps = 10000000 # 10 millions steps for 216996 parameters...\n max_seen_score = 0\n\n # Create the log file if needed\n max_seen_score_csv = \"max_seen_score_{}.csv\".format(_emulatornumber)\n if not os.path.isfile(max_seen_score_csv):\n with open(max_seen_score_csv, 'w') as f:\n f.write(\"step, max_seen_score\")\n\n exploration_schedule = PiecewiseSchedule(\n [\n (0, 1.0),\n (1e5, 0.1),\n (max_timesteps / 2, 0.01),\n ], outside_value=0.01\n )\n\n dqn = DoubleDQN(\n image_shape=(CROPPED_WIDTH, CROPPED_HEIGHT, 1),\n num_actions=envs[0].action_space.n,\n # # --- XXX heavy simulations\n # training_starts=10000,\n # target_update_freq=5000,\n # training_batch_size=32,\n # training_freq=4,\n # # --- XXX light simulations?\n training_starts=20,\n target_update_freq=10,\n training_freq=4,\n training_batch_size=4,\n # --- Other parameters...\n frame_history_len=8, # XXX is it 
more efficient with history?\n replay_buffer_size=10000, # XXX reduce if MemoryError\n # frame_history_len=8, # XXX is it more efficient with history?\n # replay_buffer_size=100000, # XXX reduce if MemoryError\n exploration=exploration_schedule,\n name=dqn_model_name\n )\n\n # How to save the DQN to a file after every training\n # in order to resume from previous step if training was stopped?\n if os.path.isfile(dqn_model_name + '.h5'):\n try:\n dqn.load_weights(dqn_model_name + '.h5')\n print(\"Successfully loaded the DQN weights from file '{}'...\".format(dqn_model_name + '.h5')) # DEBUG\n except (ValueError, NotImplementedError, AttributeError):\n print(\"Unable to load the DQN weights from file '{}'...\".format(dqn_model_name + '.h5')) # DEBUG\n\n dqn.save_model()\n dqn.plot_model()\n\n reward_sum_episode = 0\n num_episodes = 0\n episode_rewards = deque(maxlen=100)\n\n for step in range(max_timesteps):\n if step > 0 and step % 100 == 0:\n print(\"step: \", step,\n \"; episodes:\", num_episodes,\n \"; epsilon:\", exploration_schedule.value(step),\n \"; learning rate:\", dqn.get_learning_rate(),\n \"; last 100 training loss mean\", dqn.get_avg_loss()\n )\n if len(episode_rewards) > 0:\n print(\"last 100 episode mean rewards: \", np.mean(np.array(episode_rewards)))\n\n # also print summary of the model!\n dqn.summary()\n # and save the model!\n dqn.save_weights(dqn_model_name + '.h5')\n\n # --- Parallel loops for different environments\n for emulatornumber, env in enumerate(envs):\n last_obs = last_observations[emulatornumber]\n\n # XXX Enable this to see the Python view of the screen (PIL.imshow)\n # env.render()\n\n if len(envs) > 1:\n print(\"Emulator #\", emulatornumber) # DEBUG\n\n action = dqn.choose_action(step, last_obs)\n obs, reward, done, info = env.step(action)\n reward_sum_episode += reward\n\n if done and reward < 0:\n reward = 0 # force this manually to avoid bug of getting -400 10 times in a row!\n dqn.learn(step, action, reward, done, info)\n\n print(\"Step {:>6}, action {:>2} (#{:>2}), gave reward {:>6}, score {:>6} and max score {:>6}, life {:>2} and level {:>2}.\".format(step, env0.actions[action], action, reward, info['score'], max_seen_score, info['life'], info['level'])) # DEBUG\n\n if info['score'] > max_seen_score:\n max_seen_score = info['score']\n print(\"!!New total score record!!\", max_seen_score)\n log_max_seen_score(step, max_seen_score, max_seen_score_csv)\n if done:\n last_obs = env.reset()\n if info['frame'] > 0: # we actually played a few frames\n print(\"\\ndone, reward_sum_episode =\", reward_sum_episode)\n episode_rewards.append(reward_sum_episode)\n reward_sum_episode = 0\n num_episodes += 1\n else:\n last_obs = obs\n\n last_observations[emulatornumber] = last_obs\n\n print(\"Simulation is done, exiting now\")\n sys.exit(0)\n\n\nif __name__ == \"__main__\":\n # FIXME finish the support for running emulators in parallel\n mario_main(N=PARALLEL_EMULATORS)\n"
] | [
[
"numpy.array"
]
] |
susanwe/world-models | [
"0f246a430683e6ab741726df0a97f35830044356"
] | [
"models/vae.py"
] | [
"\n\"\"\"\nVariational encoder model, used as a visual model\nfor our model of the world.\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Decoder(nn.Module):\n \"\"\" VAE decoder \"\"\"\n def __init__(self, img_channels, latent_size):\n super(Decoder, self).__init__()\n self.latent_size = latent_size\n self.img_channels = img_channels\n\n self.fc1 = nn.Linear(latent_size, 1024)\n self.deconv1 = nn.ConvTranspose2d(1024, 128, 5, stride=2)\n self.deconv2 = nn.ConvTranspose2d(128, 64, 5, stride=2)\n self.deconv3 = nn.ConvTranspose2d(64, 32, 6, stride=2)\n self.deconv4 = nn.ConvTranspose2d(32, img_channels, 6, stride=2)\n\n def forward(self, x): # pylint: disable=arguments-differ\n x = F.relu(self.fc1(x))\n x = x.unsqueeze(-1).unsqueeze(-1)\n x = F.relu(self.deconv1(x))\n x = F.relu(self.deconv2(x))\n x = F.relu(self.deconv3(x))\n reconstruction = torch.sigmoid(self.deconv4(x))\n return reconstruction\n\nclass Encoder(nn.Module): # pylint: disable=too-many-instance-attributes\n \"\"\" VAE encoder \"\"\"\n def __init__(self, img_channels, latent_size):\n super(Encoder, self).__init__()\n self.latent_size = latent_size\n #self.img_size = img_size\n self.img_channels = img_channels\n\n self.conv1 = nn.Conv2d(img_channels, 32, 4, stride=2)\n self.conv2 = nn.Conv2d(32, 64, 4, stride=2)\n self.conv3 = nn.Conv2d(64, 128, 4, stride=2)\n self.conv4 = nn.Conv2d(128, 256, 4, stride=2)\n\n self.fc_mu = nn.Linear(2*2*256, latent_size)\n self.fc_logsigma = nn.Linear(2*2*256, latent_size)\n\n\n def forward(self, x): # pylint: disable=arguments-differ\n x = F.relu(self.conv1(x))\n x = F.relu(self.conv2(x))\n x = F.relu(self.conv3(x))\n x = F.relu(self.conv4(x))\n x = x.view(x.size(0), -1)\n\n mu = self.fc_mu(x)\n logsigma = self.fc_logsigma(x)\n\n return mu, logsigma\n\nclass VAE(nn.Module):\n \"\"\" Variational Autoencoder \"\"\"\n def __init__(self, img_channels, latent_size):\n super(VAE, self).__init__()\n self.encoder = Encoder(img_channels, latent_size)\n self.decoder = Decoder(img_channels, latent_size)\n\n def forward(self, x): # pylint: disable=arguments-differ\n mu, logsigma = self.encoder(x)\n sigma = logsigma.exp()\n eps = torch.randn_like(sigma)\n z = eps.mul(sigma).add_(mu)\n\n recon_x = self.decoder(z)\n return recon_x, mu, logsigma\n"
] | [
[
"torch.randn_like",
"torch.nn.Linear",
"torch.nn.Conv2d",
"torch.nn.ConvTranspose2d"
]
] |
tidepool-org/data-science-models | [
"cd06e9aad95a0bc6cc2a81871e567c88159b86d3"
] | [
"tidepool_data_science_models/models/icgm_sensor_generator.py"
] | [
"\"\"\"\nCreates iCGM Sensors given a trueBG trace\n\nThe Dexcom G6 Specifications in this file are publicly available from:\n “EVALUATION OF AUTOMATIC CLASS III DESIGNATION FOR\n Dexcom G6 Continuous Glucose Monitoring System.” n.d.\n https://www.accessdata.fda.gov/cdrh_docs/reviews/DEN170088.pdf.\n\n\"\"\"\n\n# %% Libraries\nimport numpy as np\nfrom scipy.optimize import brute, fmin\nfrom tidepool_data_science_models.models.icgm_sensor import iCGMSensor\nimport tidepool_data_science_models.models.icgm_sensor_generator_functions as sf\nimport multiprocessing\nmultiprocessing.set_start_method(\"fork\")\n\n\n# %% Definitions\nclass iCGMSensorGenerator(object):\n \"\"\"iCGM Sensor Generator object which fits a Johnsonsu distribution to a true_bg_trace\n and generates sensors using this distribution\"\"\"\n\n def __init__(\n self,\n sc_thresholds=None, # This is required only for iCGM sensors for now (A-G)\n batch_training_size=30,\n use_g6_accuracy_in_loss=False,\n bias_type=\"percentage_of_value\",\n bias_drift_type=\"random\",\n random_seed=0,\n verbose=False,\n true_bg_trace=None,\n true_dataset_name=\"default\",\n ):\n \"\"\"\n Sensor Generator Initialization\n\n Parameters\n ----------\n sc_thresholds : float array\n The 7 special control thresholds A-G\n use_g6_accuracy_in_loss : bool\n Whether or not to use the G6 accuracy loss during fit\n bias_type : str\n Type of overall bias used which defines the normalization factor\n bias_drift_type : str\n Type of drift used in the sensor bias (random, linear, none)\n random_seed : int\n Random seed used throughout generator for reproducible sensors and values\n verbose : bool\n Verbosity setting for the brute force distribution parameter search\n true_bg_trace : float array\n The time-series of true bgs the iCGM distribution is fit to\n true_dataset_name : str\n Name of the true bg dataset used to fit\n \"\"\"\n\n if sc_thresholds is None:\n sc_thresholds = [\n 0.85,\n 0.70,\n 0.80,\n 0.98,\n 0.99,\n 0.99,\n 0.87,\n ] # This is required only for iCGM sensors (Criteria A-G)\n\n self.sc_thresholds = sc_thresholds\n self.batch_training_size = batch_training_size\n self.use_g6_accuracy_in_loss = use_g6_accuracy_in_loss\n self.bias_type = bias_type\n self.bias_drift_type = bias_drift_type\n self.random_seed = random_seed\n self.verbose = verbose\n self.true_bg_trace = true_bg_trace\n self.true_dataset_name = true_dataset_name\n\n # pick delay based upon data in:\n # Vettoretti et al., 2019, Sensors 2019, 19, 5320\n if use_g6_accuracy_in_loss:\n self.delay = 5 # time delay between iCGM value and true value\n else:\n self.delay = 10\n\n self.johnson_parameter_search_range, self.search_range_inputs = sf.get_search_range()\n\n # set the random seed for reproducibility\n np.random.seed(seed=random_seed)\n\n self.icgm_traces = None\n self.individual_sensor_properties = None\n self.batch_sensor_brute_search_results = None\n self.batch_sensor_properties = None\n self.dist_params = None\n\n return\n\n def fit(self, true_bg_trace=None):\n \"\"\"Fits the optimal sensor characteristics fit to a true_bg_trace using a brute search range\n\n Parameters\n ----------\n true_bg_trace : float array\n The true_bg_trace (mg/dL) used to fit a johnsonsu distribution\n training_size : int\n Number of sensors used when fitting the optimal distribution of sensor characteristics\n\n \"\"\"\n\n if true_bg_trace is None:\n raise Exception(\"No true_bg_trace given\")\n\n self.true_bg_trace = true_bg_trace\n\n batch_sensor_brute_search_results = brute(\n 
sf.johnsonsu_icgm_sensor,\n self.johnson_parameter_search_range,\n args=(\n true_bg_trace,\n self.sc_thresholds,\n self.batch_training_size,\n self.bias_type,\n self.bias_drift_type,\n self.delay,\n self.random_seed,\n self.verbose,\n self.use_g6_accuracy_in_loss,\n ),\n workers=-1,\n full_output=True,\n finish=fmin, # fmin will look for a local minimum around the grid point\n )\n\n self.batch_sensor_brute_search_results = batch_sensor_brute_search_results\n self.dist_params = self.batch_sensor_brute_search_results[0]\n\n return\n\n def generate_sensors(self, n_sensors, sensor_start_datetime, sensor_start_time_index=0):\n\n if self.dist_params is None:\n raise Exception(\"iCGM Sensor Generator has not been fit() to a true_bg_trace distribution.\")\n\n (\n a,\n b,\n mu,\n sigma,\n noise_coefficient,\n bias_drift_range_min,\n bias_drift_range_max,\n bias_drift_oscillations,\n ) = self.dist_params\n\n bias_drift_range = [bias_drift_range_min, bias_drift_range_max]\n\n # STEP 3 apply the results\n # Convert to a generate_sensor(global_params) --> Sensor(obj)\n self.icgm_traces, self.individual_sensor_properties = sf.generate_icgm_sensors(\n self.true_bg_trace,\n dist_params=self.dist_params[:4],\n n_sensors=n_sensors,\n bias_type=self.bias_type,\n bias_drift_type=self.bias_drift_type,\n bias_drift_range=bias_drift_range,\n bias_drift_oscillations=bias_drift_oscillations,\n noise_coefficient=noise_coefficient,\n delay=self.delay,\n random_seed=self.random_seed,\n )\n\n sensors = []\n\n for sensor_num in range(n_sensors):\n sensor_properties = self.individual_sensor_properties.loc[sensor_num]\n sensors.append(\n iCGMSensor(\n sensor_properties=sensor_properties,\n time_index=sensor_start_time_index,\n current_datetime=sensor_start_datetime,\n )\n )\n\n self.n_sensors = n_sensors\n self.sensors = sensors # Array of sensor objects\n\n return sensors\n"
] | [
[
"scipy.optimize.brute",
"numpy.random.seed"
]
] |
tomroesch/iqplot | [
"e13f9ac888d75093da05353ba80786804ec99418"
] | [
"iqplot/dist.py"
] | [
"\"\"\"Visualization of how data are distributed, split or colored by a\ncategorical variable.\"\"\"\n\nimport copy\nimport warnings\n\nimport numpy as np\nimport pandas as pd\n\nimport colorcet\n\nimport bokeh.models\nimport bokeh.plotting\n\nfrom . import utils\n\n\ndef ecdf(\n data=None,\n q=None,\n cats=None,\n q_axis=\"x\",\n palette=None,\n order=None,\n p=None,\n show_legend=True,\n legend_label=None,\n legend_location=\"right\",\n legend_orientation=\"vertical\",\n tooltips=None,\n complementary=False,\n kind=\"collection\",\n style=\"dots\",\n conf_int=False,\n ptiles=[2.5, 97.5],\n n_bs_reps=10000,\n click_policy=\"hide\",\n marker=\"circle\",\n marker_kwargs=None,\n line_kwargs=None,\n conf_int_kwargs=None,\n horizontal=None,\n val=None,\n **kwargs,\n):\n \"\"\"\n Make an ECDF plot.\n\n Parameters\n ----------\n data : Pandas DataFrame, 1D Numpy array, or xarray\n DataFrame containing tidy data for plotting. If a Numpy array,\n a single category is assumed and a box plot generated from\n data.\n q : hashable\n Name of column to use as quantitative variable if `data` is a\n Pandas DataFrame. Otherwise, `q` is used as the quantitative\n axis label.\n cats : hashable or list of hashables\n Name of column(s) to use as categorical variable(s).\n q_axis : str, either 'x' or 'y', default 'x'\n Axis along which the quantitative value varies.\n palette : list colors, or single color string \n If a list, color palette to use. If a single string representing\n a color, all glyphs are colored with that color. Default is\n colorcet.b_glasbey_category10 from the colorcet package.\n order : list or None\n If not None, must be a list of unique group names when the input\n data frame is grouped by `cats`. The order of the list specifies\n the ordering of the categorical variables in the legend. If\n None, the categories appear in the order in which they appeared\n in the inputted data frame.\n p : bokeh.plotting.Figure instance, or None (default)\n If None, create a new figure. Otherwise, populate the existing\n figure `p`.\n show_legend : bool, default False\n If True, display legend.\n legend_label : str, default None\n If `cats` is None and `show_legend` is True, then if\n `legend_label` is not None, a legend is created for the glyph\n on the plot and labeled with `legend_label`. Otherwise, no\n legend is created if `cats` is None.\n legend_location : str, default 'right'\n Location of legend. If one of \"right\", \"left\", \"above\", or\n \"below\", the legend is placed outside of the plot area. If one\n of \"top_left\", \"top_center\", \"top_right\", \"center_right\",\n \"bottom_right\", \"bottom_center\", \"bottom_left\", \"center_left\",\n or \"center\", the legend is placed within the plot area. If a\n 2-tuple, legend is placed according to the coordinates in the\n tuple.\n legend_orientation : str, default 'vertical'\n Either 'horizontal' or 'vertical'.\n tooltips : list of 2-tuples\n Specification for tooltips as per Bokeh specifications. For\n example, if we want `col1` and `col2` tooltips, we can use\n `tooltips=[('label 1': '@col1'), ('label 2': '@col2')]`. Ignored\n if `style` is 'staircase'.\n complementary : bool, default False\n If True, plot the empirical complementary cumulative\n distribution function.\n kind : str, default 'collection'\n If 'collection', the figure is populated with a collection of\n ECDFs coded with colors based on the categorical variables. 
If\n 'colored', the figure is populated with a single ECDF with\n circles colored based on the categorical variables.\n style : str, default 'dots'\n The style of ECDF to make.\n\n - dots: Each data point is plotted as a dot.\n - staircase: ECDF is plotted as a traditional staircase.\n - formal: Strictly adhere to the definition of an ECDF.\n conf_int : bool, default False\n If True, display confidence interval of ECDF.\n ptiles : list, default [2.5, 97.5]\n The percentiles to use for the confidence interval. Ignored if\n `conf_int` is False.\n n_bs_reps : int, default 1000\n Number of bootstrap replicates to do to compute confidence\n interval. Ignored if `conf_int` is False.\n click_policy : str, default 'hide'\n Either 'hide', 'mute', or None; how the glyphs respond when the\n corresponding category is clicked in the legend.\n marker : str, default 'circle'\n Name of marker to be used in the plot (ignored if `style` is\n 'staircase'). Must be one of['asterisk', 'circle',\n 'circle_cross', 'circle_x', 'cross', 'dash', 'diamond',\n 'diamond_cross', 'hex', 'inverted_triangle', 'square',\n 'square_cross', 'square_x', 'triangle', 'x']\n marker_kwargs : dict\n Keyword arguments to be passed to `p.circle()`.\n line_kwargs : dict\n Kwargs to be passed to `p.line()`, `p.ray()`, and `p.segment()`.\n conf_int_kwargs : dict\n kwargs to pass into patches depicting confidence intervals.\n horizontal : bool or None, default None\n Deprecated. Use `q_axis`.\n val : hashable\n Deprecated, use `q`.\n kwargs\n Any kwargs to be passed to `bokeh.plotting.figure()` when making\n the plot.\n\n Returns\n -------\n output : bokeh.plotting.Figure instance\n Plot populated with ECDFs.\n \"\"\"\n # Protect against mutability of dicts\n marker_kwargs = copy.copy(marker_kwargs)\n line_kwargs = copy.copy(line_kwargs)\n conf_int_kwargs = copy.copy(conf_int_kwargs)\n\n q = utils._parse_deprecations(q, q_axis, val, horizontal, \"y\")\n\n if style == \"formal\" and complementary:\n raise NotImplementedError(\"Complementary formal ECDFs not yet implemented.\")\n\n if palette is None:\n palette = colorcet.b_glasbey_category10\n elif type(palette) == str:\n palette = [palette]\n\n data, q, cats, show_legend = utils._data_cats(\n data, q, cats, show_legend, legend_label\n )\n\n cats, cols = utils._check_cat_input(\n data, cats, q, None, None, tooltips, palette, order, marker_kwargs\n )\n\n kwargs = utils._fig_dimensions(kwargs)\n\n if conf_int and \"y_axis_type\" in kwargs and kwargs[\"y_axis_type\"] == \"log\":\n warnings.warn(\n \"Cannot reliably draw confidence intervals with a y-axis on a log scale because zero cannot be represented. Omitting confidence interval.\"\n )\n conf_int = False\n if (\n conf_int\n and \"x_axis_type\" in kwargs\n and kwargs[\"x_axis_type\"] == \"log\"\n and (data[q] <= 0).any()\n ):\n warnings.warn(\n \"Cannot draw confidence intervals with a x-axis on a log scale because some values are negative. 
Any negative values will be omitted from the ECDF.\"\n )\n conf_int = False\n\n if marker_kwargs is None:\n marker_kwargs = {}\n if line_kwargs is None:\n line_kwargs = {}\n\n y = \"__ECCDF\" if complementary else \"__ECDF\"\n\n if q_axis == \"y\":\n if \"x_axis_label\" not in kwargs:\n if complementary:\n kwargs[\"x_axis_label\"] = \"ECCDF\"\n else:\n kwargs[\"x_axis_label\"] = \"ECDF\"\n else:\n if \"y_axis_label\" not in kwargs:\n if complementary:\n kwargs[\"y_axis_label\"] = \"ECCDF\"\n else:\n kwargs[\"y_axis_label\"] = \"ECDF\"\n\n if q_axis == \"y\":\n if \"y_axis_label\" not in kwargs:\n kwargs[\"y_axis_label\"] = q\n else:\n if \"x_axis_label\" not in kwargs:\n kwargs[\"x_axis_label\"] = q\n\n if style in [\"formal\", \"staircase\"] and \"line_width\" not in line_kwargs:\n line_kwargs[\"line_width\"] = 2\n\n if conf_int_kwargs is None:\n conf_int_kwargs = {}\n if \"fill_alpha\" not in conf_int_kwargs:\n conf_int_kwargs[\"fill_alpha\"] = 0.5\n if \"line_alpha\" not in conf_int_kwargs and \"line_color\" not in conf_int_kwargs:\n conf_int_kwargs[\"line_alpha\"] = 0\n\n df = data.copy()\n if kind == \"collection\":\n if style == \"dots\":\n df[y] = df.groupby(cats)[q].transform(_ecdf_y, complementary=complementary)\n elif kind == \"colored\":\n df[y] = df[q].transform(_ecdf_y, complementary=complementary)\n cols += [y]\n else:\n raise RuntimeError(\"`kind` must be in `['collection', 'colored']\")\n\n _, df[\"__label\"] = utils._source_and_labels_from_cats(df, cats)\n cols += [\"__label\"]\n\n if order is not None:\n if type(cats) in [list, tuple]:\n df[\"__sort\"] = df.apply(lambda r: order.index(tuple(r[cats])), axis=1)\n else:\n df[\"__sort\"] = df.apply(lambda r: order.index(r[cats]), axis=1)\n df = df.sort_values(by=\"__sort\")\n\n if p is None:\n p = bokeh.plotting.figure(**kwargs)\n\n if style == \"dots\":\n marker_fun = utils._get_marker(p, marker)\n\n if tooltips is not None:\n if style in [\"formal\", \"staircase\"]:\n warnings.warn(\n \"Cannot have tooltips for formal ECDFs because there are no points to hover over. 
Omitting tooltips\"\n )\n else:\n p.add_tools(bokeh.models.HoverTool(tooltips=tooltips))\n\n markers = []\n lines = []\n patches = []\n labels = []\n\n if kind == \"collection\":\n # Explicitly loop to enable click policies on the legend\n # (not possible with factors)\n for i, (name, g) in enumerate(df.groupby(cats, sort=False)):\n labels.append(g[\"__label\"].iloc[0])\n if conf_int:\n conf_int_kwargs[\"fill_color\"] = palette[i % len(palette)]\n # conf_int_kwargs[\"legend_label\"] = g[\"__label\"].iloc[0]\n p, patch = _ecdf_conf_int(\n p,\n g[q],\n complementary=complementary,\n q_axis=q_axis,\n n_bs_reps=n_bs_reps,\n ptiles=ptiles,\n **conf_int_kwargs,\n )\n patches.append(patch)\n\n marker_kwargs[\"color\"] = palette[i % len(palette)]\n # marker_kwargs[\"legend_label\"] = g[\"__label\"].iloc[0]\n line_kwargs[\"color\"] = palette[i % len(palette)]\n # line_kwargs[\"legend_label\"] = g[\"__label\"].iloc[0]\n if style == \"staircase\":\n p, new_line = _staircase_ecdf(\n p,\n data=g[q],\n complementary=complementary,\n q_axis=q_axis,\n line_kwargs=line_kwargs,\n )\n lines.append(new_line)\n elif style == \"dots\":\n if q_axis == \"y\":\n markers.append(marker_fun(source=g, x=y, y=q, **marker_kwargs))\n else:\n markers.append(marker_fun(source=g, x=q, y=y, **marker_kwargs))\n elif style == \"formal\":\n p, circle, segment = _formal_ecdf(\n p,\n data=g[q],\n complementary=complementary,\n q_axis=q_axis,\n marker_kwargs=marker_kwargs,\n line_kwargs=line_kwargs,\n )\n markers.append(circle)\n lines.append(segment)\n elif kind == \"colored\":\n if style in [\"formal\", \"staircase\"]:\n raise RuntimeError(\n \"Cannot have a formal or staircase ECDF with `kind='colored'`.\"\n )\n\n if conf_int:\n if \"fill_color\" not in conf_int_kwargs:\n conf_int_kwargs[\"fill_color\"] = \"gray\"\n\n p, patch = _ecdf_conf_int(\n p,\n df[q],\n complementary=complementary,\n q_axis=q_axis,\n n_bs_reps=n_bs_reps,\n ptiles=ptiles,\n **conf_int_kwargs,\n )\n\n y = \"__ECCDF\" if complementary else \"__ECDF\"\n\n # Explicitly loop to enable click policies on the legend (not possible with factors)\n for i, (name, g) in enumerate(df.groupby(cats, sort=False)):\n source = bokeh.models.ColumnDataSource(g[cols])\n mkwargs = marker_kwargs\n # mkwargs[\"legend_label\"] = g[\"__label\"].iloc[0]\n mkwargs[\"color\"] = palette[i % len(palette)]\n labels.append(g[\"__label\"].iloc[0])\n if q_axis == \"y\":\n markers.append(marker_fun(source=source, x=y, y=q, **mkwargs))\n else:\n markers.append(marker_fun(source=source, x=q, y=y, **mkwargs))\n\n return _dist_legend(\n p,\n show_legend,\n legend_location,\n legend_orientation,\n click_policy,\n labels,\n markers,\n lines,\n patches,\n )\n\n\ndef histogram(\n data=None,\n q=None,\n cats=None,\n palette=None,\n order=None,\n q_axis=\"x\",\n p=None,\n rug=True,\n rug_height=0.05,\n show_legend=None,\n legend_label=None,\n legend_location=\"right\",\n legend_orientation=\"vertical\",\n bins=\"freedman-diaconis\",\n density=False,\n kind=\"step_filled\",\n click_policy=\"hide\",\n line_kwargs=None,\n fill_kwargs=None,\n rug_kwargs=None,\n horizontal=None,\n val=None,\n **kwargs,\n):\n \"\"\"\n Make a plot of histograms.\n\n Parameters\n ----------\n data : Pandas DataFrame, 1D Numpy array, or xarray\n DataFrame containing tidy data for plotting. If a Numpy array,\n a single category is assumed and a box plot generated from\n data.\n q : hashable\n Name of column to use as quantitative variable if `data` is a\n Pandas DataFrame. 
Otherwise, `q` is used as the quantitative\n axis label.\n cats : hashable or list of hashables\n Name of column(s) to use as categorical variable(s).\n q_axis : str, either 'x' or 'y', default 'x'\n Axis along which the quantitative value varies.\n palette : list colors, or single color string \n If a list, color palette to use. If a single string representing\n a color, all glyphs are colored with that color. Default is\n colorcet.b_glasbey_category10 from the colorcet package.\n order : list or None\n If not None, must be a list of unique group names when the input\n data frame is grouped by `cats`. The order of the list specifies\n the ordering of the categorical variables in the legend. If\n None, the categories appear in the order in which they appeared\n in the inputted data frame.\n p : bokeh.plotting.Figure instance, or None (default)\n If None, create a new figure. Otherwise, populate the existing\n figure `p`.\n legend_label : str, default None\n If `cats` is None and `show_legend` is True, then if\n `legend_label` is not None, a legend is created for the glyph\n on the plot and labeled with `legend_label`. Otherwise, no\n legend is created if `cats` is None.\n legend_location : str, default 'right'\n Location of legend. If one of \"right\", \"left\", \"above\", or\n \"below\", the legend is placed outside of the plot area. If one\n of \"top_left\", \"top_center\", \"top_right\", \"center_right\",\n \"bottom_right\", \"bottom_center\", \"bottom_left\", \"center_left\",\n or \"center\", the legend is placed within the plot area. If a\n 2-tuple, legend is placed according to the coordinates in the\n tuple.\n legend_orientation : str, default 'vertical'\n Either 'horizontal' or 'vertical'.\n bins : int, array_like, or str, default 'freedman-diaconis'\n If int or array_like, setting for `bins` kwarg to be passed to\n `np.histogram()`. If 'exact', then each unique value in the\n data gets its own bin. If 'integer', then integer data is\n assumed and each integer gets its own bin. If 'sqrt', uses the\n square root rule to determine number of bins. If\n `freedman-diaconis`, uses the Freedman-Diaconis rule for number\n of bins.\n rug : bool, default True\n If True, also include a rug plot. If, however, `bins` is 'exact'\n or 'integer', the `rug` kwarg is ignored.\n rug_height : float, default 0.05\n Height of the rug plot as a fraction of the highest point in the\n histograms.\n density : bool, default False\n If True, normalize the histograms. Otherwise, base the\n histograms on counts.\n kind : str, default 'step_filled'\n The kind of histogram to display. Allowed values are 'step' and\n 'step_filled'.\n click_policy : str, default 'hide'\n Either 'hide', 'mute', or None; how the glyphs respond when the\n corresponding category is clicked in the legend.\n line_kwargs : dict\n Keyword arguments to pass to `p.line()` in constructing the\n histograms. By default, {\"line_width\": 2}.\n fill_kwargs : dict\n Keyword arguments to pass to `p.patch()` when making the fill\n for the step-filled histogram. Ignored if `kind = 'step'`. By\n default {\"fill_alpha\": 0.3, \"line_alpha\": 0}.\n rug_kwargs : dict\n Keyword arguments to pass to `p.multi_line()` when making the\n rug plot.\n horizontal : bool or None, default None\n Deprecated. 
Use `q_axis`.\n val : hashable\n Deprecated, use `q`.\n kwargs\n Any kwargs to be passed to `bokeh.plotting.figure()` when making\n the plot.\n\n Returns\n -------\n output : Bokeh figure\n Figure populated with histograms.\n \"\"\"\n # Protect against mutability of dicts\n line_kwargs = copy.copy(line_kwargs)\n fill_kwargs = copy.copy(fill_kwargs)\n rug_kwargs = copy.copy(rug_kwargs)\n\n if type(bins) == str and bins in [\"integer\", \"exact\"]:\n rug = False\n\n q = utils._parse_deprecations(q, q_axis, val, horizontal, \"y\")\n\n if palette is None:\n palette = colorcet.b_glasbey_category10\n elif type(palette) == str:\n palette = [palette]\n\n df, q, cats, show_legend = utils._data_cats(\n data, q, cats, show_legend, legend_label\n )\n\n if show_legend is None:\n if cats is None:\n show_legend = False\n else:\n show_legend = True\n\n if type(bins) == str and bins not in [\n \"integer\",\n \"exact\",\n \"sqrt\",\n \"freedman-diaconis\",\n ]:\n raise RuntimeError(\"Invalid bin specification.\")\n\n if cats is None:\n df[\"__cat\"] = \"__dummy_cat\"\n if show_legend:\n raise RuntimeError(\"No legend to show if `cats` is None.\")\n if order is not None:\n raise RuntimeError(\"No `order` is allowed if `cats` is None.\")\n cats = \"__cat\"\n\n cats, cols = utils._check_cat_input(\n df, cats, q, None, None, None, palette, order, kwargs\n )\n\n kwargs = utils._fig_dimensions(kwargs)\n\n if line_kwargs is None:\n line_kwargs = {\"line_width\": 2}\n if fill_kwargs is None:\n fill_kwargs = {}\n if \"fill_alpha\" not in fill_kwargs:\n fill_kwargs[\"fill_alpha\"] = 0.3\n if \"line_alpha\" not in fill_kwargs:\n fill_kwargs[\"line_alpha\"] = 0\n\n _, df[\"__label\"] = utils._source_and_labels_from_cats(df, cats)\n cols += [\"__label\"]\n\n if order is not None:\n if type(cats) in [list, tuple]:\n df[\"__sort\"] = df.apply(lambda r: order.index(tuple(r[cats])), axis=1)\n else:\n df[\"__sort\"] = df.apply(lambda r: order.index(r[cats]), axis=1)\n df = df.sort_values(by=\"__sort\")\n\n if type(bins) == str and bins == \"exact\":\n a = np.unique(df[q])\n if len(a) == 1:\n bins = np.array([a[0] - 0.5, a[0] + 0.5])\n else:\n bins = np.concatenate(\n (\n (a[0] - (a[1] - a[0]) / 2,),\n (a[1:] + a[:-1]) / 2,\n (a[-1] + (a[-1] - a[-2]) / 2,),\n )\n )\n elif type(bins) == str and bins == \"integer\":\n if np.any(df[q] != np.round(df[q])):\n raise RuntimeError(\"'integer' bins chosen, but data are not integer.\")\n bins = np.arange(df[q].min() - 1, df[q].max() + 1) + 0.5\n\n if p is None:\n kwargs = utils._fig_dimensions(kwargs)\n\n if \"x_axis_label\" not in kwargs:\n kwargs[\"x_axis_label\"] = q\n\n if \"y_axis_label\" not in kwargs:\n if density:\n kwargs[\"y_axis_label\"] = \"density\"\n else:\n kwargs[\"y_axis_label\"] = \"count\"\n if \"y_range\" not in kwargs:\n kwargs[\"y_range\"] = bokeh.models.DataRange1d(start=0)\n\n p = bokeh.plotting.figure(**kwargs)\n\n # Explicitly loop to enable click policies on the legend (not possible with factors)\n max_height = 0\n lines = []\n labels = []\n patches = []\n for i, (name, g) in enumerate(df.groupby(cats, sort=False)):\n e0, f0 = _compute_histogram(g[q], bins, density)\n\n max_height = max(f0.max(), max_height)\n\n line_kwargs[\"color\"] = palette[i % len(palette)]\n\n if q_axis == \"y\":\n lines.append(p.line(f0, e0, **line_kwargs))\n else:\n lines.append(p.line(e0, f0, **line_kwargs))\n labels.append(g[\"__label\"].iloc[0])\n\n if kind == \"step_filled\":\n x2 = [e0.min(), e0.max()]\n y2 = [0, 0]\n fill_kwargs[\"color\"] = palette[i % len(palette)]\n if q_axis 
== \"y\":\n p, patch = utils._fill_between(p, f0, e0, y2, x2, **fill_kwargs)\n else:\n p, patch = utils._fill_between(p, e0, f0, x2, y2, **fill_kwargs)\n patches.append(patch)\n\n # Put in the rug plot\n if rug:\n if rug_kwargs is None:\n rug_kwargs = dict(alpha=0.5, line_width=0.5)\n elif type(rug_kwargs) != dict:\n raise RuntimeError(\"`rug_kwargs` must be a dictionary.\")\n if \"alpha\" not in rug_kwargs and \"line_alpha\" not in rug_kwargs:\n rug_kwargs[\"alpha\"] = 0.5\n if \"line_width\" not in rug_kwargs:\n rug_kwargs[\"line_width\"] = 0.5\n\n y = [0, max_height * rug_height]\n\n for i, (name, g) in enumerate(df.groupby(cats, sort=False)):\n xs = [[q_val, q_val] for q_val in g[q].values]\n ys = [y] * len(g)\n if \"color\" not in rug_kwargs and \"line_color\" not in rug_kwargs:\n p.multi_line(xs, ys, color=palette[i % len(palette)], **rug_kwargs)\n else:\n p.multi_line(xs, ys, **rug_kwargs)\n\n return _dist_legend(\n p,\n show_legend,\n legend_location,\n legend_orientation,\n click_policy,\n labels,\n [],\n lines,\n patches,\n )\n\n\ndef _staircase_ecdf(p, data, complementary=False, q_axis=\"x\", line_kwargs={}):\n \"\"\"\n Create a plot of an ECDF.\n\n Parameters\n ----------\n p : bokeh.plotting.Figure instance, or None (default)\n If None, create a new figure. Otherwise, populate the existing\n figure `p`.\n data : array_like\n One-dimensional array of data. Nan's are ignored.\n complementary : bool, default False\n If True, plot the empirical complementary cumulative\n distribution functon.\n q_axis : str, default 'x'\n Which axis has the quantitative variable.\n line_kwargs : dict\n kwargs to be passed into p.line and p.ray.\n\n Returns\n -------\n output : bokeh.plotting.Figure instance\n Plot populated with ECDF.\n \"\"\"\n # Extract data\n data = utils._convert_data(data)\n\n # Data points on ECDF\n x, y = _ecdf_vals(data, True, complementary)\n\n # Line of steps\n if q_axis == \"y\":\n line = p.line(y, x, **line_kwargs)\n elif q_axis == \"x\":\n line = p.line(x, y, **line_kwargs)\n\n # Rays for ends\n if q_axis == \"y\":\n if complementary:\n p.ray(x=1, y=x[0], length=0, angle=-np.pi / 2, **line_kwargs)\n p.ray(x=0, y=x[-1], length=0, angle=np.pi / 2, **line_kwargs)\n else:\n p.ray(x=0, y=x[0], length=0, angle=-np.pi / 2, **line_kwargs)\n p.ray(x=1, y=x[-1], length=0, angle=np.pi / 2, **line_kwargs)\n elif q_axis == \"x\":\n if complementary:\n p.ray(x=x[0], y=1, length=0, angle=np.pi, **line_kwargs)\n p.ray(x=x[-1], y=0, length=0, angle=0, **line_kwargs)\n else:\n p.ray(x=x[0], y=0, length=0, angle=np.pi, **line_kwargs)\n p.ray(x=x[-1], y=1, length=0, angle=0, **line_kwargs)\n\n return p, line\n\n\ndef _formal_ecdf(\n p, data, complementary=False, q_axis=\"x\", marker_kwargs={}, line_kwargs={}\n):\n \"\"\"\n Create a plot of an ECDF.\n\n Parameters\n ----------\n p : bokeh.plotting.Figure instance, or None (default)\n If None, create a new figure. Otherwise, populate the existing\n figure `p`.\n data : array_like\n One-dimensional array of data. 
Nan's are ignored.\n complementary : bool, default False\n If True, plot the empirical complementary cumulative\n distribution functon.\n marker_kwargs : dict\n Any kwargs to be passed to p.circle().\n line_kwargs : dict\n Any kwargs to be passed to p.segment() and p.ray().\n\n Returns\n -------\n output : bokeh.plotting.Figure instance\n Plot populated with ECDF.\n \"\"\"\n # Extract data\n data = utils._convert_data(data)\n\n # Data points on ECDF\n x, y = _ecdf_vals(data, complementary)\n\n # Copy of marker kwargs for unfilled points\n unfilled_kwargs = marker_kwargs.copy()\n unfilled_kwargs[\"fill_color\"] = \"white\"\n\n if q_axis == \"y\":\n segment = p.segment(y[:-1], x[:-1], y[1:], x[:-1], **line_kwargs)\n p.ray(x=0, y=x[0], angle=-np.pi / 2, length=0, **line_kwargs)\n p.ray(x=1, y=x[-1], angle=np.pi / 2, length=0, **line_kwargs)\n circle = p.circle(y, x, **marker_kwargs)\n p.circle([0], [0], **unfilled_kwargs)\n p.circle(y[:-1], x[1:], **unfilled_kwargs)\n elif q_axis == \"x\":\n segment = p.segment(x[:-1], y[:-1], x[1:], y[:-1], **line_kwargs)\n p.ray(x=x[0], y=0, angle=np.pi, length=0, **line_kwargs)\n p.ray(x=x[-1], y=1, angle=0, length=0, **line_kwargs)\n circle = p.circle(x, y, **marker_kwargs)\n p.circle([0], [0], **unfilled_kwargs)\n p.circle(x[1:], y[:-1], **unfilled_kwargs)\n\n return p, circle, segment\n\n\ndef _ecdf_vals(data, staircase=False, complementary=False):\n \"\"\"Get x, y, values of an ECDF for plotting.\n Parameters\n ----------\n data : ndarray\n One dimensional Numpy array with data.\n staircase : bool, default False\n If True, generate x and y values for ECDF (staircase). If\n False, generate x and y values for ECDF as dots.\n complementary : bool\n If True, return values for ECCDF.\n\n Returns\n -------\n x : ndarray\n x-values for plot\n y : ndarray\n y-values for plot\n \"\"\"\n x = np.sort(data)\n y = np.arange(1, len(data) + 1) / len(data)\n\n if staircase:\n x, y = _to_staircase(x, y)\n if complementary:\n y = 1 - y\n elif complementary:\n y = 1 - y + 1 / len(y)\n\n return x, y\n\n\ndef _to_staircase(x, y):\n \"\"\"Convert to formal ECDF.\"\"\"\n # Set up output arrays\n x_staircase = np.empty(2 * len(x))\n y_staircase = np.empty(2 * len(x))\n\n # y-values for steps\n y_staircase[0] = 0\n y_staircase[1::2] = y\n y_staircase[2::2] = y[:-1]\n\n # x- values for steps\n x_staircase[::2] = x\n x_staircase[1::2] = x\n\n return x_staircase, y_staircase\n\n\ndef _ecdf_conf_int(\n p,\n data,\n complementary=False,\n q_axis=\"x\",\n n_bs_reps=1000,\n ptiles=[2.5, 97.5],\n **kwargs,\n):\n \"\"\"Add an ECDF confidence interval to a plot.\n\n This method of computing a confidence interval can be thought of as\n computing confidence intervals of the *inverse* ECDF in the sense\n that we compute a confidence interval for the x-values for each of\n the discrete values of the ECDF. This is equivalent to computing\n bootstrap confidence intervals for the ECDF. Here is why.\n\n Imagine we draw bootstrap samples and for each we make an ECDF.\n Let's say we make 5 such ECDFs and we wish to compute a 60%\n confidence interval. (You can generalize to arbitrary number of\n ECDFs and confidence interval.)\n\n Each of these 5 ECDFs can be defined as starting at the same point\n and ending at the same point. Specifically, they start at\n x = min(data), y = 0 and end at x = max(data), y = 1. Furthermore,\n they are all monotonically increasing functions.\n\n Now, let's say we are constructing a confidence interval for the\n ECDF at position x. 
To do so, we put a dot on the second ECDF from\n the top at x and a dot on the second ECDF from the bottom. This\n gives us the middle 60% of ECDF values.\n\n Now, say we are constructing a confidence interval for the IECDF. We\n go to ECDF value y and we find the second ECDF from the left and\n place a dot on it. We also put a dot on the second ECDF from the\n right.\n\n Because all ECDFs are monotonic and start and end at the same\n points, the dot we put on the second-leftmost ECDF is also on the\n second curve from the top for some other x. Similarly, the\n second-rightmost ECDF is also on the second curve from the bottom\n for some other x. (You can sketch this out, and it becomes clear.)\n\n So, any dot we put on an ECDF for computing a confidence interval\n for an IECDF is also a dot we would put on an ECDF for computing a\n confidence of the ECDF. If we want to compute the confidence\n interval over the whole domain of x-values, we will cover the same\n set of points if we compute the confidence interval of the ECDF or\n the IECDF. So, we end up filling between the same two sets of\n curves.\n\n It turns out that the IECDF formulation is actually much easier to\n implement.\n \"\"\"\n data = utils._convert_data(data)\n\n bs_reps = np.array(\n [np.sort(np.random.choice(data, size=len(data))) for _ in range(n_bs_reps)]\n )\n\n # Compute the confidence intervals\n iecdf_low, iecdf_high = np.percentile(np.array(bs_reps), ptiles, axis=0)\n\n # y-values for ECDFs\n y = np.arange(1, len(data) + 1) / len(data)\n\n # Make them staircases\n x_low, y_plot = _to_staircase(x=iecdf_low, y=y)\n x_high, _ = _to_staircase(x=iecdf_high, y=y)\n\n if q_axis == \"y\":\n if complementary:\n p, patch = utils._fill_between(\n p, x1=1 - y_plot, y1=x_low, x2=1 - y_plot, y2=x_high, **kwargs\n )\n else:\n p, patch = utils._fill_between(\n p, x1=y_plot, y1=x_low, x2=y_plot, y2=x_high, **kwargs\n )\n elif q_axis == \"x\":\n if complementary:\n p, patch = utils._fill_between(\n p, x1=x_low, y1=1 - y_plot, x2=x_high, y2=1 - y_plot, **kwargs\n )\n else:\n p, patch = utils._fill_between(\n p, x1=x_low, y1=y_plot, x2=x_high, y2=y_plot, **kwargs\n )\n else:\n raise RuntimeError(\"`q_axis` must be either 'x' or 'y'.\")\n\n return p, patch\n\n\ndef _ecdf_y(data, complementary=False):\n \"\"\"Give y-values of an ECDF for an unsorted column in a data frame.\n\n Parameters\n ----------\n data : Pandas Series\n Series (or column of a DataFrame) from which to generate ECDF\n values\n complementary : bool, default False\n If True, give the ECCDF values.\n\n Returns\n -------\n output : Pandas Series\n Corresponding y-values for an ECDF when plotted with dots.\n\n Notes\n -----\n .. 
This only works for plotting an ECDF with points, not for formal\n or staircase ECDFs\n \"\"\"\n if complementary:\n return 1 - data.rank(method=\"first\") / len(data) + 1 / len(data)\n else:\n return data.rank(method=\"first\") / len(data)\n\n\ndef _dist_legend(\n p,\n show_legend,\n legend_location,\n legend_orientation,\n click_policy,\n labels,\n markers,\n lines,\n patches,\n):\n \"\"\"Add a legend to a histogram or ECDF plot.\n \"\"\"\n if show_legend:\n if len(markers) > 0:\n if len(lines) > 0:\n if len(patches) > 0:\n items = [\n (label, [marker, line, patch])\n for label, marker, line, patch in zip(\n labels, markers, lines, patches\n )\n ]\n else:\n items = [\n (label, [marker, line])\n for label, marker, line in zip(labels, lines, markers)\n ]\n else:\n if len(patches) > 0:\n items = [\n (label, [marker, patch])\n for label, marker, patch in zip(labels, markers, patches)\n ]\n else:\n items = [\n (label, [marker]) for label, marker in zip(labels, markers)\n ]\n else:\n if len(patches) > 0:\n items = [\n (label, [line, patch])\n for label, line, patch in zip(labels, lines, patches)\n ]\n else:\n items = [(label, [line]) for label, line in zip(labels, lines)]\n\n if len(p.legend) == 1:\n for item in items:\n p.legend.items.append(\n bokeh.models.LegendItem(label=item[0], renderers=item[1])\n )\n else:\n if len(p.legend) > 1:\n warnings.warn(\n \"Ambiguous which legend to add glyphs to. Creating new legend.\"\n )\n if legend_location in [\"right\", \"left\", \"above\", \"below\"]:\n legend = bokeh.models.Legend(\n items=items, location=\"center\", orientation=legend_orientation\n )\n p.add_layout(legend, legend_location)\n elif (\n legend_location\n in [\n \"top_left\",\n \"top_center\",\n \"top_right\",\n \"center_right\",\n \"bottom_right\",\n \"bottom_center\",\n \"bottom_left\",\n \"center_left\",\n \"center\",\n ]\n or type(legend_location) == tuple\n ):\n legend = bokeh.models.Legend(\n items=items,\n location=legend_location,\n orientation=legend_orientation,\n )\n p.add_layout(legend, \"center\")\n else:\n raise RuntimeError(\n 'Invalid `legend_location`. Must be a 2-tuple specifying location or one of [\"right\", \"left\", \"above\", \"below\", \"top_left\", \"top_center\", \"top_right\", \"center_right\", \"bottom_right\", \"bottom_center\", \"bottom_left\", \"center_left\", \"center\"]'\n )\n\n p.legend.click_policy = click_policy\n\n return p\n\n\ndef _compute_histogram(data, bins, density):\n if type(bins) == str and bins == \"sqrt\":\n bins = int(np.ceil(np.sqrt(len(data))))\n elif type(bins) == str and bins == \"freedman-diaconis\":\n h = 2 * (np.percentile(data, 75) - np.percentile(data, 25)) / np.cbrt(len(data))\n if h == 0.0:\n bins = 3\n else:\n bins = int(np.ceil((data.max() - data.min()) / h))\n\n f, e = np.histogram(data, bins=bins, density=density)\n e0 = np.empty(2 * len(e))\n f0 = np.empty(2 * len(e))\n e0[::2] = e\n e0[1::2] = e\n f0[0] = 0\n f0[-1] = 0\n f0[1:-1:2] = f\n f0[2:-1:2] = f\n\n return e0, f0\n"
] | [
[
"numpy.histogram",
"numpy.round",
"numpy.percentile",
"numpy.sort",
"numpy.concatenate",
"numpy.array",
"numpy.unique"
]
] |
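Note on the ECDF construction in the plotting module above: _ecdf_vals() computes the dot values as k/n over the sorted data, and _to_staircase() interleaves duplicated x-values so the curve renders as flat steps. Below is a minimal NumPy-only sketch of that construction; the function names here are illustrative and not part of the module.

import numpy as np

def ecdf_dots(data):
    # Sort the data; the y-value of the k-th smallest point is k/n.
    x = np.sort(np.asarray(data, dtype=float))
    y = np.arange(1, len(x) + 1) / len(x)
    return x, y

def ecdf_staircase(data):
    # Duplicate each x and interleave the y-values so the ECDF is drawn as
    # flat steps between data points, mirroring _to_staircase() above.
    x, y = ecdf_dots(data)
    xs = np.repeat(x, 2)
    ys = np.empty(2 * len(x))
    ys[0] = 0.0
    ys[1::2] = y
    ys[2::2] = y[:-1]
    return xs, ys

if __name__ == "__main__":
    data = [1.2, 0.7, 3.4, 2.2, 2.2]
    print(ecdf_dots(data))
    print(ecdf_staircase(data))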
ccacNMorris/dat129_ccac | [
"587e35f7886d1e883ad988cbe2ec027eb9cf3043"
] | [
"icon.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 2 18:34:05 2020\n\n@author: leemshari\n\"\"\"\n# I wanted to create an n cubed icon\n#My goal was to get this to get this to print with & or @ signs but I was unable to \n#get this array to look decent with anything but integers.\n#I changed the dtype to str and the int_array[a,b] to @ but the array would be off \nimport numpy as np\n#function to pass in an array and tuple so that we can get image\ndef icon (tuple_list,int_array):\n#tuple is basically coordinates for the image to populate in\n for a,b in tuple_list:\n#I set the value that will populated at the coordinates to 3\n int_array[a,b] = 3\n\n return int_array\n\n#function to manipulate arrary and elements in it\ndef roll_rotate(a_tuple,a_array):\n#We want the array with the image to rotate and roll \n b_array = icon(a_tuple,a_array)\n#Numpy has different functions already built into it to manipulate arrays\n print(np.roll(b_array,1))\n \n print('')\n \n print(np.flipud(b_array)) \n \n#Inention was to scale array up to 15x15 array \ndef resize(b_tuple,b_array):\n#Need to grab image again so that it can be manipulated \n c_array = icon(b_tuple,b_array)\n#Output makes the icon unreadable unfortunately but this numpy function will make it bigger \n print(np.resize(c_array,(15,15)))\n\ndef main():\n#Tuple that will be passed into the functions above\n image = ((0,6),(0,7),(0,8),(1,8),(2,7),(2,8),(3,8),(4,1),(4,6),(4,7),(4,8),\n (5,1),(5,2),(5,3),(5,4),(5,5),(6,1),(6,5),(7,1),(7,5),(8,1),(8,5),(9,1),(9,5))\n#Array full of zeros that will be populated with 3s at correct coordinates\n image_array = np.zeros((10,10), dtype = int)\n#printing image with tuple and array passed in\n print(icon(image,image_array))\n \n print('')\n#Calling function to manipulate array \n roll_rotate(image,image_array)\n \n print('')\n#Calling function to scale array up \n resize(image,image_array)\n\nmain()\n\n\n"
] | [
[
"numpy.roll",
"numpy.flipud",
"numpy.zeros",
"numpy.resize"
]
] |
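The icon.py file above exercises three NumPy array transforms (np.roll, np.flipud, np.resize), and its comments note that the resized icon comes out unreadable. A tiny standalone illustration on a made-up 3x3 array shows why: np.resize fills the new shape by repeating the flattened data rather than rescaling the image.

import numpy as np

# Toy 3x3 array standing in for the 10x10 icon built in icon.py above.
a = np.arange(9).reshape(3, 3)

print(np.roll(a, 1))         # shift elements by one position in flattened order (8 wraps to the front)
print(np.flipud(a))          # reverse the row order (flip upside down)
print(np.resize(a, (4, 4)))  # repeat the flattened data to fill the larger shape, scrambling the "image"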
KaijuML/data2text-macro-plan-py | [
"17cebc5db507723d601d21a075adea59b0bd9ffb"
] | [
"onmt/translate/translation.py"
] | [
"\"\"\" Translation main class \"\"\"\nfrom __future__ import unicode_literals, print_function\n\nimport torch\nfrom onmt.inputters.text_dataset import TextMultiField\n\n\nclass TranslationBuilder(object):\n \"\"\"\n Build a word-based translation from the batch output\n of translator and the underlying dictionaries.\n\n Replacement based on \"Addressing the Rare Word\n Problem in Neural Machine Translation\" :cite:`Luong2015b`\n\n Args:\n data (onmt.inputters.Dataset): Data.\n fields (List[Tuple[str, torchtext.data.Field]]): data fields\n n_best (int): number of translations produced\n replace_unk (bool): replace unknown words using attention\n has_tgt (bool): will the batch have gold targets\n \"\"\"\n\n def __init__(self, data, fields, n_best=1, replace_unk=False,\n has_tgt=False, phrase_table=\"\"):\n self.data = data\n self.fields = fields\n self._has_text_src = isinstance(\n dict(self.fields)[\"src\"], TextMultiField)\n self.n_best = n_best\n self.replace_unk = replace_unk\n self.phrase_table = phrase_table\n self.has_tgt = has_tgt\n\n def _build_target_tokens(self, src, src_vocab, src_raw, pred, attn):\n tgt_field = dict(self.fields)[\"tgt\"].base_field\n eos_idx = tgt_field.vocab.stoi[tgt_field.eos_token]\n vocab = tgt_field.vocab\n tokens = []\n for tok in pred:\n tokens.append(str(tok.item()))\n if tokens[-1] == eos_idx:\n tokens = tokens[:-1]\n break\n if self.replace_unk and attn is not None and src is not None:\n for i in range(len(tokens)):\n if tokens[i] == tgt_field.unk_token:\n _, max_index = attn[i][:len(src_raw)].max(0)\n tokens[i] = src_raw[max_index.item()]\n if self.phrase_table != \"\":\n with open(self.phrase_table, \"r\") as f:\n for line in f:\n if line.startswith(src_raw[max_index.item()]):\n tokens[i] = line.split('|||')[1].strip()\n return tokens\n\n def from_batch(self, translation_batch):\n batch = translation_batch[\"batch\"]\n assert(len(translation_batch[\"gold_score\"]) ==\n len(translation_batch[\"predictions\"]))\n batch_size = batch.batch_size\n\n preds, pred_score, attn, gold_score, indices = list(zip(\n *sorted(zip(translation_batch[\"predictions\"],\n translation_batch[\"scores\"],\n translation_batch[\"attention\"],\n translation_batch[\"gold_score\"],\n batch.indices.data),\n key=lambda x: x[-1])))\n\n # Sorting\n inds, perm = torch.sort(batch.indices)\n if self._has_text_src:\n src = batch.src[0][:, :, 0].index_select(1, perm)\n else:\n src = None\n tgt = batch.tgt[:, :, 0].index_select(1, perm) \\\n if self.has_tgt else None\n\n translations = []\n for b in range(batch_size):\n if self._has_text_src:\n src_vocab = self.data.src_vocabs[inds[b]] \\\n if self.data.src_vocabs else None\n src_raw = self.data.examples[inds[b]].src[0]\n else:\n src_vocab = None\n src_raw = None\n pred_sents = [self._build_target_tokens(\n src[:, b] if src is not None else None,\n src_vocab, src_raw,\n preds[b][n], attn[b][n])\n for n in range(self.n_best)]\n gold_sent = None\n if tgt is not None:\n gold_sent = self._build_target_tokens(\n src[:, b] if src is not None else None,\n src_vocab, src_raw,\n tgt[1:, b] if tgt is not None else None, None)\n\n translation = Translation(\n src[:, b] if src is not None else None,\n src_raw, pred_sents, attn[b], pred_score[b],\n gold_sent, gold_score[b]\n )\n translations.append(translation)\n\n return translations\n\n\nclass Translation(object):\n \"\"\"Container for a translated sentence.\n\n Attributes:\n src (LongTensor): Source word IDs.\n src_raw (List[str]): Raw source words.\n pred_sents (List[List[str]]): Words from the 
n-best translations.\n pred_scores (List[List[float]]): Log-probs of n-best translations.\n attns (List[FloatTensor]) : Attention distribution for each\n translation.\n gold_sent (List[str]): Words from gold translation.\n gold_score (List[float]): Log-prob of gold translation.\n \"\"\"\n\n __slots__ = [\"src\", \"src_raw\", \"pred_sents\", \"attns\", \"pred_scores\",\n \"gold_sent\", \"gold_score\"]\n\n def __init__(self, src, src_raw, pred_sents,\n attn, pred_scores, tgt_sent, gold_score):\n self.src = src\n self.src_raw = src_raw\n self.pred_sents = pred_sents\n self.attns = attn\n self.pred_scores = pred_scores\n self.gold_sent = tgt_sent\n self.gold_score = gold_score\n\n def log(self, sent_number):\n \"\"\"\n Log translation.\n \"\"\"\n\n msg = ['\\nSENT {}: {}\\n'.format(sent_number, self.src_raw)]\n\n best_pred = self.pred_sents[0]\n best_score = self.pred_scores[0]\n pred_sent = ' '.join(best_pred)\n msg.append('PRED {}: {}\\n'.format(sent_number, pred_sent))\n msg.append(\"PRED SCORE: {:.4f}\\n\".format(best_score))\n\n if self.gold_sent is not None:\n tgt_sent = ' '.join(self.gold_sent)\n msg.append('GOLD {}: {}\\n'.format(sent_number, tgt_sent))\n msg.append((\"GOLD SCORE: {:.4f}\\n\".format(self.gold_score)))\n if len(self.pred_sents) > 1:\n msg.append('\\nBEST HYP:\\n')\n for score, sent in zip(self.pred_scores, self.pred_sents):\n msg.append(\"[{:.4f}] {}\\n\".format(score, sent))\n\n return \"\".join(msg)\n"
] | [
[
"torch.sort"
]
] |
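TranslationBuilder.from_batch() above restores the original example order of a length-sorted batch by sorting batch.indices and applying the resulting permutation with index_select. A minimal sketch of that reordering trick follows, using made-up toy tensors rather than real ONMT batch objects.

import torch

# `indices` records each batch slot's original dataset position; `src` is a
# toy (1, batch) tensor whose columns are in batch (sorted) order.
indices = torch.tensor([2, 0, 3, 1])
src = torch.tensor([[30, 10, 40, 20]])

# Sorting the indices yields a permutation `perm` such that perm[i] is the
# batch slot holding original example i, as in from_batch() above.
inds, perm = torch.sort(indices)
restored = src.index_select(1, perm)

print(inds)      # tensor([0, 1, 2, 3])
print(restored)  # tensor([[10, 20, 30, 40]])  -- columns back in original example order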
jonathansick/skyoffset | [
"369f54d8a237f48cd56f550e80bf1d39b355bfcd"
] | [
"skyoffset/diffplot.py"
] | [
"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\nPlot distributions of difference pixels.\n\"\"\"\n\nimport os\n\nimport numpy as np\nimport astropy.io.fits\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\nimport matplotlib.gridspec as gridspec\n\n\ndef plot_diffs(mosaic_doc, plot_dir):\n \"\"\"Make diff pixels histogram plots for all differences in the given\n mosaic document.\n \n Parameters\n ----------\n mosaic_doc : dict\n The document from MosaicDB for this mosaic.\n plot_dir : str\n Directory to save plots to.\n \"\"\"\n if not os.path.exists(plot_dir):\n os.makedirs(plot_dir)\n for pair_key, diff in mosaic_doc['couplings']['diff_paths'].iteritems():\n median = mosaic_doc['couplings']['diffs'][pair_key]\n sigma = mosaic_doc['couplings']['sigmas'][pair_key]\n plot_path = os.path.join(plot_dir, pair_key)\n plot_diff(diff, median, sigma, plot_path)\n\n\ndef plot_diff(diff_path, median, sigma, plot_path):\n \"\"\"Plot histogram of the difference image.\"\"\"\n fits = astropy.io.fits.open(diff_path)\n pixels = fits[0].data\n pixels = pixels[np.isfinite(pixels)].ravel()\n\n fig = Figure(figsize=(3.5, 3.5))\n canvas = FigureCanvas(fig)\n gs = gridspec.GridSpec(1, 1, left=0.15, right=0.95, bottom=0.15, top=0.95,\n wspace=None, hspace=None, width_ratios=None, height_ratios=None)\n ax = fig.add_subplot(gs[0])\n ax.hist(pixels, 1000, histtype='stepfilled',\n edgecolor='None', facecolor='dodgerblue')\n ax.axvline(median, ls='-', c='k', lw=2)\n ax.axvline(median - sigma, ls='--', c='k', lw=1)\n ax.axvline(median + sigma, ls='--', c='k', lw=1)\n ax.text(0.1, 0.9, r\"$%.2f \\pm %.2f$\" % (median, sigma),\n ha='left', va='top',\n transform=ax.transAxes)\n ax.set_xlim(median - 3 * sigma, median + 3 * sigma)\n gs.tight_layout(fig, pad=1.08, h_pad=None, w_pad=None, rect=None)\n canvas.print_figure(plot_path + \".pdf\", format=\"pdf\")\n\n fits.close()\n"
] | [
[
"matplotlib.figure.Figure",
"matplotlib.gridspec.GridSpec",
"numpy.isfinite",
"matplotlib.backends.backend_agg.FigureCanvasAgg"
]
] |
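diffplot.py above renders entirely headless: it builds a Figure, attaches a FigureCanvasAgg, and writes the output with canvas.print_figure(), never importing pyplot. A minimal sketch of the same pattern is below; the synthetic pixel data and the output filename are placeholders, not values from the repository.

import numpy as np
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas

# Synthetic "difference pixels" standing in for the FITS data used above.
pixels = np.random.normal(loc=0.1, scale=0.5, size=10000)
median, sigma = np.median(pixels), np.std(pixels)

# Agg-backend pattern: Figure + canvas, then print directly to disk.
fig = Figure(figsize=(3.5, 3.5))
canvas = FigureCanvas(fig)
ax = fig.add_subplot(1, 1, 1)
ax.hist(pixels, 100, histtype='stepfilled', edgecolor='None', facecolor='dodgerblue')
ax.axvline(median, ls='-', c='k', lw=2)          # central value
ax.axvline(median - sigma, ls='--', c='k', lw=1) # +/- one sigma
ax.axvline(median + sigma, ls='--', c='k', lw=1)
ax.set_xlim(median - 3 * sigma, median + 3 * sigma)
canvas.print_figure("diff_hist.pdf", format="pdf")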
Alenichel/CodiglioniNichelini_recsys-polimi-2019 | [
"ca97e71da7612644833c20155a0d4d3a57850527",
"ca97e71da7612644833c20155a0d4d3a57850527"
] | [
"src/run_utils.py",
"src/Base/Evaluation/metrics.py"
] | [
"#!/usr/bin/env python3\n\nimport os\nfrom enum import Enum\nimport numpy as np\nimport scipy.sparse as sps\nfrom sklearn.preprocessing import LabelEncoder\nfrom tqdm import tqdm, trange\nfrom cython_modules.leave_one_out import train_test_loo_split as __train_test_loo_split_cython\nfrom csv_utils import load_csv, export_csv\nfrom multiprocessing import Pool\nfrom collections import namedtuple\n\n\nclass DataFiles:\n TRAIN = 'data/data_train.csv'\n TARGET_USERS_TEST = 'data/data_target_users_test.csv'\n ICM_ASSET = 'data/data_ICM_asset.csv'\n ICM_PRICE = 'data/data_ICM_price.csv'\n ICM_SUBCLASS = 'data/data_ICM_sub_class.csv'\n UCM_AGE = 'data/data_UCM_age.csv'\n UCM_REGION = 'data/data_UCM_region.csv'\n CLUSTERS = 'data/user_clustered.csv'\n\n\nclass SplitType(Enum):\n PROBABILISTIC = 1\n LOO = 2\n LOO_CYTHON = 3\n\n\ndef set_seed(seed):\n print('seed = {0}'.format(seed))\n os.environ['RECSYS_SEED'] = str(seed)\n np.random.seed(seed)\n\n\ndef get_seed():\n env = os.getenv('RECSYS_SEED')\n if env:\n return int(env)\n return -1\n\n\ndef build_urm():\n urm_data = load_csv(DataFiles.TRAIN)\n urm_data = [[int(row[i]) if i <= 1 else int(float(row[i])) for i in range(len(row))] for row in urm_data]\n users, items, ratings = map(np.array, zip(*urm_data))\n return sps.csr_matrix((ratings, (users, items)))\n\n\ndef clusterize():\n data = load_csv(DataFiles.CLUSTERS)\n data = [[int(row[i]) for i in range(len(row))] for row in data]\n _, user_ids, cluster_ids = map(list, zip(*data))\n assert len(user_ids) == len(cluster_ids)\n data_len = len(user_ids)\n clusters = dict()\n for n in range(max(cluster_ids) + 1):\n clusters[n] = list()\n for i in range(data_len):\n user_id = user_ids[i]\n cluster_id = cluster_ids[i]\n clusters[cluster_id].append(user_id)\n return clusters\n\n\ndef get_cold_users(urm_train, return_warm=False):\n profile_lengths = np.ediff1d(urm_train.indptr)\n cold_users = np.where(profile_lengths == 0)[0]\n if return_warm:\n warm_users = np.where(profile_lengths > 0)[0]\n return cold_users, warm_users\n return cold_users\n\n\ndef build_price_icm(n_items):\n price_icm_items, _, price_icm_values = __load_icm_csv(DataFiles.ICM_PRICE, third_type=float)\n price_icm_values = __encode_values(price_icm_values)\n n_features = max(price_icm_values) + 1\n shape = (n_items, n_features)\n ones = np.ones(len(price_icm_values))\n price_icm = sps.csr_matrix((ones, (price_icm_items, price_icm_values)), shape=shape, dtype=int)\n return price_icm\n\n\ndef build_asset_icm(n_items):\n asset_icm_items, _, asset_icm_values = __load_icm_csv(DataFiles.ICM_ASSET, third_type=float)\n asset_icm_values += 1\n asset_icm_values = __encode_values(asset_icm_values)\n n_features = max(asset_icm_values) + 1\n shape = (n_items, n_features)\n ones = np.ones(len(asset_icm_values))\n asset_icm = sps.csr_matrix((ones, (asset_icm_items, asset_icm_values)), shape=shape, dtype=int)\n return asset_icm\n\n\ndef build_subclass_icm(n_items):\n subclass_icm_items, subclass_icm_features, subclass_icm_values = __load_icm_csv(DataFiles.ICM_SUBCLASS, third_type=float)\n n_features = max(subclass_icm_features) + 1\n shape = (n_items, n_features)\n subclass_icm = sps.csr_matrix((subclass_icm_values, (subclass_icm_items, subclass_icm_features)), shape=shape, dtype=int)\n return subclass_icm\n\n\ndef build_icm(n_items):\n price_icm = build_price_icm(n_items)\n asset_icm = build_asset_icm(n_items)\n subclass_icm = build_subclass_icm(n_items)\n return sps.hstack((price_icm, asset_icm, subclass_icm)).tocsr()\n\n\ndef build_age_ucm(n_users):\n 
age_ucm_users, age_ucm_features, age_ucm_values = __load_icm_csv(DataFiles.UCM_AGE, third_type=float)\n n_features = max(age_ucm_features) + 1\n shape = (n_users, n_features)\n age_ucm = sps.csr_matrix((age_ucm_values, (age_ucm_users, age_ucm_features)), shape=shape, dtype=int)\n return age_ucm\n\n\ndef build_region_ucm(n_users):\n region_ucm_users, region_ucm_features, region_ucm_values = __load_icm_csv(DataFiles.UCM_REGION, third_type=float)\n n_features = max(region_ucm_features) + 1\n shape = (n_users, n_features)\n region_ucm = sps.csr_matrix((region_ucm_values, (region_ucm_users, region_ucm_features)), shape=shape, dtype=int)\n return region_ucm\n\n\ndef build_ucm(n_users):\n age_ucm = build_age_ucm(n_users)\n region_ucm = build_region_ucm(n_users)\n return sps.hstack((age_ucm, region_ucm))\n\n\ndef build_target_users():\n target_users = load_csv(DataFiles.TARGET_USERS_TEST)\n return [int(x[0]) for x in target_users]\n\n\ndef build_all_matrices():\n urm = build_urm()\n n_users, n_items = urm.shape\n icm = build_icm(n_items)\n ucm = build_ucm(n_users)\n target_users = build_target_users()\n return urm, icm, ucm, target_users\n\n\ndef train_test_split(urm, split_type=SplitType.PROBABILISTIC, split=0.8):\n if split_type == SplitType.PROBABILISTIC:\n return __train_test_split(urm, split)\n elif split_type == SplitType.LOO:\n return __train_test_loo_split(urm)\n elif split_type == SplitType.LOO_CYTHON:\n return __train_test_loo_split_cython(urm)\n\n\ndef evaluate(recommender, urm_test, excluded_users=[], cython=False, verbose=True):\n from evaluation import evaluate_algorithm\n if cython:\n if verbose:\n print('Ignoring argument excluded_users')\n from cython_modules.evaluation import evaluate_cython\n if verbose:\n print('Using Cython evaluation')\n return evaluate_cython(recommender, urm_test, verbose=verbose)\n else:\n return evaluate_algorithm(recommender, urm_test, excluded_users=excluded_users, verbose=verbose)\n\n\ndef evaluate_mp(recommender, urm_tests, excluded_users=[], cython=False, verbose=True, n_processes=0):\n assert type(urm_tests) == list\n assert len(urm_tests) >= 1\n assert type(n_processes) == int\n if n_processes == 0:\n n_processes = len(urm_tests)\n with Pool(processes=n_processes) as pool:\n args = [(recommender, urm_test, excluded_users, cython, verbose) for urm_test in urm_tests]\n maps = pool.starmap(evaluate, args, chunksize=1)\n maps = [x['MAP'] for x in maps]\n return np.mean(maps)\n\n\ndef export(target_users, recommender):\n print('Exporting recommendations...')\n data = list()\n for u_id in tqdm(target_users, desc='Export'):\n data.append((u_id, recommender.recommend(u_id, at=10)))\n export_csv(('user_id', 'item_list'), data)\n print('OK')\n\n\ndef __train_test_split(urm, split=0.8):\n print('Using probabilistic splitting ({0:.2f}/{1:.2f})'.format(split, 1-split))\n urm = urm.tocoo()\n num_interactions = urm.nnz\n shape = urm.shape\n train_mask = np.random.choice([True, False], num_interactions, p=[split, 1-split])\n urm_train = sps.coo_matrix((urm.data[train_mask], (urm.row[train_mask], urm.col[train_mask])), shape=shape)\n urm_train = urm_train.tocsr()\n test_mask = np.logical_not(train_mask)\n urm_test = sps.coo_matrix((urm.data[test_mask], (urm.row[test_mask], urm.col[test_mask])), shape=shape)\n urm_test = urm_test.tocsr()\n return urm_train, urm_test\n\n\ndef __train_test_loo_split(urm):\n print('Using LeaveOneOut')\n urm = urm.tocsr()\n num_users = urm.shape[0]\n num_items = urm.shape[1]\n urm_train = urm.copy()\n urm_test = 
sps.lil_matrix((num_users, num_items), dtype=int)\n for user_id in trange(num_users, desc='LeaveOneOut'):\n start_pos = urm_train.indptr[user_id]\n end_pos = urm_train.indptr[user_id + 1]\n user_profile = urm_train.indices[start_pos:end_pos]\n if user_profile.size > 0:\n item_id = np.random.choice(user_profile, 1)\n urm_train[user_id, item_id] = 0\n urm_test[user_id, item_id] = 1\n urm_test = sps.csr_matrix(urm_test, dtype=int, shape=urm.shape)\n urm_train.eliminate_zeros()\n urm_test.eliminate_zeros()\n return urm_train, urm_test\n\n\ndef __load_icm_csv(filename, third_type):\n data = load_csv(filename)\n data = [[int(row[i]) if i <= 1 else third_type(row[i]) for i in range(len(row))] for row in data]\n items, features, values = map(np.array, zip(*data))\n return items, features, values\n\n\ndef __encode_values(values):\n le = LabelEncoder()\n le.fit(values)\n return le.transform(values)\n\n\ngroup_struct = namedtuple('group_struct', ['in_group', 'not_in_group'])\n\n\ndef user_segmenter(urm_train, n_groups=10):\n groups = dict()\n users = dict()\n profile_length = np.ediff1d(urm_train.indptr)\n group_size = int(profile_length.size/n_groups)\n sorted_users = np.argsort(profile_length)\n for group_id in range(n_groups):\n start_pos = group_id * group_size\n end_pos = min((group_id + 1) * group_size, len(profile_length))\n users_in_group = sorted_users[start_pos:end_pos]\n for user in users_in_group:\n users[user] = group_id\n users_not_in_group_flag = np.isin(sorted_users, users_in_group, invert=True)\n users_not_in_group = sorted_users[users_not_in_group_flag]\n groups[group_id] = group_struct(in_group=users_in_group, not_in_group=users_not_in_group)\n return groups, users\n\n\ndef multiple_splitting(seeds=(4951, 893, 2618, 39, 4947)):\n urm, icm, ucm, target_users = build_all_matrices()\n trains = list()\n tests = list()\n for seed in seeds:\n set_seed(seed)\n urm_train, urm_test = train_test_split(urm)\n trains.append(urm_train)\n tests.append(urm_test)\n return trains, tests, seeds\n\n\nif __name__ == '__main__':\n from evaluation import evaluate_by_cluster\n from cf import ItemCFKNNRecommender\n from basic_recommenders import TopPopRecommender\n\n np.random.seed(42)\n urm, icm, ucm, target_users = build_all_matrices()\n urm_train, urm_test = train_test_split(urm, SplitType.PROBABILISTIC)\n top_pop = TopPopRecommender()\n top_pop.fit(urm_train)\n cf = ItemCFKNNRecommender(fallback_recommender=top_pop)\n cf.fit(urm_train, top_k=690, shrink=66, normalize=False, similarity='tanimoto')\n evaluate_by_cluster(cf, urm_test, clusterise())",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n\n@author: Maurizio Ferrari Dacrema, Massimo Quadrana\n\"\"\"\n\n\nimport numpy as np\nimport unittest\n\nclass Metrics_Object(object):\n \"\"\"\n Abstract class that should be used as superclass of all metrics requiring an object, therefore a state, to be computed\n \"\"\"\n def __init__(self):\n pass\n\n def add_recommendations(self, recommended_items_ids):\n raise NotImplementedError()\n\n def get_metric_value(self):\n raise NotImplementedError()\n\n def merge_with_other(self, other_metric_object):\n raise NotImplementedError()\n\n\n\nclass Coverage_Item(Metrics_Object):\n \"\"\"\n Item coverage represents the percentage of the overall items which were recommended\n https://gab41.lab41.org/recommender-systems-its-not-all-about-the-accuracy-562c7dceeaff\n \"\"\"\n\n def __init__(self, n_items, ignore_items):\n super(Coverage_Item, self).__init__()\n self.recommended_mask = np.zeros(n_items, dtype=np.bool)\n self.n_ignore_items = len(ignore_items)\n\n def add_recommendations(self, recommended_items_ids):\n if len(recommended_items_ids) > 0:\n self.recommended_mask[recommended_items_ids] = True\n\n def get_metric_value(self):\n return self.recommended_mask.sum()/(len(self.recommended_mask)-self.n_ignore_items)\n\n\n def merge_with_other(self, other_metric_object):\n assert other_metric_object is Coverage_Item, \"Coverage_Item: attempting to merge with a metric object of different type\"\n\n self.recommended_mask = np.logical_or(self.recommended_mask, other_metric_object.recommended_mask)\n\n\n\n\nclass Coverage_User(Metrics_Object):\n \"\"\"\n User coverage represents the percentage of the overall users for which we can make recommendations.\n If there is at least one recommendation the user is considered as covered\n https://gab41.lab41.org/recommender-systems-its-not-all-about-the-accuracy-562c7dceeaff\n \"\"\"\n\n def __init__(self, n_users, ignore_users):\n super(Coverage_User, self).__init__()\n self.users_mask = np.zeros(n_users, dtype=np.bool)\n self.n_ignore_users = len(ignore_users)\n\n def add_recommendations(self, recommended_items_ids, user_id):\n self.users_mask[user_id] = len(recommended_items_ids)>0\n\n def get_metric_value(self):\n return self.users_mask.sum()/(len(self.users_mask)-self.n_ignore_users)\n\n def merge_with_other(self, other_metric_object):\n assert other_metric_object is Coverage_User, \"Coverage_User: attempting to merge with a metric object of different type\"\n\n self.users_mask = np.logical_or(self.users_mask, other_metric_object.users_mask)\n\n\n\n\nclass MAP(Metrics_Object):\n \"\"\"\n Mean Average Precision, defined as the mean of the AveragePrecision over all users\n\n \"\"\"\n\n def __init__(self):\n super(MAP, self).__init__()\n self.cumulative_AP = 0.0\n self.n_users = 0\n\n def add_recommendations(self, is_relevant, pos_items):\n self.cumulative_AP += average_precision(is_relevant, pos_items)\n self.n_users += 1\n\n def get_metric_value(self):\n return self.cumulative_AP/self.n_users\n\n def merge_with_other(self, other_metric_object):\n assert other_metric_object is MAP, \"MAP: attempting to merge with a metric object of different type\"\n\n self.cumulative_AP += other_metric_object.cumulative_AP\n self.n_users += other_metric_object.n_users\n\n\n\n\n\nclass MRR(Metrics_Object):\n \"\"\"\n Mean Reciprocal Rank, defined as the mean of the Reciprocal Rank over all users\n\n \"\"\"\n\n def __init__(self):\n super(MRR, self).__init__()\n self.cumulative_RR = 0.0\n self.n_users = 0\n\n def 
add_recommendations(self, is_relevant):\n self.cumulative_RR += rr(is_relevant)\n self.n_users += 1\n\n def get_metric_value(self):\n return self.cumulative_RR/self.n_users\n\n def merge_with_other(self, other_metric_object):\n assert other_metric_object is MAP, \"MRR: attempting to merge with a metric object of different type\"\n\n self.cumulative_RR += other_metric_object.cumulative_RR\n self.n_users += other_metric_object.n_users\n\n\n\n\n\nclass Gini_Diversity(Metrics_Object):\n \"\"\"\n Gini diversity index, computed from the Gini Index but with inverted range, such that high values mean higher diversity\n This implementation ignores zero-occurrence items\n\n # From https://github.com/oliviaguest/gini\n # based on bottom eq: http://www.statsdirect.com/help/content/image/stat0206_wmf.gif\n # from: http://www.statsdirect.com/help/default.htm#nonparametric_methods/gini.htm\n #\n # http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.459.8174&rep=rep1&type=pdf\n \"\"\"\n\n def __init__(self, n_items, ignore_items):\n super(Gini_Diversity, self).__init__()\n self.recommended_counter = np.zeros(n_items, dtype=np.float)\n self.ignore_items = ignore_items.astype(np.int).copy()\n\n def add_recommendations(self, recommended_items_ids):\n if len(recommended_items_ids) > 0:\n self.recommended_counter[recommended_items_ids] += 1\n\n def get_metric_value(self):\n\n recommended_counter = self.recommended_counter.copy()\n\n recommended_counter_mask = np.ones_like(recommended_counter, dtype = np.bool)\n recommended_counter_mask[self.ignore_items] = False\n recommended_counter_mask[recommended_counter == 0] = False\n\n recommended_counter = recommended_counter[recommended_counter_mask]\n\n n_items = len(recommended_counter)\n\n recommended_counter_sorted = np.sort(recommended_counter) # values must be sorted\n index = np.arange(1, n_items+1) # index per array element\n\n #gini_index = (np.sum((2 * index - n_items - 1) * recommended_counter_sorted)) / (n_items * np.sum(recommended_counter_sorted))\n gini_diversity = 2*np.sum((n_items + 1 - index)/(n_items+1) * recommended_counter_sorted/np.sum(recommended_counter_sorted))\n\n return gini_diversity\n\n def merge_with_other(self, other_metric_object):\n assert other_metric_object is Gini_Diversity, \"Gini_Diversity: attempting to merge with a metric object of different type\"\n\n self.recommended_counter += other_metric_object.recommended_counter\n\n\n\n\nclass Diversity_Herfindahl(Metrics_Object):\n \"\"\"\n The Herfindahl index is also known as Concentration index, it is used in economy to determine whether the market quotas\n are such that an excessive concentration exists. 
It is here used as a diversity index, if high means high diversity.\n\n It is known to have a small value range in recommender systems, between 0.9 and 1.0\n\n The Herfindahl index is a function of the square of the probability an item has been recommended to any user, hence\n The Herfindahl index is equivalent to MeanInterList diversity as they measure the same quantity.\n\n # http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.459.8174&rep=rep1&type=pdf\n \"\"\"\n\n def __init__(self, n_items, ignore_items):\n super(Diversity_Herfindahl, self).__init__()\n self.recommended_counter = np.zeros(n_items, dtype=np.float)\n self.ignore_items = ignore_items.astype(np.int).copy()\n\n def add_recommendations(self, recommended_items_ids):\n if len(recommended_items_ids) > 0:\n self.recommended_counter[recommended_items_ids] += 1\n\n def get_metric_value(self):\n\n recommended_counter = self.recommended_counter.copy()\n\n recommended_counter_mask = np.ones_like(recommended_counter, dtype = np.bool)\n recommended_counter_mask[self.ignore_items] = False\n\n recommended_counter = recommended_counter[recommended_counter_mask]\n\n if recommended_counter.sum() != 0:\n herfindahl_index = 1 - np.sum((recommended_counter / recommended_counter.sum()) ** 2)\n else:\n herfindahl_index = np.nan\n\n return herfindahl_index\n\n def merge_with_other(self, other_metric_object):\n assert other_metric_object is Diversity_Herfindahl, \"Diversity_Herfindahl: attempting to merge with a metric object of different type\"\n\n self.recommended_counter += other_metric_object.recommended_counter\n\n\n\n\n\nclass Shannon_Entropy(Metrics_Object):\n \"\"\"\n Shannon Entropy is a well known metric to measure the amount of information of a certain string of data.\n Here is applied to the global number of times an item has been recommended.\n\n It has a lower bound and can reach values over 12.0 for random recommenders.\n A high entropy means that the distribution is random uniform across all users.\n\n Note that while a random uniform distribution\n (hence all items with SIMILAR number of occurrences)\n will be highly diverse and have high entropy, a perfectly uniform distribution\n (hence all items with EXACTLY IDENTICAL number of occurrences)\n will have 0.0 entropy while being the most diverse possible.\n\n \"\"\"\n\n def __init__(self, n_items, ignore_items):\n super(Shannon_Entropy, self).__init__()\n self.recommended_counter = np.zeros(n_items, dtype=np.float)\n self.ignore_items = ignore_items.astype(np.int).copy()\n\n def add_recommendations(self, recommended_items_ids):\n if len(recommended_items_ids) > 0:\n self.recommended_counter[recommended_items_ids] += 1\n\n def get_metric_value(self):\n\n assert np.all(self.recommended_counter >= 0.0), \"Shannon_Entropy: self.recommended_counter contains negative counts\"\n\n recommended_counter = self.recommended_counter.copy()\n\n # Ignore from the computation both ignored items and items with zero occurrence.\n # Zero occurrence items will have zero probability and will not change the result, butt will generate nans if used in the log\n recommended_counter_mask = np.ones_like(recommended_counter, dtype = np.bool)\n recommended_counter_mask[self.ignore_items] = False\n recommended_counter_mask[recommended_counter == 0] = False\n\n recommended_counter = recommended_counter[recommended_counter_mask]\n\n n_recommendations = recommended_counter.sum()\n\n recommended_probability = recommended_counter/n_recommendations\n\n shannon_entropy = -np.sum(recommended_probability * 
np.log2(recommended_probability))\n\n return shannon_entropy\n\n def merge_with_other(self, other_metric_object):\n assert other_metric_object is Gini_Diversity, \"Shannon_Entropy: attempting to merge with a metric object of different type\"\n\n assert np.all(self.recommended_counter >= 0.0), \"Shannon_Entropy: self.recommended_counter contains negative counts\"\n assert np.all(other_metric_object.recommended_counter >= 0.0), \"Shannon_Entropy: other.recommended_counter contains negative counts\"\n\n self.recommended_counter += other_metric_object.recommended_counter\n\n\n\n\n\nimport scipy.sparse as sps\n\n\n\nclass Novelty(Metrics_Object):\n \"\"\"\n Novelty measures how \"novel\" a recommendation is in terms of how popular the item was in the train set.\n\n Due to this definition, the novelty of a cold item (i.e. with no interactions in the train set) is not defined,\n in this implementation cold items are ignored and their contribution to the novelty is 0.\n\n A recommender with high novelty will be able to recommend also long queue (i.e. unpopular) items.\n\n Mean self-information (Zhou 2010)\n \"\"\"\n\n def __init__(self, URM_train):\n super(Novelty, self).__init__()\n\n URM_train = sps.csc_matrix(URM_train)\n URM_train.eliminate_zeros()\n self.item_popularity = np.ediff1d(URM_train.indptr)\n\n self.novelty = 0.0\n self.n_evaluated_users = 0\n self.n_items = len(self.item_popularity)\n self.n_interactions = self.item_popularity.sum()\n\n\n def add_recommendations(self, recommended_items_ids):\n\n self.n_evaluated_users += 1\n\n if len(recommended_items_ids)>0:\n recommended_items_popularity = self.item_popularity[recommended_items_ids]\n\n probability = recommended_items_popularity/self.n_interactions\n probability = probability[probability!=0]\n\n self.novelty += np.sum(-np.log2(probability)/self.n_items)\n\n\n def get_metric_value(self):\n\n if self.n_evaluated_users == 0:\n return 0.0\n\n return self.novelty/self.n_evaluated_users\n\n def merge_with_other(self, other_metric_object):\n assert other_metric_object is Novelty, \"Novelty: attempting to merge with a metric object of different type\"\n\n self.novelty = self.novelty + other_metric_object.novelty\n self.n_evaluated_users = self.n_evaluated_users + other_metric_object.n_evaluated_users\n\n\n\n\n\n\n\nclass AveragePopularity(Metrics_Object):\n \"\"\"\n Average popularity the recommended items have in the train data.\n The popularity is normalized by setting as 1 the item with the highest popularity in the train data\n \"\"\"\n\n def __init__(self, URM_train):\n super(AveragePopularity, self).__init__()\n\n URM_train = sps.csc_matrix(URM_train)\n URM_train.eliminate_zeros()\n item_popularity = np.ediff1d(URM_train.indptr)\n\n\n self.cumulative_popularity = 0.0\n self.n_evaluated_users = 0\n self.n_items = URM_train.shape[0]\n self.n_interactions = item_popularity.sum()\n\n self.item_popularity_normalized = item_popularity/item_popularity.max()\n\n\n def add_recommendations(self, recommended_items_ids):\n\n self.n_evaluated_users += 1\n\n if len(recommended_items_ids)>0:\n recommended_items_popularity = self.item_popularity_normalized[recommended_items_ids]\n\n self.cumulative_popularity += np.sum(recommended_items_popularity)/len(recommended_items_ids)\n\n\n def get_metric_value(self):\n\n if self.n_evaluated_users == 0:\n return 0.0\n\n return self.cumulative_popularity/self.n_evaluated_users\n\n def merge_with_other(self, other_metric_object):\n assert other_metric_object is Novelty, \"AveragePopularity: attempting to 
merge with a metric object of different type\"\n\n self.cumulative_popularity = self.cumulative_popularity + other_metric_object.cumulative_popularity\n self.n_evaluated_users = self.n_evaluated_users + other_metric_object.n_evaluated_users\n\n\n\n\n\n\nclass Diversity_similarity(Metrics_Object):\n \"\"\"\n Intra list diversity computes the diversity of items appearing in the recommendations received by each single user, by using an item_diversity_matrix.\n\n It can be used, for example, to compute the diversity in terms of features for a collaborative recommender.\n\n A content-based recommender will have low IntraList diversity if that is computed on the same features the recommender uses.\n A TopPopular recommender may exhibit high IntraList diversity.\n\n \"\"\"\n\n def __init__(self, item_diversity_matrix):\n super(Diversity_similarity, self).__init__()\n\n assert np.all(item_diversity_matrix >= 0.0) and np.all(item_diversity_matrix <= 1.0), \\\n \"item_diversity_matrix contains value greated than 1.0 or lower than 0.0\"\n\n self.item_diversity_matrix = item_diversity_matrix\n\n self.n_evaluated_users = 0\n self.diversity = 0.0\n\n\n def add_recommendations(self, recommended_items_ids):\n\n current_recommended_items_diversity = 0.0\n\n for item_index in range(len(recommended_items_ids)-1):\n\n item_id = recommended_items_ids[item_index]\n\n item_other_diversity = self.item_diversity_matrix[item_id, recommended_items_ids]\n item_other_diversity[item_index] = 0.0\n\n current_recommended_items_diversity += np.sum(item_other_diversity)\n\n\n self.diversity += current_recommended_items_diversity/(len(recommended_items_ids)*(len(recommended_items_ids)-1))\n\n self.n_evaluated_users += 1\n\n\n def get_metric_value(self):\n\n if self.n_evaluated_users == 0:\n return 0.0\n\n return self.diversity/self.n_evaluated_users\n\n def merge_with_other(self, other_metric_object):\n assert other_metric_object is Diversity_similarity, \"Diversity: attempting to merge with a metric object of different type\"\n\n self.diversity = self.diversity + other_metric_object.diversity\n self.n_evaluated_users = self.n_evaluated_users + other_metric_object.n_evaluated_users\n\n\n\n\nclass Diversity_MeanInterList(Metrics_Object):\n \"\"\"\n MeanInterList diversity measures the uniqueness of different users' recommendation lists.\n\n It can be used to measure how \"diversified\" are the recommendations different users receive.\n\n While the original proposal called this metric \"Personalization\", we do not use this name since the highest MeanInterList diversity\n is exhibited by a non personalized Random recommender.\n\n It can be demonstrated that this metric does not require to compute the common items all possible couples of users have in common\n but rather it is only sensitive to the total amount of time each item has been recommended.\n\n MeanInterList diversity is a function of the square of the probability an item has been recommended to any user, hence\n MeanInterList diversity is equivalent to the Herfindahl index as they measure the same quantity.\n\n A TopPopular recommender that does not remove seen items will have 0.0 MeanInterList diversity.\n\n\n pag. 
3, http://www.pnas.org/content/pnas/107/10/4511.full.pdf\n\n @article{zhou2010solving,\n title={Solving the apparent diversity-accuracy dilemma of recommender systems},\n author={Zhou, Tao and Kuscsik, Zolt{\\'a}n and Liu, Jian-Guo and Medo, Mat{\\'u}{\\v{s}} and Wakeling, Joseph Rushton and Zhang, Yi-Cheng},\n journal={Proceedings of the National Academy of Sciences},\n volume={107},\n number={10},\n pages={4511--4515},\n year={2010},\n publisher={National Acad Sciences}\n }\n\n # The formula is diversity_cumulative += 1 - common_recommendations(user1, user2)/cutoff\n # for each couple of users, except the diagonal. It is VERY computationally expensive\n # We can move the 1 and cutoff outside of the summation. Remember to exclude the diagonal\n # co_counts = URM_predicted.dot(URM_predicted.T)\n # co_counts[np.arange(0, n_user, dtype=np.int):np.arange(0, n_user, dtype=np.int)] = 0\n # diversity = (n_user**2 - n_user) - co_counts.sum()/self.cutoff\n\n # If we represent the summation of co_counts separating it for each item, we will have:\n # co_counts.sum() = co_counts_item1.sum() + co_counts_item2.sum() ...\n # If we know how many times an item has been recommended, co_counts_item1.sum() can be computed as how many couples of\n # users have item1 in common. If item1 has been recommended n times, the number of couples is n*(n-1)\n # Therefore we can compute co_counts.sum() value as:\n # np.sum(np.multiply(item-occurrence, item-occurrence-1))\n\n # The naive implementation URM_predicted.dot(URM_predicted.T) might require an hour of computation\n # The last implementation has a negligible computational time even for very big datasets\n\n \"\"\"\n\n def __init__(self, n_items, cutoff):\n super(Diversity_MeanInterList, self).__init__()\n\n self.recommended_counter = np.zeros(n_items, dtype=np.float)\n\n self.n_evaluated_users = 0\n self.n_items = n_items\n self.diversity = 0.0\n self.cutoff = cutoff\n\n\n def add_recommendations(self, recommended_items_ids):\n\n assert len(recommended_items_ids) <= self.cutoff, \"Diversity_MeanInterList: recommended list is contains more elements than cutoff\"\n\n self.n_evaluated_users += 1\n\n if len(recommended_items_ids) > 0:\n self.recommended_counter[recommended_items_ids] += 1\n\n\n\n\n def get_metric_value(self):\n\n # Requires to compute the number of common elements for all couples of users\n if self.n_evaluated_users == 0:\n return 1.0\n\n cooccurrences_cumulative = np.sum(self.recommended_counter**2) - self.n_evaluated_users*self.cutoff\n\n # All user combinations except diagonal\n all_user_couples_count = self.n_evaluated_users**2 - self.n_evaluated_users\n\n diversity_cumulative = all_user_couples_count - cooccurrences_cumulative/self.cutoff\n\n self.diversity = diversity_cumulative/all_user_couples_count\n\n return self.diversity\n\n\n def get_theoretical_max(self):\n\n global_co_occurrence_count = (self.n_evaluated_users*self.cutoff)**2/self.n_items - self.n_evaluated_users*self.cutoff\n\n mild = 1 - 1/(self.n_evaluated_users**2 - self.n_evaluated_users)*(global_co_occurrence_count/self.cutoff)\n\n return mild\n\n def merge_with_other(self, other_metric_object):\n\n assert other_metric_object is Diversity_MeanInterList, \"Diversity_MeanInterList: attempting to merge with a metric object of different type\"\n\n assert np.all(self.recommended_counter >= 0.0), \"Diversity_MeanInterList: self.recommended_counter contains negative counts\"\n assert np.all(other_metric_object.recommended_counter >= 0.0), \"Diversity_MeanInterList: 
other.recommended_counter contains negative counts\"\n\n self.recommended_counter += other_metric_object.recommended_counter\n self.n_evaluated_users += other_metric_object.n_evaluated_users\n\n\n\n\n\ndef roc_auc(is_relevant):\n\n ranks = np.arange(len(is_relevant))\n pos_ranks = ranks[is_relevant]\n neg_ranks = ranks[~is_relevant]\n auc_score = 0.0\n\n if len(neg_ranks) == 0:\n return 1.0\n\n if len(pos_ranks) > 0:\n for pos_pred in pos_ranks:\n auc_score += np.sum(pos_pred < neg_ranks, dtype=np.float32)\n auc_score /= (pos_ranks.shape[0] * neg_ranks.shape[0])\n\n assert 0 <= auc_score <= 1, auc_score\n return auc_score\n\n\n\ndef arhr(is_relevant):\n # average reciprocal hit-rank (ARHR) of all relevant items\n # As opposed to MRR, ARHR takes into account all relevant items and not just the first\n # pag 17\n # http://glaros.dtc.umn.edu/gkhome/fetch/papers/itemrsTOIS04.pdf\n # https://emunix.emich.edu/~sverdlik/COSC562/ItemBasedTopTen.pdf\n\n p_reciprocal = 1/np.arange(1,len(is_relevant)+1, 1.0, dtype=np.float64)\n arhr_score = is_relevant.dot(p_reciprocal)\n\n #assert 0 <= arhr_score <= p_reciprocal.sum(), \"arhr_score {} should be between 0 and {}\".format(arhr_score, p_reciprocal.sum())\n assert not np.isnan(arhr_score), \"ARHR is NaN\"\n return arhr_score\n\n\n\ndef precision(is_relevant):\n\n if len(is_relevant) == 0:\n precision_score = 0.0\n else:\n precision_score = np.sum(is_relevant, dtype=np.float32) / len(is_relevant)\n\n assert 0 <= precision_score <= 1, precision_score\n return precision_score\n\n\ndef precision_recall_min_denominator(is_relevant, n_test_items):\n\n if len(is_relevant) == 0:\n precision_score = 0.0\n else:\n precision_score = np.sum(is_relevant, dtype=np.float32) / min(n_test_items, len(is_relevant))\n\n assert 0 <= precision_score <= 1, precision_score\n return precision_score\n\n\ndef rmse(all_items_predicted_ratings, relevant_items, relevant_items_rating):\n\n # Important, some items will have -np.inf score and are treated as if they did not exist\n\n # RMSE with test items\n relevant_items_error = (all_items_predicted_ratings[relevant_items]-relevant_items_rating)**2\n\n finite_prediction_mask = np.isfinite(relevant_items_error)\n\n if finite_prediction_mask.sum() == 0:\n rmse = np.nan\n\n else:\n relevant_items_error = relevant_items_error[finite_prediction_mask]\n\n squared_error = np.sum(relevant_items_error)\n\n # # Second the RMSE against all non-test items assumed having true rating 0\n # # In order to avoid the need of explicitly indexing all non-relevant items, use a difference\n # squared_error += np.sum(all_items_predicted_ratings[np.isfinite(all_items_predicted_ratings)]**2) - \\\n # np.sum(all_items_predicted_ratings[relevant_items][np.isfinite(all_items_predicted_ratings[relevant_items])]**2)\n\n mean_squared_error = squared_error/finite_prediction_mask.sum()\n rmse = np.sqrt(mean_squared_error)\n\n return rmse\n\n\ndef recall(is_relevant, pos_items):\n\n recall_score = np.sum(is_relevant, dtype=np.float32) / pos_items.shape[0]\n\n assert 0 <= recall_score <= 1, recall_score\n return recall_score\n\n\ndef rr(is_relevant):\n # reciprocal rank of the FIRST relevant item in the ranked list (0 if none)\n\n ranks = np.arange(1, len(is_relevant) + 1)[is_relevant]\n\n if len(ranks) > 0:\n return 1. 
/ ranks[0]\n else:\n return 0.0\n\n\ndef average_precision(is_relevant, pos_items):\n\n if len(is_relevant) == 0:\n a_p = 0.0\n else:\n p_at_k = is_relevant * np.cumsum(is_relevant, dtype=np.float32) / (1 + np.arange(is_relevant.shape[0]))\n a_p = np.sum(p_at_k) / np.min([pos_items.shape[0], is_relevant.shape[0]])\n\n assert 0 <= a_p <= 1, a_p\n return a_p\n\n\ndef ndcg(ranked_list, pos_items, relevance=None, at=None):\n\n if relevance is None:\n relevance = np.ones_like(pos_items)\n assert len(relevance) == pos_items.shape[0]\n\n # Create a dictionary associating item_id to its relevance\n # it2rel[item] -> relevance[item]\n it2rel = {it: r for it, r in zip(pos_items, relevance)}\n\n # Creates array of length \"at\" with the relevance associated to the item in that position\n rank_scores = np.asarray([it2rel.get(it, 0.0) for it in ranked_list[:at]], dtype=np.float32)\n\n # IDCG has all relevances to 1, up to the number of items in the test set\n ideal_dcg = dcg(np.sort(relevance)[::-1])\n\n # DCG uses the relevance of the recommended items\n rank_dcg = dcg(rank_scores)\n\n if rank_dcg == 0.0:\n return 0.0\n\n ndcg_ = rank_dcg / ideal_dcg\n # assert 0 <= ndcg_ <= 1, (rank_dcg, ideal_dcg, ndcg_)\n return ndcg_\n\n\ndef dcg(scores):\n return np.sum(np.divide(np.power(2, scores) - 1, np.log(np.arange(scores.shape[0], dtype=np.float32) + 2)),\n dtype=np.float32)\n\n\nmetrics = ['AUC', 'Precision' 'Recall', 'MAP', 'NDCG']\n\n\ndef pp_metrics(metric_names, metric_values, metric_at):\n \"\"\"\n Pretty-prints metric values\n :param metrics_arr:\n :return:\n \"\"\"\n assert len(metric_names) == len(metric_values)\n if isinstance(metric_at, int):\n metric_at = [metric_at] * len(metric_values)\n return ' '.join(['{}: {:.4f}'.format(mname, mvalue) if mcutoff is None or mcutoff == 0 else\n '{}@{}: {:.4f}'.format(mname, mcutoff, mvalue)\n for mname, mcutoff, mvalue in zip(metric_names, metric_at, metric_values)])\n\n\nclass TestAUC(unittest.TestCase):\n def runTest(self):\n pos_items = np.asarray([2, 4])\n ranked_list = np.asarray([1, 2, 3, 4, 5])\n self.assertTrue(np.allclose(roc_auc(ranked_list, pos_items),\n (2. / 3 + 1. / 3) / 2))\n\n\nclass TestRecall(unittest.TestCase):\n def runTest(self):\n pos_items = np.asarray([2, 4, 5, 10])\n ranked_list_1 = np.asarray([1, 2, 3, 4, 5])\n ranked_list_2 = np.asarray([10, 5, 2, 4, 3])\n ranked_list_3 = np.asarray([1, 3, 6, 7, 8])\n self.assertTrue(np.allclose(recall(ranked_list_1, pos_items), 3. / 4))\n self.assertTrue(np.allclose(recall(ranked_list_2, pos_items), 1.0))\n self.assertTrue(np.allclose(recall(ranked_list_3, pos_items), 0.0))\n\n thresholds = [1, 2, 3, 4, 5]\n values = [0.0, 1. / 4, 1. / 4, 2. / 4, 3. / 4]\n for at, val in zip(thresholds, values):\n self.assertTrue(np.allclose(np.asarray(recall(ranked_list_1, pos_items, at=at)), val))\n\n\nclass TestPrecision(unittest.TestCase):\n def runTest(self):\n pos_items = np.asarray([2, 4, 5, 10])\n ranked_list_1 = np.asarray([1, 2, 3, 4, 5])\n ranked_list_2 = np.asarray([10, 5, 2, 4, 3])\n ranked_list_3 = np.asarray([1, 3, 6, 7, 8])\n self.assertTrue(np.allclose(precision(ranked_list_1, pos_items), 3. / 5))\n self.assertTrue(np.allclose(precision(ranked_list_2, pos_items), 4. / 5))\n self.assertTrue(np.allclose(precision(ranked_list_3, pos_items), 0.0))\n\n thresholds = [1, 2, 3, 4, 5]\n values = [0.0, 1. / 2, 1. / 3, 2. / 4, 3. 
/ 5]\n for at, val in zip(thresholds, values):\n self.assertTrue(np.allclose(np.asarray(precision(ranked_list_1, pos_items, at=at)), val))\n\n\nclass TestRR(unittest.TestCase):\n def runTest(self):\n pos_items = np.asarray([2, 4, 5, 10])\n ranked_list_1 = np.asarray([1, 2, 3, 4, 5])\n ranked_list_2 = np.asarray([10, 5, 2, 4, 3])\n ranked_list_3 = np.asarray([1, 3, 6, 7, 8])\n self.assertTrue(np.allclose(rr(ranked_list_1, pos_items), 1. / 2))\n self.assertTrue(np.allclose(rr(ranked_list_2, pos_items), 1.))\n self.assertTrue(np.allclose(rr(ranked_list_3, pos_items), 0.0))\n\n thresholds = [1, 2, 3, 4, 5]\n values = [0.0, 1. / 2, 1. / 2, 1. / 2, 1. / 2]\n for at, val in zip(thresholds, values):\n self.assertTrue(np.allclose(np.asarray(rr(ranked_list_1, pos_items, at=at)), val))\n\n\nclass TestMAP(unittest.TestCase):\n def runTest(self):\n pos_items = np.asarray([2, 4, 5, 10])\n ranked_list_1 = np.asarray([1, 2, 3, 4, 5])\n ranked_list_2 = np.asarray([10, 5, 2, 4, 3])\n ranked_list_3 = np.asarray([1, 3, 6, 7, 8])\n ranked_list_4 = np.asarray([11, 12, 13, 14, 15, 16, 2, 4, 5, 10])\n ranked_list_5 = np.asarray([2, 11, 12, 13, 14, 15, 4, 5, 10, 16])\n self.assertTrue(np.allclose(map(ranked_list_1, pos_items), (1. / 2 + 2. / 4 + 3. / 5) / 4))\n self.assertTrue(np.allclose(map(ranked_list_2, pos_items), 1.0))\n self.assertTrue(np.allclose(map(ranked_list_3, pos_items), 0.0))\n self.assertTrue(np.allclose(map(ranked_list_4, pos_items), (1. / 7 + 2. / 8 + 3. / 9 + 4. / 10) / 4))\n self.assertTrue(np.allclose(map(ranked_list_5, pos_items), (1. + 2. / 7 + 3. / 8 + 4. / 9) / 4))\n\n thresholds = [1, 2, 3, 4, 5]\n values = [\n 0.0,\n 1. / 2 / 2,\n 1. / 2 / 3,\n (1. / 2 + 2. / 4) / 4,\n (1. / 2 + 2. / 4 + 3. / 5) / 4\n ]\n for at, val in zip(thresholds, values):\n self.assertTrue(np.allclose(np.asarray(map(ranked_list_1, pos_items, at)), val))\n\n\nclass TestNDCG(unittest.TestCase):\n def runTest(self):\n pos_items = np.asarray([2, 4, 5, 10])\n pos_relevances = np.asarray([5, 4, 3, 2])\n ranked_list_1 = np.asarray([1, 2, 3, 4, 5]) # rel = 0, 5, 0, 4, 3\n ranked_list_2 = np.asarray([10, 5, 2, 4, 3]) # rel = 2, 3, 5, 4, 0\n ranked_list_3 = np.asarray([1, 3, 6, 7, 8]) # rel = 0, 0, 0, 0, 0\n idcg = ((2 ** 5 - 1) / np.log(2) +\n (2 ** 4 - 1) / np.log(3) +\n (2 ** 3 - 1) / np.log(4) +\n (2 ** 2 - 1) / np.log(5))\n self.assertTrue(np.allclose(dcg(np.sort(pos_relevances)[::-1]), idcg))\n self.assertTrue(np.allclose(ndcg(ranked_list_1, pos_items, pos_relevances),\n ((2 ** 5 - 1) / np.log(3) +\n (2 ** 4 - 1) / np.log(5) +\n (2 ** 3 - 1) / np.log(6)) / idcg))\n self.assertTrue(np.allclose(ndcg(ranked_list_2, pos_items, pos_relevances),\n ((2 ** 2 - 1) / np.log(2) +\n (2 ** 3 - 1) / np.log(3) +\n (2 ** 5 - 1) / np.log(4) +\n (2 ** 4 - 1) / np.log(5)) / idcg))\n self.assertTrue(np.allclose(ndcg(ranked_list_3, pos_items, pos_relevances), 0.0))\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.ediff1d",
"scipy.sparse.csr_matrix",
"numpy.random.seed",
"numpy.random.choice",
"numpy.argsort",
"numpy.logical_not",
"scipy.sparse.coo_matrix",
"numpy.where",
"numpy.isin",
"scipy.sparse.lil_matrix",
"sklearn.preprocessing.LabelEncoder",
"scipy.sparse.hstack",
"numpy.mean"
],
[
"numpy.sqrt",
"numpy.logical_or",
"numpy.sum",
"numpy.ediff1d",
"numpy.cumsum",
"numpy.log2",
"numpy.zeros",
"scipy.sparse.csc_matrix",
"numpy.ones_like",
"numpy.asarray",
"numpy.arange",
"numpy.all",
"numpy.power",
"numpy.min",
"numpy.log",
"numpy.isnan",
"numpy.sort",
"numpy.isfinite"
]
] |
PatternRecognition/OpenBMI | [
"75daf901b2dbe215852cbff243606dcfcd10f05c"
] | [
"PR_BCI_team/Team_StarLab/DKHan/examples/Deep_Learning_Models/OpenBMI_dataset/Preprocessing/convert_gigamne_to_smt.py"
] | [
"import scipy.io as sio\nimport numpy as np\nimport os\nimport mne\nimport gigadata\n\nimport matplotlib.pyplot as plt\n\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.model_selection import ShuffleSplit, cross_val_score\n\nfrom pyriemann.estimation import Covariances\n\nfrom mne import Epochs, pick_types, find_events\nfrom mne.channels import read_layout\nfrom mne.io import concatenate_raws, read_raw_edf\nfrom mne.datasets import eegbci\nfrom mne.decoding import CSP\nfrom datetime import datetime\n\nimport pickle\n\nwith open('MI_62ch_100Hz_4-50.pkl', 'rb') as f:\n data = pickle.load(f)\n\nsess = 2\nsub= 43\n\nfor sess in [1,2]:\n print(\"session:\",sess)\n for sub in range(1,55):\n print(\"subject#\",sub)\n if sess == 1 :\n epochs = data[sub-1]\n else :\n epochs = data[sub+53]\n\n\n epochs_train = epochs.copy()\n\n\n if sess == 1 and sub ==1:\n epochs_data_train = epochs_train.get_data()\n labels = epochs.events[:, -1] - 1\n else:\n epoch_temp = epochs_train.get_data()\n epochs_data_train = np.append(epochs_data_train, epoch_temp,axis=0)\n label_temp = epochs.events[:, -1] - 1\n labels = np.hstack((labels, label_temp))\n\n print(epochs_data_train.shape)\n\n\nnp.save('x_data_450',epochs_data_train)\nnp.save('y_data',labels)\n\n"
] | [
[
"numpy.save",
"numpy.hstack",
"numpy.append"
]
] |
FanWangEcon/pyfan | [
"126e91c0c6d930f1c335a07396d1d2145b247cea"
] | [
"pyfan/amto/array/geomspace.py"
] | [
"'''\nCreated on May 24, 2018\n\n@author: fan\n\nTo have a better grid denser at the beginning\n'''\n\nimport time as time\nimport numpy as np\n\nfrom numba import jit\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\n# @vectorize([float64(float64, float64, float64, float64, float64, float64, float64, float64)])\ndef grid_to_geom_short(choice_grid, choice_grid_max, choice_grid_min,\n start, stop, num, geom_ratio, a):\n scaler = (choice_grid_max - choice_grid_min) / (stop - start)\n __, displacement, multiplier, a, b = gen_geom_grid(start, stop, num, geom_ratio, a)\n\n return grid_to_geom_short_core(choice_grid, a, scaler, displacement, multiplier, geom_ratio)\n\n\n@jit(nopython=True, parallel=True)\ndef grid_to_geom_short_core(choice_grid, a, scaler, displacement, multiplier, geom_ratio):\n # choice_grid_geom = ((choice_grid/scaler) + displacement)/multiplier\n # but a is 1\n # choice_grid_geom_base = (np.log(choice_grid_geom/a))/np.log(geom_ratio)\n\n return np.log((((choice_grid / scaler) + displacement) / multiplier) / a) / np.log(geom_ratio)\n\n\n# @njit\ndef grid_to_geom(choice_grid, choice_grid_max, choice_grid_min,\n start, stop, num, geom_ratio, a):\n \"\"\"\n the code now is under the assumption that initial start and end were 0 and 1 \n \n Given geom_grid results, how do we go back to actual data grid. \n So for interpolation. \n interpolate not on actual K and B scales, but on any even grid, as long\n as the grid count is right. \n \n interp_K_grid = np.linspace(0,1,n)\n \n but then there is a vector of actual choices kn_vec, how to map kn_vec to interp_K_grid?\n \n Parameters\n ----------\n choice_grid:\n this is the choice grid, on the actual choice scale\n start: float\n from gen_geom_grid\n stop: float\n from gen_geom_grid\n num: int\n from gen_geom_grid\n geom_ratio: float\n from gen_geom_grid \n \"\"\"\n # logger.debug('enter grid_to_geom')\n\n '''\n 0. Choice Grid Rescaling\n '''\n scaler = (choice_grid_max - choice_grid_min) / (stop - start)\n\n '''\n A. 
Reverse engineer from vector to geom scale\n '''\n startTime = time.time()\n __, displacement, multiplier, a, b = gen_geom_grid(start, stop, num, geom_ratio, a)\n # logger.debug('displacement:%s', displacement)\n # logger.debug('multiplier:%s', multiplier)\n # logger.debug('a:%s', a)\n # logger.debug('b:%s', b)\n # t = time.time() - startTime\n # print('Step aa:', t)\n\n '''choice_grid_geom is now between a and b'''\n # startTime = time.time()\n choice_grid_geom = ((choice_grid / scaler) + displacement) / multiplier\n # logger.debug('choice_grid:\\n%s', choice_grid)\n # logger.debug('choice_grid_geom:\\n%s', choice_grid_geom)\n t = time.time() - startTime\n print('Step aaa:', t)\n\n '''\n B.\n a <= choice_grid_geom = a*(geom_ratio)^{x} <= b\n \n solve for x\n \n log(choice_grid_geom) = log(a) + x*log(geom_ratio)\n x = (log(choice_grid_geom) - log(a))/log(geom_ratio)\n \n '''\n startTime = time.time()\n choice_grid_geom_base = (np.log(choice_grid_geom / a)) / np.log(geom_ratio)\n # choice_grid_geom_base = (np.log(choice_grid_geom))/np.log(geom_ratio)\n # logger.debug('choice_grid_geom_base:\\n%s', choice_grid_geom_base)\n t = time.time() - startTime\n print('Step bb:', t)\n\n return choice_grid_geom_base\n\n\n# @njit\ndef gen_geom_grid(start, stop, num, geom_ratio, a):\n \"\"\" \n Specify geom_ratio, the z below:\n a*z^0=a\n a*z^1\n a*z^2\n ...\n ...\n a*z^49=b\n Then generate the grid points that is consistent with the geom_ratio\n \n Parameters\n ----------\n start: float\n same as in linspace\n stop: float\n same as in linspace\n num: int\n same as in linspace\n geom_ratio: float\n z value below kind of except for rescaling\n \"\"\"\n\n '''\n A. Start with a and b\n '''\n # a = 1\n b = a * geom_ratio ** (num - 1)\n\n geom_base = a * (geom_ratio) ** np.arange(num)\n # geom_base2 = np.geomspace(a, b, num)\n # logger.debug('geom_base:\\n%s', geom_base)\n\n '''\n B. Rescaling\n '''\n multiplier = ((stop - start) / (b - a))\n geom_base_scaled = geom_base * multiplier\n # logger.debug('geom_base_scaled:\\n%s', geom_base_scaled)\n\n displacement = (np.min(geom_base_scaled) - start)\n geom_base_scaled = geom_base_scaled - displacement\n # logger.debug('geom_base_scaled:\\n%s', geom_base_scaled)\n\n # logger.debug('geom_base_scaled diff:\\n%s', np.diff(geom_base_scaled))\n\n return geom_base_scaled, displacement, multiplier, a, b\n\n\ndef tester(a=1, b=51, max_power=49):\n \"\"\"\n 1. 1 to 51, geomspace \n \"\"\"\n list_geom_1t51 = np.geomspace(a, b, max_power + 1)\n print('list_geom_1t51:', list_geom_1t51)\n\n \"\"\"\n 2. what is the list above, what does it mean?\n the point is to start from a=start, end at b=end, find:\n \n a*z^0=a\n a*z^1\n a*z^2\n ...\n ...\n a*z^49=b\n \n 50 numbers like above. \n a is determined by start of geomspace\n b is determined by: \n \n \"\"\"\n z = (b / a) ** (1 / max_power)\n print('z:', z)\n list_geom_fan = a * z ** np.linspace(0, max_power, max_power + 1)\n print('list_geom_fan:', list_geom_fan)\n\n return list_geom_fan\n\n\ndef tester_plus1(a=0, b=50, max_power=49, adjust=1):\n \"\"\"\n to accomndate zero, \n \"\"\"\n list_geom_1t51 = np.geomspace(a + adjust, b + adjust, max_power + 1)\n list_geom_1t51 = list_geom_1t51 - 1\n print('list_geom_1t51:', list_geom_1t51)\n\n \"\"\"\n 2. what is the list above, what does it mean?\n the point is to start from a=start, end at b=end, find:\n \n a*z^0=a\n a*z^1\n a*z^2\n ...\n ...\n a*z^49=b\n \n 50 numbers like above. 
\n a is determined by start of geomspace\n b is determined by: \n \n \"\"\"\n z = ((b + adjust) / (a + adjust)) ** (1 / max_power)\n print('z:', z)\n list_geom_fan = (a + adjust) * z ** np.linspace(0, max_power, max_power + 1) - 1\n print('list_geom_fan:', list_geom_fan)\n\n \"\"\"\n 3. So suppose I have list_geom now, how to I take it back to geomspace?\n \"\"\"\n lencount = len(list_geom_1t51)\n equi_space = np.linspace(1, lencount, lencount)\n print('equi_space:', equi_space)\n\n\nif __name__ == \"__main__\":\n FORMAT = '%(filename)s - %(funcName)s - %(lineno)d - %(asctime)s - %(levelname)s %(message)s'\n # np.set_printoptions(precision=4, linewidth=100, suppress=True, threshold=np.nan)\n np.set_printoptions(precision=3, linewidth=100, suppress=True, threshold=3000)\n logging.basicConfig(level=logging.DEBUG, format=FORMAT)\n\n tester(a=1, b=2, max_power=49)\n tester(a=1, b=3, max_power=49)\n tester(a=1, b=4, max_power=49)\n print((tester(a=1, b=5, max_power=49) - 1) / 4)\n print('')\n print('')\n print('')\n tester(a=1, b=51, max_power=49)\n tester(a=0.1, b=51, max_power=10)\n tester(a=100, b=200, max_power=3)\n\n tester_plus1()\n\n print('')\n print('')\n print('')\n\n start = 0\n stop = 1\n num = 11\n geom_ratio = 1.2\n gen_geom_grid(start, stop, num, geom_ratio, 1)\n\n start = 10\n stop = 20\n num = 11\n geom_ratio = 1.00000001\n gen_geom_grid(start, stop, num, geom_ratio, 1)\n\n start = -10\n stop = 20\n num = 11\n geom_ratio = 1.1\n gen_geom_grid(start, stop, num, geom_ratio, 1)\n\n start = -3.5\n stop = -3.1\n num = 3\n geom_ratio = 1.1\n gen_geom_grid(start, stop, num, geom_ratio, 1)\n\n start = 0\n stop = 1\n num = 50\n geom_ratio = 1.03\n geom_base_scaled, __, __, __, __, = gen_geom_grid(start, stop, num, geom_ratio, 1)\n choice_grid_max = stop\n choice_grid_min = start\n grid_to_geom(geom_base_scaled, choice_grid_max, choice_grid_min,\n start, stop, num, geom_ratio, 1)\n\n geom_base_scaled = np.linspace(0, 30, 22)\n choice_grid_max = 30\n choice_grid_min = 0\n grid_to_geom(geom_base_scaled, choice_grid_max, choice_grid_min,\n start, stop, num, geom_ratio, 1)\n"
] | [
[
"numpy.set_printoptions",
"numpy.geomspace",
"numpy.arange",
"numpy.log",
"numpy.min",
"numpy.linspace"
]
] |
amh28/NIF | [
"92a2f447738224fb10b83fa60c78a35e0c25ac34"
] | [
"niftynet/layer/discrete_label_normalisation.py"
] | [
"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, print_function, division\n\nimport os\n\nimport numpy as np\nimport tensorflow as tf\n\nimport niftynet.utilities.histogram_standardisation as hs\nfrom niftynet.layer.base_layer import DataDependentLayer\nfrom niftynet.layer.base_layer import Invertible\nfrom niftynet.utilities.user_parameters_helper import standardise_string\nfrom niftynet.utilities.util_common import print_progress_bar\n\n\nclass DiscreteLabelNormalisationLayer(DataDependentLayer, Invertible):\n def __init__(self,\n image_name,\n modalities,\n model_filename,\n name='label_norm'):\n\n super(DiscreteLabelNormalisationLayer, self).__init__(name=name)\n # mapping is a complete cache of the model file, the total number of\n # modalities are listed in self.modalities\n self.image_name = image_name\n self.modalities = modalities\n self.model_file = os.path.abspath(model_filename)\n assert not os.path.isdir(self.model_file), \\\n \"model_filename is a directory, please change histogram_ref_file\"\n self.label_map = hs.read_mapping_file(self.model_file)\n\n @property\n def key(self):\n # provide a readable key for the label mapping item\n key_from = \"{}_{}-from\".format(self.image_name, self.modalities)\n key_to = \"{}_{}-to\".format(self.image_name, self.modalities)\n return standardise_string(key_from), standardise_string(key_to)\n\n def layer_op(self, image, mask=None):\n assert self.is_ready(), \\\n \"discrete_label_normalisation layer needs to be trained first.\"\n # mask is not used for label mapping\n if isinstance(image, dict):\n if self.image_name not in image:\n return image, mask\n label_data = np.asarray(image[self.image_name])\n else:\n label_data = np.asarray(image)\n\n mapping_from = self.label_map[self.key[0]]\n mapping_to = self.label_map[self.key[1]]\n\n image_shape = label_data.shape\n label_data = label_data.reshape(-1)\n mapped_data = np.zeros_like(label_data)\n for (original, new_id) in zip(mapping_from, mapping_to):\n mapped_data[label_data == original] = new_id\n label_data = mapped_data.reshape(image_shape)\n\n if isinstance(image, dict):\n image[self.image_name] = label_data\n return image, mask\n return label_data, mask\n\n def inverse_op(self, image, mask=None):\n assert self.is_ready(), \\\n \"discrete_label_normalisation layer needs to be trained first.\"\n # mask is not used for label mapping\n if isinstance(image, dict):\n label_data = np.asarray(image[self.image_name])\n else:\n label_data = np.asarray(image)\n\n mapping_from = self.label_map[self.key[0]]\n mapping_to = self.label_map[self.key[1]]\n\n image_shape = label_data.shape\n label_data = label_data.reshape(-1)\n mapped_data = np.zeros_like(label_data)\n for (new_id, original) in zip(mapping_from, mapping_to):\n mapped_data[label_data == original] = new_id\n label_data = mapped_data.reshape(image_shape)\n if isinstance(image, dict):\n image[self.image_name] = label_data\n return image, mask\n return label_data, mask\n\n def is_ready(self):\n mapping_from = self.label_map.get(self.key[0], None)\n if mapping_from is None:\n # tf.logging.warning('could not find mapping key %s', self.key[0])\n return False\n mapping_to = self.label_map.get(self.key[1], None)\n if mapping_to is None:\n # tf.logging.warning('could not find mapping key %s', self.key[1])\n return False\n assert len(mapping_from) == len(mapping_to), \\\n \"mapping is not one-to-one, \" \\\n \"corrupted mapping file? 
{}\".format(self.model_file)\n return True\n\n def train(self, image_list):\n # check modalities to train, using the first subject in subject list\n # to find input modality list\n assert image_list is not None, \"nothing to training for this layer\"\n if self.is_ready():\n tf.logging.info(\n \"label mapping ready for {}:{}, {} classes\".format(\n self.image_name,\n self.modalities,\n len(self.label_map[self.key[0]])))\n return\n tf.logging.info(\n \"Looking for the set of unique discrete labels from input {}\"\n \" using {} subjects\".format(self.image_name, len(image_list)))\n label_map = find_set_of_labels(image_list, self.image_name, self.key)\n # merging trained_mapping dict and self.mapping dict\n self.label_map.update(label_map)\n all_maps = hs.read_mapping_file(self.model_file)\n all_maps.update(self.label_map)\n hs.write_all_mod_mapping(self.model_file, all_maps)\n\n\ndef find_set_of_labels(image_list, field, output_key):\n label_set = set()\n for idx, image in enumerate(image_list):\n assert field in image, \\\n \"no {} data provided in for label mapping\".format(field)\n print_progress_bar(idx, len(image_list),\n prefix='searching unique labels from training files',\n decimals=1, length=10, fill='*')\n unique_label = np.unique(image[field].get_data())\n if len(unique_label) > 500 or len(unique_label) <= 1:\n tf.logging.warning(\n 'unusual discrete values: number of unique '\n 'labels to normalise %s', len(unique_label))\n label_set.update(set(unique_label))\n label_set = list(label_set)\n label_set.sort()\n try:\n mapping_from_to = dict()\n mapping_from_to[output_key[0]] = tuple(label_set)\n mapping_from_to[output_key[1]] = tuple(range(0, len(label_set)))\n except (IndexError, ValueError):\n tf.logging.fatal(\"unable to create mappings keys: %s, image name %s\",\n output_key, field)\n raise\n return mapping_from_to\n"
] | [
[
"tensorflow.logging.fatal",
"numpy.zeros_like",
"numpy.asarray"
]
] |
manopapad/legate.numpy | [
"896f4fd9b32db445da6cdabf7b78d523fca96936"
] | [
"tests/universal_functions_tests/true_divide_tests/broadcast.py"
] | [
"# Copyright 2021 NVIDIA Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport random\n\nimport numpy as np\n\nimport legate.numpy as lg\n\n\ndef test():\n anp = np.random.randn(4, 5)\n b = random.randint(1, 13)\n a = lg.array(anp)\n\n # test true_divide with scalar on rhs\n assert np.array_equal(lg.true_divide(a, b), np.true_divide(anp, b))\n\n # test divide with scalar on lhs\n assert np.array_equal(lg.true_divide(b, a), np.true_divide(b, anp))\n\n return\n\n\nif __name__ == \"__main__\":\n test()\n"
] | [
[
"numpy.random.randn",
"numpy.true_divide"
]
] |
Azmal16/Covid_Symptoms_Predict_with_Machine_Learning | [
"d1e1fde73aa307bdbcfeac27088f49e4c1cb4555"
] | [
"app.py"
] | [
"import numpy as np\nfrom flask import Flask, request, jsonify, render_template\nimport pickle\n\napp = Flask(__name__)\nmodel = pickle.load(open('model.pkl', 'rb'))\n\n\[email protected]('/')\ndef home():\n return render_template('index.html')\n\n\[email protected]('/predict', methods=['GET', 'POST'])\ndef predict():\n '''\n For rendering results on HTML GUI\n '''\n int_features = [int(x) for x in request.form.getlist('comp_select')]\n final_features = [np.array(int_features)]\n prediction = model.predict(final_features)\n # print(final_features)\n output = prediction[0]*10\n\n if (output <= 20):\n return render_template('index.html', prediction_text='Your symptoms match with {} % symptoms of the Covid Patients.\\n You are at Low Risk of getting Covid-19.\\n Please answer the questions below to predict again.'.format(output))\n\n elif (output > 20 and output <= 60):\n return render_template('index.html', prediction_text='Your symptoms match with {} % symptoms of the Covid Patients.\\n You are at Medium Risk of getting Covid-19.\\n We recommend you to have a Covid Test.\\n Please answer the questions below to predict again.'.format(output))\n\n else:\n return render_template('index.html', prediction_text='Your symptoms match with {} % symptoms of the Covid Patients.\\n You are at High Risk of getting Covid-19.\\n We recommend you to have a Covid Test as soon as possible.\\n Please answer the questions below to predict again.'.format(output))\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n"
] | [
[
"numpy.array"
]
] |
yongsheng268/tfx | [
"6283fffb3ac81e2f213b4895fbe19623dfa9c4f5"
] | [
"tfx/utils/channel.py"
] | [
"# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Definition of TFX Channel type.\n\nDeprecated: please see the new location of this module at `tfx.types.channel`\nand `tfx.types.channel_utils`.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom typing import Dict, Iterable, List, Union, Text\n\nfrom tensorflow.python.util import deprecation # pylint: disable=g-direct-tensorflow-import\nfrom tfx import types\nfrom tfx.types import channel_utils\n\n\[email protected](\n None,\n 'tfx.utils.channel.Channel has been renamed to tfx.types.Channel as of '\n 'TFX 0.14.0.')\nclass Channel(types.Channel):\n pass\n\n\[email protected](None,\n 'tfx.utils.channel.as_channel has been renamed to '\n 'tfx.types.channel_utils.as_channel as of TFX 0.14.0.')\ndef as_channel(source: Union[Channel, Iterable[types.Artifact]]) -> Channel:\n return channel_utils.as_channel(source)\n\n\[email protected](\n None, 'tfx.utils.channel.unwrap_channel_dict has been renamed to '\n 'tfx.types.channel_utils.unwrap_channel_dict as of TFX 0.14.0.')\ndef unwrap_channel_dict(\n channel_dict: Dict[Text, Channel]) -> Dict[Text, List[types.Artifact]]:\n return channel_utils.unwrap_channel_dict(channel_dict)\n"
] | [
[
"tensorflow.python.util.deprecation.deprecated"
]
] |
everestocean/Algorithm | [
"a7e2ce796daf50488420290176dc8c1ccccb109f"
] | [
"machine_learning/deep_learning/deep_learning/tweet_sentiment.py"
] | [
"# -*- coding=utf-8 -*-\n\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, Dropout\nfrom keras.layers.convolutional import Conv1D\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nimport pandas as pd\nimport numpy as np\nimport spacy\n\nnlp=spacy.load(\"en\")\n\n#load the dataset\ntrain=pd.read_csv(\"../datasets/training.1600000.processed.noemoticon.csv\" , encoding= \"latin-1\")\nY_train = train[train.columns[0]]\nX_train = train[train.columns[5]]\n\n# split the data into test and train\nfrom sklearn.model_selection import train_test_split\ntrainset1x, trainset2x, trainset1y, trainset2y = train_test_split(X_train.values, Y_train.values, test_size=0.02,random_state=42 )\ntrainset2y=pd.get_dummies(trainset2y)\n\n# function to remove stopwords\ndef stopwords(sentence):\n new=[]\n sentence=nlp(sentence)\n for w in sentence:\n if (w.is_stop == False) & (w.pos_ !=\"PUNCT\"):\n new.append(w.string.strip())\n c=\" \".join(str(x) for x in new)\n return c\n\n# function to lemmatize the tweets\ndef lemmatize(sentence):\n sentence=nlp(sentence)\n str=\"\"\n for w in sentence:\n str+=\" \"+w.lemma_\n return nlp(str)\n\n#loading the glove model\ndef loadGloveModel(gloveFile):\n print(\"Loading Glove Model\")\n f = open(gloveFile,'r')\n model = {}\n for line in f:\n splitLine = line.split()\n word = splitLine[0]\n embedding = [float(val) for val in splitLine[1:]]\n model[word] = embedding\n print (\"Done.\"),len(model),(\" words loaded!\")\n return model\n\n# save the glove model\nmodel=loadGloveModel(\"/mnt/hdd/datasets/glove/glove.twitter.27B.200d.txt\")\n\n#vectorising the sentences\ndef sent_vectorizer(sent, model):\n sent_vec = np.zeros(200)\n numw = 0\n for w in sent.split():\n try:\n sent_vec = np.add(sent_vec, model[str(w)])\n numw+=1\n except:\n pass\n return sent_vec\n\n#obtain a clean vector\ncleanvector=[]\nfor i in range(trainset2x.shape[0]):\n document=trainset2x[i]\n document=document.lower()\n document=lemmatize(document)\n document=str(document)\n cleanvector.append(sent_vectorizer(document,model))\n\n#Getting the input and output in proper shape\ncleanvector=np.array(cleanvector)\ncleanvector =cleanvector.reshape(len(cleanvector),200,1)\n\n#tokenizing the sequences\ntokenizer = Tokenizer(num_words=16000)\ntokenizer.fit_on_texts(trainset2x)\nsequences = tokenizer.texts_to_sequences(trainset2x)\nword_index = tokenizer.word_index\nprint('Found %s unique tokens.' 
% len(word_index))\ndata = pad_sequences(sequences, maxlen=15, padding=\"post\")\nprint(data.shape)\n\n#reshape the data and preparing to train\ndata=data.reshape(len(cleanvector),15,1)\nfrom sklearn.model_selection import train_test_split\ntrainx, validx, trainy, validy = train_test_split(data, trainset2y, test_size=0.3,random_state=42)\n\n\n#calculate the number of words\nnb_words=len(tokenizer.word_index)+1\n\n#obtain theembedding matrix\nembedding_matrix = np.zeros((nb_words, 200))\nfor word, i in word_index.items():\n embedding_vector = model.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\nprint('Null word embeddings: %d' % np.sum(np.sum(embedding_matrix, axis=1) == 0))\n\ntrainy=np.array(trainy)\nvalidy=np.array(validy)\n\n\n# building a simple RNN model\ndef modelbuild():\n model = Sequential()\n model.add(keras.layers.InputLayer(input_shape=(15, 1)))\n keras.layers.embeddings.Embedding(nb_words, 15, weights=[embedding_matrix], input_length=15,\n trainable=False)\n\n model.add(keras.layers.recurrent.SimpleRNN(units=100, activation='relu',\n use_bias=True))\n model.add(keras.layers.Dense(units=1000, input_dim=2000, activation='sigmoid'))\n model.add(keras.layers.Dense(units=500, input_dim=1000, activation='relu'))\n model.add(keras.layers.Dense(units=2, input_dim=500, activation='softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model\n\n\n# compiling the model\nfinalmodel = modelbuild()\nfinalmodel.fit(trainx, trainy, epochs=10, batch_size=120, validation_data=(validx, validy))"
] | [
[
"numpy.sum",
"numpy.zeros",
"pandas.read_csv",
"numpy.array",
"sklearn.model_selection.train_test_split",
"pandas.get_dummies"
]
] |
xnone/CS224n-Natural-Language-Processing-with-Deep-Learning | [
"0ec63dcd6b8671712c0206cc64b3e4c2fc6f97a3"
] | [
"Assignments/assignment1/q3_sgd.py"
] | [
"#!/usr/bin/env python\n\n# Save parameters every a few SGD iterations as fail-safe\nSAVE_PARAMS_EVERY = 5000\n\nimport glob\nimport random\nimport numpy as np\nimport os.path as op\nimport pickle\n\n\ndef load_saved_params():\n \"\"\"\n A helper function that loads previously saved parameters and resets\n iteration start.\n \"\"\"\n st = 0\n for f in glob.glob(\"saved_params_*.npy\"):\n iter = int(op.splitext(op.basename(f))[0].split(\"_\")[2])\n if (iter > st):\n st = iter\n\n if st > 0:\n with open(\"saved_params_%d.npy\" % st, \"r\") as f:\n params = pickle.load(f)\n state = pickle.load(f)\n return st, params, state\n else:\n return st, None, None\n\n\ndef save_params(iter, params):\n with open(\"saved_params_%d.npy\" % iter, \"w\") as f:\n pickle.dump(params, f)\n pickle.dump(random.getstate(), f)\n\n\ndef sgd(f, x0, step, iterations, postprocessing=None, useSaved=False,\n PRINT_EVERY=10):\n \"\"\" Stochastic Gradient Descent\n\n Implement the stochastic gradient descent method in this function.\n\n Arguments:\n f -- the function to optimize, it should take a single\n argument and yield two outputs, a cost and the gradient\n with respect to the arguments\n x0 -- the initial point to start SGD from\n step -- the step size for SGD\n iterations -- total iterations to run SGD for\n postprocessing -- postprocessing function for the parameters\n if necessary. In the case of word2vec we will need to\n normalize the word vectors to have unit length.\n PRINT_EVERY -- specifies how many iterations to output loss\n\n Return:\n x -- the parameter value after SGD finishes\n \"\"\"\n\n # Anneal learning rate every several iterations\n ANNEAL_EVERY = 20000\n\n if useSaved:\n start_iter, oldx, state = load_saved_params()\n if start_iter > 0:\n x0 = oldx\n step *= 0.5 ** (start_iter / ANNEAL_EVERY)\n\n if state:\n random.setstate(state)\n else:\n start_iter = 0\n\n x = x0\n\n if not postprocessing:\n postprocessing = lambda x: x\n\n expcost = None\n\n for iter in range(start_iter + 1, iterations + 1):\n # Don't forget to apply the postprocessing after every iteration!\n # You might want to print the progress every few iterations.\n\n cost = None\n ### YOUR CODE HERE\n cost, grad = f(x)\n x -= step * grad\n postprocessing(x)\n ### END YOUR CODE\n\n if iter % PRINT_EVERY == 0:\n if not expcost:\n expcost = cost\n else:\n expcost = .95 * expcost + .05 * cost\n print(\"iter %d: %f\" % (iter, expcost))\n\n if iter % SAVE_PARAMS_EVERY == 0 and useSaved:\n save_params(iter, x)\n\n if iter % ANNEAL_EVERY == 0:\n step *= 0.5\n\n return x\n\n\ndef sanity_check():\n quad = lambda x: (np.sum(x ** 2), x * 2)\n\n print(\"Running sanity checks...\")\n t1 = sgd(quad, 0.5, 0.01, 1000, PRINT_EVERY=100)\n print(\"test 1 result:\", t1)\n assert abs(t1) <= 1e-6\n\n t2 = sgd(quad, 0.0, 0.01, 1000, PRINT_EVERY=100)\n print(\"test 2 result:\", t2)\n assert abs(t2) <= 1e-6\n\n t3 = sgd(quad, -1.5, 0.01, 1000, PRINT_EVERY=100)\n print(\"test 3 result:\", t3)\n assert abs(t3) <= 1e-6\n\n print(\"\")\n\n\ndef your_sanity_checks():\n \"\"\"\n Use this space add any additional sanity checks by running:\n python q3_sgd.py\n This function will not be called by the autograder, nor will\n your additional tests be graded.\n \"\"\"\n print(\"Running your sanity checks...\")\n ### YOUR CODE HERE\n # raise NotImplementedError\n ### END YOUR CODE\n\n\nif __name__ == \"__main__\":\n sanity_check()\n your_sanity_checks()\n"
] | [
[
"numpy.sum"
]
] |
rcsmit/COVIDcases | [
"8952931ee8316644dee55aad3f94c98f510e2f14"
] | [
"not_active_on_streamlit/prepare_casuslandelijk.py"
] | [
"# PREPARE A CSV-FILE TO ENABLE AN STACKED PLOT FOR POSITIVE TESTS, HOSPITALIZATIONS AND DECEASED\n# Hospitalizations and deceased are not lagged in time, the date of the result of the \"desease onset\", positieve test or notification is leading\n# https://data.rivm.nl/geonetwork/srv/dut/catalog.search#/metadata/2c4357c8-76e4-4662-9574-1deb8a73f724\n\n# MARCH 2021, Rene Smit (@rcsmit) - MIT license\n\n# Fields in\n# Date_file;Date_statistics;Date_statistics_type;Agegroup;Sex;\n# Province;Hospital_admission;Deceased;Week_of_death;Municipal_health_service\n\n# Fields out\n# pos_test_Date_statistics,pos_test_0-9,pos_test_10-19,pos_test_20-29,pos_test_30-39,\n# pos_test_40-49,pos_test_50-59,pos_test_60-69,pos_test_70-79,pos_test_80-89,pos_test_90+,\n# pos_test_<50,pos_test_Unknown,hosp_Date_statistics,hosp_0-9,hosp_10-19,hosp_20-29,hosp_30-39,\n# hosp_40-49,hosp_50-59,hosp_60-69,hosp_70-79,hosp_80-89,hosp_90+,hosp_<50,hosp_Unknown,\n# deceased_Date_statistics,deceased_0-9,deceased_10-19,deceased_20-29,deceased_30-39,\n# deceased_40-49,deceased_50-59,deceased_60-69,deceased_70-79,deceased_80-89,deceased_90+,\n# deceased_<50,deceased_Unknown\n\n\nimport pandas as pd\nimport numpy as np\nimport datetime as dt\nfrom datetime import datetime\n\ndef save_df(df, name):\n \"\"\" save dataframe on harddisk \"\"\"\n OUTPUT_DIR = (\n \"C:\\\\Users\\\\rcxsm\\\\Documents\\\\phyton_scripts\\\\covid19_seir_models\\\\output\\\\\"\n )\n OUTPUT_DIR = (\n \"C:\\\\Users\\\\rcxsm\\\\Documents\\\\phyton_scripts\\\\covid19_seir_models\\\\COVIDcases\\\\input\\\\\")\n name_ = OUTPUT_DIR + name + \".csv\"\n compression_opts = dict(method=None, archive_name=name_)\n df.to_csv(name_, index=False, compression=compression_opts)\n\n print(\"--- Saving \" + name_ + \" ---\")\n\n\ndef drop_columns(df, what_to_drop):\n \"\"\" drop columns. 
what_to_drop : list \"\"\"\n if what_to_drop != None:\n print(\"dropping \" + str(what_to_drop))\n for d in what_to_drop:\n df = df.drop(columns=[d], axis=1)\n return df\n\n\ndef main_x():\n # online version : https://data.rivm.nl/covid-19/COVID-19_casus_landelijk.csv\n url1 = \"C:\\\\Users\\\\rcxsm\\\\Documents\\\\phyton_scripts\\\\covid19_seir_models\\\\input\\\\COVID-19_casus_landelijk.csv\"\n df = pd.read_csv(url1, delimiter=\";\", low_memory=False)\n df[\"Date_statistics\"] = pd.to_datetime(df[\"Date_statistics\"], format=\"%Y-%m-%d\")\n df.rename(\n columns={\n \"Date_file\": \"count\",\n },\n inplace=True,\n )\n\n #until = dt.datetime.strptime(\"2021-1-1\", \"%Y-%m-%d\").date()\n #mask = (df[\"Date_statistics\"].dt.date >= dt.datetime.strptime(\"2020-1-1\", \"%Y-%m-%d\").date()) & (df[\"Date_statistics\"].dt.date <= until)\n #df = df.loc[mask]\n\n df_hospital = df[df[\"Hospital_admission\"] == \"Yes\"].copy(deep=False)\n df_deceased = df[df[\"Deceased\"] == \"Yes\"].copy(deep=False)\n\n df_all = df.groupby([ \"Agegroup\"], sort=True).count().reset_index()\n df_hospital = df_hospital.groupby([ \"Agegroup\"], sort=True).count().reset_index()\n df_deceased = df_deceased.groupby([\"Date_statistics\", \"Agegroup\"], sort=True).count().reset_index()\n #df_deceased = df_deceased.groupby([ \"Agegroup\"], sort=True).count().reset_index()\n\n df = df.groupby([\"Date_statistics\", \"Agegroup\"], sort=True).count().reset_index()\n print (\"CASES\")\n #df_all = df_all[[\"Agegroup\", \"count\"]]\n #df_hospital = df_hospital[[\"Agegroup\", \"count\"]]\n print (df_all)\n print (\"ZIEKENHUISOPNAMES\")\n print (df_hospital)\n\n df_pivot = (\n pd.pivot_table(\n df,\n values=\"count\",\n index=[\"Date_statistics\"],\n columns=[\"Agegroup\"],\n aggfunc=np.sum,\n )\n .reset_index()\n .copy(deep=False)\n )\n\n df_pivot_hospital = (\n pd.pivot_table(\n df_hospital,\n values=\"count\",\n index=[\"Date_statistics\"],\n columns=[\"Agegroup\"],\n aggfunc=np.sum,\n )\n .reset_index()\n .copy(deep=False)\n )\n\n df_pivot_deceased = (\n pd.pivot_table(\n df_deceased,\n values=\"count\",\n index=[\"Date_statistics\"],\n columns=[\"Agegroup\"],\n aggfunc=np.sum,\n )\n .reset_index()\n .copy(deep=False)\n )\n\n df_pivot = df_pivot.add_prefix(\"pos_test_\")\n df_pivot_hospital = df_pivot_hospital.add_prefix(\"hosp_\")\n save_df(df_pivot_hospital, \"df_hospital_per_dag_vanuit_casus_landelijk\")\n df_pivot_deceased = df_pivot_deceased.add_prefix(\"deceased_\")\n print(df_pivot_deceased.dtypes)\n todrop = [\n \"Date_statistics_type\",\n \"Sex\",\n \"Province\",\n \"Hospital_admission\",\n \"Deceased\",\n \"Week_of_death\",\n \"Municipal_health_service\",\n ]\n df = drop_columns(df, todrop)\n save_df(df, \"landelijk_leeftijd_2_vanuit_casus_landelijk\")\n\n save_df(df_pivot, \"landelijk_leeftijd_pivot_vanuit_casus_landelijk\")\n save_df(df_pivot_hospital, \"landelijk_leeftijd_pivot_hospital_vanuit_casus_landelijk\")\n save_df(df_pivot_deceased, \"landelijk_leeftijd_pivot_deceased_vanuit_casus_landelijk\")\n\n\n df_pivot_cases_per_week = df_pivot.groupby(pd.Grouper(key='pos_test_Date_statistics', freq='W')).sum()\n df_pivot_cases_per_week.index -= pd.Timedelta(days=6)\n df_pivot_cases_per_week[\"weekstart\"]= df_pivot_cases_per_week.index\n save_df(df_pivot_cases_per_week, \"landelijk_leeftijd_pivot_per_week_vanuit_casus_landelijk\")\n\n df_temp = pd.merge(\n df_pivot,\n df_pivot_hospital,\n how=\"outer\",\n left_on=\"pos_test_Date_statistics\",\n right_on=\"hosp_Date_statistics\",\n )\n df_temp = pd.merge(\n df_temp,\n 
df_pivot_deceased,\n how=\"outer\",\n left_on=\"pos_test_Date_statistics\",\n right_on=\"deceased_Date_statistics\",\n )\n\n df_temp_per_week = df_temp.groupby(pd.Grouper(key='pos_test_Date_statistics', freq='W')).sum()\n df_temp_per_week.index -= pd.Timedelta(days=6)\n print(df_temp_per_week)\n df_temp_per_week[\"weekstart\"]= df_temp_per_week.index\n save_df(df_temp, \"final_result_vanuit_casus_landelijk\")\n save_df(df_temp_per_week, \"final_result_per_week_vanuit_casus_landelijk\")\n\n\ndef main_week_data():\n \"\"\"Het maken van weekcijfers en gemiddelden tbv cases_hospital_decased_NL.py\n \"\"\"\n # online version : https://data.rivm.nl/covid-19/COVID-19_casus_landelijk.csv\n url1 = \"C:\\\\Users\\\\rcxsm\\\\Documents\\\\phyton_scripts\\\\covid19_seir_models\\\\input\\\\COVID-19_casus_landelijk.csv\"\n df = pd.read_csv(url1, delimiter=\";\", low_memory=False)\n todrop = [\n \"Date_statistics_type\",\n \"Sex\",\n \"Province\",\n \"Week_of_death\",\n \"Municipal_health_service\",\n ]\n df = drop_columns(df, todrop)\n\n df[\"Date_statistics\"] = pd.to_datetime(df[\"Date_statistics\"], format=\"%Y-%m-%d\")\n df = df.replace(\"Yes\", 1)\n df = df.replace(\"No\", 0)\n df = df.replace(\"Unknown\", 0)\n df[\"cases\"] = 1\n print(df)\n #df = df.groupby([ \"Date_statistics\", \"Agegroup\"], sort=True).sum().reset_index()\n df_week = df.groupby([ pd.Grouper(key='Date_statistics', freq='W'), \"Agegroup\",] ).sum().reset_index()\n print (df)\n df_week[\"Hosp_per_reported\"] = df_week[\"Hospital_admission\"]/df_week[\"cases\"]\n df_week[\"Deceased_per_reported\"] = df_week[\"Deceased\"]/df_week[\"cases\"]\n save_df(df_week, \"landelijk_leeftijd_week_vanuit_casus_landelijk_20211006\")\n\nmain_week_data()\n"
] | [
[
"pandas.read_csv",
"pandas.Timedelta",
"pandas.to_datetime",
"pandas.Grouper",
"pandas.merge",
"pandas.pivot_table"
]
] |