repo_name: string (length 6 to 130)
hexsha: sequence
file_path: sequence
code: sequence
apis: sequence
possible_versions: list
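
Each record below follows this schema. As a point of reference, here is a minimal, hypothetical Python sketch of one materialized row; the values are abbreviated placeholders (not actual dataset contents), and only the field layout is taken from the schema above.

# Hypothetical record illustrating the schema; the real "code" entries hold full
# source files and "apis" lists fully qualified call names per file.
record = {
    "repo_name": "example-user/example-repo",
    "hexsha": ["0123456789abcdef0123456789abcdef01234567"],
    "file_path": ["pkg/module.py"],
    "code": ["import numpy as np\n\ndef mean(xs):\n    return np.mean(xs)\n"],
    "apis": [["numpy.mean"]],
    "possible_versions": [{"matplotlib": [], "numpy": [], "pandas": [],
                           "scipy": [], "tensorflow": []}],
}

# The parallel sequences line up per file within the repository snapshot.
for path, api_list in zip(record["file_path"], record["apis"]):
    print(path, "->", api_list)
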
anthonyhu/tumblr-sentiment
[ "33607d3662842815e6ae8d4a981b782ec3c485e8" ]
[ "datasets/dataset_utils.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains utilities for downloading and converting datasets.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport tarfile\n\nfrom six.moves import urllib\nimport tensorflow as tf\n\nLABELS_FILENAME = 'labels.txt'\n\n\ndef int64_feature(values):\n \"\"\"Returns a TF-Feature of int64s.\n\n Args:\n values: A scalar or list of values.\n\n Returns:\n a TF-Feature.\n \"\"\"\n if not isinstance(values, (tuple, list)):\n values = [values]\n return tf.train.Feature(int64_list=tf.train.Int64List(value=values))\n\n\ndef bytes_feature(values):\n \"\"\"Returns a TF-Feature of bytes.\n\n Args:\n values: A string.\n\n Returns:\n a TF-Feature.\n \"\"\"\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values]))\n\n\ndef image_to_tfexample(image_data, image_format, height, width, class_id):\n return tf.train.Example(features=tf.train.Features(feature={\n 'image/encoded': bytes_feature(image_data),\n 'image/format': bytes_feature(image_format),\n 'image/class/label': int64_feature(class_id),\n 'image/height': int64_feature(height),\n 'image/width': int64_feature(width),\n }))\n\ndef image_to_tfexample_with_text(image_data, image_format, height, width, text_data, seq_len, class_id, post_id, day):\n return tf.train.Example(features=tf.train.Features(feature={\n 'image/encoded': bytes_feature(image_data),\n 'image/format': bytes_feature(image_format),\n 'image/class/label': int64_feature(class_id),\n 'image/height': int64_feature(height),\n 'image/width': int64_feature(width),\n 'text': int64_feature(text_data),\n 'seq_len': int64_feature(seq_len),\n 'post_id': int64_feature(post_id),\n 'day': int64_feature(day),\n }))\n\ndef download_and_uncompress_tarball(tarball_url, dataset_dir):\n \"\"\"Downloads the `tarball_url` and uncompresses it locally.\n\n Args:\n tarball_url: The URL of a tarball file.\n dataset_dir: The directory where the temporary files are stored.\n \"\"\"\n filename = tarball_url.split('/')[-1]\n filepath = os.path.join(dataset_dir, filename)\n\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(tarball_url, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dataset_dir)\n\n\ndef write_label_file(labels_to_class_names, dataset_dir, photos_subdir,\n filename=LABELS_FILENAME):\n \"\"\"Writes a file with the list of class names.\n\n Args:\n labels_to_class_names: A map of (integer) labels to class names.\n dataset_dir: The directory containing the data\n photos_subdir: The subdirectory 
in which the labels file should be written.\n filename: The filename where the class names are written.\n \"\"\"\n labels_filename = os.path.join(dataset_dir, photos_subdir, filename)\n with tf.gfile.Open(labels_filename, 'w') as f:\n for label in labels_to_class_names:\n class_name = labels_to_class_names[label]\n f.write('%d:%s\\n' % (label, class_name))\n\n\ndef has_labels(dataset_dir, photos_subdir, filename=LABELS_FILENAME):\n \"\"\"Specifies whether or not the dataset directory contains a label map file.\n\n Args:\n dataset_dir: The main directory.\n photos_subdir: The subdirectory in which the labels file is found.\n filename: The filename where the class names are written.\n\n Returns:\n `True` if the labels file exists and `False` otherwise.\n \"\"\"\n return tf.gfile.Exists(os.path.join(dataset_dir, photos_subdir, filename))\n\n\ndef read_label_file(dataset_dir, photos_subdir, filename=LABELS_FILENAME):\n \"\"\"Reads the labels file and returns a mapping from ID to class name.\n\n Args:\n dataset_dir: The main directory.\n photos_subdir: The subdirectory in which the labels file is found.\n filename: The filename where the class names are written.\n\n Returns:\n A map from a label (integer) to class name.\n \"\"\"\n labels_filename = os.path.join(dataset_dir, photos_subdir, filename)\n with tf.gfile.Open(labels_filename, 'rb') as f:\n lines = f.read().decode()\n lines = lines.split('\\n')\n lines = filter(None, lines)\n\n labels_to_class_names = {}\n for line in lines:\n index = line.index(':')\n labels_to_class_names[int(line[:index])] = line[index+1:]\n return labels_to_class_names\n" ]
[ [ "tensorflow.train.Int64List", "tensorflow.train.BytesList", "tensorflow.gfile.Open" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
BiolabHHU/Image-denoising-with-MRFNet
[ "79420d707058de0ac04522d499adef79b5f6fc6e" ]
[ "MRFNETgray/utils.py" ]
[ "import math\nimport torch.nn as nn\nimport numpy as np\nfrom skimage.metrics import peak_signal_noise_ratio, structural_similarity\n\n\ndef weights_init_kaiming(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n nn.init.kaiming_normal(m.weight.data, a=0, mode='fan_in')\n elif classname.find('Linear') != -1:\n nn.init.kaiming_normal(m.weight.data, a=0, mode='fan_in')\n elif classname.find('BatchNorm') != -1:\n # nn.init.uniform(m.weight.data, 1.0, 0.02)\n m.weight.data.normal_(mean=0, std=math.sqrt(2. / 9. / 64.)).clamp_(-0.025, 0.025)\n nn.init.constant(m.bias.data, 0.0)\n\n\ndef batch_PSNR(img, imclean, data_range):\n Img = img.data.cpu().numpy().astype(np.float32)\n Iclean = imclean.data.cpu().numpy().astype(np.float32)\n PSNR = 0\n for i in range(Img.shape[0]):\n PSNR += peak_signal_noise_ratio(Iclean[i, :, :, :], Img[i, :, :, :], data_range=data_range)\n return PSNR / Img.shape[0]\n\n\ndef batch_ssim(img, imclean):\n Img = img.data.cpu().numpy().astype(np.float32)\n Iclean = imclean.data.cpu().numpy().astype(np.float32)\n ssim = 0\n for i in range(Img.shape[0]):\n ssim += structural_similarity(Iclean[i, :, :], Img[i, :, :], data_range=1.)\n return ssim / Img.shape[0]\n\n\ndef data_augmentation(image, mode):\n out = np.transpose(image, (1, 2, 0))\n # out = image\n if mode == 0:\n # original\n out = out\n elif mode == 1:\n # flip up and down\n out = np.flipud(out)\n elif mode == 2:\n # rotate counterwise 90 degree\n out = np.rot90(out)\n elif mode == 3:\n # rotate 90 degree and flip up and down\n out = np.rot90(out)\n out = np.flipud(out)\n elif mode == 4:\n # rotate 180 degree\n out = np.rot90(out, k=2)\n elif mode == 5:\n # rotate 180 degree and flip\n out = np.rot90(out, k=2)\n out = np.flipud(out)\n elif mode == 6:\n # rotate 270 degree\n out = np.rot90(out, k=3)\n elif mode == 7:\n # rotate 270 degree and flip\n out = np.rot90(out, k=3)\n out = np.flipud(out)\n return np.transpose(out, (2, 0, 1))\n # return out\n" ]
[ [ "torch.nn.init.kaiming_normal", "numpy.rot90", "numpy.flipud", "numpy.transpose", "torch.nn.init.constant" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dadepo/spark
[ "8dbb7cb5cc0b6dd6639badedc69310ba4078542b" ]
[ "python/pyspark/sql/pandas/conversion.py" ]
[ "#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport sys\nimport warnings\nif sys.version >= '3':\n basestring = unicode = str\n xrange = range\nelse:\n from itertools import izip as zip\n\nfrom pyspark import since\nfrom pyspark.rdd import _load_from_socket\nfrom pyspark.sql.pandas.serializers import ArrowCollectSerializer\nfrom pyspark.sql.types import IntegralType\nfrom pyspark.sql.types import *\nfrom pyspark.traceback_utils import SCCallSiteSync\nfrom pyspark.util import _exception_message\n\n\nclass PandasConversionMixin(object):\n \"\"\"\n Min-in for the conversion from Spark to pandas. Currently, only :class:`DataFrame`\n can use this class.\n \"\"\"\n\n @since(1.3)\n def toPandas(self):\n \"\"\"\n Returns the contents of this :class:`DataFrame` as Pandas ``pandas.DataFrame``.\n\n This is only available if Pandas is installed and available.\n\n .. note:: This method should only be used if the resulting Pandas's :class:`DataFrame` is\n expected to be small, as all the data is loaded into the driver's memory.\n\n .. 
note:: Usage with spark.sql.execution.arrow.pyspark.enabled=True is experimental.\n\n >>> df.toPandas() # doctest: +SKIP\n age name\n 0 2 Alice\n 1 5 Bob\n \"\"\"\n from pyspark.sql.dataframe import DataFrame\n\n assert isinstance(self, DataFrame)\n\n from pyspark.sql.pandas.utils import require_minimum_pandas_version\n require_minimum_pandas_version()\n\n import numpy as np\n import pandas as pd\n\n if self.sql_ctx._conf.pandasRespectSessionTimeZone():\n timezone = self.sql_ctx._conf.sessionLocalTimeZone()\n else:\n timezone = None\n\n if self.sql_ctx._conf.arrowPySparkEnabled():\n use_arrow = True\n try:\n from pyspark.sql.pandas.types import to_arrow_schema\n from pyspark.sql.pandas.utils import require_minimum_pyarrow_version\n\n require_minimum_pyarrow_version()\n to_arrow_schema(self.schema)\n except Exception as e:\n\n if self.sql_ctx._conf.arrowPySparkFallbackEnabled():\n msg = (\n \"toPandas attempted Arrow optimization because \"\n \"'spark.sql.execution.arrow.pyspark.enabled' is set to true; however, \"\n \"failed by the reason below:\\n %s\\n\"\n \"Attempting non-optimization as \"\n \"'spark.sql.execution.arrow.pyspark.fallback.enabled' is set to \"\n \"true.\" % _exception_message(e))\n warnings.warn(msg)\n use_arrow = False\n else:\n msg = (\n \"toPandas attempted Arrow optimization because \"\n \"'spark.sql.execution.arrow.pyspark.enabled' is set to true, but has \"\n \"reached the error below and will not continue because automatic fallback \"\n \"with 'spark.sql.execution.arrow.pyspark.fallback.enabled' has been set to \"\n \"false.\\n %s\" % _exception_message(e))\n warnings.warn(msg)\n raise\n\n # Try to use Arrow optimization when the schema is supported and the required version\n # of PyArrow is found, if 'spark.sql.execution.arrow.pyspark.enabled' is enabled.\n if use_arrow:\n try:\n from pyspark.sql.pandas.types import _check_dataframe_localize_timestamps\n import pyarrow\n batches = self._collect_as_arrow()\n if len(batches) > 0:\n table = pyarrow.Table.from_batches(batches)\n # Pandas DataFrame created from PyArrow uses datetime64[ns] for date type\n # values, but we should use datetime.date to match the behavior with when\n # Arrow optimization is disabled.\n pdf = table.to_pandas(date_as_object=True)\n return _check_dataframe_localize_timestamps(pdf, timezone)\n else:\n return pd.DataFrame.from_records([], columns=self.columns)\n except Exception as e:\n # We might have to allow fallback here as well but multiple Spark jobs can\n # be executed. So, simply fail in this case for now.\n msg = (\n \"toPandas attempted Arrow optimization because \"\n \"'spark.sql.execution.arrow.pyspark.enabled' is set to true, but has \"\n \"reached the error below and can not continue. Note that \"\n \"'spark.sql.execution.arrow.pyspark.fallback.enabled' does not have an \"\n \"effect on failures in the middle of \"\n \"computation.\\n %s\" % _exception_message(e))\n warnings.warn(msg)\n raise\n\n # Below is toPandas without Arrow optimization.\n pdf = pd.DataFrame.from_records(self.collect(), columns=self.columns)\n\n dtype = {}\n for field in self.schema:\n pandas_type = PandasConversionMixin._to_corrected_pandas_type(field.dataType)\n # SPARK-21766: if an integer field is nullable and has null values, it can be\n # inferred by pandas as float column. Once we convert the column with NaN back\n # to integer type e.g., np.int16, we will hit exception. 
So we use the inferred\n # float type, not the corrected type from the schema in this case.\n if pandas_type is not None and \\\n not(isinstance(field.dataType, IntegralType) and field.nullable and\n pdf[field.name].isnull().any()):\n dtype[field.name] = pandas_type\n # Ensure we fall back to nullable numpy types, even when whole column is null:\n if isinstance(field.dataType, IntegralType) and pdf[field.name].isnull().any():\n dtype[field.name] = np.float64\n if isinstance(field.dataType, BooleanType) and pdf[field.name].isnull().any():\n dtype[field.name] = np.object\n\n for f, t in dtype.items():\n pdf[f] = pdf[f].astype(t, copy=False)\n\n if timezone is None:\n return pdf\n else:\n from pyspark.sql.pandas.types import _check_series_convert_timestamps_local_tz\n for field in self.schema:\n # TODO: handle nested timestamps, such as ArrayType(TimestampType())?\n if isinstance(field.dataType, TimestampType):\n pdf[field.name] = \\\n _check_series_convert_timestamps_local_tz(pdf[field.name], timezone)\n return pdf\n\n @staticmethod\n def _to_corrected_pandas_type(dt):\n \"\"\"\n When converting Spark SQL records to Pandas :class:`DataFrame`, the inferred data type\n may be wrong. This method gets the corrected data type for Pandas if that type may be\n inferred incorrectly.\n \"\"\"\n import numpy as np\n if type(dt) == ByteType:\n return np.int8\n elif type(dt) == ShortType:\n return np.int16\n elif type(dt) == IntegerType:\n return np.int32\n elif type(dt) == LongType:\n return np.int64\n elif type(dt) == FloatType:\n return np.float32\n elif type(dt) == DoubleType:\n return np.float64\n elif type(dt) == BooleanType:\n return np.bool\n elif type(dt) == TimestampType:\n return np.datetime64\n else:\n return None\n\n def _collect_as_arrow(self):\n \"\"\"\n Returns all records as a list of ArrowRecordBatches, pyarrow must be installed\n and available on driver and worker Python environments.\n\n .. note:: Experimental.\n \"\"\"\n from pyspark.sql.dataframe import DataFrame\n\n assert isinstance(self, DataFrame)\n\n with SCCallSiteSync(self._sc):\n port, auth_secret, jsocket_auth_server = self._jdf.collectAsArrowToPython()\n\n # Collect list of un-ordered batches where last element is a list of correct order indices\n try:\n results = list(_load_from_socket((port, auth_secret), ArrowCollectSerializer()))\n finally:\n # Join serving thread and raise any exceptions from collectAsArrowToPython\n jsocket_auth_server.getResult()\n\n # Separate RecordBatches from batch order indices in results\n batches = results[:-1]\n batch_order = results[-1]\n\n # Re-order the batch list using the correct order\n return [batches[i] for i in batch_order]\n\n\nclass SparkConversionMixin(object):\n \"\"\"\n Min-in for the conversion from pandas to Spark. 
Currently, only :class:`SparkSession`\n can use this class.\n \"\"\"\n def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):\n from pyspark.sql import SparkSession\n\n assert isinstance(self, SparkSession)\n\n from pyspark.sql.pandas.utils import require_minimum_pandas_version\n require_minimum_pandas_version()\n\n if self._wrapped._conf.pandasRespectSessionTimeZone():\n timezone = self._wrapped._conf.sessionLocalTimeZone()\n else:\n timezone = None\n\n # If no schema supplied by user then get the names of columns only\n if schema is None:\n schema = [str(x) if not isinstance(x, basestring) else\n (x.encode('utf-8') if not isinstance(x, str) else x)\n for x in data.columns]\n\n if self._wrapped._conf.arrowPySparkEnabled() and len(data) > 0:\n try:\n return self._create_from_pandas_with_arrow(data, schema, timezone)\n except Exception as e:\n from pyspark.util import _exception_message\n\n if self._wrapped._conf.arrowPySparkFallbackEnabled():\n msg = (\n \"createDataFrame attempted Arrow optimization because \"\n \"'spark.sql.execution.arrow.pyspark.enabled' is set to true; however, \"\n \"failed by the reason below:\\n %s\\n\"\n \"Attempting non-optimization as \"\n \"'spark.sql.execution.arrow.pyspark.fallback.enabled' is set to \"\n \"true.\" % _exception_message(e))\n warnings.warn(msg)\n else:\n msg = (\n \"createDataFrame attempted Arrow optimization because \"\n \"'spark.sql.execution.arrow.pyspark.enabled' is set to true, but has \"\n \"reached the error below and will not continue because automatic \"\n \"fallback with 'spark.sql.execution.arrow.pyspark.fallback.enabled' \"\n \"has been set to false.\\n %s\" % _exception_message(e))\n warnings.warn(msg)\n raise\n data = self._convert_from_pandas(data, schema, timezone)\n return self._create_dataframe(data, schema, samplingRatio, samplingRatio)\n\n def _convert_from_pandas(self, pdf, schema, timezone):\n \"\"\"\n Convert a pandas.DataFrame to list of records that can be used to make a DataFrame\n :return list of records\n \"\"\"\n from pyspark.sql import SparkSession\n\n assert isinstance(self, SparkSession)\n\n if timezone is not None:\n from pyspark.sql.pandas.types import _check_series_convert_timestamps_tz_local\n copied = False\n if isinstance(schema, StructType):\n for field in schema:\n # TODO: handle nested timestamps, such as ArrayType(TimestampType())?\n if isinstance(field.dataType, TimestampType):\n s = _check_series_convert_timestamps_tz_local(pdf[field.name], timezone)\n if s is not pdf[field.name]:\n if not copied:\n # Copy once if the series is modified to prevent the original\n # Pandas DataFrame from being updated\n pdf = pdf.copy()\n copied = True\n pdf[field.name] = s\n else:\n for column, series in pdf.iteritems():\n s = _check_series_convert_timestamps_tz_local(series, timezone)\n if s is not series:\n if not copied:\n # Copy once if the series is modified to prevent the original\n # Pandas DataFrame from being updated\n pdf = pdf.copy()\n copied = True\n pdf[column] = s\n\n # Convert pandas.DataFrame to list of numpy records\n np_records = pdf.to_records(index=False)\n\n # Check if any columns need to be fixed for Spark to infer properly\n if len(np_records) > 0:\n record_dtype = self._get_numpy_record_dtype(np_records[0])\n if record_dtype is not None:\n return [r.astype(record_dtype).tolist() for r in np_records]\n\n # Convert list of numpy records to python lists\n return [r.tolist() for r in np_records]\n\n def _get_numpy_record_dtype(self, rec):\n \"\"\"\n Used when 
converting a pandas.DataFrame to Spark using to_records(), this will correct\n the dtypes of fields in a record so they can be properly loaded into Spark.\n :param rec: a numpy record to check field dtypes\n :return corrected dtype for a numpy.record or None if no correction needed\n \"\"\"\n import numpy as np\n cur_dtypes = rec.dtype\n col_names = cur_dtypes.names\n record_type_list = []\n has_rec_fix = False\n for i in xrange(len(cur_dtypes)):\n curr_type = cur_dtypes[i]\n # If type is a datetime64 timestamp, convert to microseconds\n # NOTE: if dtype is datetime[ns] then np.record.tolist() will output values as longs,\n # conversion from [us] or lower will lead to py datetime objects, see SPARK-22417\n if curr_type == np.dtype('datetime64[ns]'):\n curr_type = 'datetime64[us]'\n has_rec_fix = True\n record_type_list.append((str(col_names[i]), curr_type))\n return np.dtype(record_type_list) if has_rec_fix else None\n\n def _create_from_pandas_with_arrow(self, pdf, schema, timezone):\n \"\"\"\n Create a DataFrame from a given pandas.DataFrame by slicing it into partitions, converting\n to Arrow data, then sending to the JVM to parallelize. If a schema is passed in, the\n data types will be used to coerce the data in Pandas to Arrow conversion.\n \"\"\"\n from pyspark.sql import SparkSession\n from pyspark.sql.dataframe import DataFrame\n\n assert isinstance(self, SparkSession)\n\n from pyspark.sql.pandas.serializers import ArrowStreamPandasSerializer\n from pyspark.sql.types import TimestampType\n from pyspark.sql.pandas.types import from_arrow_type, to_arrow_type\n from pyspark.sql.pandas.utils import require_minimum_pandas_version, \\\n require_minimum_pyarrow_version\n\n require_minimum_pandas_version()\n require_minimum_pyarrow_version()\n\n from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype\n import pyarrow as pa\n\n # Create the Spark schema from list of names passed in with Arrow types\n if isinstance(schema, (list, tuple)):\n arrow_schema = pa.Schema.from_pandas(pdf, preserve_index=False)\n struct = StructType()\n for name, field in zip(schema, arrow_schema):\n struct.add(name, from_arrow_type(field.type), nullable=field.nullable)\n schema = struct\n\n # Determine arrow types to coerce data when creating batches\n if isinstance(schema, StructType):\n arrow_types = [to_arrow_type(f.dataType) for f in schema.fields]\n elif isinstance(schema, DataType):\n raise ValueError(\"Single data type %s is not supported with Arrow\" % str(schema))\n else:\n # Any timestamps must be coerced to be compatible with Spark\n arrow_types = [to_arrow_type(TimestampType())\n if is_datetime64_dtype(t) or is_datetime64tz_dtype(t) else None\n for t in pdf.dtypes]\n\n # Slice the DataFrame to be batched\n step = -(-len(pdf) // self.sparkContext.defaultParallelism) # round int up\n pdf_slices = (pdf[start:start + step] for start in xrange(0, len(pdf), step))\n\n # Create list of Arrow (columns, type) for serializer dump_stream\n arrow_data = [[(c, t) for (_, c), t in zip(pdf_slice.iteritems(), arrow_types)]\n for pdf_slice in pdf_slices]\n\n jsqlContext = self._wrapped._jsqlContext\n\n safecheck = self._wrapped._conf.arrowSafeTypeConversion()\n col_by_name = True # col by name only applies to StructType columns, can't happen here\n ser = ArrowStreamPandasSerializer(timezone, safecheck, col_by_name)\n\n def reader_func(temp_filename):\n return self._jvm.PythonSQLUtils.readArrowStreamFromFile(jsqlContext, temp_filename)\n\n def create_RDD_server():\n return 
self._jvm.ArrowRDDServer(jsqlContext)\n\n # Create Spark DataFrame from Arrow stream file, using one batch per partition\n jrdd = self._sc._serialize_to_jvm(arrow_data, ser, reader_func, create_RDD_server)\n jdf = self._jvm.PythonSQLUtils.toDataFrame(jrdd, schema.json(), jsqlContext)\n df = DataFrame(jdf, self._wrapped)\n df._schema = schema\n return df\n\n\ndef _test():\n import doctest\n from pyspark.sql import SparkSession\n import pyspark.sql.pandas.conversion\n globs = pyspark.sql.pandas.conversion.__dict__.copy()\n spark = SparkSession.builder\\\n .master(\"local[4]\")\\\n .appName(\"sql.pandas.conversion tests\")\\\n .getOrCreate()\n globs['spark'] = spark\n (failure_count, test_count) = doctest.testmod(\n pyspark.sql.pandas.conversion, globs=globs,\n optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)\n spark.stop()\n if failure_count:\n sys.exit(-1)\n\n\nif __name__ == \"__main__\":\n _test()\n" ]
[ [ "pandas.DataFrame.from_records", "pandas.api.types.is_datetime64tz_dtype", "pandas.api.types.is_datetime64_dtype", "numpy.dtype" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
prise6/smart-iss-posts
[ "fc913078e7fbe6343fd36ec6ca9852322247da5d" ]
[ "iss/clustering/ClassicalClustering.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport os\nimport numpy as np\nfrom iss.clustering import AbstractClustering\nfrom sklearn.decomposition import PCA\nfrom sklearn.cluster import KMeans\nfrom sklearn.cluster import AgglomerativeClustering\nfrom sklearn.metrics import silhouette_samples\nfrom iss.tools import Tools\nfrom sklearn.externals import joblib\nfrom sklearn.manifold import TSNE\n\nclass ClassicalClustering(AbstractClustering):\n\n\tdef __init__(self, config, pictures_id = None, pictures_np = None):\n\n\t\tsuper().__init__(config, pictures_id, pictures_np)\n\n\t\tself.pca_fit = None\n\t\tself.pca_args = self.config['PCA']\n\t\tself.pca_reduction = None\n\t\tself.pca_save_name = \"PCA_model.pkl\"\n\n\t\tself.kmeans_fit = None\n\t\tself.kmeans_args = self.config['kmeans']\n\t\tself.kmeans_labels = None\n\t\tself.kmeans_centers = []\n\t\tself.kmeans_save_name = \"kmeans_model.pkl\"\n\n\n\t\tself.cah_fit = None\n\t\tself.cah_args = self.config['CAH']\n\t\tself.cah_labels = None\n\t\tself.cah_save_name = \"cah_model.pkl\"\n\t\t\n\t\tself.tsne_fit = None\n\t\tself.tsne_args = self.config['TSNE']\n\t\tself.tsne_embedding = None\n\n\t\tself.final_labels = None\n\t\tself.silhouette_score_labels = {}\n\n\n\tdef compute_pca(self):\n\n\t\tself.pca_fit = PCA(**self.pca_args)\n\t\tself.pca_fit.fit(self.pictures_np)\n\t\tself.pca_reduction = self.pca_fit.transform(self.pictures_np)\n\n\t\treturn self\n\n\tdef compute_kmeans(self):\n\t\tself.kmeans_fit = KMeans(**self.kmeans_args)\n\t\tself.kmeans_fit.fit(self.pca_reduction)\n\t\tself.kmeans_labels = self.kmeans_fit.labels_\n\t\treturn self\n\n\tdef compute_kmeans_centers(self):\n\t\tfor cl in range(self.kmeans_args['n_clusters']):\n\t\t\ttmp = self.pca_reduction[np.where(self.kmeans_labels == cl)]\n\t\t\tself.kmeans_centers.append(np.mean(tmp, axis = 0))\n\t\treturn self\n\n\tdef compute_cah(self):\n\n\t\tself.cah_fit = AgglomerativeClustering(**self.cah_args)\n\t\tself.cah_fit.fit_predict(self.kmeans_centers)\n\t\tself.cah_labels = self.cah_fit.labels_\n\t\treturn self\n\n\tdef compute_final_labels(self):\n\t\tself.final_labels = np.array([self.cah_labels[old_cl] for old_cl in self.kmeans_labels])\n\n\tdef compute_tsne(self):\n\t\tself.tsne_fit = TSNE(**self.tsne_args)\n\t\tself.tsne_embedding = self.tsne_fit.fit_transform(self.pca_reduction)\n\t\treturn self\n\t\t\n\tdef get_results(self):\n\t\treturn list(zip(self.pictures_id, self.final_labels, self.kmeans_labels, self.pictures_np))\n\n\tdef compute_silhouette_score(self):\n\t\tself.silhouette_score = silhouette_samples(self.pictures_np, self.final_labels)\n\t\tself.silhouette_score_labels = {cluster: np.mean(self.silhouette_score[self.final_labels == cluster]) for \n\t\tcluster in np.unique(self.final_labels)}\n\t\treturn self.silhouette_score_labels\n\n\n\tdef save(self):\n\t\tTools.create_dir_if_not_exists(self.save_directory)\n\n\t\tjoblib.dump(self.pca_fit, os.path.join(self.save_directory, self.pca_save_name))\n\t\tjoblib.dump(self.kmeans_fit, os.path.join(self.save_directory, self.kmeans_save_name))\n\t\tjoblib.dump(self.cah_fit, os.path.join(self.save_directory, self.cah_save_name))\n\n\tdef load(self):\n\t\tself.pca_fit = joblib.load(os.path.join(self.save_directory, self.pca_save_name))\n\t\tself.kmeans_fit = joblib.load(os.path.join(self.save_directory, self.kmeans_save_name))\n\t\tself.cah_fit = joblib.load(os.path.join(self.save_directory, self.cah_save_name))\n\n\n" ]
[ [ "sklearn.cluster.KMeans", "sklearn.metrics.silhouette_samples", "numpy.unique", "sklearn.manifold.TSNE", "numpy.mean", "sklearn.cluster.AgglomerativeClustering", "numpy.array", "numpy.where", "sklearn.decomposition.PCA" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mark-rtb/TensorflowTTS
[ "9999bbc39de6e5b7e5ef9aac25c5256c2bf77051" ]
[ "utils/config_manager.py" ]
[ "import subprocess\nimport shutil\nfrom pathlib import Path\n\nimport numpy as np\nimport tensorflow as tf\nimport ruamel.yaml\n\nfrom model.models import AutoregressiveTransformer, ForwardTransformer\nfrom utils.scheduling import piecewise_linear_schedule, reduction_schedule\n\n\nclass ConfigManager:\n \n def __init__(self, config_path: str, model_kind: str, session_name: str = None):\n if model_kind not in ['autoregressive', 'forward']:\n raise TypeError(f\"model_kind must be in {['autoregressive', 'forward']}\")\n self.config_path = Path(config_path)\n self.model_kind = model_kind\n self.yaml = ruamel.yaml.YAML()\n self.config, self.data_config, self.model_config = self._load_config()\n self.git_hash = self._get_git_hash()\n if session_name is None:\n if self.config['session_name'] is None:\n session_name = self.git_hash\n self.session_name = '_'.join(filter(None, [self.config_path.name, session_name]))\n self.base_dir, self.log_dir, self.train_datadir, self.weights_dir = self._make_folder_paths()\n self.learning_rate = np.array(self.config['learning_rate_schedule'])[0, 1].astype(np.float32)\n if model_kind == 'autoregressive':\n self.max_r = np.array(self.config['reduction_factor_schedule'])[0, 1].astype(np.int32)\n self.stop_scaling = self.config.get('stop_loss_scaling', 1.)\n \n def _load_config(self):\n with open(str(self.config_path / 'data_config.yaml'), 'rb') as data_yaml:\n data_config = self.yaml.load(data_yaml)\n with open(str(self.config_path / f'{self.model_kind}_config.yaml'), 'rb') as model_yaml:\n model_config = self.yaml.load(model_yaml)\n all_config = {}\n all_config.update(model_config)\n all_config.update(data_config)\n return all_config, data_config, model_config\n \n @staticmethod\n def _get_git_hash():\n try:\n return subprocess.check_output([\"git\", \"describe\", \"--always\"]).strip().decode()\n except Exception as e:\n print(f\"WARNING: could not retrieve git hash. {e}\")\n \n def _check_hash(self):\n try:\n git_hash = subprocess.check_output([\"git\", \"describe\", \"--always\"]).strip().decode()\n if self.config['git_hash'] != git_hash:\n print(f\"WARNING: git hash mismatch. Current: {git_hash}. Config hash: {self.config['git_hash']}\")\n except Exception as e:\n print(f\"WARNING: could not check git hash. 
{e}\")\n \n def _make_folder_paths(self):\n base_dir = Path(self.config['log_directory']) / self.session_name\n log_dir = base_dir / f'{self.model_kind}_logs'\n weights_dir = base_dir / f'{self.model_kind}_weights'\n train_datadir = self.config['train_data_directory']\n if train_datadir is None:\n train_datadir = self.config['data_directory']\n train_datadir = Path(train_datadir)\n return base_dir, log_dir, train_datadir, weights_dir\n \n @staticmethod\n def _print_dict_values(values, key_name, level=0, tab_size=2):\n tab = level * tab_size * ' '\n print(tab + '-', key_name, ':', values)\n \n def _print_dictionary(self, dictionary, recursion_level=0):\n for key in dictionary.keys():\n if isinstance(key, dict):\n recursion_level += 1\n self._print_dictionary(dictionary[key], recursion_level)\n else:\n self._print_dict_values(dictionary[key], key_name=key, level=recursion_level)\n \n def print_config(self):\n print('\\nCONFIGURATION', self.session_name)\n self._print_dictionary(self.config)\n \n def update_config(self):\n self.config['git_hash'] = self.git_hash\n self.model_config['git_hash'] = self.git_hash\n self.data_config['session_name'] = self.session_name\n self.model_config['session_name'] = self.session_name\n self.config['session_name'] = self.session_name\n \n def get_model(self, ignore_hash=False):\n if not ignore_hash:\n self._check_hash()\n if self.model_kind == 'autoregressive':\n return AutoregressiveTransformer(mel_channels=self.config['mel_channels'],\n encoder_model_dimension=self.config['encoder_model_dimension'],\n decoder_model_dimension=self.config['decoder_model_dimension'],\n encoder_num_heads=self.config['encoder_num_heads'],\n decoder_num_heads=self.config['decoder_num_heads'],\n encoder_feed_forward_dimension=self.config[\n 'encoder_feed_forward_dimension'],\n decoder_feed_forward_dimension=self.config[\n 'decoder_feed_forward_dimension'],\n encoder_maximum_position_encoding=self.config[\n 'encoder_max_position_encoding'],\n decoder_maximum_position_encoding=self.config[\n 'decoder_max_position_encoding'],\n encoder_dense_blocks=self.config['encoder_dense_blocks'],\n decoder_dense_blocks=self.config['decoder_dense_blocks'],\n decoder_prenet_dimension=self.config['decoder_prenet_dimension'],\n encoder_prenet_dimension=self.config['encoder_prenet_dimension'],\n postnet_conv_filters=self.config['postnet_conv_filters'],\n postnet_conv_layers=self.config['postnet_conv_layers'],\n postnet_kernel_size=self.config['postnet_kernel_size'],\n dropout_rate=self.config['dropout_rate'],\n max_r=self.max_r,\n mel_start_value=self.config['mel_start_value'],\n mel_end_value=self.config['mel_end_value'],\n phoneme_language=self.config['phoneme_language'],\n debug=self.config['debug'])\n \n else:\n return ForwardTransformer(encoder_model_dimension=self.config['encoder_model_dimension'],\n decoder_model_dimension=self.config['decoder_model_dimension'],\n dropout_rate=self.config['dropout_rate'],\n decoder_num_heads=self.config['decoder_num_heads'],\n encoder_num_heads=self.config['encoder_num_heads'],\n encoder_maximum_position_encoding=self.config['encoder_max_position_encoding'],\n decoder_maximum_position_encoding=self.config['decoder_max_position_encoding'],\n encoder_feed_forward_dimension=self.config['encoder_feed_forward_dimension'],\n decoder_feed_forward_dimension=self.config['decoder_feed_forward_dimension'],\n encoder_attention_conv_filters=self.config[\n 'encoder_attention_conv_filters'],\n decoder_attention_conv_filters=self.config[\n 'decoder_attention_conv_filters'],\n 
encoder_attention_conv_kernel=self.config['encoder_attention_conv_kernel'],\n decoder_attention_conv_kernel=self.config['decoder_attention_conv_kernel'],\n mel_channels=self.config['mel_channels'],\n postnet_conv_filters=self.config['postnet_conv_filters'],\n postnet_conv_layers=self.config['postnet_conv_layers'],\n postnet_kernel_size=self.config['postnet_kernel_size'],\n encoder_dense_blocks=self.config['encoder_dense_blocks'],\n decoder_dense_blocks=self.config['decoder_dense_blocks'],\n phoneme_language=self.config['phoneme_language'],\n debug=self.config['debug'])\n \n def compile_model(self, model):\n if self.model_kind == 'autoregressive':\n model._compile(stop_scaling=self.stop_scaling, optimizer=self.new_adam(self.learning_rate))\n else:\n model._compile(optimizer=self.new_adam(self.learning_rate))\n \n # TODO: move to model\n @staticmethod\n def new_adam(learning_rate):\n return tf.keras.optimizers.Adam(learning_rate,\n beta_1=0.9,\n beta_2=0.98,\n epsilon=1e-9)\n \n def dump_config(self):\n self.update_config()\n with open(self.base_dir / f'{self.model_kind}_config.yaml', 'w') as model_yaml:\n self.yaml.dump(self.model_config, model_yaml)\n with open(self.base_dir / 'data_config.yaml', 'w') as data_yaml:\n self.yaml.dump(self.data_config, data_yaml)\n \n def create_remove_dirs(self, clear_dir: False, clear_logs: False, clear_weights: False):\n self.base_dir.mkdir(exist_ok=True)\n if clear_dir:\n delete = input(f'Delete {self.log_dir} AND {self.weights_dir}? (y/[n])')\n if delete == 'y':\n shutil.rmtree(self.log_dir, ignore_errors=True)\n shutil.rmtree(self.weights_dir, ignore_errors=True)\n if clear_logs:\n delete = input(f'Delete {self.log_dir}? (y/[n])')\n if delete == 'y':\n shutil.rmtree(self.log_dir, ignore_errors=True)\n if clear_weights:\n delete = input(f'Delete {self.weights_dir}? (y/[n])')\n if delete == 'y':\n shutil.rmtree(self.weights_dir, ignore_errors=True)\n self.log_dir.mkdir(exist_ok=True)\n self.weights_dir.mkdir(exist_ok=True)\n \n def load_model(self, checkpoint_path: str = None, verbose=True):\n model = self.get_model()\n self.compile_model(model)\n ckpt = tf.train.Checkpoint(net=model)\n manager = tf.train.CheckpointManager(ckpt, self.weights_dir,\n max_to_keep=None)\n if checkpoint_path:\n ckpt.restore(checkpoint_path)\n if verbose:\n print(f'restored weights from {checkpoint_path} at step {model.step}')\n else:\n if manager.latest_checkpoint is None:\n print(f'WARNING: could not find weights file. Trying to load from \\n {self.weights_dir}.')\n print('Edit data_config.yaml to point at the right log directory.')\n ckpt.restore(manager.latest_checkpoint)\n if verbose:\n print(f'restored weights from {manager.latest_checkpoint} at step {model.step}')\n decoder_prenet_dropout = piecewise_linear_schedule(model.step, self.config['decoder_prenet_dropout_schedule'])\n reduction_factor = None\n if self.model_kind == 'autoregressive':\n reduction_factor = reduction_schedule(model.step, self.config['reduction_factor_schedule'])\n model.set_constants(reduction_factor=reduction_factor, decoder_prenet_dropout=decoder_prenet_dropout)\n return model\n" ]
[ [ "tensorflow.train.Checkpoint", "numpy.array", "tensorflow.keras.optimizers.Adam", "tensorflow.train.CheckpointManager" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jaymedina/notebooks
[ "48b982081dfc6df83cb8d9170568149a2dd021cc" ]
[ "notebooks/COS/AsnFile/test_asn.py" ]
[ "#!/usr/bin/env python\n#%%[markdown]\n### From here, you can run the `calcos` pipeline on your new association file.\n##### Running `calcos` is explained in *much* more detail in our [Notebook on running the pipeline](https://github.com/spacetelescope/notebooks/blob/master/notebooks/COS/CalCOS/CalCOS.ipynb)\n\n##### In short, to run the `calcos` pipeline, you will need the relavent reference files. These will need be hosted in the directory assigned the environmetn variable `lref`. \n##### No matter *where* you place these files, you *must* create the lref environment variable.\n# %%\nimport os, shutil\nimport calcos\nfrom astropy.table import Table\nimport matplotlib.pyplot as plt\nfrom pathlib import Path\n# %%\ndatadir = Path('./data/')\noutputdir = Path('./output/')\nplotsdir = Path('./output/plots/')\n# %%\n######### SETTING THE lref VARIABLE:\n### YOU LIKELY NEED TO CHANGE THIS LOCATION !\nwhere_i_keep_my_ref_files = \"/grp/hst/cdbs/lref/\"\nos.environ['lref'] = where_i_keep_my_ref_files\n\nprocessed_data_tab = Table.read(str(outputdir/'calcos_processed_1/')+'/ldifcombo_x1dsum.fits')\nfor segment in processed_data_tab:\n wvln, flux = segment[\"WAVELENGTH\", \"FLUX\"]\n plt.plot(wvln, flux)\n \nplt.xlabel('Wavelength [$\\AA$]')\nplt.ylabel('Flux [ergs/s/$cm^2$/$\\AA$]')\n\nplt.title(\"If this graph looks reasonable, your ASN file seems to have worked\\n\")\nplt.tight_layout()\nplt.savefig(str(plotsdir/\"AsnFile_test.png\"), bbox_inches = 'tight', dpi = 200)\n# %%\n" ]
[ [ "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.title", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
TheNeuralBit/google-cloud-python
[ "226cdf12f5dd69afb0ef665bb9e897d32d56f4b6" ]
[ "bigquery/tests/unit/test__pandas_helpers.py" ]
[ "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime\nimport decimal\nimport functools\nimport warnings\n\ntry:\n import pandas\nexcept ImportError: # pragma: NO COVER\n pandas = None\ntry:\n import pyarrow\n import pyarrow.types\nexcept ImportError: # pragma: NO COVER\n pyarrow = None\nimport pytest\nimport pytz\n\nfrom google.cloud.bigquery import schema\n\n\[email protected]\ndef module_under_test():\n from google.cloud.bigquery import _pandas_helpers\n\n return _pandas_helpers\n\n\ndef is_none(value):\n return value is None\n\n\ndef is_datetime(type_):\n # See: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#datetime-type\n return all_(\n pyarrow.types.is_timestamp,\n lambda type_: type_.unit == \"us\",\n lambda type_: type_.tz is None,\n )(type_)\n\n\ndef is_numeric(type_):\n # See: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#numeric-type\n return all_(\n pyarrow.types.is_decimal,\n lambda type_: type_.precision == 38,\n lambda type_: type_.scale == 9,\n )(type_)\n\n\ndef is_timestamp(type_):\n # See: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#timestamp-type\n return all_(\n pyarrow.types.is_timestamp,\n lambda type_: type_.unit == \"us\",\n lambda type_: type_.tz == \"UTC\",\n )(type_)\n\n\ndef do_all(functions, value):\n return all((func(value) for func in functions))\n\n\ndef all_(*functions):\n return functools.partial(do_all, functions)\n\n\[email protected](pyarrow is None, \"Requires `pyarrow`\")\ndef test_is_datetime():\n assert is_datetime(pyarrow.timestamp(\"us\", tz=None))\n assert not is_datetime(pyarrow.timestamp(\"ms\", tz=None))\n assert not is_datetime(pyarrow.timestamp(\"us\", tz=\"UTC\"))\n assert not is_datetime(pyarrow.string())\n\n\ndef test_do_all():\n assert do_all((lambda _: True, lambda _: True), None)\n assert not do_all((lambda _: True, lambda _: False), None)\n assert not do_all((lambda _: False,), None)\n\n\ndef test_all_():\n assert all_(lambda _: True, lambda _: True)(None)\n assert not all_(lambda _: True, lambda _: False)(None)\n\n\[email protected](\n \"bq_type,bq_mode,is_correct_type\",\n [\n (\"STRING\", \"NULLABLE\", pyarrow.types.is_string),\n (\"STRING\", None, pyarrow.types.is_string),\n (\"string\", \"NULLABLE\", pyarrow.types.is_string),\n (\"StRiNg\", \"NULLABLE\", pyarrow.types.is_string),\n (\"BYTES\", \"NULLABLE\", pyarrow.types.is_binary),\n (\"INTEGER\", \"NULLABLE\", pyarrow.types.is_int64),\n (\"INT64\", \"NULLABLE\", pyarrow.types.is_int64),\n (\"FLOAT\", \"NULLABLE\", pyarrow.types.is_float64),\n (\"FLOAT64\", \"NULLABLE\", pyarrow.types.is_float64),\n (\"NUMERIC\", \"NULLABLE\", is_numeric),\n (\"BOOLEAN\", \"NULLABLE\", pyarrow.types.is_boolean),\n (\"BOOL\", \"NULLABLE\", pyarrow.types.is_boolean),\n (\"TIMESTAMP\", \"NULLABLE\", is_timestamp),\n (\"DATE\", \"NULLABLE\", pyarrow.types.is_date32),\n (\"TIME\", \"NULLABLE\", pyarrow.types.is_time64),\n (\"DATETIME\", \"NULLABLE\", is_datetime),\n 
(\"GEOGRAPHY\", \"NULLABLE\", pyarrow.types.is_string),\n (\"UNKNOWN_TYPE\", \"NULLABLE\", is_none),\n # Use pyarrow.list_(item_type) for repeated (array) fields.\n (\n \"STRING\",\n \"REPEATED\",\n all_(\n pyarrow.types.is_list,\n lambda type_: pyarrow.types.is_string(type_.value_type),\n ),\n ),\n (\n \"STRING\",\n \"repeated\",\n all_(\n pyarrow.types.is_list,\n lambda type_: pyarrow.types.is_string(type_.value_type),\n ),\n ),\n (\n \"STRING\",\n \"RePeAtEd\",\n all_(\n pyarrow.types.is_list,\n lambda type_: pyarrow.types.is_string(type_.value_type),\n ),\n ),\n (\n \"BYTES\",\n \"REPEATED\",\n all_(\n pyarrow.types.is_list,\n lambda type_: pyarrow.types.is_binary(type_.value_type),\n ),\n ),\n (\n \"INTEGER\",\n \"REPEATED\",\n all_(\n pyarrow.types.is_list,\n lambda type_: pyarrow.types.is_int64(type_.value_type),\n ),\n ),\n (\n \"INT64\",\n \"REPEATED\",\n all_(\n pyarrow.types.is_list,\n lambda type_: pyarrow.types.is_int64(type_.value_type),\n ),\n ),\n (\n \"FLOAT\",\n \"REPEATED\",\n all_(\n pyarrow.types.is_list,\n lambda type_: pyarrow.types.is_float64(type_.value_type),\n ),\n ),\n (\n \"FLOAT64\",\n \"REPEATED\",\n all_(\n pyarrow.types.is_list,\n lambda type_: pyarrow.types.is_float64(type_.value_type),\n ),\n ),\n (\n \"NUMERIC\",\n \"REPEATED\",\n all_(pyarrow.types.is_list, lambda type_: is_numeric(type_.value_type)),\n ),\n (\n \"BOOLEAN\",\n \"REPEATED\",\n all_(\n pyarrow.types.is_list,\n lambda type_: pyarrow.types.is_boolean(type_.value_type),\n ),\n ),\n (\n \"BOOL\",\n \"REPEATED\",\n all_(\n pyarrow.types.is_list,\n lambda type_: pyarrow.types.is_boolean(type_.value_type),\n ),\n ),\n (\n \"TIMESTAMP\",\n \"REPEATED\",\n all_(pyarrow.types.is_list, lambda type_: is_timestamp(type_.value_type)),\n ),\n (\n \"DATE\",\n \"REPEATED\",\n all_(\n pyarrow.types.is_list,\n lambda type_: pyarrow.types.is_date32(type_.value_type),\n ),\n ),\n (\n \"TIME\",\n \"REPEATED\",\n all_(\n pyarrow.types.is_list,\n lambda type_: pyarrow.types.is_time64(type_.value_type),\n ),\n ),\n (\n \"DATETIME\",\n \"REPEATED\",\n all_(pyarrow.types.is_list, lambda type_: is_datetime(type_.value_type)),\n ),\n (\n \"GEOGRAPHY\",\n \"REPEATED\",\n all_(\n pyarrow.types.is_list,\n lambda type_: pyarrow.types.is_string(type_.value_type),\n ),\n ),\n (\"RECORD\", \"REPEATED\", is_none),\n (\"UNKNOWN_TYPE\", \"REPEATED\", is_none),\n ],\n)\[email protected](pyarrow is None, \"Requires `pyarrow`\")\ndef test_bq_to_arrow_data_type(module_under_test, bq_type, bq_mode, is_correct_type):\n field = schema.SchemaField(\"ignored_name\", bq_type, mode=bq_mode)\n actual = module_under_test.bq_to_arrow_data_type(field)\n assert is_correct_type(actual)\n\n\[email protected](\"bq_type\", [\"RECORD\", \"record\", \"STRUCT\", \"struct\"])\[email protected](pyarrow is None, \"Requires `pyarrow`\")\ndef test_bq_to_arrow_data_type_w_struct(module_under_test, bq_type):\n fields = (\n schema.SchemaField(\"field01\", \"STRING\"),\n schema.SchemaField(\"field02\", \"BYTES\"),\n schema.SchemaField(\"field03\", \"INTEGER\"),\n schema.SchemaField(\"field04\", \"INT64\"),\n schema.SchemaField(\"field05\", \"FLOAT\"),\n schema.SchemaField(\"field06\", \"FLOAT64\"),\n schema.SchemaField(\"field07\", \"NUMERIC\"),\n schema.SchemaField(\"field08\", \"BOOLEAN\"),\n schema.SchemaField(\"field09\", \"BOOL\"),\n schema.SchemaField(\"field10\", \"TIMESTAMP\"),\n schema.SchemaField(\"field11\", \"DATE\"),\n schema.SchemaField(\"field12\", \"TIME\"),\n schema.SchemaField(\"field13\", \"DATETIME\"),\n schema.SchemaField(\"field14\", 
\"GEOGRAPHY\"),\n )\n field = schema.SchemaField(\"ignored_name\", bq_type, mode=\"NULLABLE\", fields=fields)\n actual = module_under_test.bq_to_arrow_data_type(field)\n expected = pyarrow.struct(\n (\n pyarrow.field(\"field01\", pyarrow.string()),\n pyarrow.field(\"field02\", pyarrow.binary()),\n pyarrow.field(\"field03\", pyarrow.int64()),\n pyarrow.field(\"field04\", pyarrow.int64()),\n pyarrow.field(\"field05\", pyarrow.float64()),\n pyarrow.field(\"field06\", pyarrow.float64()),\n pyarrow.field(\"field07\", module_under_test.pyarrow_numeric()),\n pyarrow.field(\"field08\", pyarrow.bool_()),\n pyarrow.field(\"field09\", pyarrow.bool_()),\n pyarrow.field(\"field10\", module_under_test.pyarrow_timestamp()),\n pyarrow.field(\"field11\", pyarrow.date32()),\n pyarrow.field(\"field12\", module_under_test.pyarrow_time()),\n pyarrow.field(\"field13\", module_under_test.pyarrow_datetime()),\n pyarrow.field(\"field14\", pyarrow.string()),\n )\n )\n assert pyarrow.types.is_struct(actual)\n assert actual.num_children == len(fields)\n assert actual.equals(expected)\n\n\[email protected](pyarrow is None, \"Requires `pyarrow`\")\ndef test_bq_to_arrow_data_type_w_struct_unknown_subfield(module_under_test):\n fields = (\n schema.SchemaField(\"field1\", \"STRING\"),\n schema.SchemaField(\"field2\", \"INTEGER\"),\n # Don't know what to convert UNKNOWN_TYPE to, let type inference work,\n # instead.\n schema.SchemaField(\"field3\", \"UNKNOWN_TYPE\"),\n )\n field = schema.SchemaField(\"ignored_name\", \"RECORD\", mode=\"NULLABLE\", fields=fields)\n actual = module_under_test.bq_to_arrow_data_type(field)\n assert actual is None\n\n\[email protected](\n \"bq_type,rows\",\n [\n (\"STRING\", [\"abc\", None, \"def\", None]),\n (\"BYTES\", [b\"abc\", None, b\"def\", None]),\n (\"INTEGER\", [123, None, 456, None]),\n (\"INT64\", [-9223372036854775808, None, 9223372036854775807, 123]),\n (\"FLOAT\", [1.25, None, 3.5, None]),\n (\n \"NUMERIC\",\n [\n decimal.Decimal(\"-99999999999999999999999999999.999999999\"),\n None,\n decimal.Decimal(\"99999999999999999999999999999.999999999\"),\n decimal.Decimal(\"999.123456789\"),\n ],\n ),\n (\"BOOLEAN\", [True, None, False, None]),\n (\"BOOL\", [False, None, True, None]),\n # TODO: Once https://issues.apache.org/jira/browse/ARROW-5450 is\n # resolved, test with TIMESTAMP column. Conversion from pyarrow\n # TimestampArray to list of Python objects fails with OverflowError:\n # Python int too large to convert to C long.\n #\n # (\n # \"TIMESTAMP\",\n # [\n # datetime.datetime(1, 1, 1, 0, 0, 0, tzinfo=pytz.utc),\n # None,\n # datetime.datetime(9999, 12, 31, 23, 59, 59, 999999, tzinfo=pytz.utc),\n # datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=pytz.utc),\n # ],\n # ),\n (\n \"DATE\",\n [\n datetime.date(1, 1, 1),\n None,\n datetime.date(9999, 12, 31),\n datetime.date(1970, 1, 1),\n ],\n ),\n (\n \"TIME\",\n [\n datetime.time(0, 0, 0),\n None,\n datetime.time(23, 59, 59, 999999),\n datetime.time(12, 0, 0),\n ],\n ),\n # TODO: Once https://issues.apache.org/jira/browse/ARROW-5450 is\n # resolved, test with DATETIME column. 
Conversion from pyarrow\n # TimestampArray to list of Python objects fails with OverflowError:\n # Python int too large to convert to C long.\n #\n # (\n # \"DATETIME\",\n # [\n # datetime.datetime(1, 1, 1, 0, 0, 0),\n # None,\n # datetime.datetime(9999, 12, 31, 23, 59, 59, 999999),\n # datetime.datetime(1970, 1, 1, 0, 0, 0),\n # ],\n # ),\n (\n \"GEOGRAPHY\",\n [\n \"POINT(30 10)\",\n None,\n \"LINESTRING (30 10, 10 30, 40 40)\",\n \"POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))\",\n ],\n ),\n ],\n)\[email protected](pandas is None, \"Requires `pandas`\")\[email protected](pyarrow is None, \"Requires `pyarrow`\")\ndef test_bq_to_arrow_array_w_nullable_scalars(module_under_test, bq_type, rows):\n series = pandas.Series(rows, dtype=\"object\")\n bq_field = schema.SchemaField(\"field_name\", bq_type)\n arrow_array = module_under_test.bq_to_arrow_array(series, bq_field)\n roundtrip = arrow_array.to_pylist()\n assert rows == roundtrip\n\n\[email protected](pandas is None, \"Requires `pandas`\")\[email protected](pyarrow is None, \"Requires `pyarrow`\")\ndef test_bq_to_arrow_array_w_arrays(module_under_test):\n rows = [[1, 2, 3], [], [4, 5, 6]]\n series = pandas.Series(rows, dtype=\"object\")\n bq_field = schema.SchemaField(\"field_name\", \"INTEGER\", mode=\"REPEATED\")\n arrow_array = module_under_test.bq_to_arrow_array(series, bq_field)\n roundtrip = arrow_array.to_pylist()\n assert rows == roundtrip\n\n\[email protected](\"bq_type\", [\"RECORD\", \"record\", \"STRUCT\", \"struct\"])\[email protected](pandas is None, \"Requires `pandas`\")\[email protected](pyarrow is None, \"Requires `pyarrow`\")\ndef test_bq_to_arrow_array_w_structs(module_under_test, bq_type):\n rows = [\n {\"int_col\": 123, \"string_col\": \"abc\"},\n None,\n {\"int_col\": 456, \"string_col\": \"def\"},\n ]\n series = pandas.Series(rows, dtype=\"object\")\n bq_field = schema.SchemaField(\n \"field_name\",\n bq_type,\n fields=(\n schema.SchemaField(\"int_col\", \"INTEGER\"),\n schema.SchemaField(\"string_col\", \"STRING\"),\n ),\n )\n arrow_array = module_under_test.bq_to_arrow_array(series, bq_field)\n roundtrip = arrow_array.to_pylist()\n assert rows == roundtrip\n\n\[email protected](pandas is None, \"Requires `pandas`\")\[email protected](pyarrow is None, \"Requires `pyarrow`\")\ndef test_bq_to_arrow_array_w_special_floats(module_under_test):\n bq_field = schema.SchemaField(\"field_name\", \"FLOAT64\")\n rows = [float(\"-inf\"), float(\"nan\"), float(\"inf\"), None]\n series = pandas.Series(rows, dtype=\"object\")\n arrow_array = module_under_test.bq_to_arrow_array(series, bq_field)\n roundtrip = arrow_array.to_pylist()\n assert len(rows) == len(roundtrip)\n assert roundtrip[0] == float(\"-inf\")\n assert roundtrip[1] != roundtrip[1] # NaN doesn't equal itself.\n assert roundtrip[2] == float(\"inf\")\n assert roundtrip[3] is None\n\n\[email protected](pandas is None, \"Requires `pandas`\")\[email protected](pyarrow is None, \"Requires `pyarrow`\")\ndef test_dataframe_to_arrow_w_required_fields(module_under_test):\n bq_schema = (\n schema.SchemaField(\"field01\", \"STRING\", mode=\"REQUIRED\"),\n schema.SchemaField(\"field02\", \"BYTES\", mode=\"REQUIRED\"),\n schema.SchemaField(\"field03\", \"INTEGER\", mode=\"REQUIRED\"),\n schema.SchemaField(\"field04\", \"INT64\", mode=\"REQUIRED\"),\n schema.SchemaField(\"field05\", \"FLOAT\", mode=\"REQUIRED\"),\n schema.SchemaField(\"field06\", \"FLOAT64\", mode=\"REQUIRED\"),\n schema.SchemaField(\"field07\", \"NUMERIC\", mode=\"REQUIRED\"),\n schema.SchemaField(\"field08\", 
\"BOOLEAN\", mode=\"REQUIRED\"),\n schema.SchemaField(\"field09\", \"BOOL\", mode=\"REQUIRED\"),\n schema.SchemaField(\"field10\", \"TIMESTAMP\", mode=\"REQUIRED\"),\n schema.SchemaField(\"field11\", \"DATE\", mode=\"REQUIRED\"),\n schema.SchemaField(\"field12\", \"TIME\", mode=\"REQUIRED\"),\n schema.SchemaField(\"field13\", \"DATETIME\", mode=\"REQUIRED\"),\n schema.SchemaField(\"field14\", \"GEOGRAPHY\", mode=\"REQUIRED\"),\n )\n dataframe = pandas.DataFrame(\n {\n \"field01\": [\"hello\", \"world\"],\n \"field02\": [b\"abd\", b\"efg\"],\n \"field03\": [1, 2],\n \"field04\": [3, 4],\n \"field05\": [1.25, 9.75],\n \"field06\": [-1.75, -3.5],\n \"field07\": [decimal.Decimal(\"1.2345\"), decimal.Decimal(\"6.7891\")],\n \"field08\": [True, False],\n \"field09\": [False, True],\n \"field10\": [\n datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=pytz.utc),\n datetime.datetime(2012, 12, 21, 9, 7, 42, tzinfo=pytz.utc),\n ],\n \"field11\": [datetime.date(9999, 12, 31), datetime.date(1970, 1, 1)],\n \"field12\": [datetime.time(23, 59, 59, 999999), datetime.time(12, 0, 0)],\n \"field13\": [\n datetime.datetime(1970, 1, 1, 0, 0, 0),\n datetime.datetime(2012, 12, 21, 9, 7, 42),\n ],\n \"field14\": [\n \"POINT(30 10)\",\n \"POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))\",\n ],\n }\n )\n\n arrow_table = module_under_test.dataframe_to_arrow(dataframe, bq_schema)\n arrow_schema = arrow_table.schema\n\n assert len(arrow_schema) == len(bq_schema)\n for arrow_field in arrow_schema:\n assert not arrow_field.nullable\n\n\[email protected](pandas is None, \"Requires `pandas`\")\[email protected](pyarrow is None, \"Requires `pyarrow`\")\ndef test_dataframe_to_arrow_w_unknown_type(module_under_test):\n bq_schema = (\n schema.SchemaField(\"field00\", \"UNKNOWN_TYPE\"),\n schema.SchemaField(\"field01\", \"STRING\"),\n schema.SchemaField(\"field02\", \"BYTES\"),\n schema.SchemaField(\"field03\", \"INTEGER\"),\n )\n dataframe = pandas.DataFrame(\n {\n \"field00\": [\"whoami\", \"whatami\"],\n \"field01\": [\"hello\", \"world\"],\n \"field02\": [b\"abd\", b\"efg\"],\n \"field03\": [1, 2],\n }\n )\n\n with warnings.catch_warnings(record=True) as warned:\n arrow_table = module_under_test.dataframe_to_arrow(dataframe, bq_schema)\n arrow_schema = arrow_table.schema\n\n assert len(warned) == 1\n warning = warned[0]\n assert \"field00\" in str(warning)\n\n assert len(arrow_schema) == len(bq_schema)\n assert arrow_schema[0].name == \"field00\"\n assert arrow_schema[1].name == \"field01\"\n assert arrow_schema[2].name == \"field02\"\n assert arrow_schema[3].name == \"field03\"\n\n\[email protected](pandas is None, \"Requires `pandas`\")\ndef test_dataframe_to_parquet_without_pyarrow(module_under_test, monkeypatch):\n monkeypatch.setattr(module_under_test, \"pyarrow\", None)\n with pytest.raises(ValueError) as exc:\n module_under_test.dataframe_to_parquet(pandas.DataFrame(), (), None)\n assert \"pyarrow is required\" in str(exc)\n\n\[email protected](pandas is None, \"Requires `pandas`\")\[email protected](pyarrow is None, \"Requires `pyarrow`\")\ndef test_dataframe_to_parquet_w_missing_columns(module_under_test, monkeypatch):\n with pytest.raises(ValueError) as exc:\n module_under_test.dataframe_to_parquet(\n pandas.DataFrame(), (schema.SchemaField(\"not_found\", \"STRING\"),), None\n )\n assert \"columns in schema must match\" in str(exc)\n" ]
[ [ "pandas.Series", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
connesy/hep_ml
[ "41e97d598e621ce323a92a607625213ef9d45a36" ]
[ "hep_ml/losses.py" ]
[ "\"\"\"\n**hep_ml.losses** contains different loss functions to use in gradient boosting.\n\nApart from standard classification losses, **hep_ml** contains losses for uniform classification\n(see :class:`BinFlatnessLossFunction`, :class:`KnnFlatnessLossFunction`, :class:`KnnAdaLossFunction`)\nand for ranking (see :class:`RankBoostLossFunction`)\n\n**Interface**\n\nLoss functions inside **hep_ml** are stateful estimators and require initial fitting,\nwhich is done automatically inside gradient boosting.\n\nAll loss function should be derived from AbstractLossFunction and implement this interface.\n\n\nExamples\n________\n\nTraining gradient boosting, optimizing LogLoss and using all features\n\n>>> from hep_ml.gradientboosting import UGradientBoostingClassifier, LogLossFunction\n>>> classifier = UGradientBoostingClassifier(loss=LogLossFunction(), n_estimators=100)\n>>> classifier.fit(X, y, sample_weight=sample_weight)\n\nUsing composite loss function and subsampling:\n\n>>> loss = CompositeLossFunction()\n>>> classifier = UGradientBoostingClassifier(loss=loss, subsample=0.5)\n\nTo get uniform predictions in mass in background (note that mass should not present in features):\n\n>>> loss = BinFlatnessLossFunction(uniform_features=['mass'], uniform_label=0, train_features=['pt', 'flight_time'])\n>>> classifier = UGradientBoostingClassifier(loss=loss)\n\nTo get uniform predictions in both signal and background:\n\n>>> loss = BinFlatnessLossFunction(uniform_features=['mass'], uniform_label=[0, 1], train_features=['pt', 'flight_time'])\n>>> classifier = UGradientBoostingClassifier(loss=loss)\n\n\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\nimport numbers\nimport warnings\n\nimport numpy\nimport pandas\nfrom scipy import sparse\nfrom scipy.special import expit\nfrom sklearn.utils.validation import check_random_state\nfrom sklearn.base import BaseEstimator\n\nfrom .commonutils import compute_knn_indices_of_signal, check_sample_weight, check_uniform_label, weighted_quantile\nfrom .metrics_utils import bin_to_group_indices, compute_bin_indices, compute_group_weights, \\\n group_indices_to_groups_matrix\n\n__author__ = 'Alex Rogozhnikov'\n\n__all__ = [\n 'AbstractLossFunction',\n 'MSELossFunction',\n 'MAELossFunction',\n 'LogLossFunction',\n 'AdaLossFunction',\n 'CompositeLossFunction',\n 'BinFlatnessLossFunction',\n 'KnnFlatnessLossFunction',\n 'KnnAdaLossFunction',\n 'RankBoostLossFunction',\n 'ReweightLossFunction'\n]\n\n\ndef _compute_positions(y_pred, sample_weight):\n \"\"\"\n For each event computes it position among other events by prediction.\n position = (weighted) part of elements with lower predictions => position belongs to [0, 1]\n\n This function is very close to `scipy.stats.rankdata`, but supports weights.\n \"\"\"\n order = numpy.argsort(y_pred)\n ordered_weights = sample_weight[order]\n ordered_weights /= float(numpy.sum(ordered_weights))\n efficiencies = (numpy.cumsum(ordered_weights) - 0.5 * ordered_weights)\n return efficiencies[numpy.argsort(order)]\n\n\nclass AbstractLossFunction(BaseEstimator):\n \"\"\"\n This is base class for loss functions used in `hep_ml`.\n Main differences compared to `scikit-learn` loss functions:\n\n 1. losses are stateful, and may require fitting of training data before usage.\n 2. thus, when computing gradient, hessian, one shall provide predictions of all events.\n 3. losses are object that shall be passed as estimators to gradient boosting (see examples).\n 4. 
only two-class case is supported, and different classes may have different role and meaning.\n \"\"\"\n\n def fit(self, X, y, sample_weight):\n \"\"\" This method is optional, it is called before all the others.\n Heavy preprocessing should be done here.\"\"\"\n return self\n\n def __call__(self, y_pred):\n \"\"\"Compute loss function\n\n :param y_pred: contains predictions for all the events passed to `fit` method,\n moreover, the order should be the same\"\"\"\n raise NotImplementedError()\n\n def prepare_tree_params(self, y_pred):\n \"\"\"Prepares parameters for regression tree that minimizes MSE\n\n :param y_pred: contains predictions for all the events passed to `fit` method,\n moreover, the order should be the same\n :return: tuple (tree_target, tree_weight) with target and weight to be used in decision tree\n \"\"\"\n return self.negative_gradient(y_pred), numpy.ones(len(y_pred))\n\n def prepare_new_leaves_values(self, terminal_regions, leaf_values, y_pred):\n \"\"\"Loss function can prepare better values for leaves by overriding this function\n\n :param terminal_regions: indices of terminal regions of each event.\n :param leaf_values: numpy.array, current mapping of leaf indices to prediction values.\n :param y_pred: predictions before adding new tree.\n :return: numpy.array with new prediction values for all leaves.\n \"\"\"\n return leaf_values\n\n def compute_optimal_step(self, y_pred):\n \"\"\"\n Compute optimal global step. This method is typically used to make optimal step\n before fitting trees to reduce variance.\n\n :param y_pred: initial predictions, numpy.array of shape [n_samples]\n :return: float\n \"\"\"\n return 0.\n\n\nclass HessianLossFunction(AbstractLossFunction):\n \"\"\"Loss function with diagonal hessian (or hessian, which can be approximated by diagonal),\n uses Newton-Raphson step to update trees. 
\"\"\"\n\n def __init__(self, regularization=5.):\n \"\"\"\n :param regularization: float, penalty for leaves with few events,\n corresponds roughly to the number of added events of both classes to each leaf.\n \"\"\"\n self.regularization = regularization\n\n def fit(self, X, y, sample_weight):\n self.regularization_ = self.regularization * numpy.mean(sample_weight)\n return self\n\n def hessian(self, y_pred):\n \"\"\" Returns diagonal of hessian matrix.\n :param y_pred: numpy.array of shape [n_samples] with events passed in the same order as in `fit`.\n :return: numpy.array of shape [n_sampels] with second derivatives with respect to each prediction.\n \"\"\"\n raise NotImplementedError('Override this method in loss function.')\n\n def prepare_tree_params(self, y_pred):\n grad = self.negative_gradient(y_pred)\n hess = self.hessian(y_pred) + 0.01\n return grad / hess, hess\n\n def prepare_new_leaves_values(self, terminal_regions, leaf_values, y_pred):\n \"\"\" This expression comes from optimization of second-order approximation of loss function.\"\"\"\n gradients = self.negative_gradient(y_pred)\n hessians = self.hessian(y_pred)\n return HessianLossFunction._prepare_hessian_leaves_values(\n terminal_regions=terminal_regions, leaf_values=leaf_values,\n gradients=gradients, hessians=hessians, regularization_=self.regularization_\n )\n\n @staticmethod\n def _prepare_hessian_leaves_values(terminal_regions, leaf_values, gradients, hessians, regularization_):\n min_length = len(leaf_values)\n nominators = numpy.bincount(terminal_regions, weights=gradients, minlength=min_length)\n denominators = numpy.bincount(terminal_regions, weights=hessians, minlength=min_length)\n return nominators / (denominators + regularization_)\n\n def compute_optimal_step(self, y_pred):\n \"\"\"\n Optimal step is computed using Newton-Raphson algorithm (10 iterations).\n :param y_pred: predictions (usually, zeros)\n :return: float\n \"\"\"\n terminal_regions = numpy.zeros(len(y_pred), dtype='int')\n leaf_values = numpy.zeros(shape=1)\n step = 0.\n for _ in range(10):\n step_ = self.prepare_new_leaves_values(terminal_regions, leaf_values=leaf_values, y_pred=y_pred + step)[0]\n step += 0.5 * step_\n return step\n\n\n# region Classification losses\n\nclass AdaLossFunction(HessianLossFunction):\n \"\"\" AdaLossFunction is the same as Exponential Loss Function (aka exploss) \"\"\"\n\n def fit(self, X, y, sample_weight):\n self.sample_weight = check_sample_weight(y, sample_weight=sample_weight,\n normalize=True, normalize_by_class=True)\n self.y_signed = numpy.array(2 * y - 1, dtype='float32')\n HessianLossFunction.fit(self, X, y, sample_weight=self.sample_weight)\n return self\n\n def __call__(self, y_pred):\n return numpy.sum(self.sample_weight * numpy.exp(- self.y_signed * y_pred))\n\n def negative_gradient(self, y_pred):\n return self.y_signed * self.sample_weight * numpy.exp(- self.y_signed * y_pred)\n\n def hessian(self, y_pred):\n return self.sample_weight * numpy.exp(- self.y_signed * y_pred)\n\n def prepare_tree_params(self, y_pred):\n return self.y_signed, self.hessian(y_pred)\n\n\nclass LogLossFunction(HessianLossFunction):\n \"\"\"Logistic loss function (logloss), aka binomial deviance, aka cross-entropy,\n aka log-likelihood loss.\n \"\"\"\n\n def fit(self, X, y, sample_weight):\n self.sample_weight = check_sample_weight(y, sample_weight=sample_weight,\n normalize=True, normalize_by_class=True)\n self.y_signed = 2 * y - 1\n self.minus_y_signed = - self.y_signed\n self.y_signed_times_weights = self.y_signed * 
self.sample_weight\n HessianLossFunction.fit(self, X, y, sample_weight=self.sample_weight)\n return self\n\n def __call__(self, y_pred):\n return numpy.sum(self.sample_weight * numpy.logaddexp(0, self.minus_y_signed * y_pred))\n\n def negative_gradient(self, y_pred):\n return self.y_signed_times_weights * expit(self.minus_y_signed * y_pred)\n\n def hessian(self, y_pred):\n expits = expit(y_pred)\n return self.sample_weight * expits * (1 - expits)\n\n def prepare_tree_params(self, y_pred):\n return self.y_signed * expit(self.minus_y_signed * y_pred), self.sample_weight\n\n\nclass CompositeLossFunction(HessianLossFunction):\n \"\"\"\n Composite loss function is defined as exploss for backgorund events and logloss for signal with proper constants.\n\n Such kind of loss functions is very useful to optimize AMS or in situations where very clean signal is expected.\n \"\"\"\n\n def fit(self, X, y, sample_weight):\n self.y = y\n self.sample_weight = check_sample_weight(y, sample_weight=sample_weight,\n normalize=True, normalize_by_class=True)\n self.y_signed = 2 * y - 1\n self.sig_w = (y == 1) * self.sample_weight\n self.bck_w = (y == 0) * self.sample_weight\n HessianLossFunction.fit(self, X, y, sample_weight=self.sample_weight)\n return self\n\n def __call__(self, y_pred):\n result = numpy.sum(self.sig_w * numpy.logaddexp(0, -y_pred))\n result += numpy.sum(self.bck_w * numpy.exp(0.5 * y_pred))\n return result\n\n def negative_gradient(self, y_pred):\n result = self.sig_w * expit(- y_pred)\n result -= 0.5 * self.bck_w * numpy.exp(0.5 * y_pred)\n return result\n\n def hessian(self, y_pred):\n expits = expit(y_pred)\n return self.sig_w * expits * (1 - expits) + self.bck_w * 0.25 * numpy.exp(0.5 * y_pred)\n\n\n# endregion\n\n# region Regression Losses\n\nclass MSELossFunction(HessianLossFunction):\n r\"\"\" Mean squared error loss function, used for regression.\n :math:`\\text{loss} = \\sum_i (y_i - \\hat{y}_i)^2`\n \"\"\"\n\n def fit(self, X, y, sample_weight):\n self.y = y\n self.sample_weight = check_sample_weight(y, sample_weight=sample_weight, normalize=True)\n HessianLossFunction.fit(self, X, y, sample_weight=sample_weight)\n return self\n\n def __call__(self, y_pred):\n return 0.5 * numpy.sum(self.sample_weight * (self.y - y_pred) ** 2)\n\n def negative_gradient(self, y_pred):\n return self.sample_weight * (self.y - y_pred)\n\n def hessian(self, y_pred):\n return self.sample_weight\n\n def prepare_tree_params(self, y_pred):\n return self.y - y_pred, self.sample_weight\n\n def compute_optimal_step(self, y_pred):\n return numpy.average(self.y - y_pred, weights=self.sample_weight)\n\n\nclass MAELossFunction(AbstractLossFunction):\n r\"\"\" Mean absolute error loss function, used for regression.\n :math:`\\text{loss} = \\sum_i |y_i - \\hat{y}_i|`\n \"\"\"\n\n def fit(self, X, y, sample_weight):\n self.y = y\n self.sample_weight = check_sample_weight(y, sample_weight=sample_weight, normalize=True)\n self._regularization = numpy.mean(self.sample_weight)\n return self\n\n def __call__(self, y_pred):\n return numpy.sum(self.sample_weight * numpy.abs(self.y - y_pred))\n\n def negative_gradient(self, y_pred):\n return self.sample_weight * numpy.sign(self.y - y_pred)\n\n def prepare_tree_params(self, y_pred):\n return numpy.sign(self.y - y_pred), self.sample_weight\n\n def prepare_new_leaves_values(self, terminal_regions, leaf_values, y_pred):\n # computing weighted median is slow in python\n # and cannot be done in numpy without sorting\n nominators = numpy.bincount(terminal_regions, 
weights=self.negative_gradient(y_pred=y_pred),\n minlength=len(leaf_values))\n denominators = numpy.bincount(terminal_regions, weights=self.sample_weight, minlength=len(leaf_values))\n return 0.5 * nominators / (denominators + self._regularization)\n\n def compute_optimal_step(self, y_pred):\n return weighted_quantile(self.y - y_pred, quantiles=[0.5], sample_weight=self.sample_weight)[0]\n\n\n# endregion RegressionLosses\n\n\nclass RankBoostLossFunction(HessianLossFunction):\n def __init__(self, request_column, penalty_power=1., update_iterations=1):\n r\"\"\"RankBoostLossFunction is target of optimization in RankBoost [RB]_ algorithm,\n which was developed for ranking and introduces penalties for wrong order of predictions.\n\n However, this implementation goes further and there is selection of optimal leaf values based\n on iterative procedure. This implementation also uses matrix decomposition of loss function,\n which is very effective, when labels are from some very limited set (usually it is 0, 1, 2, 3, 4)\n\n :math:`\\text{loss} = \\sum_{ij} w_{ij} exp(pred_i - pred_j)`,\n\n :math:`w_{ij} = ( \\alpha + \\beta * [query_i = query_j]) R_{label_i, label_j}`, where\n :math:`R_{ij} = 0` if :math:`i \\leq j`, else :math:`R_{ij} = (i - j)^{p}`\n\n :param str request_column: name of column with search query ids. The higher attention is payed\n to samples with same query.\n :param float penalty_power: describes dependence of penalty on the difference between target labels.\n :param int update_iterations: number of minimization steps to provide optimal values in leaves.\n\n .. [RB] Y. Freund et al. An Efficient Boosting Algorithm for Combining Preferences\n \"\"\"\n self.update_iterations = update_iterations\n self.penalty_power = penalty_power\n self.request_column = request_column\n HessianLossFunction.__init__(self, regularization=0.1)\n\n def fit(self, X, y, sample_weight):\n self.queries = X[self.request_column]\n self.y = y\n self.possible_queries, normed_queries = numpy.unique(self.queries, return_inverse=True)\n self.possible_ranks, normed_ranks = numpy.unique(self.y, return_inverse=True)\n\n self.lookups = [normed_ranks, normed_queries * len(self.possible_ranks) + normed_ranks]\n self.minlengths = [len(self.possible_ranks), len(self.possible_ranks) * len(self.possible_queries)]\n self.rank_penalties = numpy.zeros([len(self.possible_ranks), len(self.possible_ranks)], dtype=float)\n for r1 in self.possible_ranks:\n for r2 in self.possible_ranks:\n if r1 < r2:\n self.rank_penalties[r1, r2] = (r2 - r1) ** self.penalty_power\n\n self.penalty_matrices = []\n self.penalty_matrices.append(self.rank_penalties / numpy.sqrt(1 + len(y)))\n n_queries = numpy.bincount(normed_queries)\n assert len(n_queries) == len(self.possible_queries)\n self.penalty_matrices.append(\n sparse.block_diag([self.rank_penalties * 1. 
/ numpy.sqrt(1 + nq) for nq in n_queries]))\n HessianLossFunction.fit(self, X, y, sample_weight=sample_weight)\n\n def __call__(self, y_pred):\n y_pred -= y_pred.mean()\n pos_exponent = numpy.exp(y_pred)\n neg_exponent = numpy.exp(-y_pred)\n result = 0.\n for lookup, length, penalty_matrix in zip(self.lookups, self.minlengths, self.penalty_matrices):\n pos_stats = numpy.bincount(lookup, weights=pos_exponent, minlength=length)\n neg_stats = numpy.bincount(lookup, weights=neg_exponent, minlength=length)\n result += pos_stats.T.dot(penalty_matrix.dot(neg_stats))\n return result\n\n def negative_gradient(self, y_pred):\n y_pred -= y_pred.mean()\n pos_exponent = numpy.exp(y_pred)\n neg_exponent = numpy.exp(-y_pred)\n gradient = numpy.zeros(len(y_pred), dtype=float)\n for lookup, length, penalty_matrix in zip(self.lookups, self.minlengths, self.penalty_matrices):\n pos_stats = numpy.bincount(lookup, weights=pos_exponent, minlength=length)\n neg_stats = numpy.bincount(lookup, weights=neg_exponent, minlength=length)\n gradient += pos_exponent * penalty_matrix.dot(neg_stats)[lookup]\n gradient -= neg_exponent * penalty_matrix.T.dot(pos_stats)[lookup]\n return - gradient\n\n def hessian(self, y_pred):\n y_pred -= y_pred.mean()\n pos_exponent = numpy.exp(y_pred)\n neg_exponent = numpy.exp(-y_pred)\n result = numpy.zeros(len(y_pred), dtype=float)\n for lookup, length, penalty_matrix in zip(self.lookups, self.minlengths, self.penalty_matrices):\n pos_stats = numpy.bincount(lookup, weights=pos_exponent, minlength=length)\n neg_stats = numpy.bincount(lookup, weights=neg_exponent, minlength=length)\n result += pos_exponent * penalty_matrix.dot(neg_stats)[lookup]\n result += neg_exponent * penalty_matrix.T.dot(pos_stats)[lookup]\n return result\n\n def prepare_new_leaves_values(self, terminal_regions, leaf_values, y_pred):\n leaves_values = numpy.zeros(len(leaf_values))\n for _ in range(self.update_iterations):\n y_test = y_pred + leaves_values[terminal_regions]\n new_leaves_values = self._prepare_new_leaves_values(terminal_regions, leaves_values, y_test)\n leaves_values += 0.5 * new_leaves_values\n return leaves_values\n\n def _prepare_new_leaves_values(self, terminal_regions, leaf_values, y_pred):\n \"\"\"\n For each event we shall represent loss as w_plus * e^{pred} + w_minus * e^{-pred},\n then we are able to construct optimal step.\n Pay attention: this is not an optimal, since we are ignoring,\n that some events belong to the same leaf\n \"\"\"\n pos_exponent = numpy.exp(y_pred)\n neg_exponent = numpy.exp(-y_pred)\n w_plus = numpy.zeros(len(y_pred), dtype=float)\n w_minus = numpy.zeros(len(y_pred), dtype=float)\n\n for lookup, length, penalty_matrix in zip(self.lookups, self.minlengths, self.penalty_matrices):\n pos_stats = numpy.bincount(lookup, weights=pos_exponent, minlength=length)\n neg_stats = numpy.bincount(lookup, weights=neg_exponent, minlength=length)\n w_plus += penalty_matrix.dot(neg_stats)[lookup]\n w_minus += penalty_matrix.T.dot(pos_stats)[lookup]\n\n w_plus_leaf = numpy.bincount(terminal_regions, weights=w_plus * pos_exponent, minlength=len(leaf_values))\n w_minus_leaf = numpy.bincount(terminal_regions, weights=w_minus * neg_exponent, minlength=len(leaf_values))\n return 0.5 * numpy.log((w_minus_leaf + self.regularization) / (w_plus_leaf + self.regularization))\n\n\n# region MatrixLossFunction\n\n\nclass AbstractMatrixLossFunction(HessianLossFunction):\n def __init__(self, uniform_features, regularization=5.):\n r\"\"\"AbstractMatrixLossFunction is a base class to be inherited by 
other loss functions,\n which choose the particular A matrix and w vector. The formula of loss is:\n \\text{loss} = \\sum_i w_i * exp(- \\sum_j a_ij y_j score_j)\n \"\"\"\n self.uniform_features = uniform_features\n # real matrix and vector will be computed during fitting\n self.A = None\n self.A_t = None\n self.w = None\n HessianLossFunction.__init__(self, regularization=regularization)\n\n def fit(self, X, y, sample_weight):\n \"\"\"This method is used to compute A matrix and w based on train dataset\"\"\"\n assert len(X) == len(y), \"different size of arrays\"\n A, w = self.compute_parameters(X, y, sample_weight)\n self.A = sparse.csr_matrix(A)\n self.A_t = sparse.csr_matrix(self.A.transpose())\n self.A_t_sq = self.A_t.multiply(self.A_t)\n self.w = numpy.array(w)\n assert A.shape[0] == len(w), \"inconsistent sizes\"\n assert A.shape[1] == len(X), \"wrong size of matrix\"\n self.y_signed = numpy.array(2 * y - 1)\n HessianLossFunction.fit(self, X, y, sample_weight=sample_weight)\n return self\n\n def __call__(self, y_pred):\n \"\"\"Computing the loss itself\"\"\"\n assert len(y_pred) == self.A.shape[1], \"something is wrong with sizes\"\n exponents = numpy.exp(- self.A.dot(self.y_signed * y_pred))\n return numpy.sum(self.w * exponents)\n\n def negative_gradient(self, y_pred):\n \"\"\"Computing negative gradient\"\"\"\n assert len(y_pred) == self.A.shape[1], \"something is wrong with sizes\"\n exponents = numpy.exp(- self.A.dot(self.y_signed * y_pred))\n result = self.A_t.dot(self.w * exponents) * self.y_signed\n return result\n\n def hessian(self, y_pred):\n assert len(y_pred) == self.A.shape[1], 'something wrong with sizes'\n exponents = numpy.exp(- self.A.dot(self.y_signed * y_pred))\n result = self.A_t_sq.dot(self.w * exponents)\n return result\n\n def compute_parameters(self, trainX, trainY, trainW):\n \"\"\"This method should be overloaded in descendant, and should return A, w (matrix and vector)\"\"\"\n raise NotImplementedError()\n\n def prepare_new_leaves_values(self, terminal_regions, leaf_values, y_pred):\n exponents = numpy.exp(- self.A.dot(self.y_signed * y_pred))\n # current approach uses Newton-Raphson step\n regions_matrix = sparse.csc_matrix((self.y_signed, [numpy.arange(len(self.y_signed)), terminal_regions]),\n shape=[len(self.y_signed), len(leaf_values)])\n # Z is matrix of shape [n_exponents, n_terminal_regions]\n # with contributions of each terminal region to each exponent\n Z = self.A.dot(regions_matrix)\n Z = Z.T\n nominator = Z.dot(self.w * exponents)\n denominator = Z.multiply(Z).dot(self.w * exponents)\n return nominator / (denominator + 1e-5)\n\n\nclass KnnAdaLossFunction(AbstractMatrixLossFunction):\n def __init__(self, uniform_features, uniform_label, knn=10, row_norm=1.):\n r\"\"\"Modification of AdaLoss to achieve uniformity of predictions\n\n :math:`\\text{loss} = \\sum_i w_i * exp(- \\sum_j a_{ij} y_j score_j)`\n\n `A` matrix is square, each row corresponds to a single event in train dataset, in each row we put ones\n to the closest neighbours if this event from uniform class.\n See [BU]_ for details.\n\n :param list[str] uniform_features: the features, along which uniformity is desired\n :param int|list[int] uniform_label: the label (labels) of 'uniform classes'\n :param int knn: the number of nonzero elements in the row, corresponding to event in 'uniform class'\n\n .. [BU] A. 
Rogozhnikov et al, New approaches for boosting to uniformity\n http://arxiv.org/abs/1410.4140\n\n \"\"\"\n self.knn = knn\n self.row_norm = row_norm\n self.uniform_label = check_uniform_label(uniform_label)\n AbstractMatrixLossFunction.__init__(self, uniform_features)\n\n def compute_parameters(self, trainX, trainY, trainW):\n A_parts = []\n w_parts = []\n for label in self.uniform_label:\n label_mask = numpy.array(trainY == label)\n n_label = numpy.sum(label_mask)\n knn_indices = compute_knn_indices_of_signal(trainX[self.uniform_features], label_mask, self.knn)\n knn_indices = knn_indices[label_mask, :]\n ind_ptr = numpy.arange(0, n_label * self.knn + 1, self.knn)\n column_indices = knn_indices.flatten()\n data = numpy.ones(n_label * self.knn, dtype=float) * self.row_norm / self.knn\n A_part = sparse.csr_matrix((data, column_indices, ind_ptr), shape=[n_label, len(trainX)])\n w_part = numpy.mean(numpy.take(trainW, knn_indices), axis=1)\n assert A_part.shape[0] == len(w_part)\n A_parts.append(A_part)\n w_parts.append(w_part)\n\n for label in set(trainY) - set(self.uniform_label):\n label_mask = trainY == label\n n_label = numpy.sum(label_mask)\n ind_ptr = numpy.arange(0, n_label + 1)\n column_indices = numpy.where(label_mask)[0].flatten()\n data = numpy.ones(n_label, dtype=float) * self.row_norm\n A_part = sparse.csr_matrix((data, column_indices, ind_ptr), shape=[n_label, len(trainX)])\n w_part = trainW[label_mask]\n A_parts.append(A_part)\n w_parts.append(w_part)\n\n A = sparse.vstack(A_parts, format='csr', dtype=float)\n w = numpy.concatenate(w_parts)\n assert A.shape == (len(trainX), len(trainX))\n return A, w\n\n\n# endregion\n\n\n# region ReweightLossFunction\n\n\n# Mathematically at each stage we\n# 0. recompute weights\n# 1. normalize global ratio between distributions (negatives are in opposite distribution)\n# 2. optimize chi2- changing only sign, weights are the same\n# 3. computing optimal values for leaves: simply log (negatives are in the same distribution with sign -)\n\nclass ReweightLossFunction(AbstractLossFunction):\n def __init__(self, regularization=5.):\n \"\"\"\n Loss function used to reweight distributions. Works inside :class:`hep_ml.reweight.GBReweighter`\n See [Rew]_ for details.\n\n Conventions: :math:`y=0` - target distribution, :math:`y=1` - original distribution.\n\n Weights after look like:\n\n * :math:`w = w_0` for target distribution\n * :math:`w = w_0 * exp(pred)` for events from original distribution\n (so predictions for target distribution is ignored)\n\n :param float regularization: roughly, it's number of events added in each leaf to prevent overfitting.\n\n .. 
[Rew] http://arogozhnikov.github.io/2015/10/09/gradient-boosted-reweighter.html\n\n \"\"\"\n self.regularization = regularization\n\n def fit(self, X, y, sample_weight):\n assert numpy.all(numpy.in1d(y, [0, 1]))\n if sample_weight is None:\n self.sample_weight = numpy.ones(len(X), dtype=float)\n else:\n self.sample_weight = numpy.array(sample_weight, dtype=float)\n self.y = y\n # signs encounter transfer to opposite distribution\n self.signs = (2 * y - 1) * numpy.sign(sample_weight)\n\n self.mask_original = numpy.array(self.y)\n self.mask_target = numpy.array(1 - self.y)\n return self\n\n def _compute_weights(self, y_pred):\n \"\"\"We need renormalization at eac step\"\"\"\n weights = self.sample_weight * numpy.exp(self.y * y_pred)\n return check_sample_weight(self.y, weights, normalize=True, normalize_by_class=True)\n\n def __call__(self, *args, **kwargs):\n \"\"\" Loss function doesn't have precise expression \"\"\"\n return 0\n\n def negative_gradient(self, y_pred):\n return 0.\n\n def prepare_tree_params(self, y_pred):\n return self.signs, numpy.abs(self._compute_weights(y_pred))\n\n def prepare_new_leaves_values(self, terminal_regions, leaf_values, y_pred):\n weights = self._compute_weights(y_pred)\n w_target = numpy.bincount(terminal_regions, weights=self.mask_target * weights)\n w_original = numpy.bincount(terminal_regions, weights=self.mask_original * weights)\n\n # suppressing possibly negative samples\n w_target = w_target.clip(0)\n w_original = w_original.clip(0)\n\n return numpy.log(w_target + self.regularization) - numpy.log(w_original + self.regularization)\n\n\n# endregion\n\n\n# region FlatnessLossFunction\n\n\ndef _exp_margin(margin):\n \"\"\" margin = - y_signed * y_pred \"\"\"\n return numpy.exp(numpy.clip(margin, -1e5, 2))\n\n\nclass AbstractFlatnessLossFunction(AbstractLossFunction):\n \"\"\"Base class for FlatnessLosses\"\"\"\n\n def __init__(self, uniform_features, uniform_label, power=2., fl_coefficient=3.,\n allow_wrong_signs=True):\n\n self.uniform_features = uniform_features\n if isinstance(uniform_label, numbers.Number):\n self.uniform_label = numpy.array([uniform_label])\n else:\n self.uniform_label = numpy.array(uniform_label)\n self.power = power\n self.fl_coefficient = fl_coefficient\n self.allow_wrong_signs = allow_wrong_signs\n\n def fit(self, X, y, sample_weight=None):\n sample_weight = check_sample_weight(y, sample_weight=sample_weight,\n normalize=True, normalize_by_class=True)\n assert len(X) == len(y), 'lengths are different'\n X = pandas.DataFrame(X)\n\n self.regularization_ = numpy.mean(sample_weight) * 5.\n self.group_indices = dict()\n self.group_matrices = dict()\n self.group_weights = dict()\n self.label_masks = dict()\n\n occurences = numpy.zeros(len(X))\n for label in self.uniform_label:\n self.label_masks[label] = y == label\n self.group_indices[label] = self._compute_groups_indices(X, y, label=label)\n self.group_matrices[label] = group_indices_to_groups_matrix(self.group_indices[label], len(X))\n self.group_weights[label] = compute_group_weights(self.group_matrices[label], sample_weight=sample_weight)\n for group in self.group_indices[label]:\n occurences[group] += 1\n\n out_of_bins = (occurences == 0) & numpy.in1d(y, self.uniform_label)\n if numpy.mean(out_of_bins) > 0.01:\n warnings.warn(\"%i events out of all bins \" % numpy.sum(out_of_bins), UserWarning)\n\n self.y = y\n self.y_signed = 2 * y - 1\n self.sample_weight = numpy.copy(sample_weight)\n self.divided_weight = sample_weight / numpy.maximum(occurences, 1)\n\n return self\n\n def 
_compute_groups_indices(self, X, y, label):\n raise NotImplementedError('To be overriden in descendants.')\n\n def __call__(self, pred):\n # the actual value does not play any role in boosting\n # computations are very costly\n return 0\n\n def _compute_fl_derivatives(self, y_pred):\n y_pred = numpy.ravel(y_pred)\n neg_gradient = numpy.zeros(len(self.y), dtype=numpy.float)\n\n for label in self.uniform_label:\n label_mask = self.label_masks[label]\n global_positions = numpy.zeros(len(y_pred), dtype=float)\n global_positions[label_mask] = \\\n _compute_positions(y_pred[label_mask], sample_weight=self.sample_weight[label_mask])\n\n for indices_in_bin in self.group_indices[label]:\n local_pos = _compute_positions(y_pred[indices_in_bin],\n sample_weight=self.sample_weight[indices_in_bin])\n global_pos = global_positions[indices_in_bin]\n bin_gradient = self.power * numpy.sign(local_pos - global_pos) * \\\n numpy.abs(local_pos - global_pos) ** (self.power - 1)\n neg_gradient[indices_in_bin] += bin_gradient\n\n neg_gradient *= self.divided_weight\n # check that events outside uniform uniform classes are not touched\n assert numpy.all(neg_gradient[~numpy.in1d(self.y, self.uniform_label)] == 0)\n return neg_gradient\n\n def negative_gradient(self, y_pred):\n y_signed = self.y_signed\n neg_gradient = self._compute_fl_derivatives(y_pred) * self.fl_coefficient\n # adding ExpLoss\n neg_gradient += y_signed * self.sample_weight * _exp_margin(-y_signed * y_pred)\n\n if not self.allow_wrong_signs:\n neg_gradient = y_signed * numpy.clip(y_signed * neg_gradient, 0, 1e5)\n\n return neg_gradient\n\n def prepare_new_leaves_values(self, terminal_regions, leaf_values, y_pred):\n grad = self.negative_gradient(y_pred)\n\n nom = numpy.bincount(terminal_regions, weights=grad, minlength=len(leaf_values))\n denom = numpy.bincount(terminal_regions, minlength=len(leaf_values))\n return nom / denom.clip(1e-10)\n\n\nclass BinFlatnessLossFunction(AbstractFlatnessLossFunction):\n def __init__(self, uniform_features, uniform_label, n_bins=10, power=2., fl_coefficient=3.,\n allow_wrong_signs=True):\n r\"\"\"\n This loss function contains separately penalty for non-flatness and for bad prediction quality.\n See [FL]_ for details.\n\n :math:`\\text{loss} =\\text{ExpLoss} + c \\times \\text{FlatnessLoss}`\n\n FlatnessLoss computed using binning of uniform variables\n\n :param list[str] uniform_features: names of features, along which we want to obtain uniformity of predictions\n :param int|list[int] uniform_label: the label(s) of classes for which uniformity is desired\n :param int n_bins: number of bins along each variable\n :param float power: the loss contains the difference :math:`| F - F_bin |^p`, where p is power\n :param float fl_coefficient: multiplier for flatness_loss. Controls the tradeoff of quality vs uniformity.\n :param bool allow_wrong_signs: defines whether gradient may different sign from the \"sign of class\"\n (i.e. may have negative gradient on signal). If False, values will be clipped to zero.\n\n .. [FL] A. 
Rogozhnikov et al, New approaches for boosting to uniformity\n http://arxiv.org/abs/1410.4140\n \"\"\"\n self.n_bins = n_bins\n AbstractFlatnessLossFunction.__init__(self, uniform_features,\n uniform_label=uniform_label, power=power,\n fl_coefficient=fl_coefficient,\n allow_wrong_signs=allow_wrong_signs)\n\n def _compute_groups_indices(self, X, y, label):\n \"\"\"Returns a list, each element is events' indices in some group.\"\"\"\n label_mask = y == label\n extended_bin_limits = []\n for var in self.uniform_features:\n f_min, f_max = numpy.min(X[var][label_mask]), numpy.max(X[var][label_mask])\n extended_bin_limits.append(numpy.linspace(f_min, f_max, 2 * self.n_bins + 1))\n groups_indices = list()\n for shift in [0, 1]:\n bin_limits = []\n for axis_limits in extended_bin_limits:\n bin_limits.append(axis_limits[1 + shift:-1:2])\n bin_indices = compute_bin_indices(X.ix[:, self.uniform_features].values, bin_limits=bin_limits)\n groups_indices += list(bin_to_group_indices(bin_indices, mask=label_mask))\n return groups_indices\n\n\nclass KnnFlatnessLossFunction(AbstractFlatnessLossFunction):\n def __init__(self, uniform_features, uniform_label, n_neighbours=100, power=2., fl_coefficient=3.,\n max_groups=5000, allow_wrong_signs=True, random_state=42):\n r\"\"\"\n This loss function contains separately penalty for non-flatness and for bad prediction quality.\n See [FL]_ for details.\n\n :math:`\\text{loss} = \\text{ExpLoss} + c \\times \\text{FlatnessLoss}`\n\n FlatnessLoss computed using nearest neighbors in space of uniform features\n\n :param list[str] uniform_features: names of features, along which we want to obtain uniformity of predictions\n :param int|list[int] uniform_label: the label(s) of classes for which uniformity is desired\n :param int n_neighbours: number of neighbors used in flatness loss\n :param float power: the loss contains the difference :math:`| F - F_bin |^p`, where p is power\n :param float fl_coefficient: multiplier for flatness_loss. Controls the tradeoff of quality vs uniformity.\n :param bool allow_wrong_signs: defines whether gradient may different sign from the \"sign of class\"\n (i.e. may have negative gradient on signal). If False, values will be clipped to zero.\n :param int max_groups: to limit memory consumption when training sample is large,\n we randomly pick this number of points with their members.\n\n .. [FL] A. Rogozhnikov et al, New approaches for boosting to uniformity\n http://arxiv.org/abs/1410.4140\n \"\"\"\n\n self.n_neighbours = n_neighbours\n self.max_groups = max_groups\n self.random_state = random_state\n AbstractFlatnessLossFunction.__init__(self, uniform_features,\n uniform_label=uniform_label, power=power,\n fl_coefficient=fl_coefficient,\n allow_wrong_signs=allow_wrong_signs)\n\n def _compute_groups_indices(self, X, y, label):\n mask = y == label\n self.random_state = check_random_state(self.random_state)\n knn_indices = compute_knn_indices_of_signal(X[self.uniform_features], mask,\n n_neighbours=self.n_neighbours)[mask, :]\n if len(knn_indices) > self.max_groups:\n selected_group = self.random_state.choice(len(knn_indices), size=self.max_groups, replace=False)\n return knn_indices[selected_group, :]\n else:\n return knn_indices\n\n# endregion\n" ]
[ [ "numpy.take", "numpy.linspace", "numpy.sqrt", "numpy.in1d", "numpy.cumsum", "pandas.DataFrame", "numpy.concatenate", "numpy.max", "numpy.mean", "scipy.sparse.vstack", "numpy.exp", "numpy.where", "numpy.unique", "numpy.clip", "numpy.arange", "numpy.copy", "numpy.ravel", "numpy.zeros", "numpy.log", "numpy.min", "scipy.sparse.csr_matrix", "sklearn.utils.validation.check_random_state", "numpy.argsort", "numpy.array", "numpy.sum", "numpy.logaddexp", "numpy.maximum", "numpy.abs", "scipy.special.expit", "numpy.ones", "numpy.sign", "numpy.bincount", "numpy.average" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
egivental/resetInterpretability
[ "cb1d62bb89512c3469c641f2082f24c813ab2b26" ]
[ "model/CORELS/CORELS.py" ]
[ "import os\nimport subprocess\nimport sys\nimport numpy as np\nimport pandas as pd\nimport string\nfrom .dataMaker import CorelsDataMake\nimport copy\n\n\n#This method accepts a string which contains a definition of a dictionary \n#it returns a dictionary, which contains certain inputs that correspond to an output\n#Ex: {X1:4,X2:3,Y:1} it is called by the fit in CORELS \n#input: a string which contains a dictionary with {XXX,YYY}\ndef findDictionaries(line): \n if 'if' in line: #if not the last line of the rule list\n then = line.split('then') #split between the rules and the outcome\n line = then[0]\n then = eval(then[1])\n i = 0\n while(line[i] != '(' ): #ignore the text before the dictionary definition\n i +=1\n i+=1\n if line[i] == '{':\n i+=1\n dict = '{'\n while(line[i] != '}'): #until we finish finding all the rules, parse the text\n dict += line[i]\n i+=1\n dict += '}' #close the dictionary\n elif line[i] == '0':\n dict = '{0:0}'\n elif line[i] == '1':\n dict = '{1:1}'\n dict = eval(dict) #create the dictionary\n dict.update(then) #add in the outcome to the dictionary\n else: #if there were no rule; this was the else statement\n dict = '' \n i = 0\n while dict != '{': #get to the dictionary\n dict = line[i]\n i += 1\n while '}' not in dict: #finish the dictionary\n dict += line[i]\n i+=1\n dict = eval(dict) #create the dictionary\n return dict\n\n\nclass CORELS():\n def __init__(self):\n self.model = 'not run'\n self.output = ''\n self.name = \"<Rudin-CORELS>\"\n\n cwd = os.getcwd()\n if not os.path.isdir('temp'):\n os.mkdir('temp')\n if not os.path.isdir('logs'):\n os.mkdir('logs')\n os.chdir(cwd + '/logs/')\n if not os.path.isdir('CORELS'):\n os.mkdir('CORELS')\n os.chdir(cwd)\n self.logs = os.getcwd() + '/logs/CORELS/'\n\n\n\n ###fit accepts an X matrix, a Y matrix both must have equal length, \n ###optionally it accepts xlabels, and ylabels, a maximum length of each rule list (max 2)\n ###and a log file path \n ### Inputs: X (2D matrix),Y (Vector) OPTIONAL INPUTS: xlabels (list), ylabel(list), ruleListLength (1 or 2), logfile (str)\n def fit(self, X, Y, xlabels = 'not_defined', ylabel = ['Y'], ruleListLength = 2, dataset = None): \n\n ### fixing data for parsing ###\n self.lengthList = ruleListLength\n if type(ylabel) is str:\n ylabel = [ylabel]\n Xstr,Ystr = X.astype(str) , Y.astype(str) #we convert the data to strings for easier getting rid of spaces\n if type(xlabels) is str: #in case the labels were not defined\n xlabels = list(range(Xstr.shape[1])) \n else:\n for i in range(len(xlabels)): #we get rid of spaces\n xlabels[i] = str(xlabels[i]).replace(' ', '_') # we set them to the numbers from 0 to num_features\n ylabel[0] = str(ylabel[0]).replace(' ', '_') #get rid of spaces in ylabels\n\n ### making temp data files and log files ### \n wd = os.getcwd() #we get the currect wd to correctly move around for calls later\n dataPath = wd + '/temp/train' #we set the datapath for temp txt file \n if dataset == None:\n self.logFile = 'CORELSpredicting' + ylabel[0] #we set the datapath for the logfile\n else:\n self.logFile = 'CORELS' + dataset \n CorelsDataMake(Xstr, Ystr, xlabels, ylabel, ruleListLength) #Call this function to make txt files that \n print(\"done making CORELS data file\")\n #have one hot encoded data\n f = open(wd + '/temp/templog.txt', 'w+')\n\n ### calling subprocesses to call CORELS src code ###\n os.chdir(wd + \"/model/CORELS/corels_master/src/\") #change directory to src directory\n new_call = './corels -r 0.0005 -c 2 -p 1 -L ' + dataPath + '.out ' + 
dataPath + '.label ' \n subprocess.call('make', shell = True, stdout = f) #use their make from their makefile\n subprocess.call(new_call, shell = True, stdout = f) #call their method, writing to logfile\n f.close() #close the file so that now we can read from it \n os.chdir(wd)\n ### parsing the results from the logfile ### \n f = open(wd + '/temp/templog.txt', 'r') #open the file for reading\n line = f.readline() #read lines until we find the optimal rule list line\n while(line!= 'OPTIMAL RULE LIST\\n'): \n line = f.readline()\n line = ''\n newLine = 'if' \n while 'if' in newLine: #until we get to the end of the non else statements\n newLine = f.readline()\n line += newLine\n\n rules = line.split('\\n') #make an array of with all the rules\n ruleList = [] # initialize final output, list of dictionaries\n for rule in rules: #for each string in rules\n if rule != '\\n' and rule != '': \n ruleList.append(findDictionaries(rule)) #find the dictionary in the string\n\n ### printing and saving the results in log file and restoring working directory ###\n self.model = ruleList #save the rules and the feature that is being classified\n self.output = ylabel[0].replace('\\\"', '')\n self.xlabels = xlabels\n\n f.close() #close file\n subprocess.call('rm -R temp', shell = True)\n\n self.logs = wd + '/logs/CORELS/'\n os.chdir(self.logs) #change directory to Logging Directory\n f = open(self.logFile, 'w+') #open the file\n f.write('Data Set Predicting: ' + ylabel[0] + '\\nUsing Features:\\n') #write in the prediction and the features used\n for i,feature in enumerate(xlabels):\n f.write('\\t'+ feature.strip('\"') + ': ' + str(set(X[:,i])) + '\\n') \n f.write('\\nmodel:\\n' + line + '\\n\\n') #write out the model\n f.write('Training Accuracy: ' + str(self.findAccuracy(self.predict(X, xlabels), Y, log = False)) + '\\n\\n') #State the training accuracy\n f.close()\n os.chdir(wd) #restore working directory\n wd = os.getcwd()\n #print(wd)\n return ruleList #print out the rules\n\n #inputs: Accepts an X matrix, along with an optional set of x labels, which should be a matrix\n def predict(self, X, xlabels = ''):\n \n ### checking xlabels ###\n assert(self.model != 'not run'), 'the model has not yet been run. 
run CORELS.fit()'\n if type(xlabels) is string: #in case labels not given\n xlabels = range(X.shape[1]) #the labels become numbers from 1-5\n #assert(set(xlabels) == set(xlabels)), 'the labels do not match or not given'\n \n ### reorienting data and turning into dictionary###\n X = pd.DataFrame(X, columns = xlabels) \n X = X.transpose()\n X = X.to_dict()\n\n ### initializing and making an array of outcomes that will have the same length as X ###-\n outcomes = [] # empty array for outcomes\n for row in X.values(): #get the dictionary definition of each input\n extra = copy.deepcopy(self.model) #make a deepcopy of the model so that it can be changed\n for key in row: \n row[key.replace('\\\"', '')] = row.pop(key) #for each input remove spaces\n for dic in extra: \n outcome = dic.pop(self.output) #pop the outcome if this given dictionary in the \n #rule list turns out to be the right one\n if dic == {1:1}:\n outcomes.append(outcome)\n break;\n if dic == {0:0}:\n outcomes.append(1-outcome)\n break;\n if all(key in row and row[key] == dic[key] for key in dic): #if the rule list dictionary is contained \n #by the datapoint, we return the output of that rule list dictionary\n outcomes.append(outcome)\n break;\n return outcomes\n\n\n ### Tells you the accuracy of your algorithm by comparing predictions to actual outputs ###\n ### inputs: 2 numpy vectors of equal length ### \n ### outputs: A floating point number representing the fraction that were correctly predicted\n def findAccuracy(self, predictions, Y, log = True):\n assert(len(predictions) == len(Y)), 'Y and predictions must have the same length'\n correct, truePos, trueNeg, falsePos, falseNeg = 0,0,0,0,0\n for i in range(len(Y)):\n if predictions[i] == 1 and Y[i] == 1:\n truePos += 1\n correct += 1\n elif predictions[i] == 0 and Y[i] == 0:\n trueNeg += 1\n correct += 1\n elif predictions[i] == 1 and Y[i] == 0:\n falsePos += 1\n elif predictions[i] == 0 and Y[i] == 1:\n falseNeg += 1\n confusionString = 'TP: ' + str(truePos) + ' TN: ' + str(trueNeg) + ' FP: ' + str(falsePos) + ' FN: ' + str(falseNeg)\n acc = correct/len(Y)\n if log:\n wd = os.getcwd()\n os.chdir(self.logs)\n f = open(self.logFile, 'a')\n f.write('Testing Accuracy: ' + str((acc,confusionString)))\n f.close()\n os.chdir(wd)\n return acc, confusionString\n\n ### returns the name of the algorithm ### \n def get_name(self):\n return self.name\n\n\n\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
gitvicky/tf-pde
[ "9ff131192aa21babc4238bd5d123fedefbf48d9e" ]
[ "Examples/Burgers_test.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 18 16:04:32 2020\n\n@author: Vicky\n\nNeural PDE - Tensorflow 2.X\nTesting with Burgers Equation\n\nPDE: u_t + u*u_x - 0.1*u_xx\nIC: u(0, x) = -sin(pi.x/8)\nBC: Periodic \nDomain: t ∈ [0,10], x ∈ [-8,8]\n\"\"\"\nimport os \nimport numpy as np \nimport scipy.io\n\nfrom pyDOE import lhs\nimport tfpde\n\n# %%\n#Neural Network Hyperparameters\nNN_parameters = {'Network_Type': 'Regular',\n 'input_neurons' : 2,\n 'output_neurons' : 1,\n 'num_layers' : 4,\n 'num_neurons' : 64,\n }\n\n\n#Neural PDE Hyperparameters\nNPDE_parameters = {'Sampling_Method': 'Initial',\n 'N_initial' : 100, #Number of Randomly sampled Data points from the IC vector\n 'N_boundary' : 100, #Number of Boundary Points\n 'N_domain' : 5000 #Number of Domain points generated\n }\n\n\n#PDE \nPDE_parameters = {'Inputs': 't, x',\n 'Outputs': 'u',\n 'Equation': 'D(u, t) + u*D(u, x) - 0.1*D2(u, x)',\n 'lower_range': [0.0, -8.0], #Float \n 'upper_range': [10.0, 8.0], #Float\n 'Boundary_Condition': \"Dirichlet\", #Periodic \n 'Boundary_Vals' : None,\n 'Initial_Condition': lambda x: -np.sin((np.pi*x)/8),\n 'Initial_Vals': None\n }\n\n\n# %%\n\n#Using Simulation Data at the Initial and Boundary Values (BC would be Dirichlet under that case )\n\nN_f = NPDE_parameters['N_domain']\nN_i = NPDE_parameters['N_initial']\nN_b = NPDE_parameters['N_boundary']\n\n# Data Location\ndata_loc = os.path.abspath('..') + '/Data/'\ndata = scipy.io.loadmat(data_loc +'burgers.mat')\n\nt = data['t'].flatten()[:,None]\nx = data['x'].flatten()[:,None]\nExact = np.real(data['usol']).T\n\nX, T = np.meshgrid(x,t)\n\nX_star = np.hstack((T.flatten()[:,None], X.flatten()[:,None])) \nu_star = Exact.flatten()[:,None] \n\n# Domain bounds\nlb = X_star.min(0) \nub = X_star.max(0)\n \nX_i = np.hstack((T[0:1,:].T, X[0:1,:].T))\nu_i = Exact[0:1,:].T\n\nX_lb = np.hstack((T[:,0:1], X[:,0:1])) \nu_lb = Exact[:,0:1] \nX_ub = np.hstack((T[:,-1:], X[:,-1:])) \nu_ub = Exact[:,-1:] \n\nu_lb = np.zeros((len(u_lb),1))\nu_ub = np.zeros((len(u_ub),1)) \n\nX_b = np.vstack((X_lb, X_ub))\nu_b = np.vstack((u_lb, u_ub))\n\nX_f = tfpde.sampler.domain_sampler(N_f, lb, ub) \n\nidx = np.random.choice(X_i.shape[0], N_i, replace=False)\nX_i = X_i[idx, :]\nu_i = u_i[idx,:]\n\nidx = np.random.choice(X_b.shape[0], N_b, replace=False)\nX_b = X_b[idx, :] \nu_b = u_b[idx,:]\n\n\n\ntraining_data = {'X_i': X_i, 'u_i': u_i,\n 'X_b': X_b, 'u_b': u_b,\n 'X_f': X_f}\n\n\n# %%\n'''\nN_i = NPDE_parameters['N_initial']\nN_b = NPDE_parameters['N_boundary']\nN_f = NPDE_parameters['N_domain']\n\nlb = PDE_parameters['lower_range']\nub = PDE_parameters['upper_range']\n\nInitial_Condition = PDE_parameters['Initial_Condition']\nBoundary_vals = PDE_parameters['Boundary_Vals']\n\nX_i = tfpde.sampler.initial_sampler(N_i, lb, ub)\nX_b = tfpde.sampler.boundary_sampler(N_b, lb, ub)\nX_f = tfpde.sampler.domain_sampler(N_f, lb, ub)\n\nu_i = Initial_Condition(X_i[:,1:2])\nu_b = Boundary_vals\n\n# X_i = 2.0*(X_i - np.asarray(lb))/(np.asarray(ub) - np.asarray(lb)) - 1.0\n# X_b = 2.0*(X_b - np.asarray(lb))/(np.asarray(ub) - np.asarray(lb)) - 1.0\n# X_f = 2.0*(X_f - np.asarray(lb))/(np.asarray(ub) - np.asarray(lb)) - 1.0\n\ntraining_data = {'X_i': X_i, 'u_i': u_i,\n 'X_b': X_b, 'u_b': u_b, \n 'X_f': X_f}\n'''\n# %%\n\nmodel = tfpde.main.setup(NN_parameters, NPDE_parameters, PDE_parameters)\n\n# %%\ntrain_config = {'Optimizer': 'adam',\n 'learning_rate': 0.001, \n 'Iterations' : 5000}\n\ntime_GD = model.train(train_config, training_data)\n\n# 
%%\ntrain_config = {'Optimizer': 'L-BFGS-B',\n 'learning_rate': None, \n 'Iterations' : None}\n\ntime_QN = model.train(train_config, training_data)\n# %%\ndata_loc = os.path.abspath('..') + '/Data/'\ndata = scipy.io.loadmat(data_loc +'burgers.mat')\n\nt = data['t'].flatten()[:,None]\nx = data['x'].flatten()[:,None]\nExact = np.real(data['usol']).T\n\nX, T = np.meshgrid(x,t)\n\nX_star = np.hstack((T.flatten()[:,None], X.flatten()[:,None])) \nu_star = Exact.flatten()[:,None] \n\nu_pred = model.predict(X_star)\nu_pred = np.reshape(u_pred, np.shape(Exact))\n\ntfpde.plotter.evolution_plot(Exact, u_pred)\n " ]
[ [ "numpy.hstack", "numpy.random.choice", "numpy.sin", "numpy.real", "numpy.shape", "numpy.meshgrid", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
suokunlong/spyder
[ "2d5d450fdcef232fb7f38e7fefc27f0e7f704c9a" ]
[ "spyder/plugins/variableexplorer/widgets/arrayeditor.py" ]
[ "# -*- coding: utf-8 -*-\r\n#\r\n# Copyright © Spyder Project Contributors\r\n# Licensed under the terms of the MIT License\r\n# (see spyder/__init__.py for details)\r\n\r\n\"\"\"\r\nNumPy Array Editor Dialog based on Qt\r\n\"\"\"\r\n\r\n# pylint: disable=C0103\r\n# pylint: disable=R0903\r\n# pylint: disable=R0911\r\n# pylint: disable=R0201\r\n\r\n# Standard library imports\r\nfrom __future__ import print_function\r\n\r\n# Third party imports\r\nimport numpy as np\r\nfrom qtpy.compat import from_qvariant, to_qvariant\r\nfrom qtpy.QtCore import (QAbstractTableModel, QItemSelection, QLocale,\r\n QItemSelectionRange, QModelIndex, Qt, Slot)\r\nfrom qtpy.QtGui import QColor, QCursor, QDoubleValidator, QKeySequence\r\nfrom qtpy.QtWidgets import (QAbstractItemDelegate, QApplication, QCheckBox,\r\n QComboBox, QDialog, QGridLayout, QHBoxLayout,\r\n QInputDialog, QItemDelegate, QLabel, QLineEdit,\r\n QMenu, QMessageBox, QPushButton, QSpinBox,\r\n QStackedWidget, QTableView, QVBoxLayout,\r\n QWidget)\r\nfrom spyder_kernels.utils.nsview import value_to_display\r\n\r\n# Local imports\r\nfrom spyder.config.base import _\r\nfrom spyder.config.fonts import DEFAULT_SMALL_DELTA\r\nfrom spyder.config.gui import get_font\r\nfrom spyder.config.manager import CONF\r\nfrom spyder.py3compat import (io, is_binary_string, is_string,\r\n is_text_string, PY3, to_binary_string,\r\n to_text_string)\r\nfrom spyder.utils import icon_manager as ima\r\nfrom spyder.utils.qthelpers import add_actions, create_action, keybinding\r\n\r\n\r\n# Note: string and unicode data types will be formatted with '%s' (see below)\r\nSUPPORTED_FORMATS = {\r\n 'single': '%.6g',\r\n 'double': '%.6g',\r\n 'float_': '%.6g',\r\n 'longfloat': '%.6g',\r\n 'float16': '%.6g',\r\n 'float32': '%.6g',\r\n 'float64': '%.6g',\r\n 'float96': '%.6g',\r\n 'float128': '%.6g',\r\n 'csingle': '%r',\r\n 'complex_': '%r',\r\n 'clongfloat': '%r',\r\n 'complex64': '%r',\r\n 'complex128': '%r',\r\n 'complex192': '%r',\r\n 'complex256': '%r',\r\n 'byte': '%d',\r\n 'bytes8': '%s',\r\n 'short': '%d',\r\n 'intc': '%d',\r\n 'int_': '%d',\r\n 'longlong': '%d',\r\n 'intp': '%d',\r\n 'int8': '%d',\r\n 'int16': '%d',\r\n 'int32': '%d',\r\n 'int64': '%d',\r\n 'ubyte': '%d',\r\n 'ushort': '%d',\r\n 'uintc': '%d',\r\n 'uint': '%d',\r\n 'ulonglong': '%d',\r\n 'uintp': '%d',\r\n 'uint8': '%d',\r\n 'uint16': '%d',\r\n 'uint32': '%d',\r\n 'uint64': '%d',\r\n 'bool_': '%r',\r\n 'bool8': '%r',\r\n 'bool': '%r',\r\n }\r\n\r\n\r\nLARGE_SIZE = 5e5\r\nLARGE_NROWS = 1e5\r\nLARGE_COLS = 60\r\n\r\n\r\n#==============================================================================\r\n# Utility functions\r\n#==============================================================================\r\ndef is_float(dtype):\r\n \"\"\"Return True if datatype dtype is a float kind\"\"\"\r\n return ('float' in dtype.name) or dtype.name in ['single', 'double']\r\n\r\n\r\ndef is_number(dtype):\r\n \"\"\"Return True is datatype dtype is a number kind\"\"\"\r\n return is_float(dtype) or ('int' in dtype.name) or ('long' in dtype.name) \\\r\n or ('short' in dtype.name)\r\n\r\n\r\ndef get_idx_rect(index_list):\r\n \"\"\"Extract the boundaries from a list of indexes\"\"\"\r\n rows, cols = list(zip(*[(i.row(), i.column()) for i in index_list]))\r\n return ( min(rows), max(rows), min(cols), max(cols) )\r\n\r\n\r\n#==============================================================================\r\n# Main classes\r\n#==============================================================================\r\nclass 
ArrayModel(QAbstractTableModel):\r\n \"\"\"Array Editor Table Model\"\"\"\r\n\r\n ROWS_TO_LOAD = 500\r\n COLS_TO_LOAD = 40\r\n\r\n def __init__(self, data, format=\"%.6g\", xlabels=None, ylabels=None,\r\n readonly=False, parent=None):\r\n QAbstractTableModel.__init__(self)\r\n\r\n self.dialog = parent\r\n self.changes = {}\r\n self.xlabels = xlabels\r\n self.ylabels = ylabels\r\n self.readonly = readonly\r\n self.test_array = np.array([0], dtype=data.dtype)\r\n\r\n # for complex numbers, shading will be based on absolute value\r\n # but for all other types it will be the real part\r\n if data.dtype in (np.complex64, np.complex128):\r\n self.color_func = np.abs\r\n else:\r\n self.color_func = np.real\r\n\r\n # Backgroundcolor settings\r\n huerange = [.66, .99] # Hue\r\n self.sat = .7 # Saturation\r\n self.val = 1. # Value\r\n self.alp = .6 # Alpha-channel\r\n\r\n self._data = data\r\n self._format = format\r\n\r\n self.total_rows = self._data.shape[0]\r\n self.total_cols = self._data.shape[1]\r\n size = self.total_rows * self.total_cols\r\n\r\n try:\r\n self.vmin = np.nanmin(self.color_func(data))\r\n self.vmax = np.nanmax(self.color_func(data))\r\n if self.vmax == self.vmin:\r\n self.vmin -= 1\r\n self.hue0 = huerange[0]\r\n self.dhue = huerange[1]-huerange[0]\r\n self.bgcolor_enabled = True\r\n except (TypeError, ValueError):\r\n self.vmin = None\r\n self.vmax = None\r\n self.hue0 = None\r\n self.dhue = None\r\n self.bgcolor_enabled = False\r\n\r\n # Deactivate coloring for object arrays\r\n if self._data.dtype.name == 'object':\r\n self.bgcolor_enabled = False\r\n\r\n # Use paging when the total size, number of rows or number of\r\n # columns is too large\r\n if size > LARGE_SIZE:\r\n self.rows_loaded = self.ROWS_TO_LOAD\r\n self.cols_loaded = self.COLS_TO_LOAD\r\n else:\r\n if self.total_rows > LARGE_NROWS:\r\n self.rows_loaded = self.ROWS_TO_LOAD\r\n else:\r\n self.rows_loaded = self.total_rows\r\n if self.total_cols > LARGE_COLS:\r\n self.cols_loaded = self.COLS_TO_LOAD\r\n else:\r\n self.cols_loaded = self.total_cols\r\n\r\n def get_format(self):\r\n \"\"\"Return current format\"\"\"\r\n # Avoid accessing the private attribute _format from outside\r\n return self._format\r\n\r\n def get_data(self):\r\n \"\"\"Return data\"\"\"\r\n return self._data\r\n\r\n def set_format(self, format):\r\n \"\"\"Change display format\"\"\"\r\n self._format = format\r\n self.reset()\r\n\r\n def columnCount(self, qindex=QModelIndex()):\r\n \"\"\"Array column number\"\"\"\r\n if self.total_cols <= self.cols_loaded:\r\n return self.total_cols\r\n else:\r\n return self.cols_loaded\r\n\r\n def rowCount(self, qindex=QModelIndex()):\r\n \"\"\"Array row number\"\"\"\r\n if self.total_rows <= self.rows_loaded:\r\n return self.total_rows\r\n else:\r\n return self.rows_loaded\r\n\r\n def can_fetch_more(self, rows=False, columns=False):\r\n if rows:\r\n if self.total_rows > self.rows_loaded:\r\n return True\r\n else:\r\n return False\r\n if columns:\r\n if self.total_cols > self.cols_loaded:\r\n return True\r\n else:\r\n return False\r\n\r\n def fetch_more(self, rows=False, columns=False):\r\n if self.can_fetch_more(rows=rows):\r\n reminder = self.total_rows - self.rows_loaded\r\n items_to_fetch = min(reminder, self.ROWS_TO_LOAD)\r\n self.beginInsertRows(QModelIndex(), self.rows_loaded,\r\n self.rows_loaded + items_to_fetch - 1)\r\n self.rows_loaded += items_to_fetch\r\n self.endInsertRows()\r\n if self.can_fetch_more(columns=columns):\r\n reminder = self.total_cols - self.cols_loaded\r\n items_to_fetch = 
min(reminder, self.COLS_TO_LOAD)\r\n self.beginInsertColumns(QModelIndex(), self.cols_loaded,\r\n self.cols_loaded + items_to_fetch - 1)\r\n self.cols_loaded += items_to_fetch\r\n self.endInsertColumns()\r\n\r\n def bgcolor(self, state):\r\n \"\"\"Toggle backgroundcolor\"\"\"\r\n self.bgcolor_enabled = state > 0\r\n self.reset()\r\n\r\n def get_value(self, index):\r\n i = index.row()\r\n j = index.column()\r\n if len(self._data.shape) == 1:\r\n value = self._data[j]\r\n else:\r\n value = self._data[i, j]\r\n return self.changes.get((i, j), value)\r\n\r\n def data(self, index, role=Qt.DisplayRole):\r\n \"\"\"Cell content.\"\"\"\r\n if not index.isValid():\r\n return to_qvariant()\r\n value = self.get_value(index)\r\n dtn = self._data.dtype.name\r\n\r\n # Tranform binary string to unicode so they are displayed\r\n # correctly\r\n if is_binary_string(value):\r\n try:\r\n value = to_text_string(value, 'utf8')\r\n except Exception:\r\n pass\r\n\r\n # Handle roles\r\n if role == Qt.DisplayRole:\r\n if value is np.ma.masked:\r\n return ''\r\n else:\r\n if dtn == 'object':\r\n # We don't know what's inside an object array, so\r\n # we can't trust value repr's here.\r\n return value_to_display(value)\r\n else:\r\n try:\r\n return to_qvariant(self._format % value)\r\n except TypeError:\r\n self.readonly = True\r\n return repr(value)\r\n elif role == Qt.TextAlignmentRole:\r\n return to_qvariant(int(Qt.AlignCenter|Qt.AlignVCenter))\r\n elif (role == Qt.BackgroundColorRole and self.bgcolor_enabled\r\n and value is not np.ma.masked):\r\n try:\r\n hue = (self.hue0 +\r\n self.dhue * (float(self.vmax) - self.color_func(value))\r\n / (float(self.vmax) - self.vmin))\r\n hue = float(np.abs(hue))\r\n color = QColor.fromHsvF(hue, self.sat, self.val, self.alp)\r\n return to_qvariant(color)\r\n except (TypeError, ValueError):\r\n return to_qvariant()\r\n elif role == Qt.FontRole:\r\n return to_qvariant(get_font(font_size_delta=DEFAULT_SMALL_DELTA))\r\n return to_qvariant()\r\n\r\n def setData(self, index, value, role=Qt.EditRole):\r\n \"\"\"Cell content change\"\"\"\r\n if not index.isValid() or self.readonly:\r\n return False\r\n i = index.row()\r\n j = index.column()\r\n value = from_qvariant(value, str)\r\n dtype = self._data.dtype.name\r\n if dtype == \"bool\":\r\n try:\r\n val = bool(float(value))\r\n except ValueError:\r\n val = value.lower() == \"true\"\r\n elif dtype.startswith(\"string\") or dtype.startswith(\"bytes\"):\r\n val = to_binary_string(value, 'utf8')\r\n elif dtype.startswith(\"unicode\") or dtype.startswith(\"str\"):\r\n val = to_text_string(value)\r\n else:\r\n if value.lower().startswith('e') or value.lower().endswith('e'):\r\n return False\r\n try:\r\n val = complex(value)\r\n if not val.imag:\r\n val = val.real\r\n except ValueError as e:\r\n QMessageBox.critical(self.dialog, \"Error\",\r\n \"Value error: %s\" % str(e))\r\n return False\r\n try:\r\n self.test_array[0] = val # will raise an Exception eventually\r\n except OverflowError as e:\r\n print(\"OverflowError: \" + str(e)) # spyder: test-skip\r\n QMessageBox.critical(self.dialog, \"Error\",\r\n \"Overflow error: %s\" % str(e))\r\n return False\r\n\r\n # Add change to self.changes\r\n self.changes[(i, j)] = val\r\n self.dataChanged.emit(index, index)\r\n if not is_string(val):\r\n if val > self.vmax:\r\n self.vmax = val\r\n if val < self.vmin:\r\n self.vmin = val\r\n return True\r\n\r\n def flags(self, index):\r\n \"\"\"Set editable flag\"\"\"\r\n if not index.isValid():\r\n return Qt.ItemIsEnabled\r\n return 
Qt.ItemFlags(int(QAbstractTableModel.flags(self, index) |\r\n Qt.ItemIsEditable))\r\n\r\n def headerData(self, section, orientation, role=Qt.DisplayRole):\r\n \"\"\"Set header data\"\"\"\r\n if role != Qt.DisplayRole:\r\n return to_qvariant()\r\n labels = self.xlabels if orientation == Qt.Horizontal else self.ylabels\r\n if labels is None:\r\n return to_qvariant(int(section))\r\n else:\r\n return to_qvariant(labels[section])\r\n\r\n def reset(self):\r\n self.beginResetModel()\r\n self.endResetModel()\r\n\r\n\r\nclass ArrayDelegate(QItemDelegate):\r\n \"\"\"Array Editor Item Delegate\"\"\"\r\n def __init__(self, dtype, parent=None):\r\n QItemDelegate.__init__(self, parent)\r\n self.dtype = dtype\r\n\r\n def createEditor(self, parent, option, index):\r\n \"\"\"Create editor widget\"\"\"\r\n model = index.model()\r\n value = model.get_value(index)\r\n if model._data.dtype.name == \"bool\":\r\n value = not value\r\n model.setData(index, to_qvariant(value))\r\n return\r\n elif value is not np.ma.masked:\r\n editor = QLineEdit(parent)\r\n editor.setFont(get_font(font_size_delta=DEFAULT_SMALL_DELTA))\r\n editor.setAlignment(Qt.AlignCenter)\r\n if is_number(self.dtype):\r\n validator = QDoubleValidator(editor)\r\n validator.setLocale(QLocale('C'))\r\n editor.setValidator(validator)\r\n editor.returnPressed.connect(self.commitAndCloseEditor)\r\n return editor\r\n\r\n def commitAndCloseEditor(self):\r\n \"\"\"Commit and close editor\"\"\"\r\n editor = self.sender()\r\n # Avoid a segfault with PyQt5. Variable value won't be changed\r\n # but at least Spyder won't crash. It seems generated by a bug in sip.\r\n try:\r\n self.commitData.emit(editor)\r\n except AttributeError:\r\n pass\r\n self.closeEditor.emit(editor, QAbstractItemDelegate.NoHint)\r\n\r\n def setEditorData(self, editor, index):\r\n \"\"\"Set editor widget's data\"\"\"\r\n text = from_qvariant(index.model().data(index, Qt.DisplayRole), str)\r\n editor.setText(text)\r\n\r\n\r\n#TODO: Implement \"Paste\" (from clipboard) feature\r\nclass ArrayView(QTableView):\r\n \"\"\"Array view class\"\"\"\r\n def __init__(self, parent, model, dtype, shape):\r\n QTableView.__init__(self, parent)\r\n\r\n self.setModel(model)\r\n self.setItemDelegate(ArrayDelegate(dtype, self))\r\n total_width = 0\r\n for k in range(shape[1]):\r\n total_width += self.columnWidth(k)\r\n self.viewport().resize(min(total_width, 1024), self.height())\r\n self.shape = shape\r\n self.menu = self.setup_menu()\r\n CONF.config_shortcut(\r\n self.copy,\r\n context='variable_explorer',\r\n name='copy',\r\n parent=self)\r\n self.horizontalScrollBar().valueChanged.connect(\r\n lambda val: self.load_more_data(val, columns=True))\r\n self.verticalScrollBar().valueChanged.connect(\r\n lambda val: self.load_more_data(val, rows=True))\r\n\r\n def load_more_data(self, value, rows=False, columns=False):\r\n\r\n try:\r\n old_selection = self.selectionModel().selection()\r\n old_rows_loaded = old_cols_loaded = None\r\n\r\n if rows and value == self.verticalScrollBar().maximum():\r\n old_rows_loaded = self.model().rows_loaded\r\n self.model().fetch_more(rows=rows)\r\n\r\n if columns and value == self.horizontalScrollBar().maximum():\r\n old_cols_loaded = self.model().cols_loaded\r\n self.model().fetch_more(columns=columns)\r\n\r\n if old_rows_loaded is not None or old_cols_loaded is not None:\r\n # if we've changed anything, update selection\r\n new_selection = QItemSelection()\r\n for part in old_selection:\r\n top = part.top()\r\n bottom = part.bottom()\r\n if (old_rows_loaded is not None 
and\r\n                            top == 0 and bottom == (old_rows_loaded-1)):\r\n                        # complete column selected (so expand it to match\r\n                        # updated range)\r\n                        bottom = self.model().rows_loaded-1\r\n                    left = part.left()\r\n                    right = part.right()\r\n                    if (old_cols_loaded is not None\r\n                            and left == 0 and right == (old_cols_loaded-1)):\r\n                        # complete row selected (so expand it to match updated\r\n                        # range)\r\n                        right = self.model().cols_loaded-1\r\n                    top_left = self.model().index(top, left)\r\n                    bottom_right = self.model().index(bottom, right)\r\n                    part = QItemSelectionRange(top_left, bottom_right)\r\n                    new_selection.append(part)\r\n                self.selectionModel().select(\r\n                    new_selection, self.selectionModel().ClearAndSelect)\r\n        except NameError:\r\n            # Needed to handle a NameError while fetching data when closing\r\n            # See issue 7880\r\n            pass\r\n\r\n    def resize_to_contents(self):\r\n        \"\"\"Resize cells to contents\"\"\"\r\n        QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))\r\n        self.resizeColumnsToContents()\r\n        self.model().fetch_more(columns=True)\r\n        self.resizeColumnsToContents()\r\n        QApplication.restoreOverrideCursor()\r\n\r\n    def setup_menu(self):\r\n        \"\"\"Setup context menu\"\"\"\r\n        self.copy_action = create_action(self, _('Copy'),\r\n                                         shortcut=keybinding('Copy'),\r\n                                         icon=ima.icon('editcopy'),\r\n                                         triggered=self.copy,\r\n                                         context=Qt.WidgetShortcut)\r\n        menu = QMenu(self)\r\n        add_actions(menu, [self.copy_action, ])\r\n        return menu\r\n\r\n    def contextMenuEvent(self, event):\r\n        \"\"\"Reimplement Qt method\"\"\"\r\n        self.menu.popup(event.globalPos())\r\n        event.accept()\r\n\r\n    def keyPressEvent(self, event):\r\n        \"\"\"Reimplement Qt method\"\"\"\r\n        if event == QKeySequence.Copy:\r\n            self.copy()\r\n        else:\r\n            QTableView.keyPressEvent(self, event)\r\n\r\n    def _sel_to_text(self, cell_range):\r\n        \"\"\"Copy an array portion to a unicode string\"\"\"\r\n        if not cell_range:\r\n            return\r\n        row_min, row_max, col_min, col_max = get_idx_rect(cell_range)\r\n        if col_min == 0 and col_max == (self.model().cols_loaded-1):\r\n            # we've selected a whole column. 
It isn't possible to\r\n # select only the first part of a column without loading more,\r\n # so we can treat it as intentional and copy the whole thing\r\n col_max = self.model().total_cols-1\r\n if row_min == 0 and row_max == (self.model().rows_loaded-1):\r\n row_max = self.model().total_rows-1\r\n\r\n _data = self.model().get_data()\r\n if PY3:\r\n output = io.BytesIO()\r\n else:\r\n output = io.StringIO()\r\n try:\r\n np.savetxt(output, _data[row_min:row_max+1, col_min:col_max+1],\r\n delimiter='\\t', fmt=self.model().get_format())\r\n except:\r\n QMessageBox.warning(self, _(\"Warning\"),\r\n _(\"It was not possible to copy values for \"\r\n \"this array\"))\r\n return\r\n contents = output.getvalue().decode('utf-8')\r\n output.close()\r\n return contents\r\n\r\n @Slot()\r\n def copy(self):\r\n \"\"\"Copy text to clipboard\"\"\"\r\n cliptxt = self._sel_to_text( self.selectedIndexes() )\r\n clipboard = QApplication.clipboard()\r\n clipboard.setText(cliptxt)\r\n\r\n\r\nclass ArrayEditorWidget(QWidget):\r\n\r\n def __init__(self, parent, data, readonly=False,\r\n xlabels=None, ylabels=None):\r\n QWidget.__init__(self, parent)\r\n self.data = data\r\n self.old_data_shape = None\r\n if len(self.data.shape) == 1:\r\n self.old_data_shape = self.data.shape\r\n self.data.shape = (self.data.shape[0], 1)\r\n elif len(self.data.shape) == 0:\r\n self.old_data_shape = self.data.shape\r\n self.data.shape = (1, 1)\r\n\r\n format = SUPPORTED_FORMATS.get(data.dtype.name, '%s')\r\n self.model = ArrayModel(self.data, format=format, xlabels=xlabels,\r\n ylabels=ylabels, readonly=readonly, parent=self)\r\n self.view = ArrayView(self, self.model, data.dtype, data.shape)\r\n\r\n btn_layout = QHBoxLayout()\r\n btn_layout.setAlignment(Qt.AlignLeft)\r\n btn = QPushButton(_( \"Format\"))\r\n # disable format button for int type\r\n btn.setEnabled(is_float(data.dtype))\r\n btn_layout.addWidget(btn)\r\n btn.clicked.connect(self.change_format)\r\n btn = QPushButton(_( \"Resize\"))\r\n btn_layout.addWidget(btn)\r\n btn.clicked.connect(self.view.resize_to_contents)\r\n bgcolor = QCheckBox(_( 'Background color'))\r\n bgcolor.setChecked(self.model.bgcolor_enabled)\r\n bgcolor.setEnabled(self.model.bgcolor_enabled)\r\n bgcolor.stateChanged.connect(self.model.bgcolor)\r\n btn_layout.addWidget(bgcolor)\r\n\r\n layout = QVBoxLayout()\r\n layout.addWidget(self.view)\r\n layout.addLayout(btn_layout)\r\n self.setLayout(layout)\r\n\r\n def accept_changes(self):\r\n \"\"\"Accept changes\"\"\"\r\n for (i, j), value in list(self.model.changes.items()):\r\n self.data[i, j] = value\r\n if self.old_data_shape is not None:\r\n self.data.shape = self.old_data_shape\r\n\r\n def reject_changes(self):\r\n \"\"\"Reject changes\"\"\"\r\n if self.old_data_shape is not None:\r\n self.data.shape = self.old_data_shape\r\n\r\n def change_format(self):\r\n \"\"\"Change display format\"\"\"\r\n format, valid = QInputDialog.getText(self, _( 'Format'),\r\n _( \"Float formatting\"),\r\n QLineEdit.Normal, self.model.get_format())\r\n if valid:\r\n format = str(format)\r\n try:\r\n format % 1.1\r\n except:\r\n QMessageBox.critical(self, _(\"Error\"),\r\n _(\"Format (%s) is incorrect\") % format)\r\n return\r\n self.model.set_format(format)\r\n\r\n\r\nclass ArrayEditor(QDialog):\r\n \"\"\"Array Editor Dialog\"\"\"\r\n def __init__(self, parent=None):\r\n QDialog.__init__(self, parent)\r\n\r\n # Destroying the C++ object right after closing the dialog box,\r\n # otherwise it may be garbage-collected in another QThread\r\n # (e.g. 
the editor's analysis thread in Spyder), thus leading to\r\n # a segmentation fault on UNIX or an application crash on Windows\r\n self.setAttribute(Qt.WA_DeleteOnClose)\r\n\r\n self.data = None\r\n self.arraywidget = None\r\n self.stack = None\r\n self.layout = None\r\n self.btn_save_and_close = None\r\n self.btn_close = None\r\n # Values for 3d array editor\r\n self.dim_indexes = [{}, {}, {}]\r\n self.last_dim = 0 # Adjust this for changing the startup dimension\r\n\r\n def setup_and_check(self, data, title='', readonly=False,\r\n xlabels=None, ylabels=None):\r\n \"\"\"\r\n Setup ArrayEditor:\r\n return False if data is not supported, True otherwise\r\n \"\"\"\r\n self.data = data\r\n readonly = readonly or not self.data.flags.writeable\r\n is_record_array = data.dtype.names is not None\r\n is_masked_array = isinstance(data, np.ma.MaskedArray)\r\n\r\n if data.ndim > 3:\r\n self.error(_(\"Arrays with more than 3 dimensions are not \"\r\n \"supported\"))\r\n return False\r\n if xlabels is not None and len(xlabels) != self.data.shape[1]:\r\n self.error(_(\"The 'xlabels' argument length do no match array \"\r\n \"column number\"))\r\n return False\r\n if ylabels is not None and len(ylabels) != self.data.shape[0]:\r\n self.error(_(\"The 'ylabels' argument length do no match array row \"\r\n \"number\"))\r\n return False\r\n if not is_record_array:\r\n dtn = data.dtype.name\r\n if dtn == 'object':\r\n # If the array doesn't have shape, we can't display it\r\n if data.shape == ():\r\n self.error(_(\"Object arrays without shape are not \"\r\n \"supported\"))\r\n return False\r\n # We don't know what's inside these arrays, so we can't handle\r\n # edits\r\n self.readonly = readonly = True\r\n elif (dtn not in SUPPORTED_FORMATS and not dtn.startswith('str')\r\n and not dtn.startswith('unicode')):\r\n arr = _(\"%s arrays\") % data.dtype.name\r\n self.error(_(\"%s are currently not supported\") % arr)\r\n return False\r\n\r\n self.layout = QGridLayout()\r\n self.setLayout(self.layout)\r\n self.setWindowIcon(ima.icon('arredit'))\r\n if title:\r\n title = to_text_string(title) + \" - \" + _(\"NumPy array\")\r\n else:\r\n title = _(\"Array editor\")\r\n if readonly:\r\n title += ' (' + _('read only') + ')'\r\n self.setWindowTitle(title)\r\n self.resize(600, 500)\r\n\r\n # Stack widget\r\n self.stack = QStackedWidget(self)\r\n if is_record_array:\r\n for name in data.dtype.names:\r\n self.stack.addWidget(ArrayEditorWidget(self, data[name],\r\n readonly, xlabels,\r\n ylabels))\r\n elif is_masked_array:\r\n self.stack.addWidget(ArrayEditorWidget(self, data, readonly,\r\n xlabels, ylabels))\r\n self.stack.addWidget(ArrayEditorWidget(self, data.data, readonly,\r\n xlabels, ylabels))\r\n self.stack.addWidget(ArrayEditorWidget(self, data.mask, readonly,\r\n xlabels, ylabels))\r\n elif data.ndim == 3:\r\n pass\r\n else:\r\n self.stack.addWidget(ArrayEditorWidget(self, data, readonly,\r\n xlabels, ylabels))\r\n self.arraywidget = self.stack.currentWidget()\r\n if self.arraywidget:\r\n self.arraywidget.model.dataChanged.connect(\r\n self.save_and_close_enable)\r\n self.stack.currentChanged.connect(self.current_widget_changed)\r\n self.layout.addWidget(self.stack, 1, 0)\r\n\r\n # Buttons configuration\r\n btn_layout = QHBoxLayout()\r\n if is_record_array or is_masked_array or data.ndim == 3:\r\n if is_record_array:\r\n btn_layout.addWidget(QLabel(_(\"Record array fields:\")))\r\n names = []\r\n for name in data.dtype.names:\r\n field = data.dtype.fields[name]\r\n text = name\r\n if len(field) >= 3:\r\n title = 
field[2]\r\n if not is_text_string(title):\r\n title = repr(title)\r\n text += ' - '+title\r\n names.append(text)\r\n else:\r\n names = [_('Masked data'), _('Data'), _('Mask')]\r\n if data.ndim == 3:\r\n # QSpinBox\r\n self.index_spin = QSpinBox(self, keyboardTracking=False)\r\n self.index_spin.valueChanged.connect(self.change_active_widget)\r\n # QComboBox\r\n names = [str(i) for i in range(3)]\r\n ra_combo = QComboBox(self)\r\n ra_combo.addItems(names)\r\n ra_combo.currentIndexChanged.connect(self.current_dim_changed)\r\n # Adding the widgets to layout\r\n label = QLabel(_(\"Axis:\"))\r\n btn_layout.addWidget(label)\r\n btn_layout.addWidget(ra_combo)\r\n self.shape_label = QLabel()\r\n btn_layout.addWidget(self.shape_label)\r\n label = QLabel(_(\"Index:\"))\r\n btn_layout.addWidget(label)\r\n btn_layout.addWidget(self.index_spin)\r\n self.slicing_label = QLabel()\r\n btn_layout.addWidget(self.slicing_label)\r\n # set the widget to display when launched\r\n self.current_dim_changed(self.last_dim)\r\n else:\r\n ra_combo = QComboBox(self)\r\n ra_combo.currentIndexChanged.connect(self.stack.setCurrentIndex)\r\n ra_combo.addItems(names)\r\n btn_layout.addWidget(ra_combo)\r\n if is_masked_array:\r\n label = QLabel(_(\"<u>Warning</u>: changes are applied separately\"))\r\n label.setToolTip(_(\"For performance reasons, changes applied \"\\\r\n \"to masked array won't be reflected in \"\\\r\n \"array's data (and vice-versa).\"))\r\n btn_layout.addWidget(label)\r\n\r\n btn_layout.addStretch()\r\n\r\n if not readonly:\r\n self.btn_save_and_close = QPushButton(_('Save and Close'))\r\n self.btn_save_and_close.setDisabled(True)\r\n self.btn_save_and_close.clicked.connect(self.accept)\r\n btn_layout.addWidget(self.btn_save_and_close)\r\n\r\n self.btn_close = QPushButton(_('Close'))\r\n self.btn_close.setAutoDefault(True)\r\n self.btn_close.setDefault(True)\r\n self.btn_close.clicked.connect(self.reject)\r\n btn_layout.addWidget(self.btn_close)\r\n self.layout.addLayout(btn_layout, 2, 0)\r\n\r\n self.setMinimumSize(400, 300)\r\n\r\n # Make the dialog act as a window\r\n self.setWindowFlags(Qt.Window)\r\n\r\n return True\r\n\r\n @Slot(QModelIndex, QModelIndex)\r\n def save_and_close_enable(self, left_top, bottom_right):\r\n \"\"\"Handle the data change event to enable the save and close button.\"\"\"\r\n if self.btn_save_and_close:\r\n self.btn_save_and_close.setEnabled(True)\r\n self.btn_save_and_close.setAutoDefault(True)\r\n self.btn_save_and_close.setDefault(True)\r\n\r\n def current_widget_changed(self, index):\r\n self.arraywidget = self.stack.widget(index)\r\n self.arraywidget.model.dataChanged.connect(self.save_and_close_enable)\r\n\r\n def change_active_widget(self, index):\r\n \"\"\"\r\n This is implemented for handling negative values in index for\r\n 3d arrays, to give the same behavior as slicing\r\n \"\"\"\r\n string_index = [':']*3\r\n string_index[self.last_dim] = '<font color=red>%i</font>'\r\n self.slicing_label.setText((r\"Slicing: [\" + \", \".join(string_index) +\r\n \"]\") % index)\r\n if index < 0:\r\n data_index = self.data.shape[self.last_dim] + index\r\n else:\r\n data_index = index\r\n slice_index = [slice(None)]*3\r\n slice_index[self.last_dim] = data_index\r\n\r\n stack_index = self.dim_indexes[self.last_dim].get(data_index)\r\n if stack_index is None:\r\n stack_index = self.stack.count()\r\n try:\r\n self.stack.addWidget(ArrayEditorWidget(\r\n self, self.data[tuple(slice_index)]))\r\n except IndexError: # Handle arrays of size 0 in one axis\r\n 
self.stack.addWidget(ArrayEditorWidget(self, self.data))\r\n self.dim_indexes[self.last_dim][data_index] = stack_index\r\n self.stack.update()\r\n self.stack.setCurrentIndex(stack_index)\r\n\r\n def current_dim_changed(self, index):\r\n \"\"\"\r\n This change the active axis the array editor is plotting over\r\n in 3D\r\n \"\"\"\r\n self.last_dim = index\r\n string_size = ['%i']*3\r\n string_size[index] = '<font color=red>%i</font>'\r\n self.shape_label.setText(('Shape: (' + ', '.join(string_size) +\r\n ') ') % self.data.shape)\r\n if self.index_spin.value() != 0:\r\n self.index_spin.setValue(0)\r\n else:\r\n # this is done since if the value is currently 0 it does not emit\r\n # currentIndexChanged(int)\r\n self.change_active_widget(0)\r\n self.index_spin.setRange(-self.data.shape[index],\r\n self.data.shape[index]-1)\r\n\r\n @Slot()\r\n def accept(self):\r\n \"\"\"Reimplement Qt method\"\"\"\r\n for index in range(self.stack.count()):\r\n self.stack.widget(index).accept_changes()\r\n QDialog.accept(self)\r\n\r\n def get_value(self):\r\n \"\"\"Return modified array -- this is *not* a copy\"\"\"\r\n # It is important to avoid accessing Qt C++ object as it has probably\r\n # already been destroyed, due to the Qt.WA_DeleteOnClose attribute\r\n return self.data\r\n\r\n def error(self, message):\r\n \"\"\"An error occured, closing the dialog box\"\"\"\r\n QMessageBox.critical(self, _(\"Array editor\"), message)\r\n self.setAttribute(Qt.WA_DeleteOnClose)\r\n self.reject()\r\n\r\n @Slot()\r\n def reject(self):\r\n \"\"\"Reimplement Qt method\"\"\"\r\n if self.arraywidget is not None:\r\n for index in range(self.stack.count()):\r\n self.stack.widget(index).reject_changes()\r\n QDialog.reject(self)\r\n" ]
[ [ "numpy.array", "numpy.abs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SX-Aurora/mpi4py-ve
[ "aa6b1f97933196f8a485d5d808e89d5a29b58b1c" ]
[ "demo/osu_latency.py" ]
[ "# http://mvapich.cse.ohio-state.edu/benchmarks/\n\nfrom mpi4pyve import MPI\n\ndef osu_latency(\n BENCHMARH = \"MPI Latency Test\",\n skip = 1000,\n loop = 10000,\n skip_large = 10,\n loop_large = 100,\n large_message_size = 8192,\n MAX_MSG_SIZE = 1<<22,\n ):\n\n comm = MPI.COMM_WORLD\n myid = comm.Get_rank()\n numprocs = comm.Get_size()\n\n if numprocs != 2:\n if myid == 0:\n errmsg = \"This test requires exactly two processes\"\n else:\n errmsg = None\n raise SystemExit(errmsg)\n\n s_buf = allocate(MAX_MSG_SIZE)\n r_buf = allocate(MAX_MSG_SIZE)\n\n if myid == 0:\n print ('# %s' % (BENCHMARH,))\n if myid == 0:\n print ('# %-8s%20s' % (\"Size [B]\", \"Latency [us]\"))\n\n message_sizes = [0] + [2**i for i in range(30)]\n for size in message_sizes:\n if size > MAX_MSG_SIZE:\n break\n if size > large_message_size:\n skip = skip_large\n loop = loop_large\n iterations = list(range(loop+skip))\n s_msg = [s_buf, size, MPI.BYTE]\n r_msg = [r_buf, size, MPI.BYTE]\n #\n comm.Barrier()\n if myid == 0:\n for i in iterations:\n if i == skip:\n t_start = MPI.Wtime()\n comm.Send(s_msg, 1, 1)\n comm.Recv(r_msg, 1, 1)\n t_end = MPI.Wtime()\n elif myid == 1:\n for i in iterations:\n comm.Recv(r_msg, 0, 1)\n comm.Send(s_msg, 0, 1)\n #\n if myid == 0:\n latency = (t_end - t_start) * 1e6 / (2 * loop)\n print ('%-10d%20.2f' % (size, latency))\n\n\ndef allocate(n):\n try:\n import mmap\n return mmap.mmap(-1, n)\n except (ImportError, EnvironmentError):\n try:\n from numpy import zeros\n return zeros(n, 'B')\n except ImportError:\n from array import array\n return array('B', [0]) * n\n\n\nif __name__ == '__main__':\n osu_latency()\n" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
TheoBuchwald/UCPH-KVM
[ "bf77ad06bc1d7077e3cbcd81854f0fbfcd7323f7" ]
[ "KurtGroup/Kurt/output_processing.py" ]
[ "\nimport subprocess\nimport numpy as np\nfrom typing import List\nfrom chemical_information import AtomicInformation\n\ndef Forward_search_last(file: str, text: str, error: str, quiet: bool = False) -> int:\n \"\"\"Searches from the beggining of the file given to the end where it returns the linenumber of the last occurence\n\n Args:\n file (str): The file to search in\n text (str): The text string to search for\n error (str): If no occurences were found it will print 'No [error] could be found in [file]\n err (bool, optional): Whether or not to print error message if no occurences are found. Defaults to True.\n\n Returns:\n (int): Linenumber of last occurence\n \"\"\"\n ps1 = subprocess.run(['grep', '-nT', text, file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out = subprocess.run(['tail', '-n1'], input=ps1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n res = out.stdout\n if len(res) == 0:\n if not quiet:\n with open(\"collect_data.log\", \"a\") as logfile:\n logfile.write(f'No {error} could be found in {file}\\n')\n return 'NaN'\n res = res.split()[0]\n res = str(res).split(':')\n return int(res[0].replace('b\\'','').replace('\\'','')) - 1\n\ndef Forward_search_after_last(file: str, text1: str, text2: str, lines: int, error: str, quiet: bool = False) -> int:\n \"\"\"Searches from beggining of file for last occurence of [text1] and in the following [lines] after for [text2]\n\n Args:\n file (str): File to search in\n text1 (str): From the last occurence of this this function will search\n text2 (str): This is what will be found in the lines following [text1]\n lines (int): How many lines after [text1] should the function search for [text2]\n error (str): If no occurences were found it will print 'No [error] could be found in [file]\n err (bool, optional): Whether or not to print error message if no occurences are found. Defaults to True.. Defaults to True.\n\n Returns:\n (int): Linenumber of [text2] occurence\n \"\"\"\n ps1 = subprocess.run(['grep', '-nTA', f'{lines}', text1, file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n ps2 = subprocess.run(['tail', '-n', f'{lines + 1}'], input=ps1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out = subprocess.run(['grep', text2], input=ps2.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n res = out.stdout\n if len(res) == 0:\n if not quiet:\n with open(\"collect_data.log\", \"a\") as logfile:\n logfile.write(f'No {error} could be found in {file}\\n')\n return 'NaN'\n res = res.split()[0]\n res = str(res).split('-')\n return int(res[0].replace('b\\'','').replace('\\'','')) - 1\n\ndef Backward_search_last(file: str, text: str, filelength: int, error: str, quiet: bool = False) -> int:\n \"\"\"Finds the last occurence of a text string in a file by searching from the end\n\n Args:\n file (str): File to search in\n text (str): Text string to look for\n filelength (int): The length of the file\n error (str): If no occurences were fount it will print 'No [error] could be found in [file]\n quiet (bool, optional): Whether or not to print error message. 
Defaults to False.\n\n Returns:\n (int): Linenumber of last occurence\n \"\"\"\n ps1 = subprocess.run(['tac', file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out = subprocess.run(['grep', '-nTm1', text], input=ps1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n res = out.stdout\n if len(res) == 0:\n if not quiet:\n with open(\"collect_data.log\", \"a\") as logfile:\n logfile.write(f'No {error} could be found in {file}\\n')\n return 'NaN'\n res = res.split()[0]\n res = str(res).split(':')\n return filelength - int(res[0].replace('b\\'','').replace('\\'',''))\n\ndef Forward_search_first(file: str, text: str, error: str, quiet: bool = False) -> int:\n \"\"\"Searches from beginning of file and finds the first occurence of [text]\n\n Args:\n file (str): File to search in\n text (str): Text to look for\n error (str): If no occurences were found it will print 'No [error] could be found in [file]\n err (bool, optional): Whether or not to print error message if no occurences are found. Defaults to True.. Defaults to True.\n\n Returns:\n (int): Linenumber of first occurence\n \"\"\"\n out = subprocess.run(['grep', '-nTm1', text, file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n res = out.stdout\n if len(res) == 0:\n if not quiet:\n with open(\"collect_data.log\", \"a\") as logfile:\n logfile.write(f'No {error} could be found in {file}\\n')\n return 'NaN'\n res = res.split()[0]\n res = str(res).split(':')\n return int(res[0].replace('b\\'','').replace('\\'','')) - 1\n\ndef Forward_search_all(file: str, text: str, error: str, quiet: bool = False) -> list:\n \"\"\"Searches from beggining of file to end of file finding all occurences of [text]\n\n Args:\n file (str): File to search in\n text (str): Text to look for\n error (str): If no occurences were found it will print 'No [error] could be found in [file]\n err (bool, optional): Whether or not to print error message if no occurences are found. Defaults to True.. 
Defaults to True.\n\n Returns:\n (list): List of the linenumbers of all occurences\n \"\"\"\n ps1 = subprocess.run(['grep', '-nT', text, file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out = subprocess.run(['awk', '{print $1}'], input=ps1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n res = out.stdout\n if len(res) == 0:\n if not quiet:\n with open(\"collect_data.log\", \"a\") as logfile:\n logfile.write(f'No {error} could be found in {file}\\n')\n return 'NaN'\n res = str(res).split('\\\\n')\n return [int(val.replace('b\\'','').replace('\\'','').replace(':','')) - 1 for val in res[:-1]]\n\ndef CheckForOnlyNans(array: list) -> bool:\n \"\"\"Function for checking if an array is fille only with the value 'NaN'\n\n Args:\n array (list): Array that should be looked through\n\n Returns:\n (bool): Returns a False/True based on whether or not the array the array only consists of 'NaN'\n \"\"\"\n for i in array:\n if i != 'NaN':\n return False\n return True\n\ndef WriteToFile(filename : str, lines : list) -> None:\n \"\"\" Function for writing out to a file\n\n Args:\n lines (list) : List of lines that should be written in the file\n \"\"\"\n with open(filename,'w') as wrt:\n wrt.writelines(lines)\n\ndef GenerateXYZ(lines : list, filename : str , start : int, end : int, lab_loc : int, transform : bool = False) -> None:\n \"\"\" Function for generating and writing out XYZ file from imput\n\n Args:\n lines (list): Lines in an input file\n filename (str): Filename for geometry file\n start, end (int): Starting and ending linenumber of the final geometry in the file\n lab_loc (int): Location of label in line\n transform (bool): Transforms atomic number into label, if needed\n \"\"\"\n lines_to_add = []\n lines_to_add.append(str(end-(start))+ '\\n')\n lines_to_add.append('\\n')\n for line in lines[start:end]:\n words = line.split()\n if transform:\n atm = AtomicInformation(int(words[lab_loc]))\n lines_to_add.append(''.join([atm.atom.ljust(2),' ',words[-3].rjust(10),' ', words[-2].rjust(15), ' ',words[-1].rjust(15) ,'\\n']))\n else:\n lines_to_add.append(''.join([words[lab_loc].ljust(2),' ',words[-3].rjust(10),' ', words[-2].rjust(15), ' ',words[-1].rjust(15) ,'\\n']))\n WriteToFile(filename,lines_to_add)\n\n\nclass OutputType:\n def __init__(self, filename: str, *, Quiet: bool = False, Temperature: float = 298.15):\n self.filename = filename\n\n with open(self.filename,'r') as read:\n lines = read.readlines()[:100]\n\n AMS = False\n for line in lines:\n if \"Amsterdam Modeling Suite (AMS)\" in line:\n AMS = True\n\n # The output file is determined to be of one of the following types\n\n # File type = ORCA\n if '* O R C A *' in lines[4]:\n self.extract = OrcaExtract(self.filename, Quiet=Quiet, Temperature=Temperature)\n self.input = 'ORCA'\n\n # File type = DALTON\n elif '*************** Dalton - An Electronic Structure Program ***************' in lines[3]:\n self.extract = DaltonExtract(self.filename, Quiet=Quiet, Temperature=Temperature)\n self.input = 'DALTON'\n\n # File type = GAUSSIAN\n elif 'Gaussian, Inc. All Rights Reserved.' 
in lines[6]:\n self.extract = GaussianExtract(self.filename, Quiet=Quiet, Temperature=Temperature)\n self.input = 'GAUSSIAN'\n\n # File type = LSDALTON\n elif '********** LSDalton - An electronic structure program **********' in lines[2]:\n self.extract = LSDaltonExtract(self.filename, Quiet=Quiet, Temperature=Temperature)\n self.input = 'LSDALTON'\n\n # File type = VELOXCHEM\n elif 'VELOXCHEM' in lines[2]:\n self.extract = VeloxExtract(self.filename, Quiet=Quiet, Temperature=Temperature)\n self.input = 'VELOXCHEM'\n\n\n\n # File type = AMS\n elif AMS:\n self.extract = AMSExtract(self.filename, Quiet=Quiet, Temperature=Temperature)\n self.input = 'Amsterdam Modeling Suite'\n\n\n # File type not implemented\n else:\n if not Quiet:\n with open(\"collect_data.log\", \"a\") as logfile:\n logfile.write(f\"The output file {self.filename} is not of a known format\\n\")\n\n del lines\n\n def getEnergy(self) -> float:\n try:\n return self.extract.tot_energy\n except AttributeError:\n try:\n self.extract._Energy()\n return self.extract.tot_energy\n except AttributeError: ...\n\n def getZeroPointVibrationalEnergy(self) -> float:\n try:\n return self.extract.zpv\n except AttributeError:\n try:\n self.extract._ZPV()\n return self.extract.zpv\n except AttributeError: ...\n\n def getEnthalpy(self) -> float:\n try:\n return self.extract.enthalpy\n except AttributeError:\n try:\n self.extract.tot_energy\n except AttributeError:\n try:\n self.extract._Energy()\n except AttributeError: return\n try:\n self.extract.freq\n except AttributeError:\n try:\n self.extract._Frequencies()\n except AttributeError: return\n try:\n self.extract._Enthalpy()\n return self.extract.enthalpy\n except AttributeError: ...\n\n def getEntropy(self) -> float:\n try:\n return self.entropy\n except AttributeError:\n try:\n self.extract.freq\n except AttributeError:\n try:\n self.extract._Frequencies()\n except AttributeError: return\n try:\n self.extract._Entropy()\n return self.extract.entropy\n except AttributeError: ...\n\n def getGibbsFreeEnergy(self) -> float:\n try:\n return self.extract.gibbs\n except AttributeError:\n try:\n self.extract.tot_energy\n except AttributeError:\n try:\n self.extract._Energy()\n except AttributeError: return\n try:\n self.extract.freq\n except AttributeError:\n try:\n self.extract._Frequencies()\n except AttributeError: return\n try:\n self.extract.enthalpy\n except AttributeError:\n try:\n self.extract._Enthalpy()\n except AttributeError: return\n try:\n self.extract.entropy\n except AttributeError:\n try:\n self.extract._Entropy()\n except AttributeError: return\n try:\n self.extract._Gibbs()\n return self.extract.gibbs\n except AttributeError: ...\n\n def getDipoleMoment(self) -> List[float]:\n try:\n return [self.extract.dipolex, self.extract.dipoley, self.extract.dipolez, self.extract.total_dipole]\n except AttributeError:\n try:\n self.extract._Dipole_moments()\n return [self.extract.dipolex, self.extract.dipoley, self.extract.dipolez, self.extract.total_dipole]\n except AttributeError: ...\n\n def getPolarizability(self) -> List[float]:\n try:\n return [self.extract.polx, self.extract.poly, self.extract.polz, self.extract.iso_polar]\n except AttributeError:\n try:\n self.extract._Polarizabilities()\n return [self.extract.polx, self.extract.poly, self.extract.polz, self.extract.iso_polar]\n except AttributeError: ...\n\n def getExcitationEnergies(self) -> List[float]:\n try:\n return self.extract.exc_energies\n except AttributeError:\n try:\n self.extract._Excitation_energies()\n return 
self.extract.exc_energies\n except AttributeError: ...\n\n def getOscillatorStrengths(self) -> List[float]:\n try:\n return self.extract.osc_strengths\n except AttributeError:\n try:\n self.extract.exc_energies\n except AttributeError:\n try:\n self.extract._Excitation_energies()\n except AttributeError: return\n try:\n self.extract._Oscillator_strengths()\n return self.extract.osc_strengths\n except AttributeError: ...\n\n def getFrequencies(self) -> List[float]:\n try:\n return self.extract.freq\n except AttributeError:\n try:\n self.extract._Frequencies()\n return self.extract.freq\n except AttributeError: ...\n\n def getPartitionFunction(self) -> float:\n try:\n return self.extract.qTotal\n except AttributeError:\n try:\n self.extract.freq\n except AttributeError:\n try:\n self.extract._Frequencies()\n except AttributeError: return\n try:\n self.extract._PartitionFunctions()\n return self.extract.qTotal\n except AttributeError: ...\n\n def getCPUTime(self) -> List[float]:\n try:\n return [self.extract.total_cpu_time, self.extract.wall_cpu_time]\n except AttributeError:\n try:\n self.extract._CPUS()\n return [self.extract.total_cpu_time, self.extract.wall_cpu_time]\n except AttributeError: ...\n\n def getOptimizedGeometry(self) -> None:\n self.extract._Optimized_Geometry()\n\n\nclass Constants:\n def __init__(self) -> None:\n self.ev_to_au = 0.036749405469679\n self.inv_cm_to_au = 1/219474.63068\n self.trans_const_fac = 1.5625517342018425307E+22 #Molar value assuming 1 bar standard pressure\n self.rot_lin_const = 20.83661793 #Assuming rigid, linear rotor and T>>Rotational temperature and rotational constant in GHz\n self.rot_poly_const = 168.5837766 #Assuming rigid, polyatomic rotor and T>>Rotational temperature and rotational constant in GHz\n self.vib_const = 3.157750419E+05 #Assuming harmonic oscillator and frequency in au\n self.gas_constant = 8.31446261815324E-03 # In kJ/(mol*K)\n self.s_trans_const = 0.3160965065 #Assuming 1 bar standard pressure and molar\n self.au_to_kJmol = 2625.4996394799\n self.bohr_to_ao = 0.529177249\n self.debye_to_au = 0.393456\n\n\nclass VeloxExtract:\n def __init__(self, filename: str, *, Quiet: bool = False, Temperature: float = 298.15) -> None:\n self.filename = filename\n self.quiet = Quiet\n self.T = Temperature\n self.constants = Constants()\n\n self.ReadFile()\n\n self.end = len(self.lines)\n\n def ReadFile(self) -> None:\n with open(self.filename, \"r\") as file:\n self.lines = file.readlines()\n\n def _Energy(self) -> None:\n linenumber = Forward_search_last(self.filename, 'Total Energy', 'final energy', quiet=self.quiet)\n if isinstance(linenumber, int):\n self.tot_energy = float(self.lines[linenumber].split()[-2])\n return\n self.tot_energy = 'NaN'\n\n def _Dipole_moments(self) -> None:\n linenumber = Forward_search_last(self.filename, 'Ground-State Dipole Moment', 'dipole moment', quiet=self.quiet)\n if isinstance(linenumber, int):\n linenumber += 3\n self.dipolex, self.dipoley, self.dipolez, self.total_dipole = float(self.lines[linenumber].split()[-4]), float(self.lines[linenumber+1].split()[-4]), float(self.lines[linenumber+2].split()[-4]), float(self.lines[linenumber+3].split()[-4])\n return\n self.dipolex, self.dipoley, self.dipolez, self.total_dipole = 'NaN'\n\n #NB! 
Only static polarizability considered\n def _Polarizabilities(self) -> None:\n linenumber = Forward_search_last(self.filename, 'Polarizability (w=0.0000)', 'polarizability', quiet=self.quiet)\n if isinstance(linenumber, int):\n #Need to perform diagonalization\n PolarizabilityTensor = np.array([[float(self.lines[linenumber+3].split()[1]), float(self.lines[linenumber+3].split()[2]), float(self.lines[linenumber+3].split()[3])],\n [float(self.lines[linenumber+4].split()[1]), float(self.lines[linenumber+4].split()[2]), float(self.lines[linenumber+4].split()[3])],\n [float(self.lines[linenumber+5].split()[1]), float(self.lines[linenumber+5].split()[2]), float(self.lines[linenumber+5].split()[3])]])\n PolarizabilityEigenvalues = np.linalg.eigh(PolarizabilityTensor)[0]\n self.polx, self.poly, self.polz = PolarizabilityEigenvalues\n self.iso_polar = PolarizabilityEigenvalues.mean()\n return\n self.polx = self.poly = self.polz = self.iso_polar = 'NaN'\n\n def _Optimized_Geometry(self) -> None:\n start = Forward_search_last(self.filename, 'Molecular Geometry', 'geometry', quiet=self.quiet)\n if start != \"NaN\":# and end != \"NaN\":\n start += 5\n for i, line in enumerate(self.lines[start:]):\n if len(line.strip()) == 0:\n end = start + i\n break\n #Offset for going into actual coordinate list\n #Which position in the line is the atom label / number at\n label_location = 0\n OptGeomFilename = self.filename[:-4] + \"_opt.xyz\"\n GenerateXYZ(self.lines, OptGeomFilename, start, end, label_location)\n if not(self.quiet):\n with open(\"collect_data.log\", \"a\") as logfile:\n logfile.write(\"Final geometry has been saved to \" + OptGeomFilename + \"\\n\")\n\n\nclass AMSExtract:\n def __init__(self, filename: str, *, Quiet: bool = False, Temperature: float = 298.15) -> None:\n self.filename = filename\n self.quiet = Quiet\n self.T = Temperature\n self.constants = Constants()\n self.ReadFile()\n\n self.end = len(self.lines)\n\n def ReadFile(self) -> None:\n with open(self.filename, \"r\") as file:\n self.lines = file.readlines()\n\n def _Energy(self) -> None:\n linenumber = Forward_search_last(self.filename, \"Energy (hartree)\", \"final energy\", quiet=self.quiet)\n if isinstance(linenumber, int):\n self.tot_energy = float(self.lines[linenumber].split()[-1])\n return\n self.tot_energy = 'NaN'\n\n def _Dipole_moments(self) -> None:\n linenumber = Forward_search_last(self.filename, 'Dipole Moment', 'dipole moment', quiet=self.quiet)\n if isinstance(linenumber, int):\n linenumber += 3\n self.dipolex, self.dipoley, self.dipolez, self.total_dipole = float(self.lines[linenumber].split()[-3])*self.constants.debye_to_au, float(self.lines[linenumber].split()[-2])*self.constants.debye_to_au, float(self.lines[linenumber].split()[-1])*self.constants.debye_to_au, float(self.lines[linenumber+1].split()[-1])*self.constants.debye_to_au\n return\n self.dipolex, self.dipoley, self.dipolez, self.total_dipole = 'NaN'\n\n def _Optimized_Geometry(self) -> None:\n start = Forward_search_last(self.filename, 'Formula:', 'geometry', quiet=self.quiet)\n if start != \"NaN\":\n start += 3\n #Offset for going into actual coordinate list\n for i, line in enumerate(self.lines[start:]):\n if len(line.strip()) == 0:\n end = start + i\n break\n #Which position in the line is the atom label / number at\n label_location = 1\n OptGeomFilename = self.filename[:-4] + \"_opt.xyz\"\n GenerateXYZ(self.lines, OptGeomFilename, start, end, label_location)\n if not(self.quiet):\n with open(\"collect_data.log\", \"a\") as logfile:\n 
logfile.write(\"Final geometry has been saved to \" + OptGeomFilename + \"\\n\")\n\n\nclass GaussianExtract:\n def __init__(self, filename: str, *, Quiet: bool = False, Temperature: float = 298.15) -> None:\n self.filename = filename\n self.quiet = Quiet\n self.T = Temperature\n self.constants = Constants()\n\n self.ReadFile()\n\n self.end = len(self.lines)\n\n def ReadFile(self) -> None:\n with open(self.filename, \"r\") as file:\n self.lines = file.readlines()\n\n def _CPUS(self) -> None:\n linenumber = Backward_search_last(self.filename, 'Job cpu time:', self.end, 'CPU time', quiet=self.quiet)\n if isinstance(linenumber, int):\n self.total_cpu_time = float(self.lines[linenumber].split()[3])*24*60 + float(self.lines[linenumber].split()[5])*60 + float(self.lines[linenumber].split()[7]) + float(self.lines[linenumber].split()[9])/60\n self.wall_cpu_time = float(self.lines[linenumber+1].split()[2])*24*60 + float(self.lines[linenumber+1].split()[4])*60 + float(self.lines[linenumber+1].split()[6]) + float(self.lines[linenumber+1].split()[8])/60\n return\n self.total_cpu_time = 'NaN'\n self.wall_cpu_time = 'NaN'\n\n def _Energy(self) -> None:\n linenumber = Forward_search_last(self.filename, 'Sum of electronic and zero-point Energies=', 'final energy', quiet=True)\n if isinstance(linenumber, int):\n self.tot_energy = float(self.lines[linenumber].split()[-1]) - float(self.lines[linenumber-4].split()[-2])\n return\n linenumber = Forward_search_last(self.filename, 'SCF Done:', 'final energy', quiet=self.quiet)\n if isinstance(linenumber, int):\n self.tot_energy = float(self.lines[linenumber].split()[4])\n return\n self.tot_energy = 'NaN'\n\n def _ZPV(self) -> None:\n linenumber = Forward_search_last(self.filename, 'Sum of electronic and zero-point Energies=', 'ZPV energy', quiet=self.quiet)\n if isinstance(linenumber, int):\n self.zpv = float(self.lines[linenumber].split()[-1])\n return\n self.zpv = 'NaN'\n\n def _Dipole_moments(self) -> None:\n linenumber = Forward_search_last(self.filename, 'Electric dipole moment (input orientation):', 'dipole moments', quiet=self.quiet)\n if isinstance(linenumber, int):\n self.dipolex, self.dipoley, self.dipolez, self.total_dipole = float(self.lines[linenumber+4].split()[1].replace('D','E')), float(self.lines[linenumber+5].split()[1].replace('D','E')), float(self.lines[linenumber+6].split()[1].replace('D','E')), float(self.lines[linenumber+3].split()[1].replace('D','E'))\n return\n self.dipolex = self.dipoley = self.dipolez = self.total_dipole = 'NaN'\n\n def _Polarizabilities(self) -> None:\n linenumber = ['NaN', 'NaN', 'NaN', 'NaN']\n searchwords = [' xx ', ' yy ', ' zz ', ' iso ']\n for i in range(len(searchwords)):\n linenumber[i] = Forward_search_after_last(self.filename, 'Dipole polarizability, Alpha (input orientation).', searchwords[i], 15, 'polarizabilities', quiet=self.quiet)\n if linenumber != ['NaN', 'NaN', 'NaN', 'NaN']:\n self.polx, self.poly, self.polz, self.iso_polar = float(self.lines[linenumber[0]].split()[1].replace('D','E')), float(self.lines[linenumber[1]].split()[1].replace('D','E')), float(self.lines[linenumber[2]].split()[1].replace('D','E')), float(self.lines[linenumber[3]].split()[1].replace('D','E'))\n return\n self.polx = self.poly = self.polz = self.iso_polar = 'NaN'\n\n def _Frequencies(self) -> None:\n self.freq = []\n linenumbers = Forward_search_all(self.filename, 'Frequencies --', 'frequencies', quiet=self.quiet)\n if isinstance(linenumbers, list):\n for i in linenumbers:\n for j in self.lines[i].split()[2:]:\n 
self.freq.append(float(j)* self.constants.inv_cm_to_au)\n if len(self.freq) == 0:\n self.freq = ['NaN']\n\n def _Excitation_energies(self) -> None:\n self.exc_energies = []\n linenumber = Forward_search_last(self.filename, 'Excitation energies and oscillator strengths:', 'excitation energies', quiet=True)\n if isinstance(linenumber, int):\n linenumbers = Forward_search_all(self.filename, 'Excited State', 'excitation energies', quiet=self.quiet)\n linenumbers = [i for i in linenumbers if i > linenumber]\n for i in linenumbers:\n self.exc_energies.append(float(self.lines[i].split()[4])* self.constants.ev_to_au)\n if len(self.exc_energies) == 0:\n self.exc_energies = ['NaN']\n\n def _Oscillator_strengths(self) -> None:\n self.osc_strengths = []\n linenumber = Forward_search_last(self.filename, 'Excitation energies and oscillator strengths:', 'oscillator strengths', quiet=True)\n if isinstance(linenumber, int):\n linenumbers = Forward_search_all(self.filename, 'Excited State', 'oscillator strengths', quiet=self.quiet)\n linenumbers = [i for i in linenumbers if i > linenumber]\n for i in linenumbers:\n for j in self.lines[i].split():\n if 'f=' in j:\n self.osc_strengths.append(float(j.replace('f=','')))\n if len(self.osc_strengths) == 0:\n self.osc_strengths = ['NaN']\n\n def _RotationalConsts(self) -> None:\n self.rots = []\n linenumbers = Forward_search_all(self.filename, 'Rotational constants (GHZ):', 'rotational constants', quiet=self.quiet)\n for i in self.lines[linenumbers[-1]].split()[3:]:\n self.rots.append(float(i))\n self.rots = np.array(self.rots)\n self.rots = self.rots[self.rots != 0.0]\n\n def _Mass(self) -> None:\n self.mass = 0.0\n linenumber = Forward_search_last(self.filename, 'Molecular mass', 'molecular mass', quiet=self.quiet)\n if isinstance(linenumber, int):\n self.mass = float(self.lines[linenumber].split()[2])\n\n def _SymmetryNumber(self):\n self.symnum = 0\n linenumber = Forward_search_last(self.filename, 'Rotational symmetry number', 'rotational symmetry number', quiet=self.quiet)\n if isinstance(linenumber, int):\n self.symnum = int(self.lines[linenumber].split()[-1].replace('.',''))\n\n def _Multiplicity(self) -> None:\n self.multi = 0\n linenumber = Forward_search_first(self.filename, 'Multiplicity', 'multiplicity', quiet=self.quiet)\n if isinstance(linenumber, int):\n self.multi = int(self.lines[linenumber].split()[-1])\n\n def _PartitionFunctions(self) -> None:\n if CheckForOnlyNans(np.array(self.freq)):\n if not(self.quiet):\n with open(\"collect_data.log\", \"a\") as logfile:\n logfile.write(f\"No frequencies found in {self.filename}, skipping partition function calculation\\n\")\n self.qTotal = 'NaN'\n return\n self._RotationalConsts()\n self._Mass()\n self._SymmetryNumber()\n self._Multiplicity()\n self.qT = self.constants.trans_const_fac * self.mass ** (1.5) * self.T ** (2.5)\n if len(self.rots) == 1:\n self.qR = self.constants.rot_lin_const * self.T / (self.symnum * self.rots[0])\n else:\n self.qR = self.constants.rot_poly_const * self.T ** (1.5) / ( self.symnum * np.prod(np.array(self.rots)) ** (0.5))\n realfreq = np.array([x for x in self.freq if x != 'NaN'])\n realfreq = realfreq[realfreq > 0.0]\n self.qV = np.prod(1 / (1 - np.exp( - self.constants.vib_const * realfreq / self.T)))\n self.qE = self.multi #Good approximation for most closed-shell molecules\n self.qTotal = self.qT*self.qR*self.qV*self.qE\n\n def _Enthalpy(self) -> None:\n if CheckForOnlyNans(np.array(self.freq)):\n if not(self.quiet):\n with open(\"collect_data.log\", \"a\") as logfile:\n 
logfile.write(f\"No frequencies found in {self.filename}, skipping partition function calculation\\n\")\n self.enthalpy = 'NaN'\n self._RotationalConsts()\n self.E_T = 3/2 * self.T * self.constants.gas_constant\n if len(self.rots) == 1:\n self.E_R = self.T * self.constants.gas_constant\n else:\n self.E_R = 3/2 * self.T * self.constants.gas_constant\n realfreq = np.array([x for x in self.freq if x != 'NaN'])\n realfreq = realfreq[realfreq > 0.0]\n self.E_V = self.constants.gas_constant * np.sum(self.constants.vib_const * realfreq * (1/2 + 1 / (np.exp(self.constants.vib_const * realfreq / self.T ) - 1)))\n self.E_e = 0 #Good approximation for most closed-shell molecules\n self.enthalpy = (self.E_T+self.E_R+self.E_V+self.constants.gas_constant * self.T ) / self.constants.au_to_kJmol + self.tot_energy\n\n def _Entropy(self) -> None:\n if CheckForOnlyNans(np.array(self.freq)):\n if not(self.quiet):\n with open(\"collect_data.log\", \"a\") as logfile:\n logfile.write(f\"No frequencies found in {self.filename}, skipping partition function calculation\\n\")\n self.entropy = 'NaN'\n return\n self._RotationalConsts()\n self._Mass()\n self._SymmetryNumber()\n self._Multiplicity()\n self.S_T = self.constants.gas_constant * np.log(self.constants.s_trans_const * self.mass ** 1.5 * self.T ** 2.5)\n if len(self.rots) == 1:\n self.S_R = self.constants.gas_constant * np.log(self.constants.rot_lin_const * self.T / (self.symnum * self.rots[0]))\n else:\n self.S_R = self.constants.gas_constant * (3/2 + np.log(self.constants.rot_poly_const * self.T ** (1.5) / ( self.symnum * np.prod(np.array(self.rots)) ** (0.5))))\n realfreq = np.array([x for x in self.freq if x != 'NaN'])\n realfreq = realfreq[realfreq > 0.0]\n self.S_V = self.constants.gas_constant * np.sum(self.constants.vib_const * realfreq / self.T / (np.exp(self.constants.vib_const * realfreq / self.T ) - 1) - np.log(1-np.exp(-self.constants.vib_const * realfreq / self.T )))\n self.S_E = self.constants.gas_constant * np.log(self.multi) #Good approximation for most closed-shell molecules\n self.entropy = self.S_T+self.S_R+self.S_V+self.S_E\n\n def _Gibbs(self) -> None:\n if CheckForOnlyNans(np.array(self.freq)):\n if not(self.quiet):\n with open(\"collect_data.log\", \"a\") as logfile:\n logfile.write(f\"No frequencies found in {self.filename}, skipping free energy energy calculation\\n\")\n self.gibbs = 'NaN'\n return\n self.gibbs = self.enthalpy - self.T*self.entropy / self.constants.au_to_kJmol\n\n def _Optimized_Geometry(self) -> None:\n start = Forward_search_last(self.filename, 'Standard orientation', 'geometry', quiet=self.quiet)\n # end = Forward_search_after_last(self.filename, 'Standard orientation', 'Rotational constants', 200, \"end of geometry\", quiet=self.quiet)\n if start != \"NaN\":# and end != \"NaN\":\n #Offset for going into actual coordinate list\n start += 5\n for i, line in enumerate(self.lines[start:]):\n if '---------------------------------------------------------------------' in line:\n end = start + i\n break\n #Which position in the line is the atom label / number at\n label_location = 1\n OptGeomFilename = self.filename[:-4] + \"_opt.xyz\"\n GenerateXYZ(self.lines, OptGeomFilename, start, end, label_location, transform = True)\n if not(self.quiet):\n with open(\"collect_data.log\", \"a\") as logfile:\n logfile.write(\"Final geometry has been saved to \" + OptGeomFilename + \"\\n\")\n\n\nclass OrcaExtract:\n def __init__(self, filename: str, *, Quiet: bool = False, Temperature: float = 298.15) -> None:\n self.filename = 
filename\n self.quiet = Quiet\n self.T = Temperature\n self.constants = Constants()\n\n self.ReadFile()\n\n self.end = len(self.lines)\n\n def ReadFile(self) -> None:\n with open(self.filename, \"r\") as file:\n self.lines = file.readlines()\n\n def _CPUS(self) -> None:\n linenumber = Backward_search_last(self.filename, 'Sum of individual times ...', self.end, 'CPU time', quiet=self.quiet)\n if isinstance(linenumber, int):\n self.wall_cpu_time = float(self.lines[linenumber].split()[-2])\n linenumber2 = Forward_search_last(self.filename, '%pal nprocs', 'CPU count', quiet=True)\n if isinstance(linenumber2, int):\n self.total_cpu_time = self.wall_cpu_time * int(self.lines[linenumber2].split()[-1])\n linenumber3 = Forward_search_last(self.filename, 'PAL', 'CPU count', quiet=self.quiet)\n if isinstance(linenumber3, int):\n self.total_cpu_time = self.wall_cpu_time * int(self.lines[linenumber3].split()[-1][3:])\n return\n self.total_cpu_time = 'NaN'\n self.wall_cpu_time = 'NaN'\n\n def _Energy(self) -> None:\n linenumber = Forward_search_last(self.filename, 'Electronic energy', 'Final energy', quiet=True)\n if isinstance(linenumber, int):\n self.tot_energy = float(self.lines[linenumber].split()[-2])\n return\n linenumber = Forward_search_last(self.filename, 'FINAL SINGLE POINT ENERGY', 'Final energy', quiet=self.quiet)\n if isinstance(linenumber, int):\n self.tot_energy = float(self.lines[linenumber].split()[-1])\n return\n self.tot_energy = 'NaN'\n\n def _ZPV(self) -> None:\n linenumber = Forward_search_last(self.filename, 'Electronic energy', 'ZPV energy', quiet=self.quiet)\n if isinstance(linenumber, int):\n self.zpv = float(self.lines[linenumber].split()[-2]) + float(self.lines[linenumber+1].split()[-4])\n return\n self.zpv = 'NaN'\n\n def _Enthalpy(self) -> None:\n if CheckForOnlyNans(np.array(self.freq)):\n if not(self.quiet):\n with open(\"collect_data.log\", \"a\") as logfile:\n logfile.write(f\"No frequencies found in {self.filename}, skipping partition function calculation\\n\")\n self.enthalpy = 'NaN'\n return\n self._RotationalConsts()\n self.E_T = 3/2 * self.T * self.constants.gas_constant\n if len(self.rots) == 1:\n self.E_R = self.T * self.constants.gas_constant\n else:\n self.E_R = 3/2 * self.T * self.constants.gas_constant\n realfreq = np.array([x for x in self.freq if x != 'NaN'])\n realfreq = realfreq[realfreq > 0.0]\n self.E_V = self.constants.gas_constant * np.sum(self.constants.vib_const * realfreq * (1/2 + 1 / (np.exp(self.constants.vib_const * realfreq / self.T ) - 1)))\n self.E_e = 0 #Good approximation for most closed-shell molecules\n self.enthalpy = (self.E_T+self.E_R+self.E_V+self.constants.gas_constant * self.T ) / self.constants.au_to_kJmol + self.tot_energy\n\n def _Gibbs(self) -> None:\n if CheckForOnlyNans(np.array(self.freq)):\n if not(self.quiet):\n with open(\"collect_data.log\", \"a\") as logfile:\n logfile.write(f\"No frequencies found in {self.filename}, skipping free energy energy calculation\\n\")\n self.gibbs = 'NaN'\n return\n self.gibbs = self.enthalpy - self.T*self.entropy / self.constants.au_to_kJmol\n\n def _Dipole_moments(self) -> None:\n linenumber = Forward_search_last(self.filename, 'Total Dipole Moment', 'dipole moment', quiet=self.quiet)\n if isinstance(linenumber, int):\n self.dipolex, self.dipoley, self.dipolez, self.total_dipole = float(self.lines[linenumber].split()[-3]), float(self.lines[linenumber].split()[-2]), float(self.lines[linenumber].split()[-1]), float(self.lines[linenumber+2].split()[-1])\n return\n self.dipolex, self.dipoley, 
self.dipolez, self.total_dipole = 'NaN'\n\n def _Polarizabilities(self) -> None:\n linenumber = Forward_search_after_last(self.filename, 'THE POLARIZABILITY TENSOR', \"'diagonalized tensor:'\", 10, 'polarizability', quiet=self.quiet)\n if isinstance(linenumber, int):\n self.polx, self.poly, self.polz, self.iso_polar = float(self.lines[linenumber+1].split()[0]), float(self.lines[linenumber+1].split()[1]), float(self.lines[linenumber+1].split()[2]), float(self.lines[linenumber+7].split()[-1])\n return\n self.polx = self.poly = self.polz = self.iso_polar = 'NaN'\n\n def _Excitation_energies(self) -> None:\n self.exc_energies = []\n linenumbers = Forward_search_all(self.filename, 'STATE ', 'excitation energies', quiet=self.quiet)\n if isinstance(linenumbers, list):\n for i in linenumbers:\n self.exc_energies.append(float(self.lines[i].split()[3]))\n if len(self.exc_energies) == 0:\n self.exc_energies = ['NaN']\n\n def _Oscillator_strengths(self) -> None:\n self.osc_strengths = []\n linenumber = Forward_search_last(self.filename, 'ABSORPTION SPECTRUM VIA TRANSITION ELECTRIC DIPOLE MOMENTS', 'oscillator strengths', quiet=self.quiet)\n if isinstance(linenumber, int):\n for i in range(len(self.exc_energies)):\n if len(self.lines[linenumber+5+i].split()) > 6:\n self.osc_strengths.append(float(self.lines[linenumber+5+i].split()[3]))\n else:\n self.osc_strengths.append('NaN')\n if len(self.osc_strengths) == 0:\n self.osc_strengths = ['NaN']\n\n def _Frequencies(self) -> None:\n self.freq = []\n linenumber = Forward_search_last(self.filename, \"VIBRATIONAL FREQUENCIES\", 'frequencies', quiet=self.quiet)\n if isinstance(linenumber, int):\n for j in self.lines[linenumber+7: self.end]:\n if \": \" and \" 0.00 \" in j:\n pass\n elif \": \" in j and not \" 0.00 \" in j:\n self.freq.append(float(j.split()[1])* self.constants.inv_cm_to_au)\n else:\n break\n if len(self.freq) == 0:\n self.freq = ['NaN']\n\n def _RotationalConsts(self) -> None:\n self.rots = []\n linenumbers = Forward_search_first(self.filename, 'Rotational constants in MHz', 'rotational constants', quiet=self.quiet)\n for i in self.lines[linenumbers].split()[-3:]:\n self.rots.append(float(i))\n self.rots = np.array(self.rots) * 1E-3\n self.rots = self.rots[self.rots != 0.0]\n\n def _Mass(self) -> None:\n self.mass = 0.0\n linenumber = Forward_search_last(self.filename, 'Total Mass', 'molecular mass', quiet=self.quiet)\n if isinstance(linenumber, int):\n self.mass = float(self.lines[linenumber].split()[-2])\n\n def _SymmetryNumber(self) -> None:\n self.symnum = 0\n linenumber = Forward_search_last(self.filename, 'Symmetry Number', 'rotational symmetry number', quiet=self.quiet)\n if isinstance(linenumber, int):\n self.symnum = int(self.lines[linenumber].split()[-1])\n\n def _Multiplicity(self) -> None:\n self.multi = 0\n linenumber = Forward_search_first(self.filename, 'Multiplicity', 'multiplicity', quiet=self.quiet)\n if isinstance(linenumber, int):\n self.multi = int(self.lines[linenumber].split()[-1])\n\n def _PartitionFunctions(self) -> None:\n if CheckForOnlyNans(np.array(self.freq)):\n if not(self.quiet):\n with open(\"collect_data.log\", \"a\") as logfile:\n logfile.write(f\"No frequencies found in {self.filename}, skipping partition function calculation\\n\")\n self.qTotal = 'NaN'\n return\n self._RotationalConsts()\n self._Mass()\n self._Multiplicity()\n self._SymmetryNumber()\n self.qT = self.constants.trans_const_fac * self.mass ** (1.5) * self.T ** (2.5)\n if len(self.rots) == 1:\n self.qR = self.constants.rot_lin_const * self.T / 
(self.symnum * self.rots[0])\n else:\n self.qR = self.constants.rot_poly_const * self.T ** (1.5) / ( self.symnum * np.prod(np.array(self.rots)) ** (0.5))\n realfreq = np.array([x for x in self.freq if x != 'NaN'])\n realfreq = realfreq[realfreq > 0.0]\n self.qV = np.prod(1 / (1 - np.exp( - self.constants.rot_poly_const * realfreq / self.T )))\n self.qE = self.multi #Good approximation for most closed-shell molecules\n self.qTotal = self.qT*self.qR*self.qV*self.qE\n\n def _Entropy(self) -> None:\n if CheckForOnlyNans(np.array(self.freq)):\n if not(self.quiet):\n with open(\"collect_data.log\", \"a\") as logfile:\n logfile.write(f\"No frequencies found in {self.filename}, skipping partition function calculation\\n\")\n self.entropy = 'NaN'\n return\n self._RotationalConsts()\n self._Mass()\n self._Multiplicity()\n self._SymmetryNumber()\n self.S_T = self.constants.gas_constant * np.log(self.constants.s_trans_const * self.mass ** 1.5 * self.T ** 2.5)\n if len(self.rots) == 1:\n self.S_R = self.constants.gas_constant * np.log(self.constants.rot_lin_const * self.T / (self.symnum * self.rots[0]))\n else:\n self.S_R = self.constants.gas_constant * (3/2 + np.log(self.constants.rot_poly_const * self.T ** (1.5) / ( self.symnum * np.prod(np.array(self.rots)) ** (0.5))))\n realfreq = np.array([x for x in self.freq if x != 'NaN'])\n realfreq = realfreq[realfreq > 0.0]\n self.S_V = self.constants.gas_constant * np.sum(self.constants.vib_const * realfreq / self.T / (np.exp(self.constants.vib_const * realfreq / self.T ) - 1) - np.log(1-np.exp(-self.constants.vib_const * realfreq / self.T )))\n self.S_E = self.constants.gas_constant * np.log(self.multi) #Good approximation for most closed-shell molecules\n self.entropy = self.S_T+self.S_R+self.S_V+self.S_E\n\n def _Optimized_Geometry(self) -> None:\n start = Forward_search_last(self.filename, 'CARTESIAN COORDINATES (ANGSTROEM)', 'geometry', quiet=self.quiet)\n if start != \"NaN\":\n #Offset for going into actual coordinate list\n start += 2\n for i, line in enumerate(self.lines[start:]):\n if'----------------------------' in line:\n end = start + i - 1\n break\n #Which position in the line is the atom label / number at\n label_location = 0\n OptGeomFilename = self.filename[:-4] + \"_opt.xyz\"\n GenerateXYZ(self.lines, OptGeomFilename, start, end, label_location)\n if not(self.quiet):\n with open(\"collect_data.log\", \"a\") as logfile:\n logfile.write(\"Final geometry has been saved to \" + OptGeomFilename + \"\\n\")\n\n\nclass DaltonExtract:\n def __init__(self, filename: str, NeededArguments: dict = None, Quiet: bool = False, Temperature: float = 298.15) -> None:\n self.filename = filename\n self.NeededArguments = NeededArguments\n self.quiet = Quiet\n self.T = Temperature\n self.constants = Constants()\n\n self.ReadFile()\n\n self.end = len(self.lines)\n\n def ReadFile(self) -> None:\n with open(self.filename, \"r\") as file:\n self.lines = file.readlines()\n\n def _Complex_propagator(self) -> None:\n linenumbers = Forward_search_all(self.filename, 'Averaged value', 'polarizability with damping', quiet=self.quiet)\n if isinstance(linenumbers, list):\n self.complex_propagator = []\n for i in linenumbers:\n self.complex_propagator.append([float(self.lines[i].split()[-3]), float(self.lines[i].split()[-2]), float(self.lines[i].split()[-1])])\n return\n self.complex_propagator = 'NaN'\n\n def _CPUS(self) -> None:\n linenumber = Backward_search_last(self.filename, 'Total CPU time used in DALTON:', self.end, 'CPU time', quiet=self.quiet)\n if 
isinstance(linenumber, int):\n self.total_cpu_time = 0.\n self.wall_cpu_time = 0.\n total_time = self.lines[linenumber].split()[6:]\n pr_time = self.lines[linenumber+1].split()[6:]\n for i, time_value in enumerate(total_time[-2::-2]):\n if i*2 == 0:\n self.total_cpu_time += float(time_value) / 60\n elif i*2 == 2:\n self.total_cpu_time += float(time_value)\n elif i*2 == 4:\n self.total_cpu_time += float(time_value) * 60\n elif i*2 == 6:\n self.total_cpu_time += float(time_value) * 60 * 24\n else:\n with open(\"collect_data.log\", \"a\") as logfile:\n logfile.write('''It was not expected that DALTON would print anything larger than days in the total CPU time\nThis will not be accounted for when printing the CPU time. The result will therefore not be correct\nPlease contact a maintainer of the script ot have this updated\\n''')\n for i, time_value in enumerate(pr_time[-2::-2]):\n if i*2 == 0:\n self.wall_cpu_time += float(time_value) / 60\n elif i*2 == 2:\n self.wall_cpu_time += float(time_value)\n elif i*2 == 4:\n self.wall_cpu_time += float(time_value) * 60\n elif i*2 == 6:\n self.wall_cpu_time += float(time_value) * 60 * 24\n else:\n with open(\"collect_data.log\", \"a\") as logfile:\n logfile.write('''It was not expected that DALTON would print anything larger than days in the total CPU time\nThis will not be accounted for when printing the CPU time. The result will therefore not be correct\nPlease contact a maintainer of the script ot have this updated\\n''')\n return\n self.wall_cpu_time = 'NaN'\n self.total_cpu_time = 'NaN'\n\n def _Energy(self) -> None:\n linenumber = Forward_search_last(self.filename, 'Total .* energy:', 'final energy', quiet=True)\n if isinstance(linenumber, int):\n self.tot_energy = float(self.lines[linenumber].split()[-1])\n return\n linenumber = Forward_search_last(self.filename, '@ Final .* energy:', 'final energy', quiet=True)\n if isinstance(linenumber, int):\n self.tot_energy = float(self.lines[linenumber].split()[-1])\n return\n linenumber = Forward_search_last(self.filename, '@ Energy at final geometry is', 'final energy', quiet=self.quiet)\n if isinstance(linenumber, int):\n self.tot_energy = float(self.lines[linenumber].split()[-2])\n return\n self.tot_energy = 'NaN'\n\n def _ZPV(self) -> None:\n linenumber = Forward_search_last(self.filename, 'Total Molecular Energy', 'zero-point energy', quiet=self.quiet)\n if isinstance(linenumber, int):\n self.zpv = float(self.lines[linenumber+5].split()[1])\n return\n self.zpv = 'NaN'\n\n def _Dipole_moments(self) -> None:\n linenumber = Forward_search_last(self.filename, 'Dipole moment components', 'dipole moment', quiet=self.quiet)\n if isinstance(linenumber, int):\n self.dipolex, self.dipoley, self.dipolez, self.total_dipole = float(self.lines[linenumber+5].split()[1]), float(self.lines[linenumber+6].split()[1]), float(self.lines[linenumber+7].split()[1]), float(self.lines[linenumber-3].split()[0])\n return\n self.dipolex = self.dipoley = self.dipolez = self.total_dipole = 'NaN'\n\n def _Polarizabilities(self) -> None:\n linenumber = Forward_search_last(self.filename, 'SECOND ORDER PROPERTIES', 'polarizabilities', quiet=self.quiet)\n if isinstance(linenumber, int):\n self.polx, self.poly, self.polz = float(self.lines[linenumber+2].split()[-1]), float(self.lines[linenumber+5].split()[-1]), float(self.lines[linenumber+7].split()[-1])\n self.iso_polar = (self.polx + self.poly + self.polz)/3.\n return\n self.polx = self.poly = self.polz = self.iso_polar = 'NaN'\n\n def _Excitation_energies(self) -> None:\n 
self.exc_energies = []\n self.exc_type = None\n linenumber = Forward_search_last(self.filename, '@ Oscillator strengths are dimensionless.', 'excitation energies', quiet=True)\n if isinstance(linenumber, int):\n self.exc_type = '.EXCITA'\n for i in self.lines[linenumber+5: self.end]:\n if \"@ \"in i:\n self.exc_energies.append(float(i.split()[3])* self.constants.ev_to_au)\n else:\n break\n linenumbers = Forward_search_all(self.filename, '@ Excitation energy', 'excitation energies', quiet=self.quiet)\n if isinstance(linenumbers, list):\n self.exc_type = 'MCTDHF'\n for i in linenumbers:\n self.exc_energies.append(float(self.lines[i].split()[-2]))\n if len(self.exc_energies) == 0:\n self.exc_energies = ['NaN']\n\n def _Oscillator_strengths(self) -> None:\n self.osc_strengths = []\n if self.exc_type == '.EXCITA':\n linenumber = Forward_search_last(self.filename, '@ Oscillator strengths are dimensionless.', 'oscillator strengths', quiet=self.quiet)\n if isinstance(linenumber, int):\n for i in self.lines[linenumber+5: self.end]:\n if \"@ \" in i:\n self.osc_strengths.append(float(i.split()[-1]))\n else:\n break\n elif self.exc_type == 'MCTDHF':\n linenumbers = Forward_search_all(self.filename, '@ Excitation energy', 'oscillator strengths', quiet=self.quiet)\n if isinstance(linenumbers, list):\n for i in linenumbers:\n osc = 0\n for j in self.lines[i:i+15]:\n if '@ Oscillator strength' in j:\n osc += float(j.split()[5])**2\n self.osc_strengths.append(osc**0.5)\n if len(self.osc_strengths) == 0:\n self.osc_strengths = ['NaN']\n\n def _Frequencies(self) -> None:\n self.freq = []\n linenumber = Forward_search_last(self.filename, 'Vibrational Frequencies and IR Intensities', 'frequencies', quiet=self.quiet)\n if isinstance(linenumber, int):\n for i in self.lines[linenumber+7: self.end]:\n if len(i.split()) < 1:\n break\n self.freq.append(float(i.split()[3]))\n if len(self.freq) == 0:\n self.freq = ['NaN']\n\n def _RotationalConsts(self) -> None:\n self.rots = []\n linenumbers = Forward_search_last(self.filename, 'Rotational constants', 'rotational constants', quiet=self.quiet)\n for i in self.lines[linenumbers+7].split()[:-1]:\n self.rots.append(float(i))\n self.rots = np.array(self.rots) * 1E-3\n self.rots = self.rots[self.rots != 0.0]\n\n def _Mass(self) -> None:\n self.mass = 0.0\n linenumber = Forward_search_last(self.filename, 'Total mass:', 'molecular mass')\n if isinstance(linenumber, int):\n self.mass = float(self.lines[linenumber].split()[-2])\n\n #Symmetry checking not implemented by default in Dalton\n #def _SymmetryNumber(self):\n #\n # self.symnum = 0\n # linenumber = Forward_search_last(self.file, 'Symmetry Number', 'rotational symmetry number')\n # if isinstance(linenumber, int):\n # self.symnum = int(self.lines[linenumber].split()[-1])\n\n def _Multiplicity(self) -> None:\n self.multi = 0\n linenumber = Forward_search_last(self.filename, 'Spatial symmetry', 'multiplicity', quiet=self.quiet)\n if isinstance(linenumber, int):\n self.multi = int(self.lines[linenumber].split()[2])\n\n def _PartitionFunctions(self) -> None:\n if CheckForOnlyNans(np.array(self.freq)):\n if not(self.quiet):\n with open(\"collect_data.log\", \"a\") as logfile:\n logfile.write(f\"No frequencies found in {self.filename}, skipping partition function calculation\\n\")\n self.qTotal = 'NaN'\n return\n self._RotationalConsts()\n self._Mass()\n self._Multiplicity()\n self.qT = self.constants.trans_const_fac * self.mass ** (1.5) * self.T ** (2.5)\n #Rotational does not give the same as Dalton, due to a correction 
from the assymmetric top being applied: 10.1063/1.1748490\n if len(self.rots) == 1:\n self.qR = self.constants.rot_lin_const * self.T / (self.rots[0])\n else:\n self.qR = self.constants.rot_poly_const * self.T ** (1.5) / (np.prod(self.rots) ** (0.5))\n realfreq = np.array([x for x in self.freq if x != 'NaN'])\n realfreq = realfreq[realfreq > 0.0]\n self.qV = np.prod(1 / (1 - np.exp( - self.constants.vib_const * realfreq / self.T )))\n self.qE = self.multi #Good approximation for most closed-shell molecules\n self.qTotal = self.qT*self.qR*self.qV*self.qE\n\n def _Entropy(self) -> None:\n if CheckForOnlyNans(np.array(self.freq)):\n if not(self.quiet):\n with open(\"collect_data.log\", \"a\") as logfile:\n logfile.write(f\"No frequencies found in {self.filename}, skipping partition function calculation\\n\")\n self.entropy = 'NaN'\n return\n self._RotationalConsts()\n self._Mass()\n self._Multiplicity()\n self.S_T = self.constants.gas_constant * np.log(self.constants.s_trans_const * self.mass ** 1.5 * self.T ** 2.5)\n if len(self.rots) == 1:\n self.S_R = self.constants.gas_constant * np.log(self.constants.rot_lin_const * self.T / (self.rots[0]))\n else:\n self.S_R = self.constants.gas_constant * (3/2 + np.log(self.constants.rot_poly_const * self.T ** (1.5) / ( np.prod(self.rots) ** (0.5))))\n realfreq = np.array([x for x in self.freq if x != 'NaN'])\n realfreq = realfreq[realfreq > 0.0]\n self.S_V = self.constants.gas_constant * np.sum(self.constants.vib_const * realfreq / self.T / (np.exp(self.constants.vib_const * realfreq / self.T ) - 1) - np.log(1-np.exp(-self.constants.vib_const * realfreq / self.T )))\n self.S_E = self.constants.gas_constant * np.log(self.multi) #Good approximation for most closed-shell molecules\n self.entropy = self.S_T+self.S_R+self.S_V+self.S_E\n\n def _Enthalpy(self) -> None:\n if CheckForOnlyNans(np.array(self.freq)):\n if not(self.quiet):\n with open(\"collect_data.log\", \"a\") as logfile:\n logfile.write(f\"No frequencies found in {self.filename}, skipping partition function calculation\\n\")\n self.enthalpy = 'NaN'\n return\n self._RotationalConsts()\n self.E_T = 3/2 * self.T * self.constants.gas_constant\n if len(self.rots) == 1:\n self.E_R = self.T * self.constants.gas_constant\n else:\n self.E_R = 3/2 * self.T * self.constants.gas_constant\n realfreq = np.array([x for x in self.freq if x != 'NaN'])\n realfreq = realfreq[realfreq > 0.0]\n self.E_V = self.constants.gas_constant * np.sum(self.constants.vib_const * realfreq * (1/2 + 1 / (np.exp(self.constants.vib_const * realfreq / self.T ) - 1)))\n self.E_e = 0 #Good approximation for most closed-shell molecules\n self.enthalpy = (self.E_T+self.E_R+self.E_V+self.constants.gas_constant * self.T ) / self.constants.au_to_kJmol + self.tot_energy\n\n def _Gibbs(self) -> None:\n if CheckForOnlyNans(np.array(self.freq)):\n if not(self.quiet):\n with open(\"collect_data.log\", \"a\") as logfile:\n logfile.write(f\"No frequencies found in {self.filename}, skipping free energy energy calculation\\n\")\n self.gibbs = 'NaN'\n return\n self.gibbs = self.enthalpy - self.T*self.entropy / self.constants.au_to_kJmol\n\n def _Optimized_Geometry(self) -> None:\n start = Forward_search_last(self.filename, 'Final geometry (xyz format; angstrom)', 'final geometry', quiet=self.quiet)\n if start != \"NaN\":\n #Offset for going into actual coordinate list\n start += 5\n end = start + int(self.lines[start-2])\n #Which position in the line is the atom label / number at\n label_location = 0\n OptGeomFilename = self.filename[:-4] + 
\"_opt.xyz\"\n GenerateXYZ(self.lines, OptGeomFilename, start, end, label_location)\n if not(self.quiet):\n with open(\"collect_data.log\", \"a\") as logfile:\n logfile.write(\"Final geometry has been saved to \" + OptGeomFilename + \"\\n\")\n else:\n start = Forward_search_last(self.filename, 'Cartesian Coordinates', 'initial geometry', quiet=self.quiet)\n if start != \"NaN\":\n start += 4\n end = start + int(int(self.lines[start-1].split(' ')[-1])/3)\n lines_to_add = []\n lines_to_add.append(str(end-(start))+ '\\n')\n lines_to_add.append('\\n')\n for line in self.lines[start:end]:\n words = line.split()\n lines_to_add.append(''.join([words[0].ljust(2),' ',f\"{float(words[-7]) * self.constants.bohr_to_ao:.7f}\".rjust(10),' ', f\"{float(words[-4]) * self.constants.bohr_to_ao:.7f}\".rjust(15), ' ',f\"{float(words[-1]) * self.constants.bohr_to_ao:.7f}\".rjust(15) ,'\\n']))\n OptGeomFilename = self.filename[:-4] + \"_opt.xyz\"\n WriteToFile(OptGeomFilename,lines_to_add)\n if not(self.quiet):\n with open(\"collect_data.log\", \"a\") as logfile:\n logfile.write(\"Initial geometry has been saved to \" + OptGeomFilename + \"\\n\")\n\n\nclass LSDaltonExtract:\n def __init__(self, filename: str, NeededArguments: dict = None, Quiet: bool = False, Temperature: float = 298.15) -> None:\n self.filename = filename\n self.NeededArguments = NeededArguments\n self.quiet = Quiet\n self.T = Temperature\n self.constants = Constants()\n\n self.ReadFile()\n\n self.end = len(self.lines)\n\n def ReadFile(self) -> None:\n with open(self.filename, \"r\") as file:\n self.lines = file.readlines()\n\n def _CPUS(self) -> None:\n linenumber = Backward_search_last(self.filename, '>>> CPU Time used in LSDALTON is', self.end, 'CPU time', quiet=self.quiet)\n if isinstance(linenumber, int):\n self.total_cpu_time = 0.\n self.wall_cpu_time = 0.\n total_time = self.lines[linenumber].split()[7:]\n pr_time = self.lines[linenumber+1].split()[7:]\n for i, time_value in enumerate(total_time[-2::-2]):\n if i*2 == 0:\n self.total_cpu_time += float(time_value) / 60\n elif i*2 == 2:\n self.total_cpu_time += float(time_value)\n elif i*2 == 4:\n self.total_cpu_time += float(time_value) * 60\n elif i*2 == 6:\n self.total_cpu_time += float(time_value) * 60 * 24\n else:\n with open(\"collect_data.log\", \"a\") as logfile:\n logfile.write('''It was not expected that LSDALTON would print anything larger than days in the total CPU time\nThis will not be accounted for when printing the CPU time. The result will therefore not be correct\nPlease contact a maintainer of the script ot have this updated\\n''')\n for i, time_value in enumerate(pr_time[-2::-2]):\n if i*2 == 0:\n self.wall_cpu_time += float(time_value) / 60\n elif i*2 == 2:\n self.wall_cpu_time += float(time_value)\n elif i*2 == 4:\n self.wall_cpu_time += float(time_value) * 60\n elif i*2 == 6:\n self.wall_cpu_time += float(time_value) * 60 * 24\n else:\n with open(\"collect_data.log\", \"a\") as logfile:\n logfile.write('''It was not expected that LSDALTON would print anything larger than days in the total CPU time\nThis will not be accounted for when printing the CPU time. 
The result will therefore not be correct\nPlease contact a maintainer of the script ot have this updated\\n''')\n return\n self.total_cpu_time = 'NaN'\n self.wall_cpu_time = 'NaN'\n\n def _Energy(self) -> None:\n linenumber = Forward_search_last(self.filename, 'Total .* energy:', 'final energy', quiet=True)\n if isinstance(linenumber, int):\n self.tot_energy = float(self.lines[linenumber].split()[-1])\n return\n linenumber = Forward_search_last(self.filename, '@ Final .* energy:', 'final energy', quiet=True)\n if isinstance(linenumber, int):\n self.tot_energy = float(self.lines[linenumber].split()[-1])\n return\n linenumber = Forward_search_last(self.filename, '@ Energy at final geometry is', 'final energy', quiet=self.quiet)\n if isinstance(linenumber, int):\n self.tot_energy = float(self.lines[linenumber].split()[-2])\n return\n self.tot_energy = 'NaN'\n\n def _Energy(self) -> None:\n linenumber = Forward_search_last(self.filename, 'ENERGY SUMMARY', 'final energy', quiet=True)\n if isinstance(linenumber, int):\n for i in self.lines[linenumber+3:self.end]:\n if 'E: ' in i:\n self.tot_energy = float(i.split()[-1])\n else:\n return\n linenumber = Forward_search_last(self.filename, 'Final .* energy:', 'final energy', quiet=self.quiet)\n if isinstance(linenumber, int):\n self.tot_energy = float(self.lines[linenumber].split()[-1])\n return\n self.tot_energy = 'NaN'\n\n def _Dipole_moments(self) -> None:\n linenumber = Forward_search_last(self.filename, 'Permanent dipole moment', 'dipole moment', quiet=self.quiet)\n if isinstance(linenumber, int):\n self.dipolex, self.dipoley, self.dipolez, self.total_dipole = float(self.lines[linenumber+9].split()[1]), float(self.lines[linenumber+10].split()[1]), float(self.lines[linenumber+11].split()[1]), float(self.lines[linenumber+3].split()[0])\n return\n self.dipolex = self.dipoley = self.dipolez = self.total_dipole = 'NaN'\n\n def _Polarizabilities(self) -> None:\n linenumber = Forward_search_last(self.filename, '* POLARIZABILITY TENSOR RESULTS (in a.u.) *', 'polarizability', quiet=self.quiet)\n if isinstance(linenumber, int):\n self.polx, self.poly, self.polz, self.iso_polar = float(self.lines[linenumber+10].split()[-3]), float(self.lines[linenumber+11].split()[-2]), float(self.lines[linenumber+12].split()[-1]), float(self.lines[linenumber+14].split()[-1])\n return\n self.polx = self.poly = self.polz = self.iso_polar = 'NaN'\n\n def _Excitation_energies(self) -> None:\n self.exc_energies = []\n linenumber = Forward_search_last(self.filename, '* ONE-PHOTON ABSORPTION RESULTS (in a.u.) *', 'excitation energies', quiet=self.quiet)\n if isinstance(linenumber, int):\n for i in range(linenumber+8,self.end):\n if len(self.lines[i].split()) < 1:\n break\n self.exc_energies.append(float(self.lines[i].split()[0]))\n if len(self.exc_energies) == 0:\n self.exc_energies = ['NaN']\n\n def _Oscillator_strengths(self) -> None:\n self.osc_strengths = []\n linenumber = Forward_search_last(self.filename, '* ONE-PHOTON ABSORPTION RESULTS (in a.u.) *', 'oscillator strengths', quiet=self.quiet)\n if isinstance(linenumber, int):\n for i in range(len(self.exc_energies)):\n self.osc_strengths.append(float(self.lines[linenumber+8+i].split()[-1]))\n if len(self.osc_strengths) == 0:\n self.osc_strengths = ['NaN']\n" ]
[ [ "numpy.log", "numpy.linalg.eigh", "numpy.prod", "numpy.array", "numpy.exp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AdmitHub/cloud
[ "e9e116d462ea5603c3ccac22b22be33d9452ed1a" ]
[ "src/python/tensorflow_cloud/tuner/optimizer_client.py" ]
[ "# Lint as: python3\n# Copyright 2020 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"A thin client for the Cloud AI Platform Optimizer Service.\"\"\"\n\nimport datetime\nimport http\nimport json\nimport time\nfrom typing import Any, Dict, List, Mapping, Optional, Text, Union\n\nfrom googleapiclient import discovery\nfrom googleapiclient import errors\nimport tensorflow as tf\n\nfrom tensorflow_cloud.tuner import constants\nfrom tensorflow_cloud.utils import google_api_client\n\n\nclass SuggestionInactiveError(Exception):\n \"\"\"Indicates that GetSuggestion was called on an inactive study.\"\"\"\n\n\nclass _OptimizerClient(object):\n \"\"\"A wrapper class that allows for easy interaction with a Study.\"\"\"\n\n def __init__(self,\n service_client: discovery.Resource,\n project_id: Text,\n region: Text,\n study_id: Text = None):\n \"\"\"Create an OptimizerClient object.\n\n Use this constructor when you know the study_id, and when the Study\n already exists. Otherwise, you'll probably want to use\n create_or_load_study() instead of constructing the\n OptimizerClient class directly.\n\n Args:\n service_client: An API client of CAIP Optimizer service.\n project_id: A GCP project id.\n region: A GCP region. e.g. 'us-central1'.\n study_id: An identifier of the study. The full study name will be\n `projects/{project_id}/locations/{region}/studies/{study_id}`.\n The full trial name will be `{study name}/trials/{trial_id}`.\n \"\"\"\n self.service_client = service_client\n self.project_id = project_id\n self.region = region\n if not study_id:\n raise ValueError(\n \"Use create_or_load_study() instead of constructing the\"\n \"OptimizerClient class directly\"\n )\n self.study_id = study_id\n\n def get_suggestions(\n self,\n client_id: Text,\n suggestion_count: int = constants.SUGGESTION_COUNT_PER_REQUEST\n ) -> Dict[Text, Any]:\n \"\"\"Gets a list of suggested Trials.\n\n Args:\n client_id: An ID that identifies the `Tuner` requesting a `Trial`.\n `Tuners` that should run the same trial (for instance, when\n running a multi-worker model) should have the same ID. If\n multiple suggestTrialsRequests have the same tuner_id, the\n service will return the identical suggested trial if the trial\n is PENDING, and provide a new trial if the last suggest trial\n was completed.\n suggestion_count: The number of suggestions to request.\n\n Returns:\n A list of Trials, This may be an empty list in case that a finite\n search space has been exhausted, if max_num_trials = 1000 has been\n reached, or if there are no longer any trials that match a supplied\n Context.\n\n Raises:\n SuggestionInactiveError: Indicates that a suggestion was requested\n from an inactive study. Note that this is NOT raised when a\n finite Study runs out of suggestions. 
In such a case, an empty\n list is returned.\n \"\"\"\n # Requests a trial.\n try:\n resp = (\n self.service_client.projects()\n .locations()\n .studies()\n .trials()\n .suggest(\n parent=self._make_study_name(),\n body={\n \"client_id\": client_id,\n \"suggestion_count\": suggestion_count,\n },\n )\n .execute()\n )\n except errors.HttpError as e:\n if e.resp.status == 429:\n # Status 429 'RESOURCE_EXAUSTED' is raised when trials more than\n # the maximum limit (1000) of the Optimizer service for a study\n # are requested, or the number of finite search space.\n # For distributed tuning, a tuner worker may request the 1001th\n # trial, while the other tuner worker has not completed training\n # the 1000th trial, and triggers this error.\n tf.get_logger().info(\"Reached max number of trials.\")\n return {}\n else:\n tf.get_logger().info(\"SuggestTrial failed.\")\n raise e\n\n # Polls the suggestion of long-running operations.\n tf.get_logger().info(\"CreateTrial: polls the suggestions.\")\n operation = self._obtain_long_running_operation(resp)\n\n suggestions = operation[\"response\"]\n\n if \"trials\" not in suggestions:\n if operation[\"response\"][\"studyState\"] == \"INACTIVE\":\n raise SuggestionInactiveError(\n \"The study is stopped due to an internal error.\"\n )\n return suggestions\n\n def report_intermediate_objective_value(\n self,\n step: int,\n elapsed_secs: float,\n metric_list: List[Mapping[Text, Union[int, float]]],\n trial_id: Text,\n ) -> None:\n \"\"\"Calls AddMeasurementToTrial with the provided objective_value.\n\n Args:\n step: The number of steps the model has trained for.\n elapsed_secs: The number of seconds since Trial execution began.\n metric_list: A list of dictionary from metric names (strings) to\n values (doubles) for additional metrics to record.\n trial_id: trial_id.\n \"\"\"\n measurement = {\n \"stepCount\": step,\n \"elapsedTime\": {\"seconds\": int(elapsed_secs)},\n \"metrics\": metric_list,\n }\n try:\n self.service_client.projects().locations().studies().trials(\n ).addMeasurement(\n name=self._make_trial_name(trial_id),\n body={\"measurement\": measurement}).execute()\n except errors.HttpError as e:\n tf.get_logger().info(\"AddMeasurement failed.\")\n raise e\n\n def should_trial_stop(self, trial_id: Text) -> bool:\n \"\"\"Returns whether trial should stop early.\n\n Args:\n trial_id: trial_id.\n\n Returns:\n Whether it is recommended to stop the trial early.\n \"\"\"\n trial_name = self._make_trial_name(trial_id)\n try:\n resp = (\n self.service_client.projects()\n .locations()\n .studies()\n .trials()\n .checkEarlyStoppingState(name=trial_name)\n .execute()\n )\n except errors.HttpError as e:\n tf.get_logger().info(\"CheckEarlyStoppingState failed.\")\n raise e\n # Polls the stop decision of long-running operations.\n operation = self._obtain_long_running_operation(resp)\n\n tf.get_logger().info(\"CheckEarlyStoppingStateResponse\")\n if operation[\"response\"].get(\"shouldStop\"):\n # Stops a trial.\n try:\n tf.get_logger().info(\"Stop the Trial.\")\n self.service_client.projects().locations().studies().trials(\n ).stop(name=trial_name).execute()\n except errors.HttpError as e:\n tf.get_logger().info(\"StopTrial failed.\")\n raise e\n return True\n return False\n\n def complete_trial(self,\n trial_id: Text,\n trial_infeasible: bool,\n infeasibility_reason: Text = None):\n \"\"\"Marks the trial as COMPLETED and sets the final measurement.\n\n Args:\n trial_id: trial_id.\n trial_infeasible: If True, the parameter setting is not feasible.\n 
infeasibility_reason: The reason the Trial was infeasible. Should\n only be non-empty if trial_infeasible==True.\n\n Returns:\n The Completed Optimizer trials.\n \"\"\"\n try:\n optimizer_trial = (\n self.service_client.projects()\n .locations()\n .studies()\n .trials()\n .complete(\n name=self._make_trial_name(trial_id),\n body={\n \"trial_infeasible\": trial_infeasible,\n \"infeasible_reason\": infeasibility_reason,\n },\n )\n .execute()\n )\n except errors.HttpError as e:\n tf.get_logger().info(\"CompleteTrial failed.\")\n raise e\n return optimizer_trial\n\n def get_trial(self, trial_id: Text) -> Dict[Text, Text]:\n \"\"\"Return the Optimizer trial for the given trial_id.\"\"\"\n try:\n trial = (\n self.service_client.projects()\n .locations()\n .studies()\n .trials()\n .get(name=self._make_trial_name(trial_id))\n .execute()\n )\n except errors.HttpError:\n tf.get_logger().info(\"GetTrial failed.\")\n raise\n return trial\n\n def list_trials(self) -> List[Text]:\n \"\"\"List trials.\"\"\"\n study_name = self._make_study_name()\n try:\n resp = (\n self.service_client.projects()\n .locations()\n .studies()\n .trials()\n .list(parent=study_name)\n .execute()\n )\n except errors.HttpError as e:\n tf.get_logger().info(\"ListTrials failed.\")\n raise e\n return resp.get(\"trials\", [])\n\n def list_studies(self) -> List[Text]:\n \"\"\"List all studies under the current project and region.\n\n Returns:\n The list of studies.\n \"\"\"\n parent_name = self._make_parent_name()\n try:\n resp = self.service_client.projects().locations().studies().list(\n parent=parent_name).execute()\n except errors.HttpError:\n tf.get_logger().info(\"ListStudies failed.\")\n raise\n return resp.get(\"studies\", [])\n\n def delete_study(self, study_name: Text = None) -> None:\n \"\"\"Deletes the study.\n\n Args:\n study_name: Name of the study.\n\n Raises:\n ValueError: Indicates that the study_name does not exist.\n HttpError: Indicates a HTTP error from calling the discovery API.\n \"\"\"\n if study_name is None:\n study_name = self._make_study_name()\n try:\n self.service_client.projects().locations().studies().delete(\n name=study_name).execute()\n except errors.HttpError as e:\n if e.resp.status == http.HTTPStatus.NOT_FOUND.value:\n raise ValueError(\n \"DeleteStudy failed. 
Study not found: {}.\"\n .format(study_name))\n tf.get_logger().info(\"DeleteStudy failed.\")\n raise\n tf.get_logger().info(\"Study deleted: {}.\".format(study_name))\n\n def _obtain_long_running_operation(self, resp):\n \"\"\"Obtain the long-running operation.\"\"\"\n op_id = resp[\"name\"].split(\"/\")[-1]\n operation_name = \"projects/{}/locations/{}/operations/{}\".format(\n self.project_id, self.region, op_id\n )\n try:\n get_op = (\n self.service_client.projects()\n .locations()\n .operations()\n .get(name=operation_name)\n )\n operation = get_op.execute()\n except errors.HttpError as e:\n tf.get_logger().info(\"GetLongRunningOperations failed.\")\n raise e\n\n polling_secs = 1\n num_attempts = 0\n while not operation.get(\"done\"):\n sleep_time = self._polling_delay(num_attempts, polling_secs)\n num_attempts += 1\n tf.get_logger().info(\n \"Waiting for operation; attempt {}; \"\n \"sleeping for {} seconds\".format(\n num_attempts, sleep_time\n )\n )\n time.sleep(sleep_time.total_seconds())\n if num_attempts > 30: # about 10 minutes\n raise RuntimeError(\"GetLongRunningOperations timeout.\")\n operation = get_op.execute()\n return operation\n\n def _polling_delay(self, num_attempts, time_scale):\n \"\"\"Computes a delay to the next attempt to poll the Optimizer service.\n\n This does bounded exponential backoff, starting with $time_scale.\n If $time_scale == 0, it starts with a small time interval, less than\n 1 second.\n\n Args:\n num_attempts: The number of times have we polled and found that the\n desired result was not yet available.\n time_scale: The shortest polling interval, in seconds, or zero.\n Zero is treated as a small interval, less than 1 second.\n\n Returns:\n A recommended delay interval, in seconds.\n \"\"\"\n small_interval = 0.3 # Seconds\n interval = max(\n time_scale, small_interval) * 1.41 ** min(num_attempts, 9)\n return datetime.timedelta(seconds=interval)\n\n def _make_study_name(self):\n return \"projects/{}/locations/{}/studies/{}\".format(\n self.project_id, self.region, self.study_id\n )\n\n def _make_trial_name(self, trial_id):\n return \"projects/{}/locations/{}/studies/{}/trials/{}\".format(\n self.project_id, self.region, self.study_id, trial_id\n )\n\n def _make_parent_name(self):\n return \"projects/{}/locations/{}\".format(self.project_id, self.region)\n\n\ndef create_or_load_study(\n project_id: Text,\n region: Text,\n study_id: Text,\n study_config: Optional[Dict[Text, Any]] = None,\n) -> _OptimizerClient:\n \"\"\"Factory method for creating or loading a CAIP Optimizer client.\n\n Given an Optimizer study_config, this will either create or open the\n specified study. It will create it if it doesn't already exist, and open\n it if someone has already created it.\n\n Note that once a study is created, you CANNOT modify it with this function.\n\n This function is designed for use in a distributed system, where many jobs\n call create_or_load_study() nearly simultaneously with the same\n `study_config`. In that situation, all clients will end up pointing nicely\n to the same study.\n\n Args:\n project_id: A GCP project id.\n region: A GCP region. e.g. 'us-central1'.\n study_id: An identifier of the study. If not supplied, system-determined\n unique ID is given. The full study name will be\n projects/{project_id}/locations/{region}/studies/{study_id}.\n And the full trial name will be {study name}/trials/{trial_id}.\n study_config: Study configuration for CAIP Optimizer service. 
If not\n supplied, it will be assumed that the study with the given study_id\n already exists, and will try to retrieve that study.\n\n Returns:\n An _OptimizerClient object with the specified study created or loaded.\n\n Raises:\n RuntimeError: Indicates that study_config is supplied but CreateStudy\n failed and GetStudy did not succeed after\n constants.MAX_NUM_TRIES_FOR_STUDIES tries.\n ValueError: Indicates that study_config is not supplied and the study\n with the given study_id does not exist.\n \"\"\"\n # Build the API client\n # Note that Optimizer service is exposed as a regional endpoint. As such,\n # an API client needs to be created separately from the default.\n with open(constants.OPTIMIZER_API_DOCUMENT_FILE) as f:\n service_client = discovery.build_from_document(\n service=json.load(f),\n requestBuilder=google_api_client.TFCloudHttpRequest,\n )\n\n # Creates or loads a study.\n study_parent = \"projects/{}/locations/{}\".format(project_id, region)\n\n if study_config is None:\n # If study config is unspecified, assume that the study already exists.\n _get_study(\n service_client=service_client,\n study_parent=study_parent,\n study_id=study_id,\n study_should_exist=True,\n )\n\n else:\n request = (\n service_client.projects()\n .locations()\n .studies()\n .create(\n body={\"study_config\": study_config},\n parent=study_parent,\n studyId=study_id,\n )\n )\n try:\n tf.get_logger().info(request.execute())\n except errors.HttpError as e:\n if e.resp.status != 409: # 409 implies study exists, handled below\n raise\n\n _get_study(\n service_client=service_client,\n study_parent=study_parent,\n study_id=study_id,\n )\n\n return _OptimizerClient(service_client, project_id, region, study_id)\n\n\ndef _get_study(\n service_client: discovery.Resource,\n study_parent: Text,\n study_id: Text,\n study_should_exist: bool = False,\n):\n \"\"\"Method for loading a study.\n\n Given the study_parent and the study_id, this method will load the specified\n study, up to constants.MAX_NUM_TRIES_FOR_STUDIES tries.\n\n Args:\n service_client: An API client of CAIP Optimizer service.\n study_parent: Prefix of the study name. The full study name will be\n {study_parent}/studies/{study_id}.\n study_id: An identifier of the study.\n study_should_exist: Indicates whether it should be assumed that the\n study with the given study_id exists.\n \"\"\"\n study_name = \"{}/studies/{}\".format(study_parent, study_id)\n tf.get_logger().info(\n \"Study already exists: {}.\\nLoad existing study...\".format(study_name))\n num_tries = 0\n while True:\n try:\n service_client.projects().locations().studies().get(\n name=study_name\n ).execute()\n except errors.HttpError as err:\n num_tries += 1\n if num_tries >= constants.MAX_NUM_TRIES_FOR_STUDIES:\n if (\n study_should_exist\n and err.resp.status == http.HTTPStatus.NOT_FOUND.value\n ):\n raise ValueError(\n \"GetStudy failed. Study not found: {}.\".format(study_id)\n )\n else:\n raise RuntimeError(\n \"GetStudy failed. Max retries reached: {0!s}\".format(\n err\n )\n )\n time.sleep(1) # wait 1 second before trying to get the study again\n else:\n break\n" ]
[ [ "tensorflow.get_logger" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.2", "1.13", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] } ]
lrgr/imuse-server
[ "b80e1626ad645f63e66efead8f00bbc5af50ac5a" ]
[ "server/scale_exposures.py" ]
[ "import pandas as pd\nimport numpy as np\n\nfrom web_constants import *\nfrom signatures import Signatures, get_signatures_by_mut_type\nfrom project_data import ProjectData, get_selected_project_data\n\nfrom compute_exposures import compute_exposures\n\ndef scale_exposures(chosen_sigs, projects, mut_type, single_sample_id=None, exp_sum=False, exp_normalize=False, tricounts_method=None):\n result = [0, 0]\n\n exps_df = compute_exposures(chosen_sigs, projects, mut_type, single_sample_id=single_sample_id, normalize=exp_normalize, tricounts_method=tricounts_method)\n \n if exp_sum:\n exps_df = exps_df.sum(axis=1)\n exps_df_max = exps_df.max()\n else:\n exps_df_max = exps_df.max().max()\n \n exps_df_max = exps_df_max if pd.notnull(exps_df_max) else 0.0\n\n result = [0, exps_df_max]\n return result" ]
[ [ "pandas.notnull" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
Apucs/Name-Entity-Recognition
[ "6eadc81871ad615726c82f7a4506baca5b7facee" ]
[ "src/inference.py" ]
[ "import torch\r\nfrom spacy.lang.en import English\r\nfrom build_dataloader import corpus\r\nfrom data import config\r\n\r\n\r\n\r\ndef infer(checkpoint_path, sentence, true_tags=None):\r\n\r\n model = torch.jit.load(checkpoint_path)\r\n model.eval()\r\n # tokenize sentence\r\n nlp = English()\r\n tokens = [token.text.lower() for token in nlp(sentence)]\r\n print(\"\\n\",tokens)\r\n # transform to indices based on corpus vocab\r\n numericalized_tokens = [corpus.word_field.vocab.stoi[t] for t in tokens]\r\n # find unknown words\r\n unk_idx = corpus.word_field.vocab.stoi[corpus.word_field.unk_token]\r\n unks = [t for t, n in zip(tokens, numericalized_tokens) if n == unk_idx]\r\n \r\n print(\"Tokens size:\", len(tokens))\r\n token_tensor = torch.LongTensor(numericalized_tokens)\r\n print(\"Tokens size long Tensor:\", token_tensor.shape)\r\n token_tensor = token_tensor.unsqueeze(-1)\r\n print(\"Tokens size updated:\", token_tensor.shape)\r\n predictions = model(token_tensor)\r\n print(\"Size of the predictions:\", predictions.size())\r\n top_predictions = predictions.argmax(-1)\r\n print(\"Size of the top predictions:\", top_predictions.size())\r\n predicted_tags = [corpus.tag_field.vocab.itos[t.item()] for t in top_predictions]\r\n \r\n modified_tags = predicted_tags\r\n\r\n #print(corpus.tag_field.vocab.itos) \r\n ###['<pad>', 'O', 'B-LOC', 'B-PER', 'B-ORG', 'I-PER', 'I-ORG', 'B-MISC', 'I-LOC', 'I-MISC']\r\n\r\n for i, tag in enumerate(modified_tags):\r\n if tag == \"I-PER\" or tag == \"B-PER\":\r\n modified_tags[i] = \"PERSON\"\r\n\r\n elif tag == \"I-ORG\" or tag == \"B-ORG\":\r\n modified_tags[i] = \"ORGANIZATION\"\r\n\r\n elif tag == \"I-LOC\" or tag == \"B-LOC\":\r\n modified_tags[i] = \"LOCATION\"\r\n\r\n else:\r\n modified_tags[i] = \"O\"\r\n\r\n\r\n print(\"\\n\")\r\n print(\"word\".ljust(20), \"entity\")\r\n print(\"-\".ljust(30,\"-\"))\r\n\r\n\r\n\r\n for word, tag in zip(tokens, modified_tags):\r\n print(word.ljust(20), tag)\r\n\r\n\r\n\r\n return tokens, predicted_tags, modified_tags, unks \r\n\r\n\r\ndef main():\r\n checkpoint_path = config.CHECKPOINT3\r\n\r\n sen = \"Mark Elliot Zuckerberg is an American internet entrepreneur. He is known for co-founding the social media website Facebook and its parent company Meta, located in Menlo Park, California\"\r\n\r\n words, infer_tags, mod_infer_tags, unknown_tokens = infer(checkpoint_path, sen, true_tags=None)\r\n\r\n print(\"Unknow tokens:\", unknown_tokens)\r\n\r\nif __name__=='__main__':\r\n main()\r\n" ]
[ [ "torch.LongTensor", "torch.jit.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
leifdenby/convorg
[ "1a1279c3438fa5283578c30b15bb71686a83f846" ]
[ "convorg/cloudstatistics.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"Statistical functions for binary cloud masks. \"\"\"\nimport numpy as np\nimport scipy as sc\n\nfrom skimage import measure\nfrom scipy.spatial.distance import pdist\n\n\n__all__ = [\n 'get_cloudproperties',\n 'neighbor_distance',\n 'iorg',\n 'scai',\n]\n\n\ndef get_cloudproperties(cloudmask, connectivity=1):\n \"\"\"Calculate basic cloud properties from binary cloudmask.\n\n Note:\n All parameters are calculated in pixels!!\n\n See also:\n :func:`skimage.measure.label`:\n Used to find different clouds. \n :func:`skimage.measure.regionprops`:\n Used to calculate cloud properties.\n\n Parameters:\n cloudmask (ndarray): 2d binary cloud mask.\n connectivity (int): Maximum number of orthogonal hops to consider\n a pixel/voxel as a neighbor (see :func:`skimage.measure.label`).\n\n Returns:\n list:\n List of :class:`RegionProperties`\n (see :func:`skimage.measure.regionprops`)\n \"\"\"\n cloudmask[np.isnan(cloudmask)] = 0\n\n labels = measure.label(cloudmask, connectivity=connectivity)\n\n return measure.regionprops(labels)\n\n\ndef neighbor_distance(cloudproperties):\n \"\"\"Calculate nearest neighbor distance for each cloud.\n\n Note: \n Distance is given in pixels.\n\n See also: \n :class:`scipy.spatial.cKDTree`:\n Used to calculate nearest neighbor distances. \n\n Parameters: \n cloudproperties (list[:class:`RegionProperties`]):\n List of :class:`RegionProperties`\n (see :func:`skimage.measure.regionprops` or\n :func:`get_cloudproperties`).\n\n Returns: \n ndarray: Nearest neighbor distances in pixels.\n \"\"\"\n centroids = [prop.centroid for prop in cloudproperties]\n indices = np.arange(len(centroids))\n neighbor_distance = np.zeros(len(centroids))\n centroids_array = np.asarray(centroids)\n\n for n, point in enumerate(centroids):\n # use all center of mass coordinates, but the one from the point\n mytree = sc.spatial.cKDTree(centroids_array[indices != n])\n dist, indexes = mytree.query(point)\n neighbor_distance[n] = dist\n\n return neighbor_distance\n\n\ndef _iorg(neighbor_distance, cloudmask):\n \"\"\"Calculate the cloud cluster index 'I_org'.\n\n See also: \n :func:`scipy.integrate.trapz`:\n Used to calculate the integral along the given axis using\n the composite trapezoidal rule.\n\n Parameters: \n neighbor_distance (list or ndarray): Nearest neighbor distances. \n Output of :func:`neighbor_distance`. \n cloudmask (ndarray): 2d binary cloud mask.\n\n Returns:\n float: cloud cluster index I_org.\n\n References: \n Tompkins, A. M., and A. G. Semie (2017), Organization of tropical \n convection in low vertical wind shears: Role of updraft entrainment, \n J. Adv. Model. Earth Syst., 9, 1046–1068, doi: 10.1002/2016MS000802.\n \n \"\"\"\n nn_sorted = np.sort(neighbor_distance)\n \n nncdf = np.array(range(len(neighbor_distance))) / len(neighbor_distance)\n \n # theoretical nearest neighbor cumulative frequency\n # distribution (nncdf) of a random point process (Poisson)\n lamb = (len(neighbor_distance) /\n (cloudmask.shape[0] * cloudmask.shape[1]))\n nncdf_poisson = 1 - np.exp(-lamb * np.pi * nn_sorted**2)\n\n return sc.integrate.trapz(y=nncdf, x=nncdf_poisson)\n\ndef iorg(cloudmask):\n cloud_props = get_cloudproperties(cloudmask)\n dists = neighbor_distance(cloud_props)\n return _iorg(neighbor_distance=dists, cloudmask=cloudmask)\n\n\ndef _scai(cloudproperties, cloudmask, connectivity=1):\n \"\"\"Calculate the 'Simple Convective Aggregation Index (SCAI)'. 
\n\n The SCAI is defined as the ratio of convective disaggregation\n to a potential maximal disaggregation.\n\n See also: \n :func:`scipy.spatial.distance.pdist`:\n Used to calculate pairwise distances between cloud entities. \n :func:`scipy.stats.mstats.gmean`:\n Used to calculate the geometric mean of all clouds in pairs. \n\n Parameters:\n cloudproperties (list[:class:`RegionProperties`]):\n Output of function :func:`get_cloudproperties`. \n cloudmask (ndarray): 2d binary cloud mask.\n connectivity (int): Maximum number of orthogonal hops to consider\n a pixel/voxel as a neighbor (see :func:`skimage.measure.label`).\n mask (ndarray): 2d mask of non valid pixels.\n\n Returns:\n float: SCAI.\n\n References: \n Tobin, I., S. Bony, and R. Roca, 2012: Observational Evidence for \n Relationships between the Degree of Aggregation of Deep Convection, \n Water Vapor, Surface Fluxes, and Radiation. J. Climate, 25, 6885–6904,\n https://doi.org/10.1175/JCLI-D-11-00258.1\n\n \"\"\"\n centroids = [prop.centroid for prop in cloudproperties]\n\n # number of cloud clusters\n N = len(centroids)\n\n # potential maximum of N depending on cloud connectivity\n if connectivity == 1:\n chessboard = np.ones(cloudmask.shape).flatten()\n # assign every second element with \"0\"\n chessboard[np.arange(1, len(chessboard), 2)] = 0\n # reshape to original cloudmask.shape\n chessboard = np.reshape(chessboard, cloudmask.shape)\n # inlcude NaNmask\n chessboard[np.isnan(cloudmask)] = np.nan\n N_max = np.nansum(chessboard)\n elif connectivity == 2:\n chessboard[np.arange(1, cloudmask.shape[0], 2), :] = 0\n chessboard = np.reshape(chessboard, cloudmask.shape)\n chessboard[np.isnan(cloudmask)] = np.nan\n N_max = np.sum(chessboard)\n else:\n raise ValueError('Connectivity argument should be `1` or `2`.')\n\n # distance between points (center of mass of clouds) in pairs\n di = pdist(centroids, 'euclidean')\n # order-zero diameter\n D0 = sc.stats.mstats.gmean(di)\n\n # characteristic length of the domain (in pixels): diagonal of box\n L = np.sqrt(cloudmask.shape[0]**2 + cloudmask.shape[1]**2)\n\n return N / N_max * D0 / L * 1000\n\ndef scai(cloudmask):\n cloud_props = get_cloudproperties(cloudmask)\n return _scai(cloudproperties=cloud_props, cloudmask=cloudmask)\n" ]
[ [ "numpy.sum", "numpy.sqrt", "numpy.asarray", "scipy.integrate.trapz", "numpy.reshape", "numpy.isnan", "numpy.arange", "numpy.sort", "numpy.ones", "scipy.stats.mstats.gmean", "scipy.spatial.distance.pdist", "numpy.nansum", "numpy.exp", "scipy.spatial.cKDTree" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
matteonicolo/Forex
[ "efc2ba94417a3c0f9c034cd002242eb37c235cf8" ]
[ "bk_data.py" ]
[ "import sys\r\nsys.path.append(\"C:/Users/GiovanniRocco/Anaconda3/envs/forex\")\r\n\r\nimport time\r\nimport pandas as pd\r\nimport numpy as np\r\nimport json\r\nimport oandapyV20.endpoints.instruments as instruments\r\nfrom oandapyV20 import API\r\nfrom setup import account_id, key, api\r\nimport datetime\r\nfrom rfc3339 import rfc3339\r\n\r\ndef download(date, currency):\r\n date2 = date + datetime.timedelta(minutes = 1)\r\n rc39 = rfc3339(date, utc=True, use_system_timezone=True)\r\n print(rc39)\r\n rc2 = rfc3339(date2, utc=True, use_system_timezone=True)\r\n r = instruments.InstrumentsCandles(instrument = currency,\r\n params = {\"granularity\" : \"M1\",\r\n \"price\" : \"M\",\r\n \"from\" : rc39,\r\n \"to\" : rc2})\r\n rs = api.request(r)\r\n response = rs.get(\"candles\")\r\n print (response)\r\n return response\r\n\r\ndef bid(date, currency):\r\n date2 = date + datetime.timedelta(minutes = 1)\r\n rc39 = rfc3339(date, utc=True, use_system_timezone=True)\r\n rc2 = rfc3339(date2, utc=True, use_system_timezone=True)\r\n r = instruments.InstrumentsCandles(instrument=currency,\r\n params = {\"granularity\" : \"M1\",\r\n \"price\" : \"B\",\r\n \"from\" : rc39,\r\n \"to\" : rc2})\r\n rs = api.request(r)\r\n response = rs.get(\"candles\")\r\n return response\r\n\r\ndef save(dw, data, date, openo, high, low, close, volume, adj_close, y, bid, vd):\r\n sc = dw[0]\r\n voluma = sc.get(\"volume\")\r\n candle = sc.get(\"mid\")\r\n\r\n apertura = candle.get(\"o\")\r\n massimo = candle.get(\"h\")\r\n minimo = candle.get(\"l\")\r\n chiusura = candle.get(\"c\")\r\n aggiusta_close = chiusura\r\n quantità = voluma\r\n baid = bid[0]\r\n daib = baid.get(\"bid\")\r\n bd = daib.get(\"c\")\r\n\r\n data.append(date)\r\n openo.append(apertura)\r\n high.append(massimo)\r\n low.append(minimo)\r\n close.append(chiusura)\r\n volume.append(quantità)\r\n adj_close.append(aggiusta_close)\r\n vd.append(bd)\r\n\r\ndef csv(date, openo, high, low, close, volume, adj_close, vd, cur):\r\n lista = {'Date': date,\r\n 'Open' : openo,\r\n 'High' : high,\r\n 'Low' : low,\r\n 'Close': close,\r\n 'Volume': volume,\r\n 'Adj Close': adj_close,\r\n 'Bid': vd\r\n }\r\n filename = \"%s.csv\" % cur\r\n df = pd.DataFrame(lista, columns = ['Date', 'Open', 'High', 'Low', 'Close', 'Volume', 'Adj Close', 'Bid'])\r\n df.to_csv(filename, index=False)\r\n\r\ndef null(dw, data, date, openo, high, low, close, volume, adj_close, y, bid, vd):\r\n data.append(date)\r\n openo.append(\"null\")\r\n high.append(\"null\")\r\n low.append(\"null\")\r\n close.append(\"null\")\r\n volume.append(\"null\")\r\n adj_close.append(\"null\")\r\n vd.append(\"null\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n start_date = datetime.datetime(2017, 6, 5, 0, 0, 0)\r\n end_date = datetime.datetime(2017, 6, 5, 1, 0, 0)\r\n cur = \"EUR_USD\"\r\n\r\n x = 0\r\n y = 0\r\n dw = 0\r\n oldD = None\r\n\r\n date = start_date\r\n print(\"Inizio download...\")\r\n\r\n data = []\r\n openo = []\r\n high = []\r\n low = []\r\n close = []\r\n volume = []\r\n adj_close = []\r\n vd = []\r\n while date <= end_date:\r\n dw = download(date, cur)\r\n print (dw)\r\n bd = bid(date, cur)\r\n print (y)\r\n try:\r\n if dw[0] != oldD:\r\n save(dw, data, date, openo, high, low, close, volume, adj_close, y, bd, vd)\r\n oldD = dw[0]\r\n except IndexError:\r\n print (date)\r\n null(dw, data, date, openo, high, low, close, volume, adj_close, y, bd, vd)\r\n date = date + datetime.timedelta(minutes = 1)\r\n y = y + 1\r\n dw = None\r\n time.sleep(0.5)\r\n\r\n x = x + 1\r\n date = start_date\r\n 
print(\"saved\")\r\n csv(data, openo, high, low, close, volume, adj_close, vd, cur)\r\n\r\n print(\"download completato.\")\r\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
AliJahan/examples
[ "ae625ca94ff5b82d7743d4555ddebeb728cc1430" ]
[ "imagenet/main.py" ]
[ "import argparse\nimport os\nimport random\nimport shutil\nimport time\nimport warnings\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.distributed as dist\nimport torch.optim\nimport torch.multiprocessing as mp\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport torchvision.models as models\n\nmodel_names = sorted(name for name in models.__dict__\n if name.islower() and not name.startswith(\"__\")\n and callable(models.__dict__[name]))\n\nparser = argparse.ArgumentParser(description='PyTorch ImageNet Training')\nparser.add_argument('-j', '--workers', default=0, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\nparser.add_argument('--epochs', default=90, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('--lr', '--learning-rate', default=0.1, type=float,\n metavar='LR', help='initial learning rate', dest='lr')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\nparser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)',\n dest='weight_decay')\nparser.add_argument('-p', '--print-freq', default=10, type=int,\n metavar='N', help='print frequency (default: 10)')\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\nparser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',\n help='evaluate model on validation set')\nparser.add_argument('--world-size', default=-1, type=int,\n help='number of nodes for distributed training')\nparser.add_argument('--rank', default=-1, type=int,\n help='node rank for distributed training')\nparser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,\n help='url used to set up distributed training')\nparser.add_argument('--dist-backend', default='nccl', type=str,\n help='distributed backend')\nparser.add_argument('--seed', default=None, type=int,\n help='seed for initializing training. ')\nparser.add_argument('--multiprocessing-distributed', action='store_true',\n help='Use multi-processing distributed training to launch '\n 'N processes per node, which has N GPUs. This is the '\n 'fastest way to use PyTorch for either single node or '\n 'multi node data parallel training')\n################################################################################\n\"\"\" AliJahan: These flags are used to run the benchmark \"\"\"\nparser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',\n choices=model_names,\n help='model architecture: ' +\n ' | '.join(model_names) +\n ' (default: resnet18)')\nparser.add_argument('data', metavar='DIR',\n help='path to dataset')\nparser.add_argument('-b', '--batch-size', default=32, type=int,\n metavar='N',\n help='mini-batch size (default: 256), this is the total '\n 'batch size of all GPUs on the current node when '\n 'using Data Parallel or Distributed Data Parallel')\nparser.add_argument('--pretrained', dest='pretrained', action='store_true',\n help='use pre-trained model')\nparser.add_argument('--gpu', default=None, type=int,\n help='GPU id to use.')\nparser.add_argument('--algo', default=None, type=int,\n help='Conv. 
fwd algo to use.')\nparser.add_argument('--mem_trace', dest='mem_trace', action='store_true',\n help='Makes evaluation fucntion just feed one batch to the network')\n\n\nbest_acc1 = 0\n\n\ndef main():\n args = parser.parse_args()\n print(torch.__version__)\n if args.seed is not None:\n random.seed(args.seed)\n torch.manual_seed(args.seed)\n cudnn.deterministic = True\n warnings.warn('You have chosen to seed training. '\n 'This will turn on the CUDNN deterministic setting, '\n 'which can slow down your training considerably! '\n 'You may see unexpected behavior when restarting '\n 'from checkpoints.')\n\n if args.gpu is not None:\n warnings.warn('You have chosen a specific GPU. This will completely '\n 'disable data parallelism.')\n\n if args.dist_url == \"env://\" and args.world_size == -1:\n args.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n args.distributed = args.world_size > 1 or args.multiprocessing_distributed\n\n ngpus_per_node = torch.cuda.device_count()\n if args.multiprocessing_distributed:\n # Since we have ngpus_per_node processes per node, the total world_size\n # needs to be adjusted accordingly\n args.world_size = ngpus_per_node * args.world_size\n # Use torch.multiprocessing.spawn to launch distributed processes: the\n # main_worker process function\n mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))\n else:\n # Simply call main_worker function\n main_worker(args.gpu, ngpus_per_node, args)\n\n\ndef main_worker(gpu, ngpus_per_node, args):\n global best_acc1\n args.gpu = gpu\n\n if args.gpu is not None:\n print(\"Use GPU: {} for training\".format(args.gpu))\n\n if args.distributed:\n if args.dist_url == \"env://\" and args.rank == -1:\n args.rank = int(os.environ[\"RANK\"])\n if args.multiprocessing_distributed:\n # For multiprocessing distributed training, rank needs to be the\n # global rank among all the processes\n args.rank = args.rank * ngpus_per_node + gpu\n dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,\n world_size=args.world_size, rank=args.rank)\n # create model\n if args.pretrained:\n print(\"=> using pre-trained model '{}'\".format(args.arch))\n model = models.__dict__[args.arch](pretrained=True)\n else:\n print(\"=> creating model '{}'\".format(args.arch))\n model = models.__dict__[args.arch]()\n\n if not torch.cuda.is_available():\n print('using CPU, this will be slow')\n elif args.distributed:\n # For multiprocessing distributed, DistributedDataParallel constructor\n # should always set the single device scope, otherwise,\n # DistributedDataParallel will use all available devices.\n if args.gpu is not None:\n torch.cuda.set_device(args.gpu)\n model.cuda(args.gpu)\n # When using a single GPU per process and per\n # DistributedDataParallel, we need to divide the batch size\n # ourselves based on the total number of GPUs we have\n args.batch_size = int(args.batch_size / ngpus_per_node)\n args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])\n else:\n model.cuda()\n # DistributedDataParallel will divide and allocate batch_size to all\n # available GPUs if device_ids are not set\n model = torch.nn.parallel.DistributedDataParallel(model)\n elif args.gpu is not None:\n torch.cuda.set_device(args.gpu)\n model = model.cuda(args.gpu)\n else:\n # DataParallel will divide and allocate batch_size to all available GPUs\n if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):\n model.features = 
torch.nn.DataParallel(model.features)\n model.cuda()\n else:\n model = torch.nn.DataParallel(model).cuda()\n\n # define loss function (criterion) and optimizer\n criterion = nn.CrossEntropyLoss().cuda(args.gpu)\n\n optimizer = torch.optim.SGD(model.parameters(), args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n\n # optionally resume from a checkpoint\n if args.resume:\n if os.path.isfile(args.resume):\n print(\"=> loading checkpoint '{}'\".format(args.resume))\n if args.gpu is None:\n checkpoint = torch.load(args.resume)\n else:\n # Map model to be loaded to specified single gpu.\n loc = 'cuda:{}'.format(args.gpu)\n checkpoint = torch.load(args.resume, map_location=loc)\n args.start_epoch = checkpoint['epoch']\n best_acc1 = checkpoint['best_acc1']\n if args.gpu is not None:\n # best_acc1 may be from a checkpoint from a different GPU\n best_acc1 = best_acc1.to(args.gpu)\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n print(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(args.resume, checkpoint['epoch']))\n else:\n print(\"=> no checkpoint found at '{}'\".format(args.resume))\n\n # Data loading code\n valdir = os.path.join(args.data, 'val')\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n val_loader = torch.utils.data.DataLoader(\n datasets.ImageFolder(valdir, transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize,\n ])),\n batch_size=args.batch_size, shuffle=False,\n num_workers=args.workers, pin_memory=True)\n #print(\"@:model.begin\")\n #print(model)\n #print(\"@:model.end\")\n cudnn.benchmark = True\n #cudnn.conv_fwd_algo = args.algo\n acc = validate(val_loader, model, criterion, args)\n\ndef validate(val_loader, model, criterion, args):\n #import os\n #print(os.getpid())\n #input(\"?\")\n batch_time = AverageMeter('Time', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n top1 = AverageMeter('Acc@1', ':6.2f')\n top5 = AverageMeter('Acc@5', ':6.2f')\n progress = ProgressMeter(\n len(val_loader),\n [batch_time, losses, top1, top5],\n prefix='Test: ')\n\n # switch to evaluate mode\n model.eval()\n #with torch.autograd.profiler.profile(use_cuda=True) as prof:\n with torch.no_grad():\n end = time.time()\n for i, (images, target) in enumerate(val_loader):\n if args.gpu is not None:\n images = images.cuda(args.gpu, non_blocking=True)\n if torch.cuda.is_available():\n target = target.cuda(args.gpu, non_blocking=True)\n #<AliJahan/>\n if args.mem_trace:\n print(\"Start-------------------------\")\n #</AliJahan>\n\n # compute output\n output = model(images)\n #<AliJahan/>\n if args.mem_trace:\n print(\"End-------------------------\")\n if args.mem_trace:\n print(\"Mem_trace mode has been enabled, returning after feeding the first batch to the model\")\n break\n #</AliJahan>\n loss = criterion(output, target)\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), images.size(0))\n top1.update(acc1[0], images.size(0))\n top5.update(acc5[0], images.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n if i % args.print_freq == 0:\n progress.display(i)\n # TODO: this should also be done with the ProgressMeter\n print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5))\n #print(prof) \n #prof.export_chrome_trace(\"./trace.json\")\n return top1.avg\n\n\ndef save_checkpoint(state, 
is_best, filename='checkpoint.pth.tar'):\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, 'model_best.pth.tar')\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self, name, fmt=':f'):\n self.name = name\n self.fmt = fmt\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n def __str__(self):\n fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'\n return fmtstr.format(**self.__dict__)\n\n\nclass ProgressMeter(object):\n def __init__(self, num_batches, meters, prefix=\"\"):\n self.batch_fmtstr = self._get_batch_fmtstr(num_batches)\n self.meters = meters\n self.prefix = prefix\n\n def display(self, batch):\n entries = [self.prefix + self.batch_fmtstr.format(batch)]\n entries += [str(meter) for meter in self.meters]\n print('\\t'.join(entries))\n\n def _get_batch_fmtstr(self, num_batches):\n num_digits = len(str(num_batches // 1))\n fmt = '{:' + str(num_digits) + 'd}'\n return '[' + fmt + '/' + fmt.format(num_batches) + ']'\n\n\ndef adjust_learning_rate(optimizer, epoch, args):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\"\n lr = args.lr * (0.1 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.distributed.init_process_group", "torch.multiprocessing.spawn", "torch.cuda.set_device", "torch.load", "torch.manual_seed", "torch.nn.DataParallel", "torch.no_grad", "torch.cuda.is_available", "torch.cuda.device_count", "torch.nn.parallel.DistributedDataParallel", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yogurfrul/tensorpack
[ "af5864439e22bb63a55eb2349164087e89a2ae6e" ]
[ "tensorpack/dataflow/dataset/cifar.py" ]
[ "# -*- coding: utf-8 -*-\n# File: cifar.py\n\n# Yukun Chen <[email protected]>\n\nimport os\nimport pickle\nimport numpy as np\nimport tarfile\nimport six\nfrom six.moves import range\n\nfrom ...utils import logger\nfrom ...utils.fs import download, get_dataset_path\nfrom ..base import RNGDataFlow\n\n__all__ = ['Cifar10', 'Cifar100']\n\n\nDATA_URL_CIFAR_10 = ('http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz', 170498071)\nDATA_URL_CIFAR_100 = ('http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz', 169001437)\n\n\ndef maybe_download_and_extract(dest_directory, cifar_classnum):\n \"\"\"Download and extract the tarball from Alex's website. Copied from tensorflow example \"\"\"\n assert cifar_classnum == 10 or cifar_classnum == 100\n if cifar_classnum == 10:\n cifar_foldername = 'cifar-10-batches-py'\n else:\n cifar_foldername = 'cifar-100-python'\n if os.path.isdir(os.path.join(dest_directory, cifar_foldername)):\n logger.info(\"Found cifar{} data in {}.\".format(cifar_classnum, dest_directory))\n return\n else:\n DATA_URL = DATA_URL_CIFAR_10 if cifar_classnum == 10 else DATA_URL_CIFAR_100\n filename = DATA_URL[0].split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n download(DATA_URL[0], dest_directory, expect_size=DATA_URL[1])\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)\n\n\ndef read_cifar(filenames, cifar_classnum):\n assert cifar_classnum == 10 or cifar_classnum == 100\n ret = []\n for fname in filenames:\n fo = open(fname, 'rb')\n if six.PY3:\n dic = pickle.load(fo, encoding='bytes')\n else:\n dic = pickle.load(fo)\n data = dic[b'data']\n if cifar_classnum == 10:\n label = dic[b'labels']\n IMG_NUM = 10000 # cifar10 data are split into blocks of 10000\n elif cifar_classnum == 100:\n label = dic[b'fine_labels']\n IMG_NUM = 50000 if 'train' in fname else 10000\n fo.close()\n for k in range(IMG_NUM):\n img = data[k].reshape(3, 32, 32)\n img = np.transpose(img, [1, 2, 0])\n ret.append([img, label[k]])\n return ret\n\n\ndef get_filenames(dir, cifar_classnum):\n assert cifar_classnum == 10 or cifar_classnum == 100\n if cifar_classnum == 10:\n train_files = [os.path.join(\n dir, 'cifar-10-batches-py', 'data_batch_%d' % i) for i in range(1, 6)]\n test_files = [os.path.join(\n dir, 'cifar-10-batches-py', 'test_batch')]\n meta_file = os.path.join(dir, 'cifar-10-batches-py', 'batches.meta')\n elif cifar_classnum == 100:\n train_files = [os.path.join(dir, 'cifar-100-python', 'train')]\n test_files = [os.path.join(dir, 'cifar-100-python', 'test')]\n meta_file = os.path.join(dir, 'cifar-100-python', 'meta')\n return train_files, test_files, meta_file\n\n\ndef _parse_meta(filename, cifar_classnum):\n with open(filename, 'rb') as f:\n obj = pickle.load(f)\n return obj['label_names' if cifar_classnum == 10 else 'fine_label_names']\n\n\nclass CifarBase(RNGDataFlow):\n def __init__(self, train_or_test, shuffle=True, dir=None, cifar_classnum=10):\n assert train_or_test in ['train', 'test']\n assert cifar_classnum == 10 or cifar_classnum == 100\n self.cifar_classnum = cifar_classnum\n if dir is None:\n dir = get_dataset_path('cifar{}_data'.format(cifar_classnum))\n maybe_download_and_extract(dir, self.cifar_classnum)\n train_files, test_files, meta_file = get_filenames(dir, cifar_classnum)\n if train_or_test == 'train':\n self.fs = train_files\n else:\n self.fs = test_files\n for f in self.fs:\n if not os.path.isfile(f):\n raise ValueError('Failed to find file: ' + f)\n self._label_names = _parse_meta(meta_file, cifar_classnum)\n self.train_or_test = train_or_test\n 
self.data = read_cifar(self.fs, cifar_classnum)\n self.dir = dir\n self.shuffle = shuffle\n\n def size(self):\n return 50000 if self.train_or_test == 'train' else 10000\n\n def get_data(self):\n idxs = np.arange(len(self.data))\n if self.shuffle:\n self.rng.shuffle(idxs)\n for k in idxs:\n # since cifar is quite small, just do it for safety\n yield self.data[k]\n\n def get_per_pixel_mean(self):\n \"\"\"\n Returns:\n a mean image of all (train and test) images of size 32x32x3\n \"\"\"\n train_files, test_files, _ = get_filenames(self.dir, self.cifar_classnum)\n all_imgs = [x[0] for x in read_cifar(train_files + test_files, self.cifar_classnum)]\n arr = np.array(all_imgs, dtype='float32')\n mean = np.mean(arr, axis=0)\n return mean\n\n def get_label_names(self):\n \"\"\"\n Returns:\n [str]: name of each class.\n \"\"\"\n return self._label_names\n\n def get_per_channel_mean(self):\n \"\"\"\n return three values as mean of each channel\n \"\"\"\n mean = self.get_per_pixel_mean()\n return np.mean(mean, axis=(0, 1))\n\n\nclass Cifar10(CifarBase):\n \"\"\"\n Produces [image, label] in Cifar10 dataset,\n image is 32x32x3 in the range [0,255].\n label is an int.\n \"\"\"\n def __init__(self, train_or_test, shuffle=True, dir=None):\n \"\"\"\n Args:\n train_or_test (str): either 'train' or 'test'.\n shuffle (bool): shuffle the dataset.\n \"\"\"\n super(Cifar10, self).__init__(train_or_test, shuffle, dir, 10)\n\n\nclass Cifar100(CifarBase):\n \"\"\" Similar to Cifar10\"\"\"\n def __init__(self, train_or_test, shuffle=True, dir=None):\n super(Cifar100, self).__init__(train_or_test, shuffle, dir, 100)\n\n\nif __name__ == '__main__':\n ds = Cifar10('train')\n mean = ds.get_per_channel_mean()\n print(mean)\n\n import cv2\n ds.reset_state()\n for i, dp in enumerate(ds.get_data()):\n if i == 100:\n break\n img = dp[0]\n cv2.imwrite(\"{:04d}.jpg\".format(i), img)\n" ]
[ [ "numpy.array", "numpy.mean", "numpy.transpose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fcdl94/ICL
[ "9e79abf8b3d45334302c4716ebc1fa9b3119d986" ]
[ "methods/icarl_revgrad.py" ]
[ "from .icarl_da import ICarlDA\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport logging\n\n\nclass ICarlRG(ICarlDA):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.domain_criterion = nn.BCEWithLogitsLoss()\n self.lam = 0\n self.count = 0\n self.constant = 1\n\n def observe(self, epoch, iteration, train_loader, valid_loader, optimizer):\n self.network.train()\n self.network2.eval()\n\n train_loss = 0\n train_correct = 0\n train_total = 0\n self.count = 0\n # steps\n start_steps = epoch * len(train_loader)\n total_steps = self.epochs * len(train_loader)\n\n if iteration == 0 or not self.protos: # if I DON'T use protos (I don't use them in first iteration as well)\n self.lam = 0\n const = self.constant\n self.constant = 0\n self.network.set_target()\n for batch in train_loader:\n optimizer.zero_grad()\n\n loss, trt_tot, trt_crc, loss_cl = self._compute_loss(batch, iteration)\n\n loss.backward()\n optimizer.step()\n # update stats\n train_loss += loss_cl.item()\n train_total += trt_tot\n train_correct += trt_crc\n self.constant = const\n else: # if I USE protos\n batch_idx = 0\n for source_loader, target_loader in train_loader:\n\n p = float(batch_idx + start_steps) / total_steps\n self.lam = 2. / (1. + np.exp(-10 * p)) - 1\n\n optimizer.zero_grad()\n\n # train the source\n self.network.set_source()\n loss_bx_src, tr_tot, tr_crc, loss_cl = self._compute_loss(source_loader, iteration, target=False)\n train_total += tr_tot\n train_correct += tr_crc\n train_loss += loss_cl.item()\n\n # train the target\n self.network.set_target()\n loss_bx_tar, tr_tot, tr_crc, loss_cl = self._compute_loss(target_loader, iteration)\n train_total += tr_tot\n train_correct += tr_crc\n train_loss += loss_cl.item()\n\n loss_bx = loss_bx_src + loss_bx_tar\n loss_bx.backward()\n optimizer.step()\n\n batch_idx += 1\n\n # make validation\n self.network.eval()\n if iteration == 0:\n self.network.set_target()\n else:\n self.network.set_source()\n test_loss = 0\n test_correct = 0\n test_total = 0\n for inputs, targets_prep in valid_loader:\n targets = np.zeros((inputs.shape[0], self.n_classes), np.float32)\n targets[range(len(targets_prep)), targets_prep.type(torch.int32)] = 1.\n\n inputs = inputs.to(self.device)\n\n logits, feats = self.network.forward(inputs) # make the embedding\n outputs = self.network.predict(logits) # make the prediction with sigmoid, making g_y(xi)\n targets = torch.tensor(targets).to(self.device)\n targets_prep = torch.LongTensor(targets_prep).to(self.device)\n\n loss_bx = self.loss(outputs, targets) # without distillation? -> YES, validation only on new classes\n\n test_loss += loss_bx.item()\n _, predicted = outputs.max(1)\n test_total += targets.size(0)\n test_correct += predicted.eq(targets_prep).sum().item()\n\n # normalize and print stats\n train_acc = 100. * train_correct / train_total\n test_acc = 100. 
* test_correct / test_total\n\n test_loss /= len(valid_loader)\n train_loss /= len(train_loader)\n\n return train_loss, train_acc, test_loss, test_acc\n\n def _compute_loss(self, loader, iteration, target=True):\n inputs, targets_prep = loader\n\n if target:\n domain = torch.ones(inputs.shape[0], 1).to(self.device) # target is one\n else:\n domain = torch.zeros(inputs.shape[0], 1).to(self.device) # source is zero\n\n targets = np.zeros((inputs.shape[0], self.n_classes), np.float32)\n targets[range(len(targets_prep)), targets_prep.type(torch.int32)] = 1.\n\n inputs = inputs.to(self.device)\n\n logits, feats = self.network.forward(inputs) # make the embedding\n prediction = self.network.predict(logits) # make the prediction with sigmoid, making g_y(xi)\n domain_pred = self.network.discriminate_domain(feats, self.lam) # the predicted domain\n\n targets = torch.tensor(targets).to(self.device)\n targets_prep = torch.LongTensor(targets_prep).to(self.device)\n\n if iteration > 0 and self.distillation: # apply distillation\n logits_old, feat_old = self.network2.forward(inputs)\n prediction_old = self.network2.predict(logits_old)\n to = self.compute_num_classes(iteration - 1) # until the number of classes of last iteration\n targets[:, np.array(self.dataset.order[range(0, to)])] = \\\n torch.sigmoid(prediction_old[:, np.array(self.dataset.order[range(0, to)])])\n\n loss_dm = self.domain_criterion(domain_pred, domain)\n loss_bx = self.loss(prediction, targets) # joins classification and distillation losses\n _, predicted = prediction.max(1)\n train_total = targets.size(0)\n train_correct = predicted.eq(targets_prep).sum().item()\n\n domain_acc = torch.sigmoid(domain_pred.detach()).mean().cpu().item()\n if not target:\n domain_acc = 1 - domain_acc\n\n total_loss = loss_bx + self.constant * loss_dm\n if self.count % 400 == 0 or self.count % 400 == 1:\n logging.info(f\"{self.count:5d}: Lam {self.lam:.4f} --- Class Loss {loss_bx:.4f} \"\n f\"--- Domain Loss {loss_dm:4f} --- {'TarDom' if target else 'SrcDom'} Acc {domain_acc:.3f}\")\n self.count += 1\n\n return total_loss, train_total, train_correct, loss_bx\n" ]
[ [ "torch.LongTensor", "torch.ones", "torch.zeros", "torch.tensor", "torch.nn.BCEWithLogitsLoss", "numpy.exp", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tjhlp/tests
[ "7407df7dbfdf12a5f69ba7bc8bf8d14131534ac2" ]
[ "numpytest/matplotlib/matplotlib_tjh_02.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nx = np.linspace(-1, 1, 50)\nprint(x)\ny1 = 2 * x + 1\ny2 = x ** 2\nplt.figure(num=1)\n# image\nplt.plot(x, y1, label='linear')\nplt.plot(x, y2, color='red', label='quadratic', linestyle='--')\n# limit\nplt.xlim(-1, 2)\nplt.ylim(-2, 3)\nticks = np.linspace(-1, 2, 5)\nplt.xticks(ticks)\nplt.yticks([-2, -1, 1, 2],\n [r'$really\\ bad$', '$bad$', '$well$', '$really\\ well$']\n )\nax = plt.gca()\nax.spines['right'].set_color('none')\nax.spines['top'].set_color('none')\nplt.legend(loc='best')\nplt.show()\n" ]
[ [ "matplotlib.pyplot.gca", "matplotlib.pyplot.yticks", "matplotlib.pyplot.legend", "numpy.linspace", "matplotlib.pyplot.ylim", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlim", "matplotlib.pyplot.xticks", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Hhhhhhhhhhao/image-cartoonization
[ "073b51656b96b069496917d212119caad7bf4728" ]
[ "utils/wb_utils.py" ]
[ "import numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom skimage import segmentation, color\nfrom joblib import Parallel, delayed\n\n\ndef box_filter(x, r):\n ch = list(x.size())[1]\n \n weight = 1 / ((2*r+1) ** 2)\n\n box_kernel = weight * np.ones((ch, 1, 2*r+1, 2*r+1))\n box_kernel = np.array(box_kernel).astype(np.float32)\n box_kernel = torch.from_numpy(box_kernel).to(x.device)\n output = F.conv2d(x, box_kernel, bias=None, stride=1, padding=(2*r+1)//2, groups=ch)\n return output\n\n\ndef guided_filter(x, y, r, eps=1e-2):\n x_shape = list(x.size())\n\n N = box_filter(torch.ones((1, 1, x_shape[2], x_shape[3])), r).to(x.device)\n\n mean_x = box_filter(x, r) / (N + eps)\n mean_y = box_filter(y, r) / (N + eps)\n cov_xy = box_filter(x*y, r) / (N - mean_x * mean_y + eps)\n var_x = box_filter(x*x, r) / (N - mean_x * mean_y + eps)\n\n A = cov_xy / (var_x + eps)\n b = mean_y - A * mean_x\n\n mean_A = box_filter(A, r) / (N + eps)\n mean_b = box_filter(b, r) / (N + eps)\n\n output = mean_A * x + mean_b\n return output\n\n\ndef color_shift(image1, mode='uniform'):\n r1, g1, b1 = image1[:, 0, :, :], image1[:, 1, :, :], image1[:, 2, :, :]\n if mode == 'normal':\n r_weight = torch.normal(mean=0.299, std=0.1)\n g_weight = torch.normal(mean=0.587, std=0.1)\n b_weight = torch.normal(mean=0.114, std=0.1)\n elif mode == 'uniform':\n r_weight = (0.399-0.199) * torch.rand(1) + 0.199\n g_weight = (0.687-0.487) * torch.rand(1) + 0.487\n b_weight = (0.214-0.014) * torch.rand(1) + 0.014\n r_weight = r_weight.to(image1.device)\n g_weight = g_weight.to(image1.device)\n b_weight = b_weight.to(image1.device)\n output1 = (r_weight*r1 + g_weight*g1 + b_weight*b1) / (r_weight+g_weight+b_weight + 1e-12)\n return output1.unsqueeze(1).repeat(1, 3, 1, 1)\n\n\ndef superpixel(batch_image, seg_num=100):\n batch_image = (batch_image + 1) / 2\n batch_image = batch_image * 255\n batch_image = batch_image.astype(np.uint8)\n\n def process_slic(image):\n seg_label = segmentation.slic(np.array(image), n_segments=seg_num, sigma=1, compactness=10, convert2lab=True)\n image = color.label2rgb(seg_label, np.array(image), kind='avg')\n return image\n\n num_job = np.shape(batch_image)[0]\n batch_out = Parallel(n_jobs=num_job)(delayed(process_slic)\\\n (image) for image in batch_image)\n\n batch_out = np.asarray(batch_out)\n batch_out = batch_out / 255.\n batch_out = batch_out * 2 - 1\n batch_out = batch_out.astype(np.float32)\n return batch_out.transpose(0, 3, 1, 2)" ]
[ [ "torch.normal", "torch.ones", "numpy.asarray", "torch.nn.functional.conv2d", "torch.from_numpy", "numpy.ones", "numpy.shape", "torch.rand", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
DCMLab/pitchplots
[ "ce29631380bc93e267d6bf62e342d377a9e75f18" ]
[ "static.py" ]
[ "\"\"\"\r\nFunctions for none moving charts\r\n\"\"\"\r\nimport math\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.patches as patches\r\nimport matplotlib\r\n\r\nfrom pitchplots.reader import get_df_short\r\nfrom pitchplots.functions import get_acc, get_step, get_pc, get_dic_nei, put_flat_sharp, get_fifth_nb, get_fifth_note, is_tpc, is_pc\r\n\r\nclass StaticError(Exception):\r\n \"\"\"Exception thrown when the static module cannot plot.\"\"\"\r\n pass\r\n\r\nclass InvalidDataTypeTypeError(StaticError):\r\n \"\"\"Exception thrown when pitch_type is not pc or tpc\"\"\"\r\n pass\r\n\r\nclass InvalidSetMeasureTypeError(StaticError):\r\n \"\"\"Exception thrown when set_measure is not a list of 2 numbers with the first and last measures to take in count\"\"\"\r\n pass\r\n\r\nclass InvalidConvertTableTypeError(StaticError):\r\n \"\"\"Exception thrown when vocabulary does not have 12 elements or its elements are not tpc notes\"\"\"\r\n pass\r\n\r\ndef line(\r\n piece,\r\n pitch_type='tpc',\r\n measures=None,\r\n log=False,\r\n normalize=False,\r\n vocabulary={0:'C', 1:'Db', 2:'D', 3:'Eb', 4:'E', 5:'F', 6:'Gb', 7:'G', 8:'Ab', 9:'A', 10:'Bb', 11:'B'},\r\n pitch_class_display=False,\r\n duration=False,\r\n color='blue',\r\n figsize=[6, 4],\r\n xmin=None,\r\n xmax=None,\r\n start=0,\r\n show=False,\r\n **kwargs):\r\n \"\"\"return the figure of a linechart with the notes in the X axis and their value in the Y axis\r\n\r\n Keyword arguments:\r\n piece -- the absolute path to the .csv file containing the data or a DataFrame\r\n pitch_type -- the type of data that you want to be read (default 'tpc'), 'pc' could be use for twelve parts chart tpc form\r\n (tpc:[A, B#, Gbbb, ...], pc (pitch class):[0, 3, 7, ...])\r\n measures -- give a set of measures example [5, 18], will display the notes of the measures 5 to 18 included\r\n log -- if True the colors are distributed on a log scale, by default it's a lineare scale (default False)\r\n vocabulary -- the conversion dictionary from pitch class to tpc(F#, A, ...) 
format,\r\n pitch_class_display -- if True display the pitch class and no the tpc values and so the grid repeat itself.\r\n duration -- tell him if he has to class the notes by their total duration or their number of appearance\r\n figsize -- tell the size of the figure in inches [x, y]\r\n xmin, xmax -- the notes that will be displayed are in this range according to this values\r\n {0 : F, 1 : C, 2 : G, 3 : D, 4 : A, 5 : E, 6 : B} and +- 7 for a sharp and a flat\r\n display -- if True the figure is displayed, if False it is hidden so you can have only the returned figure\r\n **kwargs -- these arguments are redirected to the matplotlib.pyplot.pie function, see informations at\r\n https://matplotlib.org/api/_as_gen/matplotlib.pyplot.bar.html\r\n \"\"\"\r\n #get the df\r\n if pitch_class_display:\r\n df = get_df_short(piece, vocabulary=vocabulary, pitch_type='pc', measures=measures)\r\n else:\r\n df = get_df_short(piece, vocabulary=vocabulary, pitch_type=pitch_type, measures=measures)\r\n #create the figure and close it so it wont be display\r\n fig = plt.figure(figsize=figsize)\r\n if not show:\r\n plt.close(fig)\r\n ax = fig.add_subplot(111)\r\n \r\n if not pitch_class_display:\r\n df['fifth_number'] = df['tpc'].apply(get_fifth_nb)\r\n xmin = df['fifth_number'].min() if xmin == None else xmin+1\r\n xmax = df['fifth_number'].max() if xmax == None else xmax+1\r\n labels = [get_fifth_note(i) for i in range(xmin, xmax+1)]\r\n # Give the value to the notes, for their number of appearance\r\n if normalize:\r\n s = pd.Series(df['duration']/df['duration'].sum()) if duration else pd.Series(df['nb']/df['nb'].sum())\r\n else:\r\n s = pd.Series(df['duration']) if duration else pd.Series(df['nb'])\r\n s.index = df['pc'] if pitch_class_display else df['tpc']\r\n if pitch_class_display:\r\n #reindex with integers to be compatible with the 'pc' value\r\n pc_labels = np.roll([0, 7, 2, 9, 4, 11, 6, 1, 8, 3, 10, 5],\r\n -([0, 7, 2, 9, 4, 11, 6, 1, 8, 3, 10, 5].index(start)))\r\n s = s.reindex(pc_labels).fillna(0)\r\n #get the index in strings so it wont be reorder by the bar function\r\n s.index = np.roll(['0', '7', '2', '9', '4', '11', '6', '1', '8', '3', '10', '5'],\r\n -([0, 7, 2, 9, 4, 11, 6, 1, 8, 3, 10, 5].index(start)))\r\n else:\r\n s = s.reindex(labels).fillna(0)\r\n # Do the bar plot\r\n ax.bar(x=s.index, color=color, height = s.values, log=log, **kwargs)\r\n \r\n return fig\r\n \r\ndef circle(\r\n piece,\r\n pitch_type='tpc',\r\n measures=None, # need documentation\r\n log=False,\r\n vocabulary={0:'C', 1:'Db', 2:'D', 3:'Eb', 4:'E', 5:'F', 6:'Gb', 7:'G', 8:'Ab', 9:'A', 10:'Bb', 11:'B'},\r\n pitch_class_display=False,\r\n colorbar=True,\r\n duration=False,\r\n fifths=True,\r\n figsize=[7, 4],\r\n top=None,\r\n rotation=0,\r\n clockwise=True,\r\n cmap='Blues',\r\n nan_color=None,\r\n show=False,\r\n **kwargs):\r\n \"\"\"return the figure of a piechart with importance of the notes that are represented by the colour as a heatmap\r\n\r\n Keyword arguments:\r\n piece -- the absolute path to the .csv file containing the data or a DataFrame\r\n pitch_type -- the type of data that you want to be read (default 'tpc'), 'pc' could be use for twelve parts chart tpc form\r\n (tpc:[A, B#, Gbbb, ...], pc (pitch class):[0, 3, 7, ...])\r\n measures -- give a set of measures example [5, 18], will display the notes of the measures 5 to 18 included\r\n log -- if True the colors are distributed on a log scale, by default it's a lineare scale (default False)\r\n vocabulary -- the conversion dictionary from pitch class to 
tpc(F#, A, ...) format,\r\n pitch_class_display -- if True display the pitch class and no the tpc values and so the grid repeat itself.\r\n colorbar -- if true display the colorbar aside of the pie chart\r\n duration -- tell him if he has to class the notes by their total duration or their number of appearance\r\n fifths -- if True class the notes by fifths order, if not class by the chromatic order\r\n figsize -- tell the size of the figure in inches [x, y]\r\n top -- tell which note should be on the top of the piechart, different for tpc or pc\r\n rotation -- allows to rotate the piechart, int angle in degrees\r\n clockwise -- if True the piechart is displayed clockwise if not counter-clockwise\r\n cmap -- indicate the type of color to use for the heatmap, see matplotlib color documentation (default 'Blues')\r\n nan_color -- give the possibility to set a color for the note that do not appear in the piece (default 'nan')\r\n display -- if True the figure is displayed, if False it is hidden so you can have only the returned figure\r\n **kwargs -- these arguments are redirected to the matplotlib.pyplot.pie function, see informations at\r\n https://matplotlib.org/api/_as_gen/matplotlib.pyplot.pie.html\r\n \"\"\"\r\n #settings\r\n df = get_df_short(piece, vocabulary=vocabulary, pitch_type=pitch_type, measures=measures, duration=duration)\r\n\r\n #color map\r\n cmap = matplotlib.cm.get_cmap(cmap)\r\n color_note = []\r\n\r\n #dataFrame for the plot if tpc\r\n df_tpc_pie = pd.DataFrame(columns=['note', 'part', 'pc'])\r\n\r\n #put top in the right form\r\n if pd.isnull(top) == False:\r\n if is_tpc(top) and pitch_class_display:\r\n top = get_pc(top)\r\n if is_pc(top) and not pitch_class_display:\r\n top = vocabulary[int(top)]\r\n\r\n #remember position of data in Series\r\n s_pos = pd.Series()\r\n count = 0\r\n part = 0\r\n letter = 'nan'\r\n s_fifth = pd.Series()\r\n \r\n fig = plt.figure(figsize=figsize)\r\n if not show:\r\n plt.close(fig)\r\n ax = fig.add_subplot(111, aspect='equal')\r\n \r\n #Set the order in function of fifth\r\n if fifths:\r\n s_tpc_format = pd.Series((0, 7, 2, 9, 4, 11, 6, 1, 8, 3, 10, 5))\r\n else:\r\n s_tpc_format = pd.Series((0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))\r\n\r\n #for plot if pitch_class_display\r\n s_twelve_ones = pd.Series((1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), index=s_tpc_format)\r\n\r\n #if it show the tpc values\r\n if pitch_class_display == False:\r\n #put the right values in 'number'\r\n if duration:\r\n df_data = df.copy()\r\n df_data.rename(columns={'duration': 'number'},inplace=True)\r\n else:\r\n df_data = df.copy()\r\n df_data.rename(columns={'nb': 'number'},inplace=True)\r\n\r\n #Normalize the values for the colors\r\n max_value = df_data['number'].max()\r\n min_value = df_data['number'].min()\r\n if log:\r\n norm = matplotlib.colors.LogNorm(vmin=min_value, vmax=max_value)\r\n else:\r\n norm = matplotlib.colors.Normalize(0, vmax=max_value)\r\n \r\n #for chromatic order\r\n if fifths == False:\r\n\r\n #for each pitch class values\r\n for i in range(12):\r\n\r\n #if a pitch class is represented in the data\r\n if df_data['pc'].isin([s_tpc_format[i]]).any():\r\n count = 0\r\n s_pos.drop(s_pos.index, inplace=True)\r\n \r\n #count how much time there is tpc values for a same pitch class\r\n for j in range(df_data['pc'].isin([s_tpc_format[i]]).shape[0]):\r\n if df_data['pc'].isin([s_tpc_format[i]])[j]:\r\n s_pos.at[count] = j\r\n count = count + 1\r\n\r\n #devide the pie part and set color\r\n for j in range(count):\r\n part = 1/count\r\n letter = 
df_data.at[s_pos.at[j], 'step']\r\n\r\n #write the notes\r\n letter = put_flat_sharp(letter, df_data.at[s_pos.at[j], 'acc'])\r\n\r\n #register the informations\r\n df_tpc_pie = df_tpc_pie.append({'note':letter, 'part':part},\r\n ignore_index=True)\r\n color_note.append(cmap(norm(df_data.at[s_pos.at[j], 'number'])))\r\n\r\n #if the pitch class do no appear in the piece\r\n else:\r\n letter = vocabulary[s_tpc_format[i]]\r\n\r\n df_tpc_pie = df_tpc_pie.append({'note':letter, 'part':1}, ignore_index=True)\r\n if pd.isnull(nan_color):\r\n color_note.append(cmap(0))\r\n else:\r\n color_note.append(nan_color)\r\n else:\r\n #get the fifth numbers of the notes\r\n for i in range(df_data.shape[0]):\r\n s_fifth.at[i] = get_fifth_nb(df_data.at[i, 'tpc'])\r\n df_data['fifth'] = s_fifth\r\n\r\n #create df_tpc_pie and get the colours\r\n for i in range(df_data['fifth'].max()-df_data['fifth'].min()+1):\r\n #the part are equal for the moment\r\n df_tpc_pie.at[i, 'part'] = 1\r\n df_tpc_pie.at[i, 'note'] = get_fifth_note(i + df_data['fifth'].min())\r\n df_tpc_pie.at[i, 'pc'] = get_pc(df_tpc_pie.at[i, 'note'])\r\n \r\n if df_data['fifth'].isin([i + df_data['fifth'].min()]).any():\r\n #get the colour for the note who has the good fifth number\r\n color_note.append(cmap(norm(df_data['number'][df_data['fifth']==(i + df_data['fifth'].min())].iat[0])))\r\n elif df_data['fifth'].isin([i + df_data['fifth'].min()]).any() == False and pd.isnull(nan_color) == False:\r\n color_note.append(nan_color)\r\n else:\r\n color_note.append(cmap(0))\r\n\r\n #if clockwise invert the order of the data to be displayed clockwise, inverse also the index\r\n if clockwise:\r\n df_tpc_pie = df_tpc_pie.iloc[::-1]\r\n color_note = list(reversed(color_note))\r\n\r\n #calculate the angle for the topPitchClass to be at the top\r\n if pd.isnull(top) == False and fifths == False and df_tpc_pie['note'].isin([top]).any() == True:\r\n if clockwise:\r\n rotation = rotation + 90 + df_tpc_pie.at[0, 'part'] * 15\r\n else:\r\n rotation = rotation + 90 - df_tpc_pie.at[0, 'part'] * 15\r\n for i in range(df_tpc_pie.shape[0]):\r\n if top == df_tpc_pie.at[i, 'note']:\r\n if df_tpc_pie.at[i, 'part'] != 1:\r\n if clockwise:\r\n rotation = rotation - 15*df_tpc_pie.at[i, 'part']\r\n else:\r\n rotation = rotation + 15*df_tpc_pie.at[i, 'part']\r\n break\r\n else:\r\n if clockwise:\r\n rotation = rotation + 30*df_tpc_pie.at[i, 'part']\r\n else:\r\n rotation = rotation - 30*df_tpc_pie.at[i, 'part']\r\n\r\n #put the top note at the top\r\n if pd.isnull(top) == False and fifths == True and df_tpc_pie['note'].isin([top]).any() == True:\r\n if clockwise:\r\n rotation = rotation + 90 + 180/df_tpc_pie.shape[0]\r\n else:\r\n rotation = rotation + 90 - 180/df_tpc_pie.shape[0]\r\n for i in range (df_tpc_pie.shape[0]):\r\n if df_tpc_pie.at[i, 'note'] == top:\r\n break\r\n else:\r\n #the sens of reading depend on the orientation\r\n if clockwise:\r\n rotation = rotation + 360/df_tpc_pie.shape[0]\r\n else:\r\n rotation = rotation - 360/df_tpc_pie.shape[0]\r\n \r\n\r\n #put nice sharps and flats\r\n for i in range(df_tpc_pie.shape[0]):\r\n df_tpc_pie.at[i, 'note'] = df_tpc_pie.at[i, 'note'].replace('b', r'$\\flat$')\\\r\n .replace('#', r'$\\sharp$')\r\n \r\n #plot the piechart with index 'tpc'\r\n df_tpc_pie.index = df_tpc_pie['note']\r\n \r\n #do the pie chart\r\n ax.pie(labels=df_tpc_pie.index, x=df_tpc_pie['part'], colors=color_note, startangle=rotation, **kwargs)\r\n\r\n #if asked plot the colorbar left of the piechart\r\n if colorbar:\r\n ax2 = fig.add_subplot(1, 10, 
1)\r\n cb1 = matplotlib.colorbar.ColorbarBase(ax2, cmap=cmap, norm=norm, orientation='vertical')\r\n \r\n #display with the pc values\r\n else:\r\n #put the right values in 'number'\r\n if duration:\r\n df_data = pd.concat(\r\n [df['pc'], df['duration']],\r\n axis=1,\r\n keys=['pc', 'number'])\r\n else:\r\n df_data = pd.concat(\r\n [df['pc'], df['nb']],\r\n axis=1,\r\n keys=['pc', 'number'])\r\n\r\n #Normalize the values for the colors\r\n max_value = df_data['number'].max()\r\n min_value = df_data['number'].min()\r\n if log:\r\n norm = matplotlib.colors.LogNorm(vmin=min_value, vmax=max_value)\r\n else:\r\n norm = matplotlib.colors.Normalize(0, vmax=max_value)\r\n \r\n #set data df_data\r\n df_data = (df_data.groupby('pc')).sum()\r\n df_data = df_data.reindex(s_tpc_format)\r\n df_data.fillna(0, inplace=True)\r\n\r\n #set colors\r\n for i in range(0, 12):\r\n if df_data.iat[i, 0] != 0:\r\n color_note.append(cmap(norm(df_data.iat[i, 0])))\r\n else:\r\n if pd.isnull(nan_color):\r\n color_note.append(cmap(0))\r\n else:\r\n color_note.append(nan_color)\r\n\r\n #if clockwise invert the order of the data to be displayed clockwise\r\n if clockwise:\r\n s_twelve_ones = s_twelve_ones.iloc[::-1]\r\n color_note = list(reversed(color_note))\r\n\r\n #calculate the angle for the topPitchClass to be at the top\r\n if pd.isnull(top) == False:\r\n for i in range(s_tpc_format.shape[0]):\r\n if top == (s_twelve_ones.index)[i]:\r\n rotation = rotation + 75 - i * 30\r\n break\r\n ax.pie(labels=s_twelve_ones.index, x=s_twelve_ones, colors=color_note, startangle=rotation, **kwargs)\r\n\r\n #if asked plot the colorbar left of the piechart\r\n if colorbar:\r\n ax2 = fig.add_subplot(1, 10, 1)\r\n cb1 = matplotlib.colorbar.ColorbarBase(ax2, cmap=cmap, norm=norm, orientation='vertical')\r\n return fig\r\n\r\n\r\ndef tonnetz(\r\n piece,\r\n pitch_type='tpc',\r\n measures=None,\r\n pitch_class_display=False,\r\n duplicate=True,\r\n duration=False,\r\n log=False,\r\n colorbar=True,\r\n vocabulary={0:'C', 1:'Db', 2:'D', 3:'Eb', 4:'E', 5:'F', 6:'Gb', 7:'G', 8:'Ab', 9:'A', 10:'Bb', 11:'B'},\r\n radius=3,\r\n hex_size=1,\r\n fontsize=1,\r\n figsize=[7, 4],\r\n cmap='Blues',\r\n nan_color=None,\r\n edgecolor=None,\r\n center=None,\r\n show=False, # CHANGE IT TO SHOW\r\n **kwargs):\r\n \"\"\"return the figure of a 2D grid of hexagons, each hexagons being a note\r\n\r\n Keyword arguments:\r\n piece -- the absolute path to the .csv file containing the data or a DataFrame\r\n pitch_type -- the type of data that you want to be read (default 'tpc'), 'pc' could be use for twelve parts chart tpc form\r\n (tpc:[A, B#, Gbbb, ...], pc (pitch class):[0, 3, 7, ...])\r\n measures -- give a set of measures example [5, 18], will display the notes of the measures 5 to 18 included\r\n pitch_class_display -- if True display the pitch class and not the tpc values and so the grid repeat itself.\r\n duplicate -- it False avoid any repetition of the notes in the grid\r\n duration -- if True the values taking account is the duration and not the number of appearence\r\n log -- if True the colors are distributed on a log scale, by default it's a lineare scale (default False)\r\n colorbar -- if true display the colorbar aside of the chart\r\n vocabulary -- the conversion dictionary from pitch class to tpc(F#, A, ...) 
format,\r\n radius -- define the number of layers of the hexagonal grid (default 3)\r\n hex_size -- indicate the size of the hexagons (default 1)\r\n fontsize -- indicate the size of the typo for the labels (default 1)\r\n figsize -- tell the size of the figure in inches [x, y]\r\n cmap -- indicate the type of color to use for the heatmap, see matplotlib color documentation (default 'Blues')\r\n nan_color -- give the possibility to set a color for the note that do not appear in the piece (default None)\r\n center -- you can set the note that will be in the center of the grid,\r\n by default it put the most recurent note in the center (default None)\r\n display -- if True the figure is displayed, if False it is hidden so you can have only the returned figure\r\n **kwargs -- these arguments are redirected to matplotlib.patches.RegularPolygon, see informations at\r\n https://matplotlib.org/api/_as_gen/matplotlib.patches.RegularPolygon.html\r\n \"\"\"\r\n #===================================================================================\r\n #constant, parameter, variables\r\n #===================================================================================\r\n\r\n #settings\r\n df_data = get_df_short(piece, vocabulary=vocabulary, pitch_type=pitch_type, measures=measures, duration=duration)\r\n \r\n #constant\r\n HEXEDGE = math.sqrt(3)/2 #math constant\r\n\r\n #intern variables\r\n length = 0.05 * hex_size * 1.5 * 3 / radius#radius and border length of the hexagons\r\n center_pos = [0.5, 0.5] # set the center on the center of the map\r\n size_text = length * 150 * fontsize # parameter fontsize\r\n pos = [0, 0, 0] #x, y, z\r\n pos_ser = (0, 0, 0) #for serching in the data\r\n a_center = ['F', 0] # the center that was define (note, sup)\r\n color_nb = 0\r\n color_text = 'Black' # by default\r\n show_hex = True\r\n\r\n #Normalize the numbers for colours\r\n if duration:\r\n max_val_tpc = df_data['duration'].max()\r\n min_val_tpc = df_data['duration'].min()\r\n else:\r\n max_val_tpc = df_data['nb'].max()\r\n min_val_tpc = df_data['nb'].min()\r\n if log:\r\n norm = matplotlib.colors.LogNorm(vmin=min_val_tpc, vmax=max_val_tpc)\r\n else:\r\n norm = matplotlib.colors.Normalize(vmin=0, vmax=max_val_tpc)\r\n\r\n found = False\r\n\r\n #define figure\r\n fig = plt.figure(figsize=figsize)\r\n if not show:\r\n plt.close(fig)\r\n ax = fig.add_subplot(111, aspect='equal')\r\n \r\n \r\n #colormap for the layout\r\n cmap = matplotlib.cm.get_cmap(cmap)\r\n\r\n #is the list of hexagon already define\r\n if pitch_class_display:\r\n columns = ['pos', 'note']\r\n else:\r\n columns = ['pos', 'note', 'acc']\r\n df_pos = pd.DataFrame(columns=columns)\r\n\r\n #give the notes'neighbours\r\n df_nei = pd.DataFrame.from_dict(get_dic_nei(pitch_class_display))\r\n\r\n #give the direction to look to for the nearest define hexagon\r\n x_list = [-1, 1, 0, 0, 1, -1]\r\n y_list = [1, -1, -1, 1, 0, 0]\r\n z_list = [0, 0, 1, -1, -1, 1]\r\n\r\n #===================================================================================\r\n #hexgrid\r\n #===================================================================================\r\n\r\n #do the first hexagon of the center\r\n #if not define it takes the most current note\r\n if pd.isnull(center):\r\n #draw the hexagon\r\n p = patches.RegularPolygon(center_pos, 6, radius=length, color=cmap(1/1), **kwargs)\r\n \r\n if pitch_class_display:\r\n ax.text(\r\n center_pos[0],\r\n center_pos[1],\r\n str(int(df_data['pc'][0])),\r\n color='white',\r\n horizontalalignment='center',\r\n 
verticalalignment='center',\r\n size=size_text)\r\n df_pos = df_pos.append(\r\n {'pos':(0,0,0), 'note':df_data['pc'][0]},\r\n ignore_index=True)\r\n else:\r\n ax.text(\r\n center_pos[0],\r\n center_pos[1],\r\n put_flat_sharp(df_data['step'][0], df_data['acc'][0]).replace('#', r'$\\sharp$') \\\r\n .replace('b', r'$\\flat$'),\r\n color='white',\r\n horizontalalignment='center',\r\n verticalalignment='center',\r\n size=size_text)\r\n df_pos = df_pos.append(\r\n {'pos':(0,0,0), 'note':df_data['step'][0], 'acc':df_data['acc'][0]},\r\n ignore_index=True)\r\n ax.add_patch(p)\r\n \r\n else: #read the given note and display it\r\n if pitch_class_display:\r\n df_pos = df_pos.append(\r\n {'pos':(0,0,0), 'note':center},\r\n ignore_index=True)\r\n else:\r\n a_center[0] = get_step(center)\r\n a_center[1] = get_acc(center)\r\n df_pos = df_pos.append(\r\n {'pos':(0,0,0), 'note':a_center[0], 'acc':a_center[1]},\r\n ignore_index=True)\r\n \r\n #set the color\r\n color = cmap(0)\r\n found = False\r\n color_nb = 0\r\n for l in range(df_data.shape[0]):\r\n if pitch_class_display:\r\n if str(int(df_data.at[l, 'pc'])) == str(center):\r\n if duration:\r\n color = cmap(norm(df_data.at[l, 'duration']))\r\n color_nb = norm(df_data.at[l, 'duration'])\r\n else:\r\n color = cmap(norm(df_data.at[l, 'nb']))\r\n color_nb = norm(df_data.at[l, 'nb'])\r\n found = True\r\n else:\r\n if df_data.at[l, 'step'] == a_center[0] and df_data.at[l, 'acc'] == a_center[1]:\r\n if duration:\r\n color = cmap(norm(df_data.at[l, 'duration']))\r\n color_nb = norm(df_data.at[l, 'duration'])\r\n else:\r\n color = cmap(norm(df_data.at[l, 'nb']))\r\n color_nb = norm(df_data.at[l, 'nb'])\r\n found = True\r\n \r\n if found == False and pd.isnull(nan_color) == False:\r\n color = nan_color\r\n\r\n #define the color af the label in function of the color of the hexagon\r\n if color_nb > 0.6:\r\n color_text = 'White'\r\n else:\r\n color_text = 'Black'\r\n \r\n if pitch_class_display == False:\r\n a_center[0] = put_flat_sharp(a_center[0], a_center[1])\r\n \r\n if not edgecolor:\r\n edgecolor = color\r\n #draw and add labels\r\n p = patches.RegularPolygon(\r\n center_pos,\r\n 6,\r\n radius=length,\r\n facecolor=color,\r\n edgecolor=edgecolor,\r\n **kwargs)\r\n if pitch_class_display:\r\n ax.text(\r\n center_pos[0],\r\n center_pos[1],\r\n str(int(center)),\r\n color=color_text,\r\n horizontalalignment='center',\r\n verticalalignment='center',\r\n size=size_text)\r\n else:\r\n ax.text(\r\n center_pos[0],\r\n center_pos[1],\r\n a_center[0].replace('#', r'$\\sharp$') \\\r\n .replace('b', r'$\\flat$'),\r\n color=color_text,\r\n horizontalalignment='center',\r\n verticalalignment='center',\r\n size=size_text)\r\n ax.add_patch(p)\r\n\r\n #do the rest of the plot except the first hex\r\n for layer in range(radius + 1): #for each layer\r\n for i in range(3): #for x,y,z\r\n for j in range(2): #for negative and positive value\r\n for k in range(layer): #to do the number of hexagon on sides\r\n #set the position of the hexagon\r\n pos[(0 + i) % 3] = layer * ((-1) ** j)\r\n pos[(1 + i) % 3] = (-layer + k) * ((-1) ** j)\r\n pos[(2 + i) % 3] = (-k) * ((-1) ** j)\r\n\r\n #position of the nearest hexagon already defined\r\n pos_ser = (\r\n pos[0] + x_list[j+i*2],\r\n pos[1] + y_list[j+i*2], \r\n pos[2] + z_list[j+i*2])\r\n \r\n #position to search in df_nei\r\n pos_ser_n = (\r\n x_list[j+i*2] * (-1),\r\n y_list[j+i*2] * (-1),\r\n z_list[j+i*2] * (-1))\r\n\r\n select_data = df_pos['pos'] == pos_ser\r\n\r\n if pitch_class_display == False:\r\n current_sup = 
df_pos[select_data].iat[0, 2]\r\n\r\n #get df for the note of reference from df_nei\r\n df_nei_gr = df_nei.groupby('ref').get_group(df_pos[select_data].iat[0, 1])\r\n\r\n #select the name of the note from the hexagone\r\n select_data = df_nei_gr['pos'] == pos_ser_n\r\n\r\n #register the hex in function of the type of value\r\n if pitch_class_display:\r\n current_note = df_nei_gr[select_data].iat[0, 2]\r\n df_pos = df_pos.append(\r\n {'pos':(pos[0], pos[1], pos[2]),\r\n 'note':current_note},\r\n ignore_index=True)\r\n else:\r\n current_note = df_nei_gr[select_data].iat[0, 2]\r\n current_sup = current_sup + df_nei_gr[select_data].iat[0, 3]\r\n df_pos = df_pos.append(\r\n {'pos':(pos[0], pos[1], pos[2]),\r\n 'note':current_note,\r\n 'acc':current_sup},\r\n ignore_index=True)\r\n \r\n #set the facecolor of the hex\r\n color = cmap(0)\r\n color_nb = 0\r\n found = False\r\n for l in range(df_data.shape[0]):\r\n if pitch_class_display:\r\n #check if he finds the note in the data and get its value for color\r\n if str(int(df_data.at[l, 'pc'])) == str(current_note):\r\n if duration:\r\n color = cmap(norm(df_data.at[l, 'duration']))\r\n color_nb = norm(df_data.at[l, 'duration'])\r\n else:\r\n color = cmap(norm(df_data.at[l, 'nb']))\r\n color_nb = norm(df_data.at[l, 'nb'])\r\n found = True\r\n else:\r\n if df_data.at[l, 'step'] == current_note and df_data.at[l, 'acc'] == current_sup:\r\n if duration:\r\n color = cmap(norm(df_data.at[l, 'duration']))\r\n color_nb = norm(df_data.at[l, 'duration'])\r\n else:\r\n color = cmap(norm(df_data.at[l, 'nb']))\r\n color_nb = norm(df_data.at[l, 'nb'])\r\n found = True\r\n \r\n if found == False and pd.isnull(nan_color) == False:\r\n color = nan_color\r\n\r\n #define the color af the label in function of the color of the hexagon\r\n if color_nb > 0.6:\r\n color_text = 'White'\r\n else:\r\n color_text = 'Black'\r\n\r\n if pitch_class_display == False:\r\n current_note = put_flat_sharp(current_note, current_sup)\r\n\r\n #calcul the center position of the hex in function of the coordonnate\r\n center_pos = [0.5 + pos[0] * HEXEDGE * length - pos[1] * HEXEDGE * length,\r\n 0.5 + pos[0] * length / 2 + pos[1] * length / 2 - pos[2] * length]\r\n\r\n show_hex = True\r\n\r\n #if no duplicate then check if the note is already display\r\n if duplicate == False:\r\n for l in range(df_pos.shape[0] - 1):\r\n if pitch_class_display:\r\n if df_pos.at[l, 'note'] == df_pos.at[df_pos.shape[0] - 1, 'note']:\r\n show_hex = False\r\n else:\r\n if df_pos.at[l, 'note'] == df_pos.at[df_pos.shape[0] - 1, 'note'] and\\\r\n df_pos.at[l, 'acc'] == df_pos.at[df_pos.shape[0] - 1, 'acc']:\r\n show_hex = False\r\n\r\n #draw\r\n if show_hex:\r\n if not edgecolor:\r\n edgecolor = color\r\n p = patches.RegularPolygon(\r\n center_pos,\r\n 6,\r\n radius=length,\r\n facecolor=color,\r\n edgecolor=edgecolor,\r\n **kwargs)\r\n if pitch_class_display:\r\n ax.text(\r\n center_pos[0],\r\n center_pos[1],\r\n str(int(current_note)),\r\n color=color_text,\r\n horizontalalignment='center',\r\n verticalalignment='center',\r\n size=size_text)\r\n else:\r\n ax.text(\r\n center_pos[0],\r\n center_pos[1],\r\n current_note.replace('#', r'$\\sharp$') \\\r\n .replace('b', r'$\\flat$'),\r\n color=color_text,\r\n horizontalalignment='center',\r\n verticalalignment='center',\r\n size=size_text)\r\n ax.add_patch(p)\r\n \r\n #display a colorbar if asked\r\n if colorbar:\r\n ax2 = fig.add_subplot(1, 10, 1)\r\n cb1 = matplotlib.colorbar.ColorbarBase(ax2, cmap=cmap,\r\n norm=norm,\r\n orientation='vertical')\r\n\r\n #display 
off the axis\r\n ax.axis('off')\r\n \r\n return fig\r\n" ]
[ [ "pandas.concat", "matplotlib.colors.LogNorm", "pandas.Series", "pandas.isnull", "matplotlib.patches.RegularPolygon", "pandas.DataFrame", "matplotlib.colors.Normalize", "matplotlib.colorbar.ColorbarBase", "matplotlib.pyplot.close", "matplotlib.cm.get_cmap", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
calum-chamberlain/generalized-phase-detection
[ "9a7ff77fe292c0be6d9dcaaca41f158736f22b73" ]
[ "gpd/helpers/numpy.py" ]
[ "import numpy as np\n\n\ndef sliding_window(data, size, stepsize=1, padded=False, axis=-1, copy=True):\n \"\"\"\n Calculate a sliding window over a signal\n Parameters\n ----------\n data : numpy array\n The array to be slid over.\n size : int\n The sliding window size\n stepsize : int\n The sliding window stepsize. Defaults to 1.\n axis : int\n The axis to slide over. Defaults to the last axis.\n copy : bool\n Return strided array as copy to avoid side effects when manipulating the\n output array.\n Returns\n -------\n data : numpy array\n A matrix where each row in the last dimension consists of one instance\n of the sliding window.\n Notes\n -----\n - Be wary of setting `copy` to `False` as undesired side effects with the\n output values may occur.\n Examples\n --------\n >>> a = np.array([1, 2, 3, 4, 5])\n >>> sliding_window(a, size=3)\n array([[1, 2, 3],\n [2, 3, 4],\n [3, 4, 5]])\n >>> sliding_window(a, size=3, stepsize=2)\n array([[1, 2, 3],\n [3, 4, 5]])\n See Also\n --------\n pieces : Calculate number of pieces available by sliding\n \"\"\"\n if axis >= data.ndim:\n raise ValueError(\n \"Axis value out of range\"\n )\n\n if stepsize < 1:\n raise ValueError(\n \"Stepsize may not be zero or negative\"\n )\n\n if size > data.shape[axis]:\n raise ValueError(\n \"Sliding window size may not exceed size of selected axis\"\n )\n\n shape = list(data.shape)\n shape[axis] = np.floor(\n data.shape[axis] / stepsize - size / stepsize + 1).astype(int)\n shape.append(size)\n\n strides = list(data.strides)\n strides[axis] *= stepsize\n strides.append(data.strides[axis])\n\n strided = np.lib.stride_tricks.as_strided(\n data, shape=shape, strides=strides\n )\n\n if copy:\n return strided.copy()\n else:\n return strided\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n" ]
[ [ "numpy.lib.stride_tricks.as_strided", "numpy.floor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ryankshah/kymatio
[ "38cead012d1b134843a1dd0d5ea160042037c7da" ]
[ "tests/scattering1d/test_torch_scattering1d.py" ]
[ "import pytest\nimport torch\nfrom kymatio import Scattering1D\nimport math\nimport os\nimport io\nimport numpy as np\n\n\nbackends = []\nskcuda_available = False\ntry:\n if torch.cuda.is_available():\n from skcuda import cublas\n import cupy\n skcuda_available = True\nexcept:\n Warning('torch_skcuda backend not available.')\n\nif skcuda_available:\n from kymatio.scattering1d.backend.torch_skcuda_backend import backend\n backends.append(backend)\n\nfrom kymatio.scattering1d.backend.torch_backend import backend\nbackends.append(backend)\n\n\nif torch.cuda.is_available():\n devices = ['cuda', 'cpu']\nelse:\n devices = ['cpu']\n\[email protected](\"device\", devices)\[email protected](\"backend\", backends)\ndef test_simple_scatterings(device, backend, random_state=42):\n \"\"\"\n Checks the behaviour of the scattering on simple signals\n (zero, constant, pure cosine)\n \"\"\"\n\n rng = np.random.RandomState(random_state)\n J = 6\n Q = 8\n T = 2**9\n scattering = Scattering1D(J, T, Q, backend=backend, frontend='torch').to(device)\n return\n\n # zero signal\n x0 = torch.zeros(2, T).to(device)\n\n if backend.name.endswith('_skcuda') and device == 'cpu':\n with pytest.raises(TypeError) as ve:\n s = scattering(x0)\n assert \"CPU\" in ve.value.args[0]\n return\n s = scattering(x0)\n\n # check that s is zero!\n assert torch.max(torch.abs(s)) < 1e-7\n\n # constant signal\n x1 = rng.randn(1)[0] * torch.ones(1, T).to(device)\n if not backend.name.endswith('_skcuda') or device != 'cpu':\n s1 = scattering(x1)\n\n # check that all orders above 1 are 0\n assert torch.max(torch.abs(s1[:, 1:])) < 1e-7\n\n # sinusoid scattering\n meta = scattering.meta()\n for _ in range(3):\n k = rng.randint(1, T // 2, 1)[0]\n x2 = torch.cos(2 * math.pi * float(k) * torch.arange(0, T, dtype=torch.float32) / float(T))\n x2 = x2.unsqueeze(0).to(device)\n if not backend.name.endswith('_skcuda') or device != 'cpu':\n s2 = scattering(x2)\n\n assert(s2[:,torch.from_numpy(meta['order']) != 1,:].abs().max() < 1e-2)\n\n\[email protected](\"device\", devices)\[email protected](\"backend\", backends)\ndef test_sample_scattering(device, backend):\n \"\"\"\n Applies scattering on a stored signal to make sure its output agrees with\n a previously calculated version.\n \"\"\"\n test_data_dir = os.path.dirname(__file__)\n\n with open(os.path.join(test_data_dir, 'test_data_1d.npz'), 'rb') as f:\n buffer = io.BytesIO(f.read())\n data = np.load(buffer)\n\n\n x = torch.from_numpy(data['x']).to(device)\n J = data['J']\n Q = data['Q']\n Sx0 = torch.from_numpy(data['Sx']).to(device)\n\n T = x.shape[-1]\n\n scattering = Scattering1D(J, T, Q, backend=backend, frontend='torch').to(device)\n\n if backend.name.endswith('_skcuda') and device == 'cpu':\n with pytest.raises(TypeError) as ve:\n Sx = scattering(x)\n assert \"CPU\" in ve.value.args[0]\n return\n\n Sx = scattering(x)\n assert torch.allclose(Sx, Sx0)\n\n\[email protected](\"device\", devices)\[email protected](\"backend\", backends)\ndef test_computation_Ux(backend, device, random_state=42):\n \"\"\"\n Checks the computation of the U transform (no averaging for 1st order)\n \"\"\"\n rng = np.random.RandomState(random_state)\n J = 6\n Q = 8\n T = 2**12\n scattering = Scattering1D(J, T, Q, average=False,\n max_order=1, vectorize=False, frontend='torch', backend=backend).to(device)\n # random signal\n x = torch.from_numpy(rng.randn(1, T)).float().to(device)\n\n if not backend.name.endswith('skcuda') or device != 'cpu':\n s = scattering(x)\n\n # check that the keys in s correspond to the order 
0 and second order\n for k in range(len(scattering.psi1_f)):\n assert (k,) in s.keys()\n for k in s.keys():\n if k is not ():\n assert k[0] < len(scattering.psi1_f)\n else:\n assert True\n\n scattering.max_order = 2\n\n s = scattering(x)\n\n count = 1\n for k1, filt1 in enumerate(scattering.psi1_f):\n assert (k1,) in s.keys()\n count += 1\n for k2, filt2 in enumerate(scattering.psi2_f):\n if filt2['j'] > filt1['j']:\n assert (k1, k2) in s.keys()\n count += 1\n\n assert count == len(s)\n\n with pytest.raises(ValueError) as ve:\n scattering.vectorize = True\n scattering(x)\n assert \"mutually incompatible\" in ve.value.args[0]\n\n\n# Technical tests\[email protected](\"backend\", backends)\ndef test_scattering_GPU_CPU(backend, random_state=42):\n \"\"\"\n This function tests whether the CPU computations are equivalent to\n the GPU ones\n \"\"\"\n if torch.cuda.is_available() and not backend.name.endswith('_skcuda'):\n torch.manual_seed(random_state)\n\n J = 6\n Q = 8\n T = 2**12\n\n # build the scattering\n scattering = Scattering1D(J, T, Q, backend=backend, frontend='torch').cpu()\n\n x = torch.randn(2, T)\n s_cpu = scattering(x)\n\n scattering = scattering.cuda()\n x_gpu = x.clone().cuda()\n s_gpu = scattering(x_gpu).cpu()\n # compute the distance\n\n Warning('Tolerance has been slightly lowered here...')\n assert torch.allclose(s_cpu, s_gpu, atol=1e-7)\n\n\[email protected](\"device\", devices)\[email protected](\"backend\", backends)\ndef test_coordinates(device, backend, random_state=42):\n \"\"\"\n Tests whether the coordinates correspond to the actual values (obtained\n with Scattering1d.meta()), and with the vectorization\n \"\"\"\n\n torch.manual_seed(random_state)\n J = 6\n Q = 8\n T = 2**12\n\n scattering = Scattering1D(J, T, Q, max_order=2, backend=backend, frontend='torch')\n\n x = torch.randn(2, T)\n\n scattering.to(device)\n x = x.to(device)\n\n for max_order in [1, 2]:\n scattering.max_order = max_order\n\n scattering.vectorize = False\n\n if backend.name.endswith('skcuda') and device == 'cpu':\n with pytest.raises(TypeError) as ve:\n s_dico = scattering(x)\n assert \"CPU\" in ve.value.args[0]\n else:\n s_dico = scattering(x)\n s_dico = {k: s_dico[k].data for k in s_dico.keys()}\n scattering.vectorize = True\n\n if backend.name.endswith('_skcuda') and device == 'cpu':\n with pytest.raises(TypeError) as ve:\n s_vec = scattering(x)\n assert \"CPU\" in ve.value.args[0]\n else:\n s_vec = scattering(x)\n s_dico = {k: s_dico[k].cpu() for k in s_dico.keys()}\n s_vec = s_vec.cpu()\n\n meta = scattering.meta()\n\n if not backend.name.endswith('_skcuda') or device != 'cpu':\n assert len(s_dico) == s_vec.shape[1]\n\n for cc in range(s_vec.shape[1]):\n k = meta['key'][cc]\n assert torch.allclose(s_vec[:, cc], torch.squeeze(s_dico[k]))\n\n\[email protected](\"device\", devices)\[email protected](\"backend\", backends)\ndef test_precompute_size_scattering(device, backend, random_state=42):\n \"\"\"\n Tests that precompute_size_scattering computes a size which corresponds\n to the actual scattering computed\n \"\"\"\n torch.manual_seed(random_state)\n\n J = 6\n Q = 8\n T = 2**12\n\n scattering = Scattering1D(J, T, Q, vectorize=False, backend=backend, frontend='torch')\n\n x = torch.randn(2, T)\n\n scattering.to(device)\n x = x.to(device)\n if not backend.name.endswith('_skcuda') or device != 'cpu':\n for max_order in [1, 2]:\n scattering.max_order = max_order\n s_dico = scattering(x)\n for detail in [True, False]:\n # get the size of scattering\n size = 
scattering.output_size(detail=detail)\n if detail:\n num_orders = {0: 0, 1: 0, 2: 0}\n for k in s_dico.keys():\n if k is ():\n num_orders[0] += 1\n else:\n if len(k) == 1: # order1\n num_orders[1] += 1\n elif len(k) == 2:\n num_orders[2] += 1\n todo = 2 if max_order == 2 else 1\n for i in range(todo):\n assert num_orders[i] == size[i]\n # check that the orders are completely equal\n else:\n assert len(s_dico) == size\n\n\[email protected](\"device\", devices)\[email protected](\"backend\", backends)\ndef test_differentiability_scattering(device, backend, random_state=42):\n \"\"\"\n It simply tests whether it is really differentiable or not.\n This does NOT test whether the gradients are correct.\n \"\"\"\n\n if backend.name.endswith(\"_skcuda\"):\n pytest.skip(\"The skcuda backend does not pass differentiability\"\n \"tests, but that's ok (for now).\")\n\n torch.manual_seed(random_state)\n\n J = 6\n Q = 8\n T = 2**12\n\n scattering = Scattering1D(J, T, Q, frontend='torch', backend=backend).to(device)\n\n x = torch.randn(2, T, requires_grad=True, device=device)\n\n s = scattering.forward(x)\n loss = torch.sum(torch.abs(s))\n loss.backward()\n assert torch.max(torch.abs(x.grad)) > 0.\n\n\[email protected](\"backend\", backends)\ndef test_scattering_shape_input(backend):\n # Checks that a wrong input to shape raises an error\n J, Q = 6, 8\n with pytest.raises(ValueError) as ve:\n shape = 5, 6\n s = Scattering1D(J, shape, Q, backend=backend, frontend='torch')\n assert \"exactly one element\" in ve.value.args[0]\n\n\n with pytest.raises(ValueError) as ve:\n shape = 1.5\n s = Scattering1D(J, shape, Q, backend=backend, frontend='torch')\n # should invoke the else branch\n assert \"1-tuple\" in ve.value.args[0]\n assert \"integer\" in ve.value.args[0]\n\n\[email protected](\"device\", devices)\[email protected](\"backend\", backends)\ndef test_batch_shape_agnostic(device, backend):\n J, Q = 3, 8\n length = 1024\n shape = (length,)\n\n length_ds = length / 2**J\n\n S = Scattering1D(J, shape, Q, backend=backend, frontend='torch').to(device)\n\n with pytest.raises(ValueError) as ve:\n S(torch.zeros(()).to(device))\n assert \"at least one axis\" in ve.value.args[0]\n\n x = torch.zeros(shape).to(device)\n\n if backend.name.endswith('_skcuda') and device == 'cpu':\n with pytest.raises(TypeError) as ve:\n Sx = S(x)\n assert \"CPU\" in ve.value.args[0]\n return\n\n Sx = S(x)\n\n assert Sx.dim() == 2\n assert Sx.shape[-1] == length_ds\n\n n_coeffs = Sx.shape[-2]\n\n test_shapes = ((1,) + shape, (2,) + shape, (2,2) + shape, (2,2,2) + shape)\n\n for test_shape in test_shapes:\n x = torch.zeros(test_shape).to(device)\n\n S.vectorize = True\n Sx = S(x)\n\n assert Sx.dim() == len(test_shape)+1\n assert Sx.shape[-1] == length_ds\n assert Sx.shape[-2] == n_coeffs\n assert Sx.shape[:-2] == test_shape[:-1]\n\n S.vectorize = False\n Sx = S(x)\n\n assert len(Sx) == n_coeffs\n for k, v in Sx.items():\n assert v.shape[-1] == length_ds\n assert v.shape[-2] == 1\n assert v.shape[:-2] == test_shape[:-1]\n\n\[email protected](\"device\", devices)\[email protected](\"backend\", backends)\ndef test_pad_1d(device, backend, random_state=42):\n \"\"\"\n Tests the correctness and differentiability of pad_1d\n \"\"\"\n torch.manual_seed(random_state)\n N = 128\n for pad_left in range(0, N - 16, 16):\n for pad_right in [pad_left, pad_left + 16]:\n x = torch.randn(2, 4, N, requires_grad=True, device=device)\n x_pad = backend.pad_1d(x, pad_left, pad_right, mode='reflect')\n # Check the size\n x2 = x.clone()\n x_pad2 = 
x_pad.clone()\n for t in range(1, pad_left + 1):\n assert torch.allclose(x_pad2[..., pad_left - t],x2[..., t])\n for t in range(x2.shape[-1]):\n assert torch.allclose(x_pad2[..., pad_left + t], x2[..., t])\n for t in range(1, pad_right + 1):\n assert torch.allclose(x_pad2[..., x_pad.shape[-1] - 1 - pad_right + t], x2[..., x.shape[-1] - 1 - t])\n # check the differentiability\n loss = 0.5 * torch.sum(x_pad**2)\n loss.backward()\n # compute the theoretical gradient for x\n x_grad_original = x.clone()\n x_grad = x_grad_original.new(x_grad_original.shape).fill_(0.)\n x_grad += x_grad_original\n for t in range(1, pad_left + 1):\n x_grad[..., t] += x_grad_original[..., t]\n for t in range(1, pad_right + 1): # it is counted twice!\n t0 = x.shape[-1] - 1 - t\n x_grad[..., t0] += x_grad_original[..., t0]\n # get the difference\n assert torch.allclose(x.grad, x_grad)\n # Check that the padding shows an error if we try to pad\n with pytest.raises(ValueError):\n backend.pad_1d(x, x.shape[-1], 0, mode='reflect')\n with pytest.raises(ValueError):\n backend.pad_1d(x, 0, x.shape[-1], mode='reflect')\n\n\[email protected](\"device\", devices)\[email protected](\"backend\", backends)\ndef test_modulus(device, backend, random_state=42):\n \"\"\"\n Tests the stability and differentiability of modulus\n \"\"\"\n torch.manual_seed(random_state)\n # Test with a random vector\n x = torch.randn(2, 4, 128, 2, requires_grad=True, device=device)\n\n if backend.name.endswith('_skcuda') and device == 'cpu':\n # If we are using a GPU-only backend, make sure it raises the proper\n # errors for CPU tensors.\n with pytest.raises(TypeError) as re:\n x_bad = torch.randn((4, 2)).cpu()\n backend.modulus_complex(x_bad)\n assert \"for CPU tensors\" in re.value.args[0]\n return\n\n\n x_abs = backend.modulus_complex(x)\n\n assert len(x_abs.shape) == len(x.shape)\n # check the value\n x_abs2 = x_abs.clone()\n x2 = x.clone()\n assert torch.allclose(x_abs2[..., 0], torch.sqrt(x2[..., 0]**2 + x2[..., 1]**2))\n\n with pytest.raises(TypeError) as te:\n x_bad = torch.randn(4).to(device)\n backend.modulus_complex(x_bad)\n assert \"should be complex\" in te.value.args[0]\n\n if backend.name.endswith(\"_skcuda\"):\n pytest.skip(\"The skcuda backend does not pass differentiability\"\n \"tests, but that's ok (for now).\")\n\n # check the gradient\n loss = torch.sum(x_abs)\n loss.backward()\n x_grad = x2 / x_abs2[..., 0].unsqueeze(dim=-1)\n assert torch.allclose(x.grad, x_grad)\n\n\n # Test the differentiation with a vector made of zeros\n x0 = torch.zeros(100, 4, 128, 2, requires_grad=True, device=device)\n x_abs0 = backend.modulus_complex(x0)\n loss0 = torch.sum(x_abs0)\n loss0.backward()\n assert torch.max(torch.abs(x0.grad)) <= 1e-7\n\n\[email protected](\"backend\", backends)\[email protected](\"device\", devices)\ndef test_subsample_fourier(backend, device, random_state=42):\n \"\"\"\n Tests whether the periodization in Fourier performs a good subsampling\n in time\n \"\"\"\n if backend.name.endswith('_skcuda') and device == 'cpu':\n with pytest.raises(TypeError) as re:\n x_bad = torch.randn((4, 2)).cpu()\n backend.subsample_fourier(x_bad, 1)\n assert \"for CPU tensors\" in re.value.args[0]\n return\n rng = np.random.RandomState(random_state)\n J = 10\n x = rng.randn(2, 4, 2**J) + 1j * rng.randn(2, 4, 2**J)\n x_f = np.fft.fft(x, axis=-1)[..., np.newaxis]\n x_f.dtype = 'float64' # make it a vector\n x_f_th = torch.from_numpy(x_f).to(device)\n\n for j in range(J + 1):\n x_f_sub_th = backend.subsample_fourier(x_f_th, 2**j).cpu()\n x_f_sub = 
x_f_sub_th.numpy()\n x_f_sub.dtype = 'complex128'\n x_sub = np.fft.ifft(x_f_sub[..., 0], axis=-1)\n assert np.allclose(x[:, :, ::2**j], x_sub)\n\n # If we are using a GPU-only backend, make sure it raises the proper\n # errors for CPU tensors.\n if device=='cuda':\n with pytest.raises(TypeError) as te:\n x_bad = torch.randn(4).cuda()\n backend.subsample_fourier(x_bad, 1)\n assert \"should be complex\" in te.value.args[0]\n" ]
[ [ "torch.abs", "torch.ones", "numpy.allclose", "numpy.fft.fft", "torch.zeros", "torch.sqrt", "torch.manual_seed", "torch.randn", "numpy.load", "torch.sum", "torch.from_numpy", "numpy.fft.ifft", "torch.cuda.is_available", "torch.arange", "torch.allclose", "numpy.random.RandomState", "torch.squeeze" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tkhe/ssd-family
[ "a797ec36fda59549aff54419c105813c33d8cdd3" ]
[ "ssd/modeling/extra_layers/pelee_extra_layers.py" ]
[ "import torch.nn as nn\n\nfrom ssd.layers import Conv2d\n\n\nclass ResBlock(nn.Module):\n def __init__(self, in_channels):\n super(ResBlock, self).__init__()\n\n self.branch1 = nn.Sequential(\n Conv2d(in_channels, 128, kernel_size=1),\n Conv2d(128, 128, kernel_size=3),\n Conv2d(128, 256, kernel_size=1)\n )\n self.branch2 = Conv2d(in_channels, 256, kernel_size=1)\n\n def forward(self, x):\n out1 = self.branch1(x)\n out2 = self.branch2(x)\n return out1 + out2\n\n\nclass PeleeExtraLayers(nn.Module):\n def __init__(self, in_channels):\n super(PeleeExtraLayers, self).__init__()\n\n self.resblock1 = ResBlock(in_channels[0])\n self.resblock2 = ResBlock(in_channels[1])\n self.stage3 = nn.Sequential(\n Conv2d(in_channels[1], 256, kernel_size=1),\n Conv2d(256, 256, kernel_size=3, stride=2)\n )\n self.resblock3 = ResBlock(256)\n self.stage4 = nn.Sequential(\n Conv2d(256, 128, kernel_size=1),\n nn.Conv2d(128, 256, kernel_size=3, stride=1),\n nn.BatchNorm2d(256),\n nn.ReLU(inplace=True)\n )\n self.resblock4 = ResBlock(256)\n self.stage5 = nn.Sequential(\n Conv2d(256, 128, kernel_size=1),\n nn.Conv2d(128, 256, kernel_size=3, stride=1),\n nn.BatchNorm2d(256),\n nn.ReLU(inplace=True)\n )\n self.resblock5 = ResBlock(256)\n\n self.out_channels = (256, 256, 256, 256, 256, 256)\n\n def forward(self, x):\n outputs = []\n\n out1 = self.resblock1(x[0])\n out2 = self.resblock2(x[1])\n outputs += [out1, out1, out2]\n\n out = self.stage3(x[1])\n out = self.resblock3(out)\n outputs.append(out)\n\n out = self.stage4(out)\n out = self.resblock3(out)\n outputs.append(out)\n\n out = self.stage5(out)\n out = self.resblock5(out)\n outputs.append(out)\n return outputs\n" ]
[ [ "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.BatchNorm2d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
KshitijKarthick/tvecs
[ "bcec2d09045319472036aa7aa03084ca2569b7bb" ]
[ "tvecs/evaluation/evaluation.py" ]
[ "#!/usr/bin/env python2.7\n# -*- coding: utf-8 -*-\n\"\"\"Module to Evaluate T-Vecs model against Human Semantic Similarity Score.\"\"\"\nimport os\nimport codecs\nfrom scipy.stats import pearsonr\nfrom gensim.models import Word2Vec\n\nfrom tvecs.logger import init_logger as log\nfrom tvecs.bilingual_generator import bilingual_generator as bg\nfrom tvecs.vector_space_mapper.vector_space_mapper import VectorSpaceMapper\n\nLOGGER = log.initialise(\"TVecs.Evaluation\")\n\n\ndef extract_correlation_coefficient(score_data_path, vsm):\n \"\"\"\n Extract Human Score, Word1, Word2. Compute T-Vecs Score.\n\n API Documentation\n :param score_data_path: File generated by preprocessor/yandex\n :param vsm: Vector spaces mapped using 2 models.\n :type score_data_path: :class:`String`\n :type vsm: :mod:`tvecs.vector_space_mapper.vector_space_mapper`\n :return: Returns (Correlation coefficient, P-Value)\n :rtype: :class:`Tuple(Float, Float)`\n \"\"\"\n LOGGER.info(\"Extracting Human Score from score data path: %s\", score_data_path)\n with codecs.open(score_data_path, \"r\", encoding=\"utf-8\") as score_file:\n human_score, calculated_score = zip(\n *[\n [\n data.split()[2],\n vsm.obtain_cosine_similarity(data.split()[0], data.split()[1]),\n ]\n for data in score_file.readlines()\n ]\n )\n human_score, calculated_score = zip(\n *[\n [float(hs), float(cs)]\n for hs, cs in zip(human_score, calculated_score)\n if hs is not None and cs is not None\n ]\n )\n return get_correlation_coefficient(list(human_score), list(calculated_score))\n\n\ndef get_correlation_coefficient(human_score, calculated_score):\n \"\"\"\n Measure correlation using Pearson's Coefficient.\n\n - The correlation is between the T-Vecs Model and\n - Human Semantic Similarity Score.\n\n API Documentation:\n :param human_score: List of human scores.\n :param calculated_score: List of calculated scores.\n :type human_score: :class:`List`\n :type calculated_score: :class:`List`\n :return: (Correlation Coefficient, P-Value)\n :rtype: :class:`Tuple(Float, Float)`\n\n .. note::\n * correlation_coefficient - Measure of degree of relatedness\n between two variables\n * p-value - The null hypothesis is that the\n two variables are uncorrelated. The p-value is a number between zero\n and one that represents the probability that your data would have\n arisen if the null hypothesis were true.\n\n .. 
seealso::\n * :mod:`scipy.stats`\n \"\"\"\n LOGGER.info(\"Computing Correlation Coefficient b/w human, t-vecs score\")\n return pearsonr(human_score, calculated_score)\n\n\ndef _load_vector_space_mapper(model_1_path, model_2_path, bilingual_path):\n \"\"\"Build a vector space mapper from model 1,2 and bilingual dict.\"\"\"\n model_1 = Word2Vec.load(model_1_path)\n model_2 = Word2Vec.load(model_2_path)\n bilingual_dict = bg.load_bilingual_dictionary(bilingual_path)\n tvecs_vm = VectorSpaceMapper(model_1, model_2, bilingual_dict)\n tvecs_vm.map_vector_spaces()\n return tvecs_vm\n\n\nif __name__ == \"__main__\":\n log.set_logger_normal(LOGGER)\n EVAL_DATASET = {\n \"Wordsim-253-REL\": os.path.join(\n \"data\", \"evaluate\", \"wordsim_relatedness_goldstandard.txt_translate\"\n ),\n \"MEN\": os.path.join(\n \"data\", \"evaluate\", \"MEN_dataset_natural_form_full_translate\"\n ),\n \"MTurk-287\": os.path.join(\"data\", \"evaluate\", \"Mturk_287.txt_translate\"),\n \"MTurk-771\": os.path.join(\"data\", \"evaluate\", \"MTURK-771.csv_translate\"),\n }\n for DATASET in EVAL_DATASET.keys():\n LOGGER.info(\n \"Evaluation of T-Vecs Model against Human Semantic\"\n \" Similarity Score %s Dataset:\" % DATASET\n )\n CORRELATION_SCORE, P_VALUE = extract_correlation_coefficient(\n score_data_path=EVAL_DATASET[DATASET],\n vsm=_load_vector_space_mapper(\n model_1_path=os.path.join(\"data\", \"models\", \"t-vex-english-model\"),\n model_2_path=os.path.join(\"data\", \"models\", \"t-vex-hindi-model\"),\n bilingual_path=os.path.join(\n \"data\", \"bilingual_dictionary\", \"english_hindi_train_bd\"\n ),\n ),\n )\n LOGGER.info(\n \"Correlation Score obtained: %s\\nP-Value obtained: %s\",\n CORRELATION_SCORE,\n P_VALUE,\n )\n" ]
[ [ "scipy.stats.pearsonr" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
alxyok/ransplacement
[ "9434dbe458b5d1e31be65c976fc693965997d504" ]
[ "trainer.py" ]
[ "# MIT License\n\n# Copyright (c) 2021 alxyok\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport config\nimport data\nimport json\nimport model\nimport os.path as osp\nimport pytorch_lightning as pl\nfrom pytorch_lightning.utilities.cli import LightningCLI\nimport torch\nfrom typing import List\n\nclass Trainer(pl.Trainer):\n def __init__(self, \n accelerator: str = 'cpu', \n devices: List[int] = None, \n max_epochs: int = 1000, \n gradient_clip_val: int = 1000,\n fast_dev_run: int = False, \n callbacks: List[pl.callbacks.Callback] = None):\n \n if accelerator == 'cpu':\n devices = None\n \n logger = pl.loggers.TensorBoardLogger(config.logs_path, name=None, log_graph=True)\n \n super().__init__(\n default_root_dir=config.logs_path,\n logger=logger,\n accelerator=accelerator,\n devices=devices,\n max_epochs=max_epochs,)\n \n def test(self, **kwargs):\n results = super().test(**kwargs)[0]\n \n with open(osp.join(config.artifacts_path, \"results.json\"), \"w\") as f:\n json.dump(results, f)\n \n torch.save(self.model, osp.join(config.artifacts_path, 'model.pth'))\n \ndef main():\n \n cli = LightningCLI(trainer_class=Trainer)\n cli.trainer.test(model=cli.model, datamodule=cli.datamodule)\n \n \nif __name__ == '__main__':\n \n torch.set_default_dtype(torch.double)\n main()" ]
[ [ "torch.set_default_dtype" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
offroad-robotics/gpr-lib
[ "a5d61c825f22dd3aae8a1107142356d22907474f" ]
[ "examples/ground_truth_2d_example.py" ]
[ "# Copyright (c) 2021, Jeremy Roy\r\n# All rights reserved.\r\n\r\n# Redistribution and use in source and binary forms, with or without\r\n# modification, are permitted provided that the following conditions are met:\r\n# 1. Redistributions of source code must retain the above copyright\r\n# notice, this list of conditions and the following disclaimer.\r\n# 2. Redistributions in binary form must reproduce the above copyright\r\n# notice, this list of conditions and the following disclaimer in the\r\n# documentation and/or other materials provided with the distribution.\r\n# 3. Neither the name of the Offroad Robotics Lab at Queen's University nor the\r\n# names of its contributors may be used to endorse or promote products\r\n# derived from this software without specific prior written permission.\r\n\r\n# THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ''AS IS'' AND ANY\r\n# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\r\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\r\n# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY\r\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\r\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\r\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\r\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\r\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\r\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r\n\r\n# This file plots the ground truth for the 2D example\r\n#\r\n# Author: Jeremy Roy <[email protected]>\r\n# License: BSD 2.0\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n# Center of terrains element chuncks\r\nbump_center = np.array([ 2046.298, 1599.936]) # mm\r\ngrass_center = np.array([ 236.904, -2487.764]) # mm\r\nrocks_center = np.array([-1917.432, 1926.706]) # mm\r\n\r\n# Corners of terrain element chuncks\r\nbumps1 = (bump_center + np.array([ 441.689, -898.057])) / 1000 # m\r\nbumps2 = (bump_center + np.array([-443.011, -900.151])) / 1000 # m\r\nbumps3 = (bump_center + np.array([-442.218, 899.451])) / 1000 # m\r\nbumps4 = (bump_center + np.array([ 443.539, 898.757])) / 1000 # m\r\n\r\ngrass1 = (grass_center + np.array([ 1805.520, -438.652])) / 1000 # m\r\ngrass2 = (grass_center + np.array([ 1804.230, 443.375])) / 1000 # m\r\ngrass3 = (grass_center + np.array([-1799.550, 438.900])) / 1000 # m\r\ngrass4 = (grass_center + np.array([-1810.200, -443.624])) / 1000 # m\r\n\r\nrocks1 = (rocks_center + np.array([ -4.514, 1263.860])) / 1000 # m\r\nrocks2 = (rocks_center + np.array([ 1259.800, 12.871])) / 1000 # m\r\nrocks3 = (rocks_center + np.array([ 8.747, -1267.760])) / 1000 # m\r\nrocks4 = (rocks_center + np.array([-1264.030, -8.969])) / 1000 # m\r\n\r\n# Group corner markers of terrain elements for 2D plotting\r\nbumps_2d = np.array([bumps1, bumps2, bumps3, bumps4, bumps1])\r\ngrass_2d = np.array([grass1, grass2, grass3, grass4, grass1])\r\nrocks_2d = np.array([rocks1, rocks2, rocks3, rocks4, rocks1])\r\n\r\n\r\n# Plot the wheelbase buffers\r\ndef plot_ground_truth(ax):\r\n # Plot terrain element markers\r\n ax.plot(bumps_2d.T[0], bumps_2d.T[1], \"-\", color=\"#ffb31a\", label=\"Bumps\")\r\n ax.plot(grass_2d.T[0], grass_2d.T[1], \"-\", color=\"#00802b\", label=\"Grass\")\r\n ax.plot(rocks_2d.T[0], rocks_2d.T[1], \"-\", color=\"black\", label=\"Rocks\")\r\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
arssly/openvqa
[ "f10254a05819f813b71337a2ee45b2be09ab8e5d" ]
[ "openvqa/premodels/resnet/preproc.py" ]
[ "import torch\nfrom PIL import Image\nfrom openvqa.premodels.resnet.resnet_model import model, preproc_transform\n\n\ndef preproc_to_feats(image):\n input_tensor = preproc_transform(image)\n input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model\n\n if torch.cuda.is_available():\n input_batch = input_batch.to('cuda')\n with torch.no_grad():\n output = model(input_batch).detach().squeeze().cpu()\n output = output.numpy().reshape((2048, -1)).transpose()\n return output\n\n\nif __name__ == \"__main__\":\n output = preproc_to_feats(Image.open('/Users/macbook/Downloads/vqa/val2014/COCO_val2014_000000000042.jpg'))\n print ('output shape', output.shape)\n" ]
[ [ "torch.no_grad", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
VivianYuan12138/python_matplotlib_learning
[ "c1011b8ed850959f66472e2d81b7d41f0623d06b" ]
[ "example/example1.py" ]
[ "# ① 导入库\nimport matplotlib.pyplot as plt #导入matplotlib库中的pyplot子库\n# ② 新建绘图区\nfig, ax = plt.subplots(figsize=(6,4)) #指定绘图区大小为6*4英寸\nplt.rcParams['font.sans-serif'] = ['SimHei'] #设置显示中文字体(黑体)\n# ③ 准备数据\nseazons=['一季度', '二季度', '三季度','四季度'] #设置分类轴显示文本\nsales=[2780,1950,2680,2120] #设置某产品的销售量数据\nindex=[0,1,2,3] #index控制分类轴刻度\nbarW=0.7 #barW控制条形的宽度\n# ④ 设置图表属性\nplt.title(\"产品销售\") #设置图表标题\nplt.xlabel(\"时间\") #设置X坐标轴标签\nplt.ylabel(\"销售量\") #设置Y坐标轴标签\nplt.xticks(index,seazons) #设置分类轴标记\nplt.grid(axis=\"y\") #显示横向网格线\n# ⑤ 绘制图表\nplt.bar(index,sales,barW,color='b',label='某产品') #绘制柱状图\nfor a,b in zip(index,sales): #显示数值标注\n plt.text(a,b+30, '%.0f'%b, ha='center', fontsize=9) #ha设置水平对齐方式\n# ⑥ 显示图例\nplt.legend(loc='upper right' ) #在右上角显示图例文字\n# ⑦ 显示图表\nplt.show( )" ]
[ [ "matplotlib.pyplot.text", "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "matplotlib.pyplot.subplots", "matplotlib.pyplot.grid", "matplotlib.pyplot.bar", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xticks", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cikrhazo/FERSNet
[ "a5d9d2e71cf4e4026012f4c2cca7db6a11d162f9" ]
[ "network.py" ]
[ "import torch\nimport torch.nn as nn\nfrom LeakyUnit import LeakyUnit\nfrom net_utlz.blocks import BasicBlock, conv1x1, UNetUp, Transform\n\ncfg = {\n 'VGG13': [(64, 64), (64, 64), 'D1:128, 64',\n (128, 128), (128, 128), 'D:128, 64',\n (256, 128), (256, 256), 'D:256, 128',\n (512, 256), (512, 512), 'D:512, 256',\n (512, 512), (1024, 512), 'D:512, 512'],\n}\n\n\nclass Decoder(nn.Module):\n def __init__(self, vgg_name='VGG13', out_channel=3, mem=512, num_class=6):\n super(Decoder, self).__init__()\n self.module = self.make_layers(cfg[vgg_name])\n self.out_layer = nn.Conv2d(64, out_channel, 3, 1, 1)\n self.transform = Transform(in_channel=mem, style_dim=num_class)\n\n def forward(self, feature, prob_t, shortcut_list, transform=False):\n shortcut_list = shortcut_list[::-1]\n if transform:\n y = self.transform(feature, prob_t)\n else:\n y = feature\n k = 0\n for operation in self.module:\n if isinstance(operation, UNetUp):\n shortcut = shortcut_list[k]\n y = operation(y, shortcut, prob_t)\n k = k + 1\n else:\n y = operation(y)\n return self.out_layer(y)\n\n def make_layers(self, cfg):\n layers = []\n for x in cfg:\n if 'D1' in x:\n chs = x.split(\":\")[-1]\n in_ch, out_ch = int(chs.split(\",\")[0]), int(chs.split(\",\")[1])\n layers += [nn.LeakyReLU(0.2),\n nn.InstanceNorm2d(out_ch),\n nn.ConvTranspose2d(in_ch, out_ch, 4, 2, 1, bias=False)]\n elif 'D' in x:\n chs = x.split(\":\")[-1]\n in_ch, out_ch = int(chs.split(\",\")[0]), int(chs.split(\",\")[1])\n layers += [UNetUp(in_ch, out_ch)]\n else:\n in_ch, out_ch = x[0], x[1]\n layers += [nn.LeakyReLU(0.2),\n nn.InstanceNorm2d(out_ch),\n nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1)]\n layers = layers[::-1]\n return nn.ModuleList(layers)\n\n\nclass Classifier(nn.Module):\n def __init__(self, nz=7, nc=512, _size=1):\n super(Classifier, self).__init__()\n self.pooling = nn.Sequential(\n nn.Conv2d(nc, nc, 3, 1, 1),\n nn.BatchNorm2d(nc),\n nn.ReLU(),\n nn.AdaptiveAvgPool2d(_size)\n )\n self.classifier = nn.Linear(nc, nz)\n\n def forward(self, feature):\n pooled = self.pooling(feature).view(feature.size(0), -1)\n out = self.classifier(pooled)\n return out\n\n\nclass Discriminator(nn.Module):\n def __init__(self, input_shape=(1, 96, 96), num_class=6):\n super(Discriminator, self).__init__()\n\n channels, height, width = input_shape\n\n # Calculate output shape of image discriminator (PatchGAN)\n self.out_size = (1, height // 2 ** 4, width // 2 ** 4)\n\n def discriminator_block(in_filters, out_filters, normalize=True):\n \"\"\"Returns downsampling layers of each discriminator block\"\"\"\n layers = [nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1)]\n if normalize:\n layers.append(nn.InstanceNorm2d(out_filters))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n return layers\n\n self.model = nn.Sequential(\n *discriminator_block(channels + num_class, 64, normalize=False),\n *discriminator_block(64, 128),\n *discriminator_block(128, 256),\n *discriminator_block(256, 512),\n nn.ZeroPad2d((1, 0, 1, 0)),\n nn.Conv2d(512, 1, 4, padding=1),\n )\n\n def forward(self, img, cond):\n img_input = torch.cat((img, cond), 1)\n dis_out = self.model(img_input)\n return dis_out\n\n\nclass FERSNet(nn.Module):\n def __init__(self, vgg_name='VGG13', num_class=6, mem_size=512, k_channel=1, inter=64):\n super(FERSNet, self).__init__()\n self.leakyunitxy1 = LeakyUnit(n_features=inter * 2)\n self.leakyunityx1 = LeakyUnit(n_features=inter * 2)\n self.leakyunitxy2 = LeakyUnit(n_features=inter * 4)\n self.leakyunityx2 = LeakyUnit(n_features=inter * 4)\n 
self.leakyunitxy3 = LeakyUnit(n_features=inter * 8)\n self.leakyunityx3 = LeakyUnit(n_features=inter * 8)\n\n self.stem = nn.Sequential(\n nn.Conv2d(k_channel, inter, 3, 1, 1),\n nn.BatchNorm2d(inter),\n nn.ReLU(inplace=True),\n )\n self.conv1x = BasicBlock(\n inplanes=inter, planes=2 * inter, downsample=conv1x1(inter, 2 * inter)\n )\n self.conv1y = BasicBlock(\n inplanes=inter, planes=2 * inter, downsample=conv1x1(inter, 2 * inter), norm_layer=nn.InstanceNorm2d\n )\n\n self.conv2x = BasicBlock(\n inplanes=2 * inter, planes=4 * inter, downsample=conv1x1(2 * inter, 4 * inter)\n )\n self.conv2y = BasicBlock(\n inplanes=2 * inter, planes=4 * inter, downsample=conv1x1(2 * inter, 4 * inter), norm_layer=nn.InstanceNorm2d\n )\n\n self.conv3x = BasicBlock(\n inplanes=4 * inter, planes=8 * inter, downsample=conv1x1(4 * inter, 8 * inter)\n )\n self.conv3y = BasicBlock(\n inplanes=4 * inter, planes=8 * inter, downsample=conv1x1(4 * inter, 8 * inter), norm_layer=nn.InstanceNorm2d\n )\n\n self.conv4xy = BasicBlock(inplanes=16 * inter, planes=16 * inter)\n\n self.classifier = Classifier(nz=num_class, nc=512, _size=1)\n self.decoder = Decoder(vgg_name=vgg_name, out_channel=k_channel, mem=mem_size, num_class=num_class)\n self.pooling = nn.MaxPool2d(stride=2, kernel_size=2)\n\n def forward(self, x, prob_t):\n f1 = self.pooling(self.stem(x))\n shortcut = [f1]\n f_x2, f_y2 = self.pooling(self.conv1x(f1)), self.pooling(self.conv1y(f1))\n f_x2_hat, r_xy2, z_xy2 = self.leakyunitxy1(f_x2, f_y2)\n f_y2_hat, r_yx2, z_yx2 = self.leakyunityx1(f_y2, f_x2)\n shortcut.append(f_y2_hat)\n\n f_x3, f_y3 = self.pooling(self.conv2x(f_x2_hat)), self.pooling(self.conv2y(f_y2_hat))\n f_x3_hat, r_xy3, z_xy3 = self.leakyunitxy2(f_x3, f_y3)\n f_y3_hat, r_yx3, z_yx3 = self.leakyunityx2(f_y3, f_x3)\n shortcut.append(f_y3_hat)\n\n f_x4, f_y4 = self.pooling(self.conv3x(f_x3_hat)), self.pooling(self.conv3y(f_y3_hat))\n f_x4_hat, r_xy4, z_xy4 = self.leakyunitxy3(f_x4, f_y4)\n f_y4_hat, r_yx4, z_yx4 = self.leakyunityx3(f_y4, f_x4)\n shortcut.append(f_y4_hat)\n\n joint = torch.cat((f_x4_hat, f_y4_hat), dim=1)\n f_xy5 = self.pooling(self.conv4xy(joint))\n\n f_x5, f_y5 = f_xy5[:, :512], f_xy5[:, 512:]\n prob5 = self.classifier(f_x5)\n\n out = self.decoder(f_y5, prob_t, shortcut, transform=True)\n # out = self.decoder(f_xy5, prob_t, shortcut, transform=False)\n\n return out, prob5\n\n\nif __name__ == '__main__':\n net = FERSNet(k_channel=3)\n net.cuda()\n dis = Discriminator()\n dis.cuda()\n\n inp = torch.randn(2, 3, 96, 96)\n inp = inp.cuda()\n prob_t = torch.randn(2, 6)\n prob_t = prob_t.cuda()\n\n out_tran, logits = net(inp, prob_t)\n\n print(out_tran.size())\n print(logits.size())\n" ]
[ [ "torch.nn.ZeroPad2d", "torch.nn.ConvTranspose2d", "torch.cat", "torch.randn", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.MaxPool2d", "torch.nn.AdaptiveAvgPool2d", "torch.nn.LeakyReLU", "torch.nn.InstanceNorm2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
arekmula/skull_stripping
[ "d03cef81392f8cd243dc1c6d32ffa897af922eb2" ]
[ "tf_implementation/experiments/show_slices_from_generator.py" ]
[ "from argparse import ArgumentParser\nfrom pathlib import Path\nfrom matplotlib import pyplot as plt\n\nfrom segmentation.dataset import scans_generator\n\n\ndef main(args):\n train_path = Path(args.train_dir_path)\n val_path = Path(args.val_dir_path)\n\n train_scans, val_scans, train_samples, val_samples = scans_generator(train_path, val_path)\n # Get batch of data\n images, masks = next(train_scans)\n\n for i in range(len(images)):\n fig = plt.figure()\n fig.add_subplot(1, 2, 1)\n print(\"Please note that preprocessing for specific backbone might cause strange image for human eye\")\n plt.imshow(images[i], cmap=\"gray\")\n\n fig.add_subplot(1, 2, 2)\n plt.imshow(masks[i], cmap=\"gray\")\n\n plt.show()\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\"--train_dir_path\", metavar=\"train_dir_path\", type=str, required=True)\n parser.add_argument(\"--val_dir_path\", metavar=\"val_dir_path\", type=str, required=True)\n\n args, _ = parser.parse_known_args()\n\n main(args)\n" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nng555/fairseq
[ "c9730a125825a85f33042e1b9fd1959b8ca829e5" ]
[ "scripts/wilcox_test.py" ]
[ "import os\nimport argparse\nimport time\nimport copy\nimport numpy as np\nfrom scipy.stats import wilcoxon, mannwhitneyu\n\nimport hydra\nimport omegaconf\nfrom omegaconf import DictConfig\nfrom hydra import slurm_utils\n\[email protected](config_path='/h/nng/conf/robust/config.yaml', strict=False)\ndef display_results(cfg: DictConfig):\n if cfg.extra:\n cfg.display.dir.name.append(cfg.extra)\n\n if cfg.gen.seed is not None:\n cfg.display.dir.name[3] = '_'.join(cfg.display.dir.name[3].split('_')[:-1])\n orig_avg = []\n compare_avg = []\n\n for fdset in cfg.display.fdset:\n cfg.display.dir.name[2] = fdset\n for noise in empty_to_list(cfg.display.noise):\n cfg.display.dir.name[1] = noise\n row = []\n\n for tdset in cfg.display.tdset:\n cfg.display.dir.name[5] = tdset\n seed_res = []\n\n compare_dir = copy.deepcopy(cfg.display.dir.name)\n compare_dir[3] = cfg.display.compare.bin\n compare_res = []\n\n # check original eval\n for seed in empty_to_list(cfg.display.seed):\n cfg.display.dir.name[6] = seed\n\n for gen_seed in empty_to_list(cfg.gen.seed):\n cfg.display.dir.name[4] = gen_seed\n #print(slurm_utils.resolve_name(cfg.display.dir.name))\n #print(cfg.display.dir.name)\n display_dir = os.path.join('/h/nng/slurm', cfg.display.dir.date, slurm_utils.resolve_name(cfg.display.dir.name), 'log')\n #print(display_dir)\n if not os.path.exists(display_dir):\n #print(\"{} does not exist!\".format(display_dir))\n continue\n fnames = sorted(os.listdir(display_dir))[::-1]\n for fname in fnames:\n if 'err' in fname:\n continue\n res = open(os.path.join(display_dir, fname), 'r').readlines()\n if res != [] and 'Accuracy' in res[-1]:\n seed_res.append(float(res[-1].rstrip().split(' ')[-1]))\n break\n\n # check comparison eval\n for seed in empty_to_list(cfg.display.seed):\n\n compare_dir[6] = seed\n # check without any gen seed first\n if cfg.display.compare.no_seed:\n compare_dir[4] = None\n display_dir = os.path.join('/h/nng/slurm', cfg.display.compare.date, slurm_utils.resolve_name(compare_dir), 'log')\n print(display_dir)\n if not os.path.exists(display_dir):\n #print(\"{} does not exist!\".format(display_dir))\n continue\n fnames = sorted(os.listdir(display_dir))[::-1]\n for fname in fnames:\n if 'err' in fname:\n continue\n res = open(os.path.join(display_dir, fname), 'r').readlines()\n if res != [] and 'Accuracy' in res[-1]:\n compare_res.append(float(res[-1].rstrip().split(' ')[-1]))\n break\n else:\n for gen_seed in empty_to_list(cfg.gen.seed):\n compare_dir[4] = gen_seed\n display_dir = os.path.join('/h/nng/slurm', cfg.display.compare.date, slurm_utils.resolve_name(compare_dir), 'log')\n if not os.path.exists(display_dir):\n #print(\"{} does not exist!\".format(display_dir))\n continue\n fnames = sorted(os.listdir(display_dir))[::-1]\n for fname in fnames:\n if 'err' in fname:\n continue\n res = open(os.path.join(display_dir, fname), 'r').readlines()\n if res != [] and 'Accuracy' in res[-1]:\n compare_res.append(float(res[-1].rstrip().split(' ')[-1]))\n break\n\n print(seed_res)\n if seed_res == [] or compare_res == []:\n orig_avg.append(0)\n compare_avg.append(0)\n continue\n\n if len(seed_res) != 1:\n orig_avg.append(np.average(seed_res))\n compare_avg.append(np.average(compare_res))\n print(orig_avg)\n print(compare_avg)\n\n offset = len(cfg.display.tdset) + 1\n orig_id = [orig_avg[i] for i in range(len(orig_avg)) if i % offset == 0]\n orig_ood = [orig_avg[i] for i in range(len(orig_avg)) if i % offset != 0]\n print(orig_id)\n print(orig_ood)\n compare_id = [compare_avg[i] for i in 
range(len(compare_avg)) if i % offset == 0]\n compare_ood = [compare_avg[i] for i in range(len(compare_avg)) if i % offset != 0]\n print(compare_id)\n print(compare_ood)\n print(wilcoxon(orig_id, compare_id, alternative='greater'))\n print(wilcoxon(orig_ood, compare_ood, alternative='greater'))\n #print(mannwhitneyu(orig_id, compare_id, alternative='greater'))\n #print(mannwhitneyu(orig_ood, compare_ood, alternative='greater'))\n\ndef empty_to_list(l):\n if l is None:\n return [None]\n else:\n return list(l)\n\nif __name__ == \"__main__\":\n display_results()\n" ]
[ [ "scipy.stats.wilcoxon", "numpy.average" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
hgfe/DCSSR
[ "949359eeab248a220834f49ab238dbbf93e656d5" ]
[ "utils.py" ]
[ "from PIL import Image\nimport os\nfrom torch.utils.data.dataset import Dataset\nfrom torchvision.transforms import ToTensor\nimport random\nimport torch\nimport numpy as np\nfrom skimage import measure\nfrom torch.nn import init\n\nclass TrainSetLoader(Dataset):\n def __init__(self, dataset_dir, cfg):\n super(TrainSetLoader, self).__init__()\n if isinstance(dataset_dir, str):\n self.dataset_dir = dataset_dir + '/patches_x' + str(cfg.scale_factor)\n self.file_list = os.listdir(self.dataset_dir)\n self.all_list = self.file_list\n \n elif isinstance(dataset_dir, list):\n k = len(dataset_dir)\n self.dataset_dir = []\n self.file_list = []\n self.all_list = []\n for i in range(k):\n temp = dataset_dir[i] + '/patches_x' + str(cfg.scale_factor)\n self.dataset_dir.append(temp)\n self.file_list = os.listdir(temp)\n for j in range(len(self.file_list)):\n self.all_list.append(temp + '/' + self.file_list[j])\n \n def __getitem__(self, index):\n \n if isinstance(self.dataset_dir, str):\n img_hr_left = Image.open(self.dataset_dir + '/' + self.file_list[index] + '/hr0.png')\n img_hr_right = Image.open(self.dataset_dir + '/' + self.file_list[index] + '/hr1.png')\n img_lr_left = Image.open(self.dataset_dir + '/' + self.file_list[index] + '/lr0.png')\n img_lr_right = Image.open(self.dataset_dir + '/' + self.file_list[index] + '/lr1.png')\n\n img_hr_left = np.array(img_hr_left, dtype=np.float32)\n img_hr_right = np.array(img_hr_right, dtype=np.float32)\n img_lr_left = np.array(img_lr_left, dtype=np.float32)\n img_lr_right = np.array(img_lr_right, dtype=np.float32)\n\n img_hr_left, img_hr_right, img_lr_left, img_lr_right = augumentation(img_hr_left, img_hr_right, img_lr_left, img_lr_right)\n return toTensor(img_hr_left), toTensor(img_hr_right), toTensor(img_lr_left), toTensor(img_lr_right)\n \n elif isinstance(self.dataset_dir, list): \n img_hr_left = Image.open(self.all_list[index] + '/hr0.png')\n img_hr_right = Image.open(self.all_list[index] + '/hr1.png')\n img_lr_left = Image.open(self.all_list[index] + '/lr0.png')\n img_lr_right = Image.open(self.all_list[index] + '/lr1.png')\n \n img_hr_left = np.array(img_hr_left, dtype=np.float32)\n img_hr_right = np.array(img_hr_right, dtype=np.float32)\n img_lr_left = np.array(img_lr_left, dtype=np.float32)\n img_lr_right = np.array(img_lr_right, dtype=np.float32)\n\n img_hr_left, img_hr_right, img_lr_left, img_lr_right = augumentation(img_hr_left, img_hr_right, img_lr_left, img_lr_right)\n return toTensor(img_hr_left), toTensor(img_hr_right), toTensor(img_lr_left), toTensor(img_lr_right)\n \n def __len__(self):\n return len(self.all_list)\n \nclass TrainSetLoaderMono(Dataset):\n def __init__(self, dataset_dir, cfg):\n super(TrainSetLoaderMono, self).__init__()\n self.scale_factor = cfg.scale_factor\n if isinstance(dataset_dir, str):\n self.dataset_dir = dataset_dir + '/patches_x' + str(cfg.scale_factor)\n self.file_list = os.listdir(self.dataset_dir)\n self.all_list = self.file_list\n \n elif isinstance(dataset_dir, list):\n k = len(dataset_dir)\n self.dataset_dir = []\n self.file_list = []\n self.all_list = []\n for i in range(k):\n temp = dataset_dir[i] + '/patches_x' + str(cfg.scale_factor)\n self.dataset_dir.append(temp)\n self.file_list = os.listdir(temp)\n for j in range(len(self.file_list)):\n self.all_list.append(temp + '/' + self.file_list[j])\n \n def __getitem__(self, index):\n \n if isinstance(self.dataset_dir, str):\n img_hr_left = Image.open(self.dataset_dir + '/' + self.file_list[index] + '/hr0.png')\n img_lr_left = Image.open(self.dataset_dir + 
'/' + self.file_list[index] + '/lr0.png')\n\n img_lr_left = img_lr_left.resize((img_lr_left.size[0] * self.scale_factor, img_lr_left.size[1] * self.scale_factor),Image.BICUBIC)\n\n img_hr_left = np.array(img_hr_left, dtype=np.float32)\n img_lr_left = np.array(img_lr_left, dtype=np.float32)\n\n img_hr_left, img_lr_left = augumentation_mono(img_hr_left, img_lr_left)\n return toTensor(img_hr_left), toTensor(img_lr_left)\n \n elif isinstance(self.dataset_dir, list): \n img_hr_left = Image.open(self.all_list[index] + '/hr0.png')\n img_lr_left = Image.open(self.all_list[index] + '/lr0.png')\n \n img_lr_left = img_lr_left.resize((img_lr_left.size[0] * self.scale_factor, img_lr_left.size[1] * self.scale_factor),Image.BICUBIC)\n \n img_hr_left = np.array(img_hr_left, dtype=np.float32)\n img_lr_left = np.array(img_lr_left, dtype=np.float32)\n\n img_hr_left, img_lr_left = augumentation_mono(img_hr_left, img_lr_left)\n return toTensor(img_hr_left), toTensor(img_lr_left)\n \n def __len__(self):\n return len(self.all_list)\n\n\nclass TestSetLoader(Dataset):\n def __init__(self, dataset_dir, scale_factor):\n super(TestSetLoader, self).__init__()\n self.dataset_dir = dataset_dir\n self.scale_factor = scale_factor\n self.file_list = os.listdir(os.path.join(dataset_dir, 'hr'))\n def __getitem__(self, index):\n hr_image_left = Image.open(os.path.join(self.dataset_dir, 'hr', self.file_list[index], 'hr0.png'))\n hr_image_right = Image.open(os.path.join(self.dataset_dir, 'hr', self.file_list[index], 'hr1.png'))\n lr_image_left = Image.open(os.path.join(self.dataset_dir, 'lr_x' + str(self.scale_factor), self.file_list[index], 'lr0.png'))\n lr_image_right = Image.open(os.path.join(self.dataset_dir, 'lr_x' + str(self.scale_factor), self.file_list[index], 'lr1.png'))\n hr_image_left = ToTensor()(hr_image_left)\n hr_image_right = ToTensor()(hr_image_right)\n lr_image_left = ToTensor()(lr_image_left)\n lr_image_right = ToTensor()(lr_image_right)\n return hr_image_left, hr_image_right, lr_image_left[:3, :, :], lr_image_right[:3, :, :]\n def __len__(self):\n return len(self.file_list)\n\n\nclass TestSetLoaderMono(Dataset):\n def __init__(self, dataset_dir, scale_factor):\n super(TestSetLoaderMono, self).__init__()\n self.dataset_dir = dataset_dir\n self.scale_factor = scale_factor\n self.file_list = os.listdir(os.path.join(dataset_dir, 'hr'))\n def __getitem__(self, index):\n hr_image_left = Image.open(os.path.join(self.dataset_dir, 'hr', self.file_list[index], 'hr0.png'))\n lr_image_left = Image.open(os.path.join(self.dataset_dir, 'lr_x' + str(self.scale_factor), self.file_list[index], 'lr0.png'))\n lr_image_left = lr_image_left.resize((lr_image_left.size[0] * self.scale_factor, lr_image_left.size[1] * self.scale_factor),Image.BICUBIC)\n hr_image_left = ToTensor()(hr_image_left)\n lr_image_left = ToTensor()(lr_image_left)\n return hr_image_left, lr_image_left[:3, :, :]\n def __len__(self):\n return len(self.file_list)\n\n\n\ndef augumentation(hr_image_left, hr_image_right, lr_image_left, lr_image_right):\n \n '''\n if random.random()<0.5: # flip horizonly\n lr_image_left = lr_image_left[:, ::-1, :]\n lr_image_right = lr_image_right[:, ::-1, :]\n hr_image_left = hr_image_left[:, ::-1, :]\n hr_image_right = hr_image_right[:, ::-1, :]\n ''' \n \n if random.random()<0.5: #flip vertically\n lr_image_left = lr_image_left[::-1, :, :]\n lr_image_right = lr_image_right[::-1, :, :]\n hr_image_left = hr_image_left[::-1, :, :]\n hr_image_right = hr_image_right[::-1, :, :]\n\n return np.ascontiguousarray(hr_image_left), 
np.ascontiguousarray(hr_image_right), \\\n np.ascontiguousarray(lr_image_left), np.ascontiguousarray(lr_image_right)\n\ndef augumentation_mono(hr_image_left, lr_image_left):\n \n '''\n if random.random()<0.5: # flip horizonly\n lr_image_left = lr_image_left[:, ::-1, :]\n lr_image_right = lr_image_right[:, ::-1, :]\n hr_image_left = hr_image_left[:, ::-1, :]\n hr_image_right = hr_image_right[:, ::-1, :]\n ''' \n \n if random.random()<0.5: #flip vertically\n lr_image_left = lr_image_left[::-1, :, :]\n hr_image_left = hr_image_left[::-1, :, :]\n\n return np.ascontiguousarray(hr_image_left), np.ascontiguousarray(lr_image_left)\n\n\ndef toTensor(img):\n img = torch.from_numpy(img.transpose((2, 0, 1)))\n return img.float().div(255)\n\nclass L1Loss(object):\n def __call__(self, input, target):\n return torch.abs(input - target).mean()\n\ndef cal_psnr(img1, img2):\n img1_np = np.array(img1.cpu())\n img2_np = np.array(img2.cpu())\n\n return measure.compare_psnr(img1_np, img2_np)\n\ndef save_ckpt(state, save_path='log', filename='checkpoint.pth.tar'):\n torch.save(state, os.path.join(save_path,filename))\n\n\ndef weights_init_xavier(m):\n classname = m.__class__.__name__\n if classname.find('Conv2d') != -1:\n init.xavier_normal(m.weight.data)\n \n\ndef cal_ssim(img1, img2):\n img1 = img1.permute(0,2,3,1)\n img2 = img2.permute(0,2,3,1)\n \n img1_np = np.array(img1.cpu())\n img2_np = np.array(img2.cpu())\n img1_np = np.squeeze(img1_np, axis = 0)\n img2_np = np.squeeze(img2_np)\n \n return measure.compare_ssim(img1_np, img2_np, multichannel = True)\n" ]
[ [ "torch.abs", "numpy.ascontiguousarray", "numpy.squeeze", "numpy.array", "torch.nn.init.xavier_normal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ContinuumIO/chaco
[ "e4a42b91cb25ef7191fd465caaef2c3256fc668e", "e4a42b91cb25ef7191fd465caaef2c3256fc668e" ]
[ "chaco/tests/create_2d_test_case.py", "examples/demo/edit_line.py" ]
[ "from __future__ import with_statement\n\nfrom chaco.api import Plot, ArrayPlotData\n\nfrom traits.api import HasTraits, Instance\nfrom enable.component_editor import ComponentEditor\nfrom traitsui.api import Item, View\n\nimport numpy as np\n\nimport nose\nfrom _tools import store_exceptions_on_all_threads, assert_raises\n\n\nclass PlotViewer(HasTraits):\n plot = Instance(Plot)\n traits_view = View(Item('plot', editor=ComponentEditor()))\n\n\ndef test_bounds_2d_case():\n # test for bug: contour and image plots should support the case where\n # xbounds and ybounds are 2d arrays resulting from meshgrids\n\n xs = np.linspace(-10,10,200)\n ys = np.linspace(-10,10,400)\n x, y = np.meshgrid(xs,ys)\n z = x + y\n\n plotdata = ArrayPlotData()\n plotdata.set_data(\"z\", z)\n\n plot = Plot(plotdata)\n plot.contour_plot(\"z\", xbounds=x, ybounds=y)\n\n # try to display it, that's when the exception is raised\n with store_exceptions_on_all_threads():\n pv = PlotViewer(plot=plot)\n pv.edit_traits()\n\n\ndef test_process_2d_bounds():\n # behavior: _process_2d_bounds accepts all possible ways to set x and y\n # bounds in 2d plots and returns a 1d array with equally spaced\n # intervals between the lower and upper bound of the data. The number\n # of elements in the 1d array must be of one element larger than the\n # shape of the data, because it includes the upper bound.\n\n height, width = 20, 10\n array_data = np.ones(shape=(height, width))\n plot = Plot()\n\n # bounds is None : infer from array_data shape\n xs = plot._process_2d_bounds(None, array_data, 1)\n assert xs.shape[0] == width + 1\n ys = plot._process_2d_bounds(None, array_data, 0)\n assert ys.shape[0] == height + 1\n\n # bounds is a tuple : it defines lower and upper range\n bounds = (1.0, 100.0)\n xs = plot._process_2d_bounds(bounds, array_data, 1)\n assert xs.shape[0] == width + 1\n assert xs[0] == bounds[0] and xs[-1] == bounds[1]\n\n # bounds is a 1D array: the first and last elements are used to create\n # equally spaced intervals. Bounds must be of one element larger than the\n # corresponding axis in array_data, or it will raise a Value error\n bounds = np.zeros((height+1, ))\n bounds[0], bounds[-1] = 0.2, 21.3\n ys = plot._process_2d_bounds(bounds, array_data, 0)\n assert ys.shape[0] == height + 1\n assert ys[0] == bounds[0] and ys[-1] == bounds[-1]\n with assert_raises(ValueError):\n bounds = np.zeros((width // 2, ))\n plot._process_2d_bounds(bounds, array_data, 0)\n\n # bounds is a 2D array: the first and last elements along the appropriate\n # axis are used to create equally spaced intervals.\n # The size of the bounds must be the same as the data array, or this\n # sill raise a ValueError\n xbounds, ybounds = np.meshgrid(np.arange(width), np.arange(height))\n\n xs = plot._process_2d_bounds(xbounds, array_data, 1)\n assert xs.shape[0] == width + 1\n assert xs[0] == xbounds[0,0] and xs[-2] == xbounds[0,-1]\n with assert_raises(ValueError):\n plot._process_2d_bounds(xbounds[:5,:], array_data, 1)\n\n ys = plot._process_2d_bounds(ybounds, array_data, 0)\n assert ys.shape[0] == height + 1\n assert ys[0] == ybounds[0,0] and ys[-2] == ybounds[-1,0]\n\n\nif __name__ == '__main__':\n nose.main()\n", "#!/usr/bin/env python\n\"\"\"\nAllows editing of a line plot.\n\nLeft-dragging a point will move its position.\n\nRight-drag pans the plot.\n\nMousewheel up and down zooms the plot in and out.\n\nPressing \"z\" brings up the Zoom Box, and you can click-drag a rectangular region to\nzoom. 
If you use a sequence of zoom boxes, pressing alt-left-arrow and\nalt-right-arrow moves you forwards and backwards through the \"zoom history\".\n\"\"\"\n\n# Major library imports\nfrom numpy import linspace\nfrom scipy.special import jn\n\nfrom chaco.example_support import COLOR_PALETTE\n\n# Enthought library imports\nfrom enable.tools.api import DragTool\nfrom enable.api import Component, ComponentEditor\nfrom traits.api import HasTraits, Instance, Int, Tuple\nfrom traitsui.api import UItem, View\n\n# Chaco imports\nfrom chaco.api import add_default_axes, add_default_grids, \\\n OverlayPlotContainer, PlotLabel, ScatterPlot, create_line_plot\nfrom chaco.tools.api import PanTool, ZoomTool\n\n\n\nclass PointDraggingTool(DragTool):\n\n component = Instance(Component)\n\n # The pixel distance from a point that the cursor is still considered\n # to be 'on' the point\n threshold = Int(5)\n\n # The index of the point being dragged\n _drag_index = Int(-1)\n\n # The original dataspace values of the index and value datasources\n # corresponding to _drag_index\n _orig_value = Tuple\n\n def is_draggable(self, x, y):\n # Check to see if (x,y) are over one of the points in self.component\n if self._lookup_point(x, y) is not None:\n return True\n else:\n return False\n\n def normal_mouse_move(self, event):\n plot = self.component\n\n ndx = plot.map_index((event.x, event.y), self.threshold)\n if ndx is None:\n if plot.index.metadata.has_key('selections'):\n del plot.index.metadata['selections']\n else:\n plot.index.metadata['selections'] = [ndx]\n\n plot.invalidate_draw()\n plot.request_redraw()\n\n\n def drag_start(self, event):\n plot = self.component\n ndx = plot.map_index((event.x, event.y), self.threshold)\n if ndx is None:\n return\n self._drag_index = ndx\n self._orig_value = (plot.index.get_data()[ndx], plot.value.get_data()[ndx])\n\n def dragging(self, event):\n plot = self.component\n\n data_x, data_y = plot.map_data((event.x, event.y))\n\n plot.index._data[self._drag_index] = data_x\n plot.value._data[self._drag_index] = data_y\n plot.index.data_changed = True\n plot.value.data_changed = True\n plot.request_redraw()\n\n def drag_cancel(self, event):\n plot = self.component\n plot.index._data[self._drag_index] = self._orig_value[0]\n plot.value._data[self._drag_index] = self._orig_value[1]\n plot.index.data_changed = True\n plot.value.data_changed = True\n plot.request_redraw()\n\n def drag_end(self, event):\n plot = self.component\n if plot.index.metadata.has_key('selections'):\n del plot.index.metadata['selections']\n plot.invalidate_draw()\n plot.request_redraw()\n\n def _lookup_point(self, x, y):\n \"\"\" Finds the point closest to a screen point if it is within self.threshold\n\n Parameters\n ==========\n x : float\n screen x-coordinate\n y : float\n screen y-coordinate\n\n Returns\n =======\n (screen_x, screen_y, distance) of datapoint nearest to the input *(x,y)*.\n If no data points are within *self.threshold* of *(x,y)*, returns None.\n \"\"\"\n\n if hasattr(self.component, 'get_closest_point'):\n # This is on BaseXYPlots\n return self.component.get_closest_point((x,y), threshold=self.threshold)\n\n return None\n\n\n#===============================================================================\n# # Create the Chaco plot.\n#===============================================================================\ndef _create_plot_component():\n\n container = OverlayPlotContainer(padding = 50, fill_padding = True,\n bgcolor = \"lightgray\", use_backbuffer=True)\n\n # Create the initial X-series 
of data\n numpoints = 30\n low = -5\n high = 15.0\n x = linspace(low, high, numpoints)\n y = jn(0, x)\n\n lineplot = create_line_plot((x,y), color=tuple(COLOR_PALETTE[0]), width=2.0)\n lineplot.selected_color = \"none\"\n scatter = ScatterPlot(index = lineplot.index,\n value = lineplot.value,\n index_mapper = lineplot.index_mapper,\n value_mapper = lineplot.value_mapper,\n color = tuple(COLOR_PALETTE[0]),\n marker_size = 5)\n scatter.index.sort_order = \"ascending\"\n\n scatter.bgcolor = \"white\"\n scatter.border_visible = True\n\n add_default_grids(scatter)\n add_default_axes(scatter)\n\n scatter.tools.append(PanTool(scatter, drag_button=\"right\"))\n\n # The ZoomTool tool is stateful and allows drawing a zoom\n # box to select a zoom region.\n zoom = ZoomTool(scatter, tool_mode=\"box\", always_on=False, drag_button=None)\n scatter.overlays.append(zoom)\n\n scatter.tools.append(PointDraggingTool(scatter))\n\n container.add(lineplot)\n container.add(scatter)\n\n # Add the title at the top\n container.overlays.append(PlotLabel(\"Line Editor\",\n component=container,\n font = \"swiss 16\",\n overlay_position=\"top\"))\n\n return container\n\n\n#===============================================================================\n# Attributes to use for the plot view.\nsize=(800,700)\ntitle=\"Simple line plot\"\n\n#===============================================================================\n# # Demo class that is used by the demo.py application.\n#===============================================================================\nclass Demo(HasTraits):\n plot = Instance(Component)\n\n traits_view = View(UItem('plot', editor=ComponentEditor()),\n width=size[0], height=size[1], resizable=True,\n title=title\n )\n\n def _plot_default(self):\n return _create_plot_component()\n\ndemo = Demo()\n\nif __name__ == \"__main__\":\n demo.configure_traits()\n\n#--EOF---\n" ]
[ [ "numpy.linspace", "numpy.arange", "numpy.ones", "numpy.meshgrid", "numpy.zeros" ], [ "scipy.special.jn", "numpy.linspace" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ph-u/CMEECourseWork_pmH
[ "8d52d4dcc3a643da7d55874e350c18f3bf377138" ]
[ "Week7/Code/LV2.py" ]
[ "#!/bin/env python3\n\n# Author: ph-u\n# Script: LV2.py\n# Desc: Consumer-Resource cycle plotting\n# Input: python3 LV2.py\n# Output: 1. two graphical outputs in `results` subdirectory; 2. final numbers terminal output\n# Arguments: 0\n# Date: Nov 2019\n\n\n\"\"\"Consumer-Resource cycle plotting\"\"\"\n\n__appname__=\"LV2.py\"\n__author__=\"ph-u\"\n__version__=\"0.0.1\"\n__license__=\"None\"\n\nimport sys\nimport scipy as sc\nimport scipy.integrate as integrate\nimport matplotlib.pylab as p\n\ndef LV():\n \"\"\"adaptation for cProfile\"\"\"\n ## sys argv imput\n if len(sys.argv) < 4:\n print(\"not enough inputs, using defaults\")\n print(\"r=1 a=0.1 z=1.5 e=0.75\")\n r=1.;a=.1;z=1.5;e=.75\n else:\n r=float(sys.argv[1]) ## intrinsic (per-capita) growth rate\n a=float(sys.argv[2]) ## per-capita \"search-rate\" for resource\n z=float(sys.argv[3]) ## mortality rate\n e=float(sys.argv[4]) ## consumer's efficiency for resource -> biomass\n\n ## function\n def dCR_dt(pops, t=0):\n \"\"\"Lotka-Volterra model\"\"\"\n R=pops[0]\n C=pops[1]\n dRdt=r*R*(1-R/K)-a*R*C\n dCdt=-z*C+e*a*R*C\n ## dimension analysis required (unit balance)\n ## automatically determine min time steps needed\n return sc.array([dRdt, dCdt])\n\n ## set initial start parameters\n t=sc.linspace(0,15,1e3)\n K=37 ## carrying capacity\n R0=10;C0=5 ## initial population of resource & consumers\n RC0=sc.array([R0,C0])\n pops, infodict=integrate.odeint(dCR_dt, RC0, t, full_output=True);pops\n\n f1=p.figure(num=1);f1\n p.plot(t,pops[:,0], 'g-', label=\"Resource density\") ## plot green line as 1st graphic entry\n p.plot(t,pops[:,1], \"b-\", label=\"Consumer density\")\n p.grid()\n p.legend(loc=\"best\")\n p.xlabel(\"Time\")\n p.ylabel(\"Population density\")\n p.title(\"Consumer-Resource population dynamics\")\n ## text string for text box in graph\n tex='\\n'.join((\n r'$r = %.2f$ time$^{-1}$' %(r, ),\n r'$a = %.2f$ area * time$^{-1}$' %(a, ),\n r'$z = %.2f$ time$^{-1}$' %(z, ),\n r'$e = %.2f$ [no unit]' %(e, )\n ))\n box=dict(boxstyle=\"round\", facecolor=\"white\",alpha=.8)\n\n p.text(9,12,tex,bbox=box) ## <https://matplotlib.org/3.1.1/gallery/recipes/placing_text_boxes.html>\n # p.show()\n\n f2=p.figure(num=2);f2\n p.plot(pops[:,0],pops[:,1],'r-')\n p.grid()\n p.xlabel(\"Resource density\")\n p.ylabel(\"Consumer density\")\n p.title(\"Consumer-Resource population dynamics\")\n # p.show()\n\n f1.savefig(\"../results/LV2_model1.pdf\")\n f2.savefig(\"../results/LV2_model2.pdf\")\n\n ## print final values\n print(\"final Consumer population:\",round(pops[(pops.shape[0]-1),1], 2), \"individuals / units at time\",t[len(t)-1])\n print(\"final Resource population:\",round(pops[(pops.shape[0]-1),0], 2), \"individuals / units at time\",t[len(t)-1])\n\nLV()\n" ]
[ [ "scipy.linspace", "matplotlib.pylab.grid", "matplotlib.pylab.legend", "matplotlib.pylab.text", "matplotlib.pylab.title", "scipy.integrate.odeint", "matplotlib.pylab.figure", "matplotlib.pylab.ylabel", "matplotlib.pylab.plot", "scipy.array", "matplotlib.pylab.xlabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Zhangzhicheng001/Danim
[ "c45addc3d7679b7adb4a2cfd241c2247ce0a6669" ]
[ "Danim/BubbleChart/bubble_constants.py" ]
[ "# for SPECIFIC_COLORS only\nimport pandas as pd\n\nimport numpy as np\nfrom manimlib.constants import *\n\n\n#-----------data settings 输入数据设置------------#\n#************************************************#\n\nDATA_DIR = \"manim\\\\Danim\\\\DATA\"\nfile_name_list = [\"X.csv\",\"Y.csv\",\"R.csv\"]\nX_file_name = \"X.csv\"\nY_file_name = \"Y.csv\"\nR_file_name = \"R.csv\"\n#Group_lables = \"Group_lable.csv\"\n#************************************************#\n\n\n\n#-----------axes settings坐标轴参数设置-----------#\n#************************************************#\n\n#whether to customize the axes range, \n#if not, the range is set by digest_data_and_set_the_axes()\n#是否手动调节坐标轴范围\n#可选择自动调节\nCUSTOM_AXES_RANGE = True\n# set the axes range yourself:\n# 手动调节坐标轴范围\nif CUSTOM_AXES_RANGE:\n\tXMIN = -6\n\tXMAX = 20\n\tYMIN = -3\n\tYMAX = 20\n\nSHOW_AXIS_LABLES = True\nX_AXIS_LABLE = \"CPI\"#\"人均寿命\"\nY_AXIS_LABLE = \"GDP增速\"\nTEXT_COLOR = LIGHT_GREY\nTEXT_SCALE_FACTOR = 0.8\nX_LABLE_ADJUST_VECTOR = 0.8*LEFT + 0.5*UP #ADJUST THE X_AXIS_LABLE POSITION\nY_LABLE_ADJUST_VECTOR = 0.8*RIGHT + 0.5*DOWN\n\n#pretty self-expainatory\nSHOW_X_NUMBERS = True\nSHOW_Y_NUMBERS = True\nX_DECIMAL_PLACES = 0\nY_DECIMAL_PLACES = 0\n\n# how big the axes numbers are, if shown\n# 调节数轴数字大小\nNUMBER_SCALE_FACTOR = 0.6\n\n#axes origin point setting: default in the left bottom corner\n#坐标原点设置: 默认在左下角 \n#若要调节坐标位置 屏幕坐标原点为中间 屏幕总高8 宽约为14.22(8*1920/1080)\nNEWORIGIN = np.array([-6.5,-3.5,0])\n\n#whether customize the number shown on axes:\n#if not, ticks will be evenly distributed on numberline\n#是否手动调节坐标轴上显示的数字:\n#如果不手动设置 则数字均匀分布在数轴上\nCUSTOM_AXES_NUMBER = False\n\nif CUSTOM_AXES_NUMBER:\n\tX_NUMBERS = range(-100,101,10)#list of numbers to show\n\tY_NUMBERS = range(-100,101,10)\nelse:\n\tX_NUMBERS = []#list of numbers to show\n\tY_NUMBERS = []\n\tNUM_OF_X_TICKS = 10\n\tNUM_OF_Y_TICKS = 10\n\n#left most coordinates on screen, tip not included\n#坐标轴最右侧的X坐标位置 不含箭头(不超过7.14, 否则超出屏幕)\nRIGHT_MOST_X_ON_SCREEN = 7.\n#top most coordinates on screen, tip not included\n#坐标轴最上侧的X坐标位置 不含箭头(不超过4,否则超出屏幕)\nTOP_MOST_Y_ON_SCREEN = 3.8\n\n#axes color\nAXIS_COLOR = LIGHT_GREY\n\n#time_lable\nTIME_LABLE_COLOR = PURPLE_E\nTIME_LABLE_SCALE_FACTOR = 1.0\nTIME_LABLE_POSITION = 6.0*RIGHT + 3.5*UP\n\n#************************************************#\n\n\n#-----------bubble settings 泡参数设置-----------#\n#************************************************#\n\n# default: 1.4 billion people is a 1.2unit area bubble\n# 数据和泡泡面积的比值 用于调整泡泡面积大小的参数\n# 默认设为2亿(人口)为1单位面积圆\n\n#100亿为单位圆\nR_per_circle_area =100000 #100000000000 #1000000000\n\n#bubbles' opacity\n#圆透明度\nFILL_OPACITY = 0.7\n\n#color generation group lables:\nGROUP_LABLE_CSV_FILE = \"D:\\\\PythonPro\\\\harfor\\\\data_downloaded_from_wind\\\\data_to_visualize\\\\Group_lable.csv\" #\"D:\\\\Anaconda3\\\\envs\\\\MAINM\\\\Lib\\\\manim\\\\Danim\\\\DATA\\\\Group_lable.csv\"\nCOLOR_LABLE_DICT = {\"中国\":RED,\"美国\":BLUE}\n'''\n{\"华北\":YELLOW,\"华南\":ORANGE,\"东北\":WHITE,\"华东\":BLUE,\"西南\":RED,\"华中\":GREEN,\"西北\":TEAL_E}\n'''\n\n'''\n{\"TOP TIER\":RED,\"MID TIER\":ORANGE,\"BOTTOM TIER\":YELLOW}\n'''\n\n'''{\n\t'仓位波动小':RED,\n\t'仓位波动适中':BLUE,\n\t'仓位波动大':YELLOW}\n'''\n\n'''\n{\n\t\"其他\":RED,\n\t\"宝盈基金\":GREEN,\n\t\"博时基金\":PURPLE_E,\n\t\"富国基金\":BLUE,\n\t\"华夏基金\":YELLOW,\n\t\"华安基金\":DARK_BROWN,\n\t\"汇添富基金\":MAROON_E,\n\t\"嘉实基金\":PINK\n\t}\n\n\n'''\n\n'''\n{\n\t\"AFRICA\":RED,\n\t\"ASIA\":GREEN,\n\t\"EUROPE\":BLUE,\n\t\"LATIN AMERICA AND THE CARIBBEAN\":YELLOW,\n\t\"OCEANIA\":PURPLE_E\n\t}\n'''\nGROUP_NAME = \"industry\"#group by which 
column name in file Group_lable.csv\n\n\n\nTHE_WHOLE_WORLD =[\"AFRICA\",\"ASIA\",\"EUROPE\",\"LATIN AMERICA AND THE CARIBBEAN\",\"OCEANIA\"] #**\nCH_THE_WHOLE_WORLD = [\"非洲\",\"亚洲\",\"欧洲\",\"美洲和加勒比\",\"岛国\"]\nAREA_COLOR_MAP = [RED,GREEN,BLUE,YELLOW,PURPLE_E]\nSOME_CONTRIES = []#[\"China\",\"India\",\"United States\",\"United Kingdom\",\"Russia\"]\n\n\n\n#color_lables\nSHOWN_ENTITY_NAMES = COLOR_LABLE_DICT.keys()#default THE_WHOLE_WORLD\nRECT_HIGHT = 0.2\nRECT_WIDTH = 0.5\nRECT_POSITION = 5.0*RIGHT + 3.0*UP\nRECT_TEXT_SCALE_FACTOR = 0.4\nSHOW_CN_NAMES = False # when True, the program only works if the Internet is connected\nRECT_INTERVAL_FACTOR = 2 # default interval is (2-1) * the rectangle heights, must be greater than 1\n#************************************************#\n\n\n#-----------Animation settings 动画设置-----------#\n#************************************************#\nSHOW_CREATION = True\nif SHOW_CREATION:\n\tCREATION_RUN_TIME = 2 #default 10s\nelse:\n\tCREATION_RUN_TIME = 0\n\nSET_BY_TOTAL_TIME = True\nTOTAL_TIME = 30 #default 30s\nproportion_of_transformation = 0.3 #30% of the time set to transform\nif SET_BY_TOTAL_TIME:\n\tBUBBLE_TRANSFROMATION_RUN_TIME = (TOTAL_TIME - CREATION_RUN_TIME)*proportion_of_transformation\n\tTIME_LABLE_TRANSFROMATION_RUN_TIME = BUBBLE_TRANSFROMATION_RUN_TIME/2\n\tWAIT_TIME = TOTAL_TIME - CREATION_RUN_TIME - BUBBLE_TRANSFROMATION_RUN_TIME\nelse:\n\n\tBUBBLE_TRANSFROMATION_RUN_TIME = 1\n\tTIME_LABLE_TRANSFROMATION_RUN_TIME = 0.5\n\tWAIT_TIME = 1\n\nDISPLAY_SPECIFIC_ENTITIES = False\n\nSOME_ENTITIES_TO_SHOW = SOME_CONTRIES #if DISPLAY_SPECIFIC_ENTITIES is True\n\n\n#************************************************#\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
FofanovLab/VaST
[ "72670bfc1fed6418eb9bbc7123d864a91fa63173" ]
[ "VaST/Amplicon_Filter.py" ]
[ "import itertools as it\nimport logging\nfrom collections import namedtuple\n\nimport numpy as np\nimport pandas as pd\n\nfrom Pattern import Patterns\nfrom utils import AMBIGUOUS_DICT\n\nAmplicon = namedtuple('Amplicon', ['start', 'stop', 'genome', 'site_ids'])\n\n\nclass AmpliconFilter:\n def __init__(\n self, sites, var_matrix, flags, window, pz_size,\n pz_filter_length, pz_filter_percent, strict):\n self._patterns = Patterns()\n self._sites = pd.DataFrame(\n sites,\n columns=[\"Genome\", \"Start\", \"Stop\"])\n self._sites['Start'] = self._sites['Start'].apply(pd.to_numeric)\n self._sites['Stop'] = self._sites['Stop'].apply(pd.to_numeric)\n self._sites = self._sites.groupby(\"Genome\")\n self._var_matrix = var_matrix\n self._flags = flags\n self._window = window\n self._pz_size = pz_size\n self._pz_filter_length = pz_filter_length\n self._pz_filter_percent = pz_filter_percent\n self._logger = logging.getLogger(__name__)\n self._strict = strict\n\n def get_patterns(self):\n return self._patterns\n\n def filter_amplicons_get_patterns(self):\n self._logger.info(\"BEGIN Amplicon Filter\")\n for genome, sites in self._sites:\n self._logger.info(\"Filtering sites in: %s\", genome)\n sites = sites.sort_values(\"Start\")\n pos = 0\n filtered_amp_count = 0\n accepted_amp_count = 0\n current_site = sites.iloc[pos].Start\n last_site = sites.iloc[-1].Start\n genome_size = len(self._flags[genome])\n go = True\n while go:\n amplicon = self._amplicon(genome, sites.loc[\n (sites[\"Start\"] >= current_site) &\n (sites[\"Stop\"] < current_site + self._window)],\n genome_size)\n if amplicon is None:\n pos += 1\n filtered_amp_count += 1\n if current_site == last_site:\n go = False\n else:\n current_site = sites.iloc[pos].Start\n continue\n self.update_pattern(amplicon, pos, genome_size)\n accepted_amp_count += 1\n if len(amplicon) == 1:\n pos += 1\n else:\n # check if shifting to the next site\n # in the amplicon includes a new site\n # if yes shift to next site in amplicon\n # if no shift to next site outside of\n # amplicon\n next_site = amplicon.iloc[1].Start\n next_amplicon = sites.loc[\n (sites[\"Start\"] >= next_site) &\n (sites[\"Stop\"] < next_site + self._window)]\n if len(np.setdiff1d(next_amplicon.Start, amplicon.Start)):\n pos += 1\n else:\n pos += len(amplicon)\n if pos >= len(sites):\n go = False\n else:\n current_site = sites.iloc[pos].Start\n\n self._logger.info(\"%s amplicon(s) filtered\", filtered_amp_count)\n self._logger.info(\"%s amplicon(s) passed\", accepted_amp_count )\n self._logger.info(\"FINISHED Amplicon Filter\")\n return self._patterns\n\n def update_pattern(self, amplicon, position, genome_size):\n sites = self._var_matrix[\n position: position + len(amplicon)]\n amplicon = Amplicon(\n amplicon.iloc[0].Start,\n amplicon.iloc[-1].Stop,\n amplicon.iloc[0].Genome,\n list(amplicon.apply(\n lambda row: \"::\".join(\n [str(r) for r in row]), axis=1)))\n\n if not self._strict:\n sites = _adjust_missing_if_vntr(sites)\n\n # TODO: Add check for integers in array and change\n # missing calls to any of the other\n sites = [[tuple(AMBIGUOUS_DICT[call]) if call in\n AMBIGUOUS_DICT else tuple([call]) for call in site]\n for site in sites]\n\n sites = map(list, zip(*sites))\n # Check if any sites got expanded\n if np.all(\n np.equal(\n [[len(site_i) for site_i in\n site] for site in sites], 1)):\n self._patterns.add_unambiguous_amplicon(\n sites, amplicon, genome_size)\n else:\n self._patterns.add_ambiguous_amplicon(\n sites, amplicon, genome_size)\n\n else:\n 
self._patterns.add_unambiguous_amplicon(\n sites, amplicon, genome_size)\n # send directly to pattern\n\n def _amplicon(self, genome, amplicon, genome_size):\n if len(amplicon):\n start = amplicon.iloc[0].Start\n stop = amplicon.iloc[-1].Stop\n upstream, downstream = self._get_primer_zones(\n start, stop, genome, genome_size\n )\n pass_filter = self._filter(upstream, downstream)\n if pass_filter:\n return amplicon\n else:\n # do recursive call with last site removed\n return self._amplicon(\n genome, amplicon.iloc[:-1], genome_size)\n else:\n return None\n\n def _get_primer_zones(\n self, amp_start, amp_stop, genome, genome_size):\n # subtract one from each index to get back to zeroindexing\n # TODO: Add option for circular chromosome to wrap around\n up_start = amp_start - self._pz_size - 1 if amp_start - self._pz_size > 1 else 0\n up_stop = amp_start - 1\n down_start = amp_stop\n down_stop = (\n amp_stop + self._pz_size if amp_stop +\n self._pz_size < genome_size else genome_size - 1)\n upstream_flags = np.array(\n self._flags[genome].iloc[up_start: up_stop].Flag, dtype=bool)\n downstream_flags = np.array(\n self._flags[genome].iloc[down_start: down_stop].Flag, dtype=bool)\n return upstream_flags, downstream_flags\n\n def _filter(self, upstream, downstream):\n upstream = np.array([sum(1 for _ in g[1])\n for g in it.groupby(upstream) if np.all(g[0])], dtype=int)\n downstream = np.array([sum(1 for _ in g[1])\n for g in it.groupby(downstream) if np.all(g[0])], dtype=int)\n # Dividing by pz_size even though targets near the beginning and\n # end may be shorter.\n upstream = np.divide(\n np.sum(\n upstream[upstream > self._pz_filter_length]),\n float(self._pz_size)) * 100\n downstream = np.divide(\n np.sum(\n downstream[downstream > self._pz_filter_length]),\n float(self._pz_size)) * 100\n if (upstream >= self._pz_filter_percent\n and downstream >= self._pz_filter_percent):\n return True\n else:\n return False\n\n\ndef _adjust_missing_if_vntr(sites):\n for site in sites:\n mask = [(s).isdigit() for s in site]\n if np.any(mask):\n site[site == \"X\"] = \"*\"\n AMBIGUOUS_DICT['*'] = list(np.unique(site[mask]))\n return sites\n" ]
[ [ "numpy.unique", "pandas.DataFrame", "numpy.setdiff1d", "numpy.all", "numpy.any", "numpy.array", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
microsoft/AR2
[ "c8df9379e0f7d50f9f52aa34982908c88b408a24" ]
[ "AR2/wiki/co_training_wiki_train.py" ]
[ "from os.path import join\nimport sys\n\nsys.path += ['../']\nimport argparse\nimport glob\nimport json\nimport logging\nimport os\nimport random\nimport numpy as np\nimport torch\n\nsys.path.append(os.getcwd())\nsys.path.append(os.path.abspath(os.path.dirname(os.getcwd())))\n# \nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm import tqdm\nimport torch.distributed as dist\nfrom torch import nn\nimport torch.nn.functional as F\nfrom model.models import BiBertEncoder, HFBertEncoder, Reranker\nfrom utils.lamb import Lamb\nimport random\nfrom transformers import (\n AdamW,\n BertTokenizer,\n get_linear_schedule_with_warmup,\n)\ntry:\n from torch.utils.tensorboard import SummaryWriter\nexcept ImportError:\n from tensorboardX import SummaryWriter\nimport pandas as pd\n\nlogger = logging.getLogger(__name__)\nfrom utils.util import (\n set_seed,\n is_first_worker,\n TraditionDataset\n)\nfrom utils.dpr_utils import (\n load_states_from_checkpoint,\n get_model_obj,\n CheckpointState,\n all_gather_list\n)\nimport collections\nretrieverBatch = collections.namedtuple(\n \"BiENcoderInput\",\n [\n \"q_ids\",\n \"q_attn_mask\",\n \"c_ids\",\n \"c_attn_mask\",\n \"c_q_mapping\",\n \"is_positive\",\n ],\n)\n\ndef get_optimizer(args, model: nn.Module, weight_decay: float = 0.0,\n lr=0.0, eps=0.0) -> torch.optim.Optimizer:\n no_decay = ['bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in model.named_parameters() if p.requires_grad and not any(nd in n for nd in no_decay)],\n 'weight_decay': weight_decay},\n {'params': [p for n, p in model.named_parameters() if p.requires_grad and any(nd in n for nd in no_decay)],\n 'weight_decay': 0.0}\n ]\n if args.optimizer == \"adamW\":\n return AdamW(optimizer_grouped_parameters, lr=lr, eps=eps)\n elif args.optimizer == \"lamb\":\n return Lamb(optimizer_grouped_parameters, lr=lr, eps=eps)\n else:\n raise Exception(\"optimizer {0} not recognized! 
Can only be lamb or adamW\".format(args.optimizer))\n\n\ndef get_bert_reader_components(args, **kwargs):\n encoder = HFBertEncoder.init_encoder(\n args, model_type=args.reranker_model_type\n )\n hidden_size = encoder.config.hidden_size\n reranker = Reranker(encoder, hidden_size)\n\n return reranker\n\n\ndef train(args, model, reranker_model, tokenizer, global_step=0):\n \"\"\" Train the model \"\"\"\n logger.info(\"Training/evaluation parameters %s\", args)\n tb_writer = None\n if is_first_worker():\n tb_writer = SummaryWriter(log_dir=args.log_dir)\n\n model.to(args.device)\n reranker_model.to(args.device)\n args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) # nll loss for query\n\n optimizer = get_optimizer(args, model, weight_decay=args.weight_decay,\n lr=args.learning_rate, eps=args.adam_epsilon)\n reranker_optimizer = get_optimizer(args, reranker_model, weight_decay=args.weight_decay,\n lr=args.reranker_learning_rate, eps=args.adam_epsilon)\n if args.fp16:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)\n reranker_model, reranker_optimizer = amp.initialize(reranker_model, reranker_optimizer,\n opt_level=args.fp16_opt_level)\n\n # Distributed training (should be after apex fp16 initialization)\n if args.local_rank != -1:\n from apex.parallel import DistributedDataParallel as DDP\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[args.rank], output_device=args.rank, find_unused_parameters=False,\n )\n reranker_model = torch.nn.parallel.DistributedDataParallel(\n reranker_model, device_ids=[args.rank], output_device=args.rank, find_unused_parameters=False,\n )\n\n tr_loss = 0.0\n tr_normal_loss = 0.0\n tr_contr_loss = 0.0\n model.zero_grad()\n model.train()\n reranker_model.zero_grad()\n reranker_model.train()\n set_seed(args) # Added here for reproductibility\n train_flag = 1\n step = 0\n retriever_max_step = args.max_steps * (1 - args.iteration_reranker_step / args.iteration_step)\n reranker_max_step = args.max_steps * (args.iteration_reranker_step / args.iteration_step)\n\n scheduler = get_linear_schedule_with_warmup(\n optimizer, num_warmup_steps=0.1 * retriever_max_step, num_training_steps=retriever_max_step\n )\n reranker_scheduler = get_linear_schedule_with_warmup(\n reranker_optimizer, num_warmup_steps=0.1 * reranker_max_step, num_training_steps=reranker_max_step\n )\n if global_step!=0:\n train_data_path = os.path.join(args.ann_dir, 'train_ce_' + str(global_step) + '.json')\n model_path = os.path.join(args.output_dir, 'checkpoint-' + str(global_step))\n reranker_model_path = os.path.join(args.output_dir, 'checkpoint-reranker' + str(global_step))\n saved_state = load_states_from_checkpoint(model_path)\n global_step = _load_saved_state(model, optimizer, scheduler, saved_state)\n saved_state = load_states_from_checkpoint(reranker_model_path)\n global_step = _load_saved_state(reranker_model, reranker_optimizer, reranker_scheduler, saved_state)\n train_dataset = TraditionDataset(train_data_path, tokenizer, num_hard_negatives=args.number_neg,\n max_seq_length=args.max_seq_length,max_q_length=args.max_query_length)\n else:\n train_dataset = TraditionDataset(args.origin_data_dir, tokenizer, num_hard_negatives=args.number_neg,\n max_seq_length=args.max_seq_length,max_q_length=args.max_query_length)\n\n train_sample = RandomSampler(train_dataset) if 
args.local_rank == -1 else DistributedSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, sampler=train_sample,\n collate_fn=TraditionDataset.get_collate_fn(args),\n batch_size=args.train_batch_size, num_workers=10)\n epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\", disable=args.local_rank not in [-1, 0])\n train_dataloader_iter = iter(epoch_iterator)\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Max steps = %d\", args.max_steps)\n logger.info(\" Instantaneous batch size per GPU = %d\", args.per_gpu_train_batch_size)\n logger.info(\n \" Total train batch size (w. parallel, distributed & accumulation) = %d\",\n args.train_batch_size\n * args.gradient_accumulation_steps\n * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),\n )\n logger.info(\" Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n logger.info(\" Dataset example num = %d\", len(train_dataset))\n logger.info(\" step each epoch = %d\",\n len(train_dataset) // (args.train_batch_size *\n args.gradient_accumulation_steps))\n\n while global_step < args.max_steps:\n try:\n batch = next(train_dataloader_iter)\n except StopIteration:\n epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\", disable=args.local_rank not in [-1, 0])\n train_dataloader_iter = iter(epoch_iterator)\n batch = next(train_dataloader_iter)\n dist.barrier()\n\n step += 1\n\n batch_retriever = batch['retriever']\n inputs_retriever = {\"query_ids\": batch_retriever[0].long().to(args.device),\n \"attention_mask_q\": batch_retriever[1].long().to(args.device),\n \"input_ids_a\": batch_retriever[2].long().to(args.device),\n \"attention_mask_a\": batch_retriever[3].long().to(args.device)}\n # reranker forward input_ids: T, attention_mask: T, start_positions=None, end_positions=None, answer_mask=None\n batch_reranker = tuple(t.to(args.device) for t in batch['reranker'])\n inputs_reranker = {\"input_ids\": batch_reranker[0].long(), \"attention_mask\": batch_reranker[1].long()}\n local_positive_idxs = batch_retriever[4]\n\n if train_flag == 0: # 0: 训练retriever:\n eps = 1e-7\n model.train()\n reranker_model.eval()\n local_q_vector, local_ctx_vectors = model(**inputs_retriever)\n retriever_local_ctx_vectors = local_ctx_vectors.reshape(local_q_vector.size(0),\n local_ctx_vectors.size(0) // local_q_vector.size(\n 0), -1)\n retriever_simila = torch.einsum(\"bh,bdh->bd\", local_q_vector, retriever_local_ctx_vectors)\n if args.scale_simmila:\n retriever_dist_p = F.softmax(retriever_simila / (local_q_vector.size(1) ** (1 / 2)), dim=1)\n else:\n retriever_dist_p = F.softmax(retriever_simila, dim=1)\n with torch.no_grad():\n output_reranker = reranker_model(**inputs_reranker)\n binary_logits, relevance_logits, _ = output_reranker\n reranker_logits = relevance_logits / args.temperature_normal\n probs = F.softmax(reranker_logits, dim=1)\n reranker_dist_p = probs\n\n\n positive_logits = relevance_logits[:,:1]\n negtive_logits = relevance_logits[:,:]\n positive_logits_expand = positive_logits.expand(negtive_logits.size())\n reward_logits = torch.stack((positive_logits_expand,negtive_logits),-1)\n reward_prob = F.softmax(reward_logits,dim=2)\n reward = torch.log(reward_prob[:,:,0]+eps)\n \n normal_loss = -reranker_dist_p*torch.log(retriever_dist_p+eps)\n normal_loss = normal_loss.sum()/retriever_dist_p.size(0)\n\n \n adv_loss = reward * torch.log(retriever_dist_p+eps)\n adv_loss = adv_loss.sum()\n\n loss = args.adv_lambda * adv_loss + (1-args.adv_lambda) * normal_loss\n loss = loss / 
args.gradient_accumulation_steps\n if args.fp16:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n tr_loss += loss.item()\n tr_normal_loss += normal_loss.item()\n if train_flag == 1: # 1: 训练reranker:\n reranker_model.train()\n model.eval()\n output_reranker = reranker_model(**inputs_reranker)\n binary_logits, relevance_logits, _ = output_reranker\n\n relevance_target = torch.zeros(relevance_logits.size(0), dtype=torch.long).to(args.device)\n loss_fct = torch.nn.CrossEntropyLoss()\n contr_loss = loss_fct(relevance_logits, relevance_target)\n\n loss = contr_loss + 0*binary_logits.sum()\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n with amp.scale_loss(loss, reranker_optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n tr_loss += loss.item()\n tr_contr_loss += contr_loss.item()\n if (step + 1) % args.gradient_accumulation_steps == 0:\n if train_flag == 0:\n if args.fp16:\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)\n else:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n optimizer.step()\n scheduler.step()\n model.zero_grad()\n if train_flag == 1:\n if args.fp16:\n torch.nn.utils.clip_grad_norm_(amp.master_params(reranker_optimizer), args.max_grad_norm)\n else:\n torch.nn.utils.clip_grad_norm_(reranker_model.parameters(), args.max_grad_norm)\n reranker_optimizer.step()\n reranker_scheduler.step()\n reranker_model.zero_grad()\n global_step += 1\n\n if args.logging_steps > 0 and global_step % args.logging_steps == 0:\n logs = {}\n loss_scalar = tr_loss / args.logging_steps\n normal_loss_scalar = tr_normal_loss / args.logging_steps\n contr_loss_scalar = tr_contr_loss / args.logging_steps\n learning_rate_scalar = scheduler.get_last_lr()[0]\n logs[\"learning_rate\"] = learning_rate_scalar\n logs[\"loss\"] = loss_scalar\n logs[\"normal_loss\"] = normal_loss_scalar\n logs[\"contr_loss\"] = contr_loss_scalar\n tr_loss = 0\n tr_normal_loss = 0\n tr_contr_loss = 0\n if is_first_worker():\n for key, value in logs.items():\n tb_writer.add_scalar(key, value, global_step)\n logger.info(json.dumps({**logs, **{\"step\": global_step}}))\n\n if global_step % args.iteration_step > args.iteration_reranker_step:\n # 训练retriever\n train_flag = 0\n elif 0 < global_step % args.iteration_step < args.iteration_reranker_step:\n # 训练reranker\n train_flag = 1\n elif global_step % args.iteration_step == 0:\n if is_first_worker():\n _save_checkpoint(args, model, optimizer, scheduler, global_step)\n _save_reranker_checkpoint(args, reranker_model, reranker_optimizer, reranker_scheduler, global_step)\n torch.distributed.barrier()\n train_flag = 1\n break\n if args.save_steps > 0 and global_step % args.save_steps == 0:\n if is_first_worker():\n _save_checkpoint(args, model, optimizer, scheduler, global_step)\n _save_reranker_checkpoint(args, reranker_model, reranker_optimizer, reranker_scheduler, global_step)\n # tb_writer.add_scalar(\"dev_nll_loss/dev_avg_rank\", validate_rank, global_step)\n if global_step >= args.max_steps:\n break\n if args.local_rank == -1 or torch.distributed.get_rank() == 0:\n tb_writer.close()\n return global_step\n\n\ndef sum_main(x, opt):\n if opt.world_size > 1:\n dist.reduce(x, 0, op=dist.ReduceOp.SUM)\n return x\n\n\ndef _save_checkpoint(args, model, optimizer, scheduler, step: int) -> str:\n offset = step\n epoch = 0\n model_to_save = get_model_obj(model)\n cp = os.path.join(args.output_dir, 'checkpoint-' + str(offset))\n\n 
meta_params = {}\n\n state = CheckpointState(model_to_save.state_dict(),\n optimizer.state_dict(),\n scheduler.state_dict(),\n offset,\n epoch, meta_params\n )\n torch.save(state._asdict(), cp)\n logger.info('Saved checkpoint at %s', cp)\n return cp\n\n\ndef _save_reranker_checkpoint(args, model, optimizer, scheduler, step: int) -> str:\n offset = step\n epoch = 0\n model_to_save = get_model_obj(model)\n cp = os.path.join(args.output_dir, 'checkpoint-reranker' + str(offset))\n\n meta_params = {}\n\n state = CheckpointState(model_to_save.state_dict(),\n optimizer.state_dict(),\n scheduler.state_dict(),\n offset,\n epoch, meta_params\n )\n torch.save(state._asdict(), cp)\n logger.info('Saved checkpoint at %s', cp)\n return cp\n\ndef _load_saved_state(model, optimizer, scheduler, saved_state: CheckpointState):\n epoch = saved_state.epoch\n step = saved_state.offset\n logger.info('Loading checkpoint @ step=%s', step)\n\n model_to_load = get_model_obj(model)\n logger.info('Loading saved model state ...')\n model_to_load.load_state_dict(saved_state.model_dict) # set strict=False if you use extra projection\n optimizer.load_state_dict(saved_state.optimizer_dict)\n scheduler.load_state_dict(saved_state.scheduler_dict)\n return step\n\n\ndef get_arguments():\n parser = argparse.ArgumentParser()\n\n # Required parameters\n parser.add_argument(\n \"--model_type\",\n default=None,\n type=str,\n required=True,\n help=\"Model type selected in the list:\",\n )\n parser.add_argument(\n \"--model_name_or_path\",\n default=None,\n type=str,\n )\n parser.add_argument(\n \"--model_name_or_path_ict\",\n default=None,\n type=str,\n )\n parser.add_argument(\n \"--output_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\",\n )\n\n parser.add_argument(\n \"--num_epoch\",\n default=0,\n type=int,\n help=\"Number of epoch to train, if specified will use training data instead of ann\",\n )\n\n # Other parameters\n parser.add_argument(\n \"--config_name\", default=\"\", type=str, help=\"Pretrained config name or path if not the same as model_name\",\n )\n parser.add_argument(\n \"--tokenizer_name\",\n default=\"\",\n type=str,\n help=\"Pretrained tokenizer name or path if not the same as model_name\",\n )\n parser.add_argument(\n \"--corpus_path\",\n default=\"\",\n type=str,\n help=\"Where do you want to store the pre-trained models downloaded from s3\",\n )\n parser.add_argument(\n \"--max_seq_length\",\n default=128,\n type=int,\n help=\"The maximum total input sequence length after tokenization. Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\",\n )\n\n parser.add_argument(\n \"--max_query_length\",\n default=32,\n type=int,\n help=\"The maximum total input sequence length after tokenization. 
Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\",\n )\n\n parser.add_argument(\"--triplet\", default=False, action=\"store_true\", help=\"Whether to run training.\")\n parser.add_argument(\n \"--log_dir\",\n default=None,\n type=str,\n help=\"Tensorboard log dir\",\n )\n\n parser.add_argument(\n \"--optimizer\",\n default=\"adamW\",\n type=str,\n help=\"Optimizer - lamb or adamW\",\n )\n\n parser.add_argument(\n \"--per_gpu_train_batch_size\", default=8, type=int, help=\"Batch size per GPU/CPU for training.\",\n )\n parser.add_argument(\n \"--gradient_accumulation_steps\",\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\",\n )\n parser.add_argument(\"--learning_rate\", default=5e-5, type=float, help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--weight_decay\", default=0.0, type=float, help=\"Weight decay if we apply some.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-8, type=float, help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--max_grad_norm\", default=2.0, type=float, help=\"Max gradient norm.\")\n parser.add_argument(\n \"--max_steps\",\n default=300000,\n type=int,\n help=\"If > 0: set total number of training steps to perform\",\n )\n parser.add_argument(\"--warmup_steps\", default=0, type=int, help=\"Linear warmup over warmup_steps.\")\n parser.add_argument(\"--logging_steps\", type=int, default=500, help=\"Log every X updates steps.\")\n parser.add_argument(\"--save_steps\", type=int, default=500, help=\"Save checkpoint every X updates steps.\")\n\n parser.add_argument(\"--no_cuda\", action=\"store_true\", help=\"Avoid using CUDA when available\")\n parser.add_argument(\"--seed\", type=int, default=42, help=\"random seed for initialization\")\n\n parser.add_argument(\n \"--fp16\",\n action=\"store_true\",\n help=\"Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit\",\n )\n parser.add_argument(\n \"--fp16_opt_level\",\n type=str,\n default=\"O1\",\n help=\"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].\"\n \"See details at https://nvidia.github.io/apex/amp.html\",\n )\n parser.add_argument(\n \"--gradient_checkpointing\",\n default=False,\n action=\"store_true\",\n )\n parser.add_argument(\n \"--contr_loss\",\n default=False,\n action=\"store_true\",\n )\n\n parser.add_argument(\n \"--normal_loss\",\n default=False,\n action=\"store_true\",\n )\n\n parser.add_argument(\n \"--origin_data_dir\",\n default=None,\n type=str,\n )\n parser.add_argument(\n \"--origin_data_dir_dev\",\n default=None,\n type=str,\n )\n # ----------------- ANN HyperParam ------------------\n\n parser.add_argument(\n \"--load_optimizer_scheduler\",\n default=False,\n action=\"store_true\",\n help=\"load scheduler from checkpoint or not\",\n )\n\n parser.add_argument(\n \"--single_warmup\",\n default=True,\n action=\"store_true\",\n help=\"use single or re-warmup\",\n )\n\n parser.add_argument(\"--adv_data_path\",\n type=str,\n default=None,\n help=\"adv_data_path\", )\n\n parser.add_argument(\"--ann_data_path\",\n type=str,\n default=None,\n help=\"adv_data_path\", )\n parser.add_argument(\n \"--fix_embedding\",\n default=False,\n action=\"store_true\",\n help=\"use single or re-warmup\",\n )\n parser.add_argument(\n \"--continue_train\",\n default=False,\n action=\"store_true\",\n help=\"use single or re-warmup\",\n )\n parser.add_argument(\n \"--adv_loss_alpha\",\n default=0.3,\n type=float,\n 
help=\"use single or re-warmup\",\n )\n\n parser.add_argument(\"--reranker_model_path\", type=str, default=\"\", help=\"For distant debugging.\")\n parser.add_argument(\"--reranker_model_type\", type=str, default=\"\", help=\"For distant debugging.\")\n parser.add_argument(\"--number_neg\", type=int, default=20, help=\"For distant debugging.\")\n parser.add_argument(\"--adv_lambda\", default=0., type=float)\n parser.add_argument(\"--adv_steps\", default=3, type=int)\n # ----------------- End of Doc Ranking HyperParam ------------------\n parser.add_argument(\"--local_rank\", type=int, default=-1, help=\"For distributed training: local_rank\")\n parser.add_argument(\"--server_ip\", type=str, default=\"\", help=\"For distant debugging.\")\n parser.add_argument(\"--server_port\", type=str, default=\"\", help=\"For distant debugging.\")\n\n parser.add_argument(\"--test_qa_path\", type=str, default=\"\", help=\"For distant debugging.\")\n parser.add_argument(\"--train_qa_path\", type=str, default=\"\", help=\"For distant debugging.\")\n parser.add_argument(\"--dev_qa_path\", type=str, default=\"\", help=\"For distant debugging.\")\n parser.add_argument(\"--passage_path\", type=str, default=\"\", help=\"For distant debugging.\")\n parser.add_argument(\"--iteration_step\", default=80, type=int)\n parser.add_argument(\"--iteration_reranker_step\", default=40, type=int)\n parser.add_argument(\"--temperature_normal\", default=1, type=float)\n\n parser.add_argument(\"--scale_simmila\", default=False, action=\"store_true\")\n parser.add_argument(\"--reranker_learning_rate\", default=0,type=float)\n parser.add_argument(\"--load_cache\", default=False, action=\"store_true\")\n parser.add_argument(\"--ann_dir\", type=str, default=\"\", help=\"For distant debugging.\")\n parser.add_argument(\"--normal_term\", type=str, default=\"cross_e\", help=\"For distant debugging.\")\n \n parser.add_argument(\"--global_step\", type=int, default=0, help=\"For distant debugging.\")\n args = parser.parse_args()\n\n return args\n\n\ndef set_env(args):\n # Setup distant debugging if needed\n if args.server_ip and args.server_port:\n # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script\n import ptvsd\n\n print(\"Waiting for debugger attach\")\n ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)\n ptvsd.wait_for_attach()\n\n # Setup CUDA, GPU & distributed training\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n args.n_gpu = torch.cuda.device_count()\n else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n torch.distributed.init_process_group(backend=\"nccl\")\n args.n_gpu = 1\n args.device = device\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,\n )\n logger.warning(\n \"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\",\n args.local_rank,\n device,\n args.n_gpu,\n bool(args.local_rank != -1),\n args.fp16,\n )\n\n # Set seed\n set_seed(args)\n\n\ndef load_states_from_checkpoint_ict(model_file: str) -> CheckpointState:\n from torch.serialization import default_restore_location\n logger.info('Reading saved model from 
%s', model_file)\n state_dict = torch.load(model_file, map_location=lambda s, l: default_restore_location(s, 'cpu'))\n logger.info('model_state_dict keys %s', state_dict.keys())\n new_stae_dict = {}\n for key, value in state_dict['model']['query_model']['language_model'].items():\n new_stae_dict['question_model.' + key] = value\n for key, value in state_dict['model']['context_model']['language_model'].items():\n new_stae_dict['ctx_model.' + key] = value\n return new_stae_dict\n\n\ndef load_model(args):\n # store args\n if args.local_rank != -1:\n args.world_size = torch.distributed.get_world_size()\n args.rank = dist.get_rank()\n\n # Load pretrained model and tokenizer\n if args.local_rank not in [-1, 0]:\n torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab\n if is_first_worker():\n # Create output directory if needed\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n tokenizer = BertTokenizer.from_pretrained(\n \"bert-base-uncased\",\n do_lower_case=True)\n model = BiBertEncoder(args)\n if args.model_name_or_path_ict is not None:\n saved_state = load_states_from_checkpoint_ict(args.model_name_or_path_ict)\n model.load_state_dict(saved_state)\n if args.model_name_or_path is not None:\n saved_state = load_states_from_checkpoint(args.model_name_or_path)\n model.load_state_dict(saved_state.model_dict, strict=False)\n\n reranker_model = get_bert_reader_components(args)\n if args.reranker_model_path != '':\n reranker_saved_state = load_states_from_checkpoint(args.reranker_model_path)\n reranker_model.load_state_dict(reranker_saved_state.model_dict, strict=False)\n\n\n if args.local_rank == 0:\n torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab\n return tokenizer, model, reranker_model\n\ndef main():\n args = get_arguments()\n set_env(args)\n tokenizer, model,reranker_model = load_model(args)\n\n basic_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n formatter = logging.Formatter(basic_format)\n log_path = os.path.join(args.output_dir, 'log.txt')\n # sh = logging.StreamHandler()\n if args.global_step ==0:\n handler = logging.FileHandler(log_path, 'w', 'utf-8')\n else:\n handler = logging.FileHandler(log_path, 'a', 'utf-8')\n\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n # logger.addHandler(sh)\n logger.setLevel(logging.INFO if args.local_rank in [-1, 0] else logging.WARN)\n print(logger)\n dist.barrier()\n global_step = args.global_step\n # eval_first(args,model,0,renew_tools)\n if global_step >= args.max_steps:\n pass\n else:\n global_step = train(args, model,reranker_model, tokenizer,global_step=global_step) # 训练,然后当需要弹出的时候弹出\n logger.info(\" global_step = %s\", global_step)\n\n if args.local_rank != -1:\n dist.barrier()\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.nn.functional.softmax", "torch.serialization.default_restore_location", "torch.no_grad", "torch.cuda.is_available", "torch.device", "torch.distributed.get_rank", "torch.nn.CrossEntropyLoss", "torch.distributed.init_process_group", "torch.utils.data.distributed.DistributedSampler", "torch.einsum", "torch.distributed.barrier", "torch.log", "torch.stack", "torch.cuda.device_count", "torch.distributed.get_world_size", "torch.nn.parallel.DistributedDataParallel", "torch.cuda.set_device", "torch.utils.data.RandomSampler", "torch.distributed.reduce" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Berkeley-Data/hpt
[ "65bd3cedee83d43fdc8b57646dcd2e2642ddee30" ]
[ "src/utils/plot_basetrain_robust.py" ]
[ "\nimport glob\nimport shutil\nimport sys\nimport json\nimport pandas as pd\nimport argparse \nfrom matplotlib import pyplot as plt\nfrom matplotlib.ticker import NullFormatter # useful for `logit` scale\nimport seaborn as sns\nimport os \n\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='')\n parser.add_argument(\n '--results-dir',\n default=os.path.realpath(os.path.join(dir_path, \"..\", \"results\")))\n parser.add_argument(\n '--out-dir',\n default=os.path.realpath(os.path.join(dir_path, \"..\", \"plot-results\")))\n parser.add_argument(\n '--dataset',\n default=\"all\")\n\n args = parser.parse_args()\n return args\n\n\n#only deals with linear plots \ndef gen_plots(data, options):\n\n # linear plots\n lin_dir = os.path.join(options['out_dir'], 'basetrain_robustness')\n os.makedirs(lin_dir, exist_ok=True)\n linear_data = data[data.result_type=='linear-eval']\n linear_data = linear_data[linear_data.variant==\"linear-eval-lr\"]\n\n #since catplot is a figure level function, it produces a new, separate plot which doesn't follow style of past graphs\n fig= sns.catplot(x='basetrain', y='result', hue='basetrain_robust', data=linear_data, kind=\"point\",s=10,\n linestyle=\"-\", legend_out=False, order=[\"MoCo 20-epochs\",\"MoCo 200-epochs\",\"MoCo 800-epochs\"], aspect=11.7/8.27)\n plt.axhline(y=options['asymptote'], c='red', linestyle='dashed', label=\"Best MoCo Random Init\")\n \n #sets axis labels and title of graph\n y_axis = \"Accuracy\"\n if options['dataset_type'] == 'chexpert':\n y_axis = \"AUROC\"\n\n fig.set(xlabel='Pretrained Model', ylabel=y_axis, title=options['data_name'].replace(\"_\", \" \").title())\n\n #sets legend\n fig.ax.legend(loc=4,title=\"Pre-training\",fontsize=\"x-small\")\n # handles, _ = fig.ax.get_legend_handles_labels()\n # fig.ax.legend(handles = handles[1:],labels=labels,title=\"Basetrain\")\n # plt.show()\n\n #gets rid of other graph created \n # plt.close(1) \n\n #creates output file\n outplot = os.path.join(lin_dir, '{}.pdf'.format(options['data_name']))\n\n #saves Figure 1, but Figure 1 is the empty graph created (NOT SNS Graph)\n fig.savefig(outplot, format='pdf', bbox_inches='tight')\n\n\ndef reduce(data):\n best = 0\n for i, row in data.iterrows():\n if best < row['result']:\n best = row['result']\n return best\n\ndef main(args):\n\n #must pass in dataset arg for proper results\n\n os.makedirs(args.out_dir, exist_ok=True)\n # setup plots\n sns.set_style('darkgrid')\n sns.set()\n\n frames = [] #array that collects dataframes from each file\n if(args.dataset == \"all\"):\n dataset_type = \"*\"\n else:\n dataset_type = args.dataset\n\n nobt_baseline = 0\n\n #gets all the basetrain results from the specified dataset\n result_files = glob.glob(os.path.join(args.results_dir, dataset_type + \"*basetrain*.json\"), recursive=True)\n result_files2 = glob.glob(os.path.join(args.results_dir, dataset_type + \"*bt_robust*.json\"), recursive=True)\n result_files += result_files2\n result_files.append(os.path.join(args.results_dir, dataset_type + \"_results.json\")) #gets file with basetrian results\n\n for resfile in result_files:\n with open(resfile, 'r') as infile:\n raw_data = json.load(infile)\n\n print(resfile)\n\n types = [] #used for concatenating the resultzs from each basetrained model\n data = pd.DataFrame(raw_data.values())\n\n\n #finds the relevant values for moco bt no bt\n #mainly done to work around the baseline json file (has extra info that we don't want)\n if (dataset_type + 
\"_results.json\") in resfile:\n linear_data = data[data.result_type=='linear-eval']\n linear_data = linear_data[linear_data.variant==\"linear-eval-lr\"]\n\n data_moco = linear_data[data.basetrain==\"moco_v2_800ep\"]\n data_moco = data_moco[data_moco.pretrain_data==dataset_type]\n data_moco_x = data_moco[(data_moco['pretrain_iters']==\"5000\")]\n data_moco_y = data_moco[(data_moco['pretrain_iters']==\"0\")]\n data_moco_x = data_moco_x.sort_values(by=['result'],ascending=False).head(1)\n data_moco_y = data_moco_y.sort_values(by=['result'],ascending=False).head(1)\n data_moco = pd.concat([data_moco_x,data_moco_y],ignore_index=True)\n\n #just for asympotote\n data_nobt = linear_data[data.basetrain==\"no\"]\n data_nobt= data_nobt[data_nobt.pretrain_data==dataset_type]\n nobt_baseline = reduce(data_nobt)\n\n\n #appended to list \n types.append(data_moco)\n\n #all concat to new dataframe \n data = pd.concat(types, ignore_index=True)\n\n #get baseline and update results\n\n data = data.astype({\n \"pretrain_iters\": int\n })\n if not (data.result > 1).all():\n data.result *=100\n\n data.basetrain = data.basetrain.replace(\"moco_v2_800ep\", \"MoCo 800-epochs\")\n data.basetrain = data.basetrain.replace(\"moco_v2_200ep_pretrain\", \"MoCo 200-epochs\")\n data.basetrain = data.basetrain.replace(\"moco_v2_20ep_pretrain\", \"MoCo 20-epochs\")\n data.basetrain = data.basetrain.replace(\"no\", \"random init\")\n data.basetrain = data.basetrain.replace(\"none\", \"random init\")\n frames.append(data)\n\n #combines all the dataframes from each file into 1 large dataframe\n result = pd.concat(frames, ignore_index=True)\n dataname = dataset_type + \"_basetrain_robustness\"\n asymptote = nobt_baseline\n if asymptote < 1:\n asymptote *=100\n print(result)\n print(result.result)\n print(asymptote)\n\n basetrain_robust = [\"a\"]*len(result)\n result[\"basetrain_robust\"] = basetrain_robust\n\n print(result.pretrain_iters)\n for i, row in result.iterrows():\n name = \"\"\n if row['pretrain_iters'] == 0:\n name = \"MoCo Direct Transfer\"\n else:\n name = \"HPT\"\n\n result.at[i,\"basetrain_robust\"] = name\n print(result)\n\n #only ran extra experiments for moco_20\n #take only the best result (50k iterations)\n result_20_m = result[(result['basetrain'] == \"MoCo 20-epochs\") & (result['basetrain_robust']== \"HPT\")].sort_values(by=['result'], ascending=False).head(1)\n \n result_20_o = result[(result['basetrain'] == \"MoCo 20-epochs\") & (result['basetrain_robust']== \"MoCo Direct Transfer\")].sort_values(by=['result'], ascending=False).head(1)\n\n result_other = result[((result['basetrain'] != \"MoCo 20-epochs\") | (result['basetrain_robust']== \"MoCo Direct Transfer\")) & ((result['basetrain'] != \"MoCo 20-epochs\") | (result['basetrain_robust']!= \"MoCo Direct Transfer\"))]\n result = pd.concat([result_20_m,result_20_o,result_other], ignore_index=True)\n\n\n #changes all the dataset names to dataset_augmentation\n \n result.dataset = dataset_type\n\n #prints new concatenated dataframe \n print(result)\n print(result.result)\n \n gen_plots(result, {\n 'out_dir': args.out_dir,\n 'data_name': dataname,\n 'asymptote': asymptote,\n 'dataset_type': dataset_type\n })\n\nif __name__ == \"__main__\":\n main(parse_args())" ]
[ [ "pandas.concat", "matplotlib.pyplot.axhline" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
ankycheng/damages-calculator
[ "9ad958cd50911aef83ac92206950d9e29ab6e6a3" ]
[ "charts.py" ]
[ "import calculateTool.legaltechDataProcess as ltp\nimport jieba.analyse\nimport jieba\nimport pandas as pd\nimport numpy as np\nimport nltk\nfrom collections import Counter\nimport re\nimport seaborn as sns\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.font_manager import findfont, FontProperties\nimport matplotlib.font_manager as fm\nmatplotlib.use(\"agg\")\nimport random\nimport io\nimport base64\n\n# set fonts stuff\nfont_dirs = ['statics/fonts/', ]\nfont_files = fm.findSystemFonts(fontpaths=font_dirs)\nfont_list = fm.createFontList(font_files)\nfm.fontManager.ttflist.extend(font_list)\n\nplt.rcParams['axes.unicode_minus'] = False\nplt.rcParams['font.family'] = ['Arial Unicode MS']\nplt.rcParams['font.sans-serif'] = ['Arial Unicode MS'] \nplt.rcParams['font.size'] = 24\nplt.rcParams[\"figure.figsize\"] = (20, 12)\nplt.subplots_adjust(left=None, bottom=0.3, right=None,\n top=1.5, hspace=.27, wspace=.2)\n\ndef jdReportHist(dataframe, col_name, *args):\n fig, (ax1, ax2) = plt.subplots(2, 1)\n ax1.set_facecolor('lemonchiffon')\n ax2.set_facecolor('lemonchiffon')\n label_name = []\n for key in args:\n select = dataframe[col_name].values.tolist()\n a = [key in i for i in select]\n dataframe = dataframe[a]\n dataframe = dataframe[dataframe['solatium_request'] < 10000000]\n x = dataframe['solatium_request'].values/10000\n c = [\"#\"+''.join([random.choice('0123456789ABCDEF')\n for j in range(6)])for i in range(1)]\n\n # 處理label\n label_name.append(key)\n try:\n label_name2 = '&'.join(label_name)\n except:\n label_name2 = label_name[0]\n\n p = x.mean()\n name = '&'.join([key for key in args])\n\n sns.distplot(x, bins=20, label=label_name2, kde=False, ax=ax1)\n ax1.set_xticks(range(0, 1100, 100))\n ax1.set_xlabel('請求金額(萬)')\n ax1.set_ylabel('人數')\n ax1.set_title(name+'--慰撫金請求金額直方圖(條件交集)')\n ax1.axvline(x=p, linewidth=4.5, color=c[0], label=label_name2+'平均金額')\n ax1.grid(color='b', linestyle='--', linewidth=0.2)\n ax1.legend()\n\n sns.kdeplot(x, shade=True, label=label_name2, ax=ax2)\n ax2.set_xticks(range(0, 1100, 100))\n ax2.set_xlabel('請求金額(萬)')\n ax2.set_title(name+'--慰撫金請求金額高斯分佈圖(條件交集)')\n ax2.axvline(x=p, linewidth=4.5, color=c[0], label=label_name2+'平均金額')\n ax2.legend()\n\n img = io.BytesIO()\n plt.tight_layout()\n plt.savefig(img, format='png', dpi=300)\n img.seek(0)\n graph_url = base64.b64encode(img.getvalue()).decode()\n plt.close()\n return 'data:image/png;base64,{}'.format(graph_url)\n\n\ndef jdReportHistGaussian(dataframe, col_name, *args):\n fig, (ax1, ax2) = plt.subplots(2, 1)\n ax1.set_facecolor('snow')\n ax2.set_facecolor('snow')\n\n for key in args:\n select = dataframe[col_name].values.tolist()\n a = [key in i for i in select]\n df2 = dataframe[a]\n df2 = df2[df2['solatium_request'] < 10000000]\n x = df2['solatium_request'].values/10000\n c = [\"#\"+''.join([random.choice('0123456789ABCDEF')\n for j in range(6)])for i in range(1)]\n\n p = x.mean()\n name = '&'.join([key for key in args])\n\n sns.distplot(x, bins=20, label=key, kde=False, ax=ax1)\n ax1.axvline(x=p, linewidth=4.5, color=c[0], label=key+'平均金額')\n ax1.set_xticks(range(0, 1100, 100))\n ax1.set_ylabel('人數')\n ax1.set_xlabel('請求金額(萬)')\n ax1.set_title(name+'慰撫金請求金額直方圖')\n ax1.grid(color='b', linestyle='--', linewidth=0.2)\n ax1.legend()\n\n sns.kdeplot(x, shade=True, label=key, ax=ax2)\n ax2.set_xticks(range(0, 1100, 100))\n ax2.set_xlabel('請求金額(萬)')\n ax2.set_title(name+'慰撫金請求金額高斯分佈圖')\n ax2.axvline(x=p, linewidth=4.5, color=c[0], label=key+'平均金額')\n ax2.legend()\n\n img = io.BytesIO()\n 
plt.tight_layout()\n plt.savefig(img, format='png')\n img.seek(0)\n graph_url = base64.b64encode(img.getvalue()).decode()\n plt.close()\n return 'data:image/png;base64,{}'.format(graph_url)\n\n\ndef jdReportHistRealPredict(dataframe, col_name, *args):\n fig, (ax1, ax2) = plt.subplots(2, 1)\n ax1.set_facecolor('lemonchiffon')\n ax2.set_facecolor('lemonchiffon')\n\n title_name = '&'.join([key for key in args])\n for keyword in args:\n select = dataframe[col_name].values.tolist()\n a = [keyword in i for i in select]\n dataframe = dataframe[a]\n\n for i in ['solatium_request', 'jd_solatium_predict']:\n dataframe = dataframe[dataframe[i] < 10000000]\n x = dataframe[i].values/10000\n\n c = [\"#\"+''.join([random.choice('0123456789ABCDEF')\n for j in range(6)])for i in range(1)]\n c2 = [\"#\"+''.join([random.choice('0123456789ABCDEF')\n for j in range(6)])for i in range(1)]\n\n if i == 'solatium_request':\n name = '請求金額'\n else:\n name = '實判預估金額'\n \n p = x.mean()\n sns.distplot(x, bins=20, label=name, kde=False, ax=ax1)\n ax1.set_xticks(range(0, 1100, 100))\n ax1.set_ylabel('人數')\n ax1.set_xlabel('金額(萬)')\n ax1.set_title(title_name+'--慰撫金請求金額V.S實判預估金額')\n ax1.axvline(x=p, linewidth=4.5, color=c[0], label=name+'平均金額')\n ax1.grid(color='b', linestyle='--', linewidth=0.2)\n ax1.legend()\n\n sns.kdeplot(x, shade=True, label=name, ax=ax2)\n ax2.set_xticks(range(0, 1100, 100))\n ax2.set_xlabel('金額(萬)')\n ax2.set_title(title_name+'--慰撫金請求金額V.S實判預估金額')\n ax2.axvline(x=p, linewidth=4.5, color=c2[0], label=name+'平均金額')\n ax2.legend()\n\n img = io.BytesIO()\n plt.tight_layout()\n plt.savefig(img, format='png', dpi=300)\n img.seek(0)\n graph_url = base64.b64encode(img.getvalue()).decode()\n plt.close()\n return 'data:image/png;base64,{}'.format(graph_url)\n\n\ndef jdReportScatterRealPredict(df, col_name, *args):\n df = df.reset_index()\n df = df[df['solatium_request'] < 10000000]\n df = df[df['jd_money'] < 50000000]\n df['solatium_request'] = df['solatium_request']/10000\n df['jd_solatium_predict'] = df['jd_solatium_predict']/10000\n df['jd_money'] = df['jd_money']/10000\n for keyword in args:\n select = df[col_name].values.tolist()\n a = [keyword in i for i in select]\n df = df[a]\n plt.rcParams['font.family'] = ['Arial Unicode MS']\n plt.rcParams['font.size'] = 18\n plt.rcParams[\"figure.figsize\"] = (28, 16)\n\n # 請求金額與主文判決總金額散點圖\n plt.subplot(1, 2, 1, facecolor='snow')\n name = '&'.join([key for key in args])\n cmap = sns.cubehelix_palette(dark=.3, light=.8, as_cmap=True)\n ax = sns.scatterplot(x=\"jd_solatium_predict\", y=\"jd_money\",\n hue=\"court\", size=\"jd_solatium_predict\", sizes=(100, 500),\n palette=\"Set3\", data=df)\n plt.title(name+'--各地法院慰撫金請求金額V.S主文總判金額', fontsize='medium')\n\n plt.subplot(1, 2, 2, facecolor='snow')\n df2 = df.groupby('court').count()\n df2 = df2.iloc[:, 0:1]\n df2 = df2.sort_values('index', ascending=False)\n df2 = df2.reset_index()\n ax2 = sns.barplot(x=\"index\", y=\"court\", data=df2)\n plt.title(name+'--各地法院案件數', fontsize='medium')\n\n # 標籤\n for x, y, tex in zip(df2['index'], df2.index, df2['index']):\n t = plt.text(x, y, int(tex), horizontalalignment='right',\n verticalalignment='center', fontdict={'color': 'black', 'size': 20})\n\n img = io.BytesIO()\n plt.tight_layout()\n plt.savefig(img, format='png')\n img.seek(0)\n graph_url = base64.b64encode(img.getvalue()).decode()\n plt.close()\n return 'data:image/png;base64,{}'.format(graph_url)\n\n\ndef build_graph(x_coordinates, y_coordinates):\n img = io.BytesIO()\n plt.plot([1, 2, 3, 4])\n plt.ylabel('some 
numbers')\n # plt.plot(x_coordinates, y_coordinates)\n plt.savefig(img, format='png')\n img.seek(0)\n graph_url = base64.b64encode(img.getvalue()).decode()\n plt.close()\n return 'data:image/png;base64,{}'.format(graph_url)\n" ]
[ [ "matplotlib.font_manager.fontManager.ttflist.extend", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.title", "matplotlib.use", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.ylabel", "matplotlib.font_manager.createFontList", "matplotlib.pyplot.subplot", "matplotlib.pyplot.close", "matplotlib.pyplot.subplots_adjust", "matplotlib.font_manager.findSystemFonts" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
iver56/keras-retinanet
[ "83feca1aa49a8a75ed5d4a2ab43d8c18c6cce3f7" ]
[ "keras_retinanet/bin/evaluate.py" ]
[ "#!/usr/bin/env python\n\n\"\"\"\nCopyright 2017-2018 Fizyr (https://fizyr.com)\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport argparse\nimport os\nimport sys\n\nimport keras\nimport tensorflow as tf\n\n# Allow relative imports when being executed as script.\nif __name__ == \"__main__\" and __package__ is None:\n sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))\n import keras_retinanet.bin\n __package__ = \"keras_retinanet.bin\"\n\n# Change these to absolute imports if you copy this script outside the keras_retinanet package.\nfrom .. import models\nfrom ..preprocessing.csv_generator import CSVGenerator\nfrom ..preprocessing.pascal_voc import PascalVocGenerator\nfrom ..utils.eval import evaluate\nfrom ..utils.keras_version import check_keras_version\n\n\ndef get_session():\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n return tf.Session(config=config)\n\n\ndef create_generator(args):\n if args.dataset_type == 'coco':\n # import here to prevent unnecessary dependency on cocoapi\n from ..preprocessing.coco import CocoGenerator\n\n validation_generator = CocoGenerator(\n args.coco_path,\n 'val2017',\n image_min_side=args.image_min_side,\n image_max_side=args.image_max_side\n )\n elif args.dataset_type == 'pascal':\n validation_generator = PascalVocGenerator(\n args.pascal_path,\n 'test',\n image_min_side=args.image_min_side,\n image_max_side=args.image_max_side\n )\n elif args.dataset_type == 'csv':\n validation_generator = CSVGenerator(\n args.annotations,\n args.classes,\n image_min_side=args.image_min_side,\n image_max_side=args.image_max_side\n )\n else:\n raise ValueError('Invalid data type received: {}'.format(args.dataset_type))\n\n return validation_generator\n\n\ndef parse_args(args):\n parser = argparse.ArgumentParser(description='Evaluation script for a RetinaNet network.')\n subparsers = parser.add_subparsers(help='Arguments for specific dataset types.', dest='dataset_type')\n subparsers.required = True\n\n coco_parser = subparsers.add_parser('coco')\n coco_parser.add_argument('coco_path', help='Path to dataset directory (ie. /tmp/COCO).')\n\n pascal_parser = subparsers.add_parser('pascal')\n pascal_parser.add_argument('pascal_path', help='Path to dataset directory (ie. /tmp/VOCdevkit).')\n\n csv_parser = subparsers.add_parser('csv')\n csv_parser.add_argument('annotations', help='Path to CSV file containing annotations for evaluation.')\n csv_parser.add_argument('classes', help='Path to a CSV file containing class label mapping.')\n\n parser.add_argument('model', help='Path to RetinaNet model.')\n parser.add_argument('--convert-model', help='Convert the model to an inference model (ie. 
the input is a training model).', action='store_true')\n parser.add_argument('--backbone', help='The backbone of the model.', default='resnet50')\n parser.add_argument('--gpu', help='Id of the GPU to use (as reported by nvidia-smi).')\n parser.add_argument('--score-threshold', help='Threshold on score to filter detections with (defaults to 0.05).', default=0.05, type=float)\n parser.add_argument('--iou-threshold', help='IoU Threshold to count for a positive detection (defaults to 0.5).', default=0.5, type=float)\n parser.add_argument('--max-detections', help='Max Detections per image (defaults to 100).', default=100, type=int)\n parser.add_argument('--save-path', help='Path for saving images with detections.')\n parser.add_argument('--image-min-side', help='Rescale the image so the smallest side is min_side.', type=int, default=800)\n parser.add_argument('--image-max-side', help='Rescale the image if the largest side is larger than max_side.', type=int, default=1333)\n\n return parser.parse_args(args)\n\n\ndef main(args=None):\n # parse arguments\n if args is None:\n args = sys.argv[1:]\n args = parse_args(args)\n\n # make sure keras is the minimum required version\n check_keras_version()\n\n # optionally choose specific GPU\n if args.gpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n keras.backend.tensorflow_backend.set_session(get_session())\n\n # make save path if it doesn't exist\n if args.save_path is not None and not os.path.exists(args.save_path):\n os.makedirs(args.save_path)\n\n # create the generator\n generator = create_generator(args)\n\n # load the model\n print('Loading model, this may take a second...')\n model = models.load_model(args.model, backbone_name=args.backbone, convert=args.convert_model)\n\n # print model summary\n print(model.summary())\n\n # start evaluation\n average_precisions = evaluate(\n generator,\n model,\n iou_threshold=args.iou_threshold,\n score_threshold=args.score_threshold,\n max_detections=args.max_detections,\n save_path=args.save_path\n )\n\n # print evaluation\n for label, average_precision in average_precisions.items():\n print(generator.label_to_name(label), '{:.4f}'.format(average_precision))\n print('mAP: {:.4f}'.format(sum(average_precisions.values()) / len(average_precisions)))\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "tensorflow.ConfigProto", "tensorflow.Session" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
metataro/sc2_imitation_learning
[ "8dca03e9be92e2d8297a4bc34248939af5c7ec3b" ]
[ "tests/test_behaviour_cloning_learner.py" ]
[ "import tensorflow as tf\nimport numpy as np\n\nfrom sc2_imitation_learning.behaviour_cloning.learner import compute_correct_predictions, compute_neg_log_probs\n\n\nclass Test(tf.test.TestCase):\n def test_compute_correct_predictions(self):\n targets = np.asarray([0, -1, 1, -1])\n predictions = np.asarray([0, -1, 0, 0])\n correct_predictions, total_predictions = compute_correct_predictions(targets, predictions)\n self.assertEqual(correct_predictions, 1)\n self.assertEqual(total_predictions, 2)\n\n def test_compute_neg_log_probs(self):\n # test without masked labels\n labels = np.asarray([0, 1])\n logits = np.asarray([[0.5, 1.5], [-1.0, 2.0]])\n log_probs = tf.math.log_softmax(logits, axis=-1)\n label_mask_value = -1\n neg_log_probs = compute_neg_log_probs(labels, logits, label_mask_value)\n\n self.assertAllClose(neg_log_probs, [-log_probs[0, labels[0]], -log_probs[1, labels[1]]])\n\n # test with masked labels\n labels = np.asarray([0, -1])\n logits = np.asarray([[0.5, 1.5], [-1.0, 2.0]])\n log_probs = tf.math.log_softmax(logits, axis=-1)\n label_mask_value = -1\n neg_log_probs = compute_neg_log_probs(labels, logits, label_mask_value)\n\n self.assertAllClose(neg_log_probs, [-log_probs[0, labels[0]], 0.])\n" ]
[ [ "numpy.asarray", "tensorflow.math.log_softmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bsaleil/lc
[ "ee7867fd2bdbbe88924300e10b14ea717ee6434b" ]
[ "tools/graphs.py" ]
[ "#!/usr/bin/env python3\n#!/usr/bin/python3\n#---------------------------------------------------------------------------\n#\n# Copyright (c) 2015, Baptiste Saleil. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# 3. The name of the author may not be used to endorse or promote\n# products derived from this software without specific prior written\n# permission.\n#\n# THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED\n# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\n# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN\n# NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n# NOT LIMITED TO PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n#---------------------------------------------------------------------------\n\n# No font with Ubuntu:\n# http://stackoverflow.com/questions/11354149/python-unable-to-render-tex-in-matplotlib\n\n# Execute compiler with stats option for all benchmarks\n# Parse output\n# Draw graphs\n\nhelp = \"\"\"\ngraphs.py - Generate graphs from compiler output\n\nUse:\n\tgraphs.py [OPTION...]\n\nOptions:\n\t-h,--help\n\t\tPrint this help.\n\t--drawall\n\t\tDraw all graphs. By default the script let the user choose the information to draw.\n\t--stdexec\n\t\tUse standard execution. Same as --exec=\"Standard;\"?\n\t--exec=\"DESCRIPTION;COMPILER_OPTION1 COMPILER_OPTION2 ...\"\n\t\tAdd execution with given compiler options. 
All given executions are drawn\n\nExample:\n\n\tgraphs.py --exec=\"Standard exec;\" --exec=\"With all tests;--all-tests\" --drawall\n\t\tDraw all graphs for both executions (Standard, and with all-tests option).\n\n\tgraphs.py --stdexec\n\t\tLet the user interactively choose the information to draw from only standard execution.\n\"\"\"\n\nimport sys\nimport io\nimport glob\nimport os\nimport subprocess\nfrom pylab import *\nfrom copy import deepcopy\nfrom matplotlib.backends.backend_pdf import PdfPages\n\n# Constants\nSCRIPT_PATH = os.path.dirname(os.path.realpath(__file__)) + '/' # Current script path\nLC_PATH = SCRIPT_PATH + '../' # Compiler path\nLC_EXEC = 'lazy-comp' # Compiler exec name\nPDF_OUTPUT = SCRIPT_PATH + 'graphs.pdf' # PDF output file\nBENCH_PATH = LC_PATH + 'benchmarks/*.scm' # Benchmarks path\nBAR_COLORS = [\"#222222\",\"#555555\",\"#888888\",\"#AAAAAA\",\"#DDDDDD\"] # Bar colors\nBAR_COLORS = [\"#BBBBBB\",\"#999999\",\"#777777\",\"#555555\",\"#333333\"] # Bar colors\n#BAR_COLORS = [\"#222222\", \"#666666\", \"#AAAAAA\", \"#EEEEEE\"] # Paper sw15\nFONT_SIZE = 9\n\n# Parser constants, must match compiler --stats output\nCSV_INDICATOR = '--'\nSTAT_SEPARATOR = ':'\nCSV_SEPARATOR = ';'\n\n# Options\nDRAW_ALL = '--drawall' # Draw all graphs\nSTD_EXEC = '--stdexec' # Add standard execution to executions list\nREF_EXEC = '--refexec' # Set reference execution for scale\nSORT_EXEC = '--sortexec' # Sort\n\nOPT_REF = False\nOPT_SORT = False\n\n# Globals\nexecs = {}\nlexecs = []\nprinthelp = False\n\n# Set current working directory to compiler path\nos.chdir(LC_PATH)\n\n# Get all benchmarks full path sorted by name\nfiles = sorted(glob.glob(BENCH_PATH))\n\n# Graph config\nplt.rc('text', usetex=True)\nplt.rc('font', family='serif')\nmatplotlib.rcParams.update({'font.size': FONT_SIZE})\n\n#-------------------------------------------------------------------------------------\n# Utils\n\ndef num(s):\n try:\n return int(s)\n except ValueError:\n return float(s)\n\ndef WARNING(s):\n\tprint('WARNING: ' + s)\n\n# Used as matplotlib formatter\ndef to_percent(y, position):\n s = str(int(y))\n # The percent symbol needs escaping in latex\n if matplotlib.rcParams['text.usetex'] is True:\n return s + r'$\\%$'\n else:\n return s + '%'\n\n#-------------------------------------------------------------------------------------\n# Main\n\ndef setargs():\n\tglobal printhelp\n\tglobal OPT_REF\n\tglobal OPT_SORT\n\tif '-h' in sys.argv or '--help' in sys.argv:\n\t\tprinthelp = True\n\tif STD_EXEC in sys.argv:\n\t\texecs['Standard'] = ''\n\tif REF_EXEC in sys.argv:\n\t\tOPT_REF = sys.argv[sys.argv.index(REF_EXEC)+1]\n\tif SORT_EXEC in sys.argv:\n\t\tOPT_SORT = sys.argv[sys.argv.index(SORT_EXEC)+1]\n\n\tfor arg in sys.argv:\n\t\tif arg.startswith('--exec='):\n\t\t\tpair = arg[7:].split(';')\n\t\t\tname = pair[0]\n\t\t\tlcargs = pair[1].split()\n\t\t\texecs[name] = lcargs\n\t\t\tlexecs.append(name)\n\ndef go():\n\tif printhelp:\n\t\tprint(help)\n\telse:\n\t\t# 1 - run benchmarks and parse compiler output\n\t\tbenchs_data = {}\n\t\tkeys = []\n\t\tfor ex in execs:\n\t\t\tks,data = runparse(execs[ex]) # TODO : donner arguments\n\t\t\tif keys == []:\n\t\t\t\tkeys = ks\n\t\t\telse:\n\t\t\t\tif len(ks) != len(keys):\n\t\t\t\t\traise Exception(\"Error\")\n\t\t\tbenchs_data[ex] = data\n\n\t\t# 2 - Draw all graphs\n\t\tdrawGraphs(keys,benchs_data)\n\t\tprint('Done!')\n\n# Run compiler with 'opts', parse output and return keys and data\ndef runparse(opts):\n\tprint(\"Running with options: '\" + ' '.join(opts) + 
\"'\")\n\tdata = {}\n\n\t# Get keys\n\tfirst = files[0]\n\tkeys = []\n\n\tfor file in files:\n\n\t\tfile_name = os.path.basename(file)\n\t\tprint(file_name + '...')\n\n\t\toptions = [LC_PATH + LC_EXEC, file, '--stats']\n\t\toptions.extend(opts) # TODO : renommer 'options'\n\t\toutput = subprocess.check_output(options).decode(\"utf-8\")\n\n\t\tbench_data = parseOutput(output)\n\n\t\tdata[file_name] = bench_data\n\n\t\t# Get keys on first result\n\t\tif file == first:\n\t\t\tfor key in bench_data:\n\t\t\t\tkeys.append(key)\n\n\treturn keys,data\n\n#-------------------------------------------------------------------------------------\n# Parser: Read stats output from compiler and return python table representation\n\n# Read 'KEY:VALUE' stat\ndef readStat(stream,data,line):\n\tstat = line.split(STAT_SEPARATOR)\n\tkey = stat[0].strip()\n\tval = num(stat[1].strip())\n\t# Store key/value in global data\n\tdata[key] = val\n\tline = stream.readline()\n\treturn line\n\n# Read CSV stat\ndef readCSV(stream,data):\n\tcsv = []\n\t# Consume CSV indicator line\n\tline = stream.readline()\n\t# Read table title\n\ttitle = line.strip()\n\tline = stream.readline()\n\t# Read table header\n\theader = line.split(CSV_SEPARATOR)\n\tfor el in header:\n\t\tcsv.append([el.strip()])\n\t# Read CSV data\n\tline = stream.readline()\n\twhile not line.startswith(CSV_INDICATOR):\n\t\tlinecsv = line.split(CSV_SEPARATOR)\n\t\tfor i in range(0,len(linecsv)):\n\t\t\tcsv[i].extend([num(linecsv[i].strip())]) ## THIS IS NOT EFFICIENT (for large CSV outputs)\n\t\tline = stream.readline()\n\t# Store key/value (title/csv) in global data\n\tdata[title] = csv\n\t# Consume CSV indicator line\n\tline = stream.readline()\n\treturn line\n\n# Return python table from compiler 'output'\ndef parseOutput(output):\n\t# Data for this benchmark\n\tdata = {}\n\t# Stream\n\tstream = io.StringIO(output)\n\t# Parse\n\tline = stream.readline()\n\twhile line:\n\t\t# CSV table\n\t\tif line.startswith(CSV_INDICATOR):\n\t\t\tline = readCSV(stream,data)\n\t\t# Key/Value line\n\t\telse:\n\t\t\tline = readStat(stream,data,line)\n\treturn data\n\n#-------------------------------------------------------------------------------------\n# Draw\n\n# Draw all graphs associated to keys using benchs_data\n# benchs_data contains all information for all benchmarks for all executions\n# ex. 
benchs_data['Standard']['array1.scm']['Closures'] to get the number of\n# closures created for benchmark array1.scm using standard exec\ndef drawGraphs(keys,benchs_data):\n\n\t# Let user choose the graph to draw (-1 or empty for all graphs)\n\tif not DRAW_ALL in sys.argv:\n\t\tsortedKeys = sorted(keys)\n\t\tprint('Keys:')\n\t\tprint('-1: ALL')\n\t\tfor i in range(0,len(sortedKeys)):\n\t\t\tprint(' ' + str(i) + ': ' + sortedKeys[i])\n\t\tinp = input('Key to draw (all) > ')\n\t\tif not inp == '':\n\t\t\tchoice = num(inp)\n\t\t\tif choice >= 0:\n\t\t\t\tkeys = [sortedKeys[choice]]\n\n\tfirstExec = list(benchs_data.keys())[0]\n\tfirstBenchmark = os.path.basename(files[0])\n\n\t# Gen pdf output file\n\tpdf = PdfPages(PDF_OUTPUT)\n\n\t# For each key\n\tfor key in keys:\n\t\t# CSV, NYI\n\t\tif type(benchs_data[firstExec][firstBenchmark][key]) == list:\n\t\t\tdrawCSV(pdf,key,benchs_data)\n\t\t# Key/Value, draw graph\n\t\telse:\n\t\t\tprint(\"Drawing '\" + key + \"'...\")\n\t\t\tdrawKeyValueGraph(pdf,key,benchs_data)\n\n\tpdf.close()\n\n## This is a specific implementation for #stubs/#versions\n## TODO: Do something generic !\ndef drawCSV(pdf,key,benchs_data):\n\tfig = plt.figure(key)\n\ttitle = key\n\tres = {}\n\n\tfor execution in benchs_data:\n\t\tfor bench in benchs_data[execution]:\n\t\t\tfor data in benchs_data[execution][bench][key]:\n\t\t\t if data[0] == '#stubs':\n\t\t\t for i in range(0,len(data)-1):\n\t\t\t \tindex = i+1\n\t\t\t \tnumvers = i\n\t\t\t \tif (numvers >= 5):\n\t\t\t \t\tnumvers = -1\n\t\t\t \tif (numvers in res):\n\t\t\t \t\tres[numvers] += data[index]\n\t\t\t \telse:\n\t\t\t \t\tres[numvers] = data[index]\n\n\txvals = []\n\tyvals = []\n\tlabels = []\n\n\tkeys = sorted(res.keys())\n\n\tfor key in keys:\n\t\tif key != 0 and key != -1:\n\t\t\txvals.append(key)\n\t\t\tyvals.append(res[key])\n\t\t\tlabels.append(key)\n\n\txvals.append(len(xvals)+1)\n\tyvals.append(res[-1])\n\tlabels.append('>=5')\n\n\tsum = 0\n\tfor val in yvals:\n\t\tsum += val\n\tfor i in range(0,len(yvals)):\n\t\tp = (yvals[i] * 100) / sum\n\t\tyvals[i] = p\n\n\tplt.title(title + ' (total=' + str(sum) + ')')\n\n\tX = np.array(xvals)\n\tY = np.array(yvals)\n\n\tbar(X, +Y, 1, facecolor=BAR_COLORS[0], edgecolor='white', label=key, zorder=10)\n\n\taxes = gca()\n\taxes.get_xaxis().set_visible(False)\n\n\t# Draw grid\n\taxes = gca()\n\taxes.grid(True, zorder=1, color=\"#707070\")\n\taxes.set_axisbelow(True) # Keep grid under the axes\n\n\tfor i in range(0,len(labels)):\n\t\ttext(X[i]+0.25, -0.0, labels[i], ha='right', va='top')\n\n\t# print(xvals)\n\t# print(yvals)\n\t# print(labels)\n\t# print(res)\n\tpdf.savefig(fig)\n\n# Draw graph for given key\n# Y: values for this key\n# X: benchmarks\ndef drawKeyValueGraph(pdf,key,benchs_data):\n\tfig = plt.figure(key,figsize=(8,3.4))\n\t#plt.title(key)\n\n\texec_ref = ''\n\n\t# Number of benchmarks\n\tfirstExec = list(benchs_data.keys())[0]\n\tn = len(benchs_data[firstExec]) + 1 # +1 for mean\n\tX = np.arange(n) # X set is [0, 1, ..., n-1]\n\n\tYs = {}\n\t# For each exec\n\tfor d in benchs_data:\n\t\tY = []\n\t\t# For each benchmark\n\t\tfor f in files:\n\t\t\tY.extend([benchs_data[d][os.path.basename(f)][key]])\n\t\t# Transforme en tableau numpy\n\t\tY = np.array(Y)\n\t\tYs[d] = Y\n\n\twidth = (1 / (len(Ys)+1)) # +1 for mean\n\n\t#----------\n\t# TODO: move to external fn\n\t# Use a reference execution. 
All values for this exec are 100%\n\t# Values for others executions are computed from this reference exec\n\tif OPT_REF:\n\t\t# Add % symbol to y values\n\t\tformatter = FuncFormatter(to_percent)\n\t\tplt.gca().yaxis.set_major_formatter(formatter)\n\n\t\texec_ref = OPT_REF # Reference execution (100%)\n\t\tY2 = deepcopy(Ys) # Deep copy of Y values\n\t\t# Set all references to 100\n\t\tfor v in range(0,len(Y2[exec_ref])):\n\t\t\tY2[exec_ref][v] = '100'\n\t\t# For each exec which is not ref exec\n\t\tcandraw = True # TODO : rename\n\t\tfor ex in Y2:\n\t\t\tif ex != exec_ref:\n\t\t\t\tfor i in range(0,len(Y2[ex])):\n\t\t\t\t\tref = Ys[exec_ref][i]\n\t\t\t\t\tcur = Ys[ex][i]\n\t\t\t\t\t# We can't compute %, warning and stop\n\t\t\t\t\tif ref == 0:\n\t\t\t\t\t\tWARNING(\"Can't draw '\" + key + \"' using a reference execution.\")\n\t\t\t\t\t\treturn\n\t\t\t\t\t# Compute % and set\n\t\t\t\t\telse:\n\t\t\t\t\t\tY2[ex][i] = (cur*100)/ref\n\t\t# Y2 are the new values to draw\n\t\tYs = Y2\n\t#----------\n\n\tfileList = files\n\tYvals = Ys\n\n\t# Sort Y values by a given execution\n\tif OPT_SORT:\n\t\tfileList,Yvals = sortByExecution(Yvals,OPT_SORT)\n\n\t# Draw grid\n\taxes = gca()\n\taxes.grid(True, zorder=1, color=\"#707070\")\n\taxes.set_axisbelow(True) # Keep grid under the axes\n\n\ti = 0\n\n\t# TODO: add to --help: the script draws the exec bar in order\n\tfor key in lexecs:\n\t\tif key != exec_ref:\n\t\t\tY = Yvals[key]\n\t\t\tcolor = BAR_COLORS[i]\n\t\t\tarith_mean = sum(Y) / float(len(Y))\n\t\t\tprint(\"MEANS:\")\n\t\t\tprint(key + \": \" + str(arith_mean))\n\t\t\tY = np.append(Y,[arith_mean]) # Add mean before drawing bars\n\t\t\tbar(X+(i*width)+0.05, +Y, width, facecolor=color, linewidth=0, label=key)\n\t\t\ti += 1\n\n\t# Hide X values\n\taxes.get_xaxis().set_visible(False)\n\n\tplt.tick_params(axis='both', which='minor')\n\n\t# # Set Y limit\n\t#l = len(str(max(Y2))) # number of digit of max value\n\t#ylim(0,max(Y2)+pow(10,l-1)) # Y is from 0 to (max + 10^i-1)\n\t# # Draw values for each bar\n\t# for x,y in zip(X,Y1):\n\t# text(x+0.4, y+0.05, '%.2f' % y, ha='center', va= 'bottom')\n\tylim(0,120)\n\txlim(0,n)\n\n\t# Draw benchmark name\n\tnames = fileList\n\tnames.append(\"ari-mean.scm\") # Add mean name\n\tfor i in range(0,len(fileList)):\n\t\ttext(X[i]+0.40, -3, os.path.basename(fileList[i])[:-4], rotation=90, ha='center', va='top')\n\n\t# Legend:\n\t# Shrink by 10% on the bottom\n\tbox = axes.get_position()\n\taxes.set_position([box.x0, box.y0 + box.height * 0.34, box.width, box.height * 0.66])\n\t# Put a legend below axis\n\tncol = int(len(lexecs)/3);\n\tlegend(loc='upper center', bbox_to_anchor=(0., 0., 1., -0.35), prop={'size':FONT_SIZE}, ncol=ncol, mode='expand', borderaxespad=0.)\n\n\t# Save to pdf\n\tpdf.savefig(fig)\n\n#-------------------------------------------------------------------------------------\n# Manage Y values\n\n# Sort Y values by values from a specific execution\ndef sortByExecution(Ys,execref):\n\n\t# Pseudo-decorate: Change data layout to allow the useof sort()\n\tdecorated = []\n\tfor fileIndex in range(0,len(files)):\n\t\tr = [] # List of results for current file\n\t\tfor execution in Ys:\n\t\t\tr.extend([execution,Ys[execution][fileIndex]])\n\t\tr.append(files[fileIndex])\n\t\tdecorated.append(r)\n\n\t# Sort\n\ti = decorated[0].index(execref)\n\tdecorated = sorted(decorated,key=lambda el: el[i+1])\n\t# Pseudo-undecorate: Restore previous layout with sorted data\n\tundecorated = {}\n\tordered_files = []\n\ti = 0;\n\twhile not decorated[0][i] in 
files:\n\t\texecution = decorated[0][i]\n\t\tvals = []\n\t\t# For each data associated to file\n\t\tfor el in decorated:\n\t\t\tvals.append(el[i+1])\n\t\t\tfilepath = el[len(el)-1]\n\t\t\tif not filepath in ordered_files:\n\t\t\t\tordered_files.append(filepath)\n\t\tundecorated[execution] = np.asarray(vals);\n\t\ti+=2\n\n\treturn(ordered_files,undecorated)\n\n#-------------------------------------------------------------------------------------\n\nsetargs()\ngo()\n" ]
[ [ "matplotlib.backends.backend_pdf.PdfPages" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
leomi7/flower-recognition
[ "f7fd167aa8000bb9c2ddd2f9dd1f2e120dff1fa8" ]
[ "extract_features.py" ]
[ "# filter warnings\nimport warnings\n\nwarnings.simplefilter(action=\"ignore\", category=FutureWarning)\n\n# keras imports\nfrom keras.applications.vgg16 import VGG16, preprocess_input\nfrom keras.applications.vgg19 import VGG19, preprocess_input\n\nfrom keras.applications.resnet50 import ResNet50, preprocess_input\nfrom keras.applications.inception_resnet_v2 import InceptionResNetV2, preprocess_input\n\nfrom keras.applications.inception_v3 import InceptionV3, preprocess_input\nfrom keras.preprocessing import image\nfrom keras.models import Model\nfrom keras.layers import Input\n\n# other imports\nfrom sklearn.preprocessing import LabelEncoder\nimport numpy as np\nimport glob\nimport cv2\nimport h5py\nimport os\nimport json\nimport datetime\nimport time\n\n# load the user configs\nwith open('conf/conf.json') as f:\n config = json.load(f)\n\n# config variables\nmodel_name = config[\"model\"]\nweights = config[\"weights\"]\ninclude_top = config[\"include_top\"]\ntrain_path = config[\"train_path\"]\nfeatures_path = config[\"features_path\"]\nlabels_path = config[\"labels_path\"]\ntest_size = config[\"test_size\"]\nresults = config[\"results\"]\nmodel_path = config[\"model_path\"]\n\n# start time\nprint(\"[STATUS] start time - {}\".format(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M\")))\nstart = time.time()\n\n# create the pretrained models\n# check for pretrained weight usage or not\n# check for top layers to be included or not\nif model_name == \"vgg16\":\n base_model = VGG16(weights=weights)\n model = Model(input=base_model.input, output=base_model.get_layer('fc1').output)\n image_size = (224, 224)\nelif model_name == \"vgg19\":\n base_model = VGG19(weights=weights)\n print(base_model.summary())\n model = Model(input=base_model.input, output=base_model.get_layer('fc1').output)\n image_size = (224, 224)\nelif model_name == \"resnet50\":\n base_model = ResNet50(weights=weights)\n print(base_model.summary())\n model = Model(input=base_model.input, output=base_model.get_layer('avg_pool').output)\n image_size = (224, 224)\nelif model_name == \"inceptionv3\":\n base_model = InceptionV3(include_top=include_top, weights=weights, input_tensor=Input(shape=(299, 299, 3)))\n model = Model(input=base_model.input, output=base_model.get_layer('mixed10').output)\n image_size = (299, 299)\nelif model_name == \"inceptionresnetv2\":\n base_model = InceptionResNetV2(include_top=include_top, weights=weights, input_tensor=Input(shape=(299, 299, 3)))\n\n model = Model(input=base_model.input, output=base_model.get_layer('conv_7b').output)\n image_size = (299, 299)\n# elif model_name == \"mobilenet\":\n# base_model = MobileNet(include_top=include_top, weights=weights, input_tensor=Input(shape=(224, 224, 3)),\n# input_shape=(224, 224, 3))\n# model = Model(input=base_model.input, output=base_model.get_layer('custom').output)\n# image_size = (224, 224)\n# elif model_name == \"xception\":\n# base_model = Xception(weights=weights)\n# model = Model(input=base_model.input, output=base_model.get_layer('avg_pool').output)\n# image_size = (299, 299)\nelse:\n base_model = None\n\nprint(\"[INFO] successfully loaded base model and model...\")\n\n# path to training dataset\ntrain_labels = os.listdir(train_path)\n\n# encode the labels\nprint(\"[INFO] encoding labels...\")\nle = LabelEncoder()\nle.fit([tl for tl in train_labels])\n\n# variables to hold features and labels\nfeatures = []\nlabels = []\n\n# loop over all the labels in the folder\ncount = 1\nfor i, label in enumerate(train_labels):\n cur_path = train_path + 
\"/\" + label\n count = 1\n for image_path in glob.glob(cur_path + \"/*.jpg\"):\n img = image.load_img(image_path, target_size=image_size)\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n feature = model.predict(x)\n flat = feature.flatten()\n features.append(flat)\n labels.append(label)\n print(\"[INFO] processed - \" + str(count))\n count += 1\n print(\"[INFO] completed label - \" + label)\n\n# encode the labels using LabelEncoder\nle = LabelEncoder()\nle_labels = le.fit_transform(labels)\n\n# get the shape of training labels\nprint(\"[STATUS] training labels: {}\".format(le_labels))\nprint(\"[STATUS] training labels shape: {}\".format(le_labels.shape))\n\n# save features and labels\nh5f_data = h5py.File(features_path, 'w')\nh5f_data.create_dataset('dataset_1', data=np.array(features))\n\nh5f_label = h5py.File(labels_path, 'w')\nh5f_label.create_dataset('dataset_1', data=np.array(le_labels))\n\nh5f_data.close()\nh5f_label.close()\n\n# save model and weights\nmodel_json = model.to_json()\nwith open(model_path + str(test_size) + \".json\", \"w\") as json_file:\n json_file.write(model_json)\n\n# save weights\nmodel.save_weights(model_path + str(test_size) + \".h5\")\nprint(\"[STATUS] saved model and weights to disk..\")\n\nprint(\"[STATUS] features and labels saved..\")\n\n# end time\nend = time.time()\nprint(\"[STATUS] end time - {}\".format(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M\")))\n" ]
[ [ "numpy.array", "sklearn.preprocessing.LabelEncoder", "numpy.expand_dims" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
omersan/LSTM_Nudging
[ "35c8f294708336a28f33b4be93a82d80f2f3d99c" ]
[ "Nudging/plotting_field.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 14 19:17:22 2020\n\n@author: suraj\n\"\"\"\n\nimport numpy as np\nfrom numpy.random import seed\nseed(1)\nfrom scipy import integrate\nfrom scipy import linalg\nimport matplotlib.pyplot as plt \nimport time as tm\nimport matplotlib.ticker as ticker\n\nfont = {'family' : 'Times New Roman',\n 'size' : 12} \nplt.rc('font', **font)\n\nimport matplotlib as mpl\nmpl.rcParams['text.usetex'] = True\nmpl.rcParams['text.latex.preamble'] = [r'\\usepackage{amsmath}']\n\n#%%\ndata = np.load('data_20_50.npz')\nt = data['t']\ntobs = data['tobs']\nT = data['T']\nX = data['X']\nutrue_8 = data['utrue']\nuobs_8 = data['uobs']\nuw_8 = data['uw']\nua_8 = data['ua']\n\ndata = np.load('data_20_100.npz')\nt = data['t']\ntobs = data['tobs']\nT = data['T']\nX = data['X']\nutrue_12 = data['utrue']\nuobs_12 = data['uobs']\nuw_12 = data['uw']\nua_12 = data['ua']\n\ndata = np.load('data_20_200.npz')\nt = data['t']\ntobs = data['tobs']\nT = data['T']\nX = data['X']\nutrue_20 = data['utrue']\nuobs_20 = data['uobs']\nuw_20 = data['uw']\nua_20 = data['ua']\n\n\ndiff_8 = utrue_8 - ua_8\ndiff_12 = utrue_12 - ua_12\ndiff_20 = utrue_20 - ua_20\n\n#%%\nvmin = -10\nvmax = 10\nfig, ax = plt.subplots(3,3,figsize=(12,7.5))\n\naxs = ax.flat\n\nfield = [utrue_8,utrue_12,utrue_20, ua_8,ua_12,ua_20, diff_8,diff_12,diff_20]\nlabel = ['True','True','True','Nudging','Nudging','Nudging','Error','Error','Error']\n\n\nfor i in range(9):\n cs = axs[i].contourf(T,X,field[i],60,cmap='coolwarm',vmin=vmin,vmax=vmax,zorder=-9)\n axs[i].set_rasterization_zorder(-1)\n axs[i].set_title(label[i])\n axs[i].set_xlabel(r'$t$')\n axs[i].set_ylabel(r'$u$')\n for c in cs.collections:\n c.set_edgecolor(\"face\")\n\nm = plt.cm.ScalarMappable(cmap='coolwarm')\nm.set_array(utrue_8)\nm.set_clim(vmin, vmax)\n#fig.colorbar(m,ax=axs[0],ticks=np.linspace(vmin, vmax, 6))\n\nfig.subplots_adjust(bottom=0.2)\ncbar_ax = fig.add_axes([0.25, -0.02, 0.5, 0.025])\nfig.colorbar(m, cax=cbar_ax,orientation='horizontal')\n\nfig.tight_layout()\nplt.show() \nfig.savefig('field_plot_nudging_tau.pdf',bbox_inches='tight')\nfig.savefig('field_plot_nudging_tau.eps',bbox_inches='tight')\nfig.savefig('field_plot_nudging_tau.png',bbox_inches='tight',dpi=300)\n\n" ]
[ [ "numpy.random.seed", "matplotlib.pyplot.rc", "matplotlib.pyplot.subplots", "numpy.load", "matplotlib.pyplot.show", "matplotlib.pyplot.cm.ScalarMappable" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gddickinson/VolumeSlider
[ "bdde0e6e18741f1b11481fe9ae7dba6c0f571acc" ]
[ "volumeSlider/tiffLoader.py" ]
[ "import numpy as np\nfrom qtpy import QtWidgets, QtCore, QtGui\nimport flika\nfrom flika import global_vars as g\nfrom flika.utils.io import tifffile\nfrom flika.process.file_ import get_permutation_tuple\nfrom flika.utils.misc import open_file_gui\nfrom distutils.version import StrictVersion\nfrom flika.window import Window\n\nflika_version = flika.__version__\nif StrictVersion(flika_version) < StrictVersion('0.2.23'):\n from flika.process.BaseProcess import BaseProcess, SliderLabel, CheckBox, ComboBox, BaseProcess_noPriorWindow, WindowSelector, FileSelector\nelse:\n from flika.utils.BaseProcess import BaseProcess, SliderLabel, CheckBox, ComboBox, BaseProcess_noPriorWindow, WindowSelector, FileSelector\n\n#flika.start_flika()\n\ndef openTiff(filename):\n Tiff = tifffile.TiffFile(str(filename))\n\n A = Tiff.asarray()\n B = []\n C = []\n Tiff.close()\n axes = [tifffile.AXES_LABELS[ax] for ax in Tiff.series[0].axes]\n print(axes)\n\n if set(axes) == set(['time', 'depth', 'height', 'width']): # single channel, multi-volume\n target_axes = ['time', 'depth', 'width', 'height']\n perm = get_permutation_tuple(axes, target_axes)\n A = np.transpose(A, perm)\n nScans, nFrames, x, y = A.shape\n A = A.reshape(nScans*nFrames,x,y)\n #newWindow = Window(A,'Loaded Tiff')\n \n elif set(axes) == set(['series', 'height', 'width']): # single channel, single-volume\n target_axes = ['series', 'width', 'height']\n perm = get_permutation_tuple(axes, target_axes)\n A = np.transpose(A, perm)\n nFrames, x, y = A.shape\n A = A.reshape(nFrames,x,y)\n #newWindow = Window(A,'Loaded Tiff')\n \n elif set(axes) == set(['time', 'height', 'width']): # single channel, single-volume\n target_axes = ['time', 'width', 'height']\n perm = get_permutation_tuple(axes, target_axes)\n A = np.transpose(A, perm)\n nFrames, x, y = A.shape\n A = A.reshape(nFrames,x,y)\n #newWindow = Window(A,'Loaded Tiff')\n \n elif set(axes) == set(['time', 'depth', 'channel', 'height', 'width']): # multi-channel, multi-volume\n target_axes = ['channel','time','depth', 'width', 'height']\n perm = get_permutation_tuple(axes, target_axes)\n A = np.transpose(A, perm)\n B = A[0]\n C = A[1]\n\n n1Scans, n1Frames, x1, y1 = B.shape\n n2Scans, n2Frames, x2, y2 = C.shape\n\n B = B.reshape(n1Scans*n1Frames,x1,y1)\n C = C.reshape(n2Scans*n2Frames,x2,y2)\n\n #channel_1 = Window(B,'Channel 1')\n #channel_2 = Window(C,'Channel 2')\n \n \n elif set(axes) == set(['depth', 'channel', 'height', 'width']): # multi-channel, single volume\n target_axes = ['channel','depth', 'width', 'height']\n perm = get_permutation_tuple(axes, target_axes)\n A = np.transpose(A, perm)\n B = A[0]\n C = A[1]\n\n #channel_1 = Window(B,'Channel 1')\n #channel_2 = Window(C,'Channel 2')\n\n \n elif set(axes) == set(['time', 'channel', 'height', 'width']): # multi-channel, single volume\n target_axes = ['channel','time', 'width', 'height']\n perm = get_permutation_tuple(axes, target_axes)\n A = np.transpose(A, perm)\n B = A[0]\n C = A[1]\n\n #channel_1 = Window(B,'Channel 1')\n #channel_2 = Window(C,'Channel 2')\n \n return A, B, C\n\n\n\n#A, _, _ = openTiff(fileName)\n\n#newWindow = Window(A,'Loaded Tiff') \n\n\n\n " ]
[ [ "numpy.transpose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MacuXavier/S1_ML_Practices
[ "4cd29c5537c95cfd940e13e741db8d363c57ab1d" ]
[ "CIFAR10/train.py" ]
[ "import os\nimport os.path as osp\n# third-party packages\nimport pyprind\nimport glog as log\n# pytorch related packages\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.transforms as T\nimport torchvision.datasets as datasets\nimport argparse\n# Model Definition\nfrom model import Net\nfrom cifar_dataset import CIFAR10Dataset\nimport torch.utils.data\n# Training Part\n# Please fill the training part based on the given model/dataloader/optimizer/criterion\n\n\ndef train(args, model, train_loader, optimizer, criterion, epoch):\n model.train()\n for batch_idx, data in enumerate(\n pyprind.prog_bar(\n train_loader,\n title=\"[Epoch {}: Training]\".format(epoch),\n width=40,\n )):\n\n def one_iteration(model, data, target, criterion):\n '''\n Please fill the training iteration with given components:\n model: our provided convolutional neural network\n data: Images\n target: category of the images\n criterion: the loss function\n '''\n output = F.softmax(model(data), dim=1)\n loss = criterion(output, target)\n loss.backward()\n\n data, target = data['image'], data['label']\n optimizer.zero_grad()\n one_iteration(model, data, target, criterion)\n optimizer.step()\n\n\n# Testing Part\ndef test(args, model, test_loader, epoch):\n model.eval()\n correct = 0\n with torch.no_grad():\n for data in test_loader:\n data, target = data['image'], data['label']\n output = model(data)\n pred = output.max(\n 1, keepdim=True)[1] # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n log.info('Test set: Accuracy: {}/{} ({:.0f}%)\\n'.format(\n correct, len(test_loader.dataset),\n 100. * correct / len(test_loader.dataset)))\n\n\ndef main():\n # Training settings\n parser = argparse.ArgumentParser(description='PyTorch MNIST Example')\n parser.add_argument('--batch-size',\n type=int,\n default=64,\n metavar='N',\n help='input batch size for training (default: 64)')\n parser.add_argument('--epochs',\n type=int,\n default=10,\n metavar='N',\n help='number of epochs to train (default: 10)')\n parser.add_argument('--lr',\n type=float,\n default=0.01,\n metavar='LR',\n help='learning rate (default: 0.01)')\n parser.add_argument('--momentum',\n type=float,\n default=0.9,\n metavar='M',\n help='SGD momentum (default: 0.9)')\n parser.add_argument('--seed',\n type=int,\n default=1,\n metavar='S',\n help='random seed (default: 1)')\n parser.add_argument('--resume', type=str, default=None, help=\"Model Path.\")\n parser.add_argument('--model-name',\n type=str,\n default='char_cnn.pt',\n help='Trained model name (defaut: char_cnn.pt).')\n args = parser.parse_args()\n torch.manual_seed(args.seed)\n\n # Fill the data directory: [train] and [test] should be at this path:\n data_dir = '/home/denglong/workspace/processed/processed'\n # data_dir = 'D:\\document\\google_download\\homework3\\homework3-students\\cifar10\\cifar-10-batches-py'\n # write your own dataloader to read images and targets from [data_dir]\n # Then initialize your own [train_loader], [val_loader] and [num_classes]\n # you can check torch.utils.data.DataLoader for help\n ############################\n num_classes = 10 # number of categories\n train_loader = torch.utils.data.DataLoader(\n CIFAR10Dataset('./cifar-10-batches-py', split='train'),\n batch_size=args.batch_size,\n shuffle=True,\n num_workers=2) # training set loader\n test_loader = torch.utils.data.DataLoader(\n CIFAR10Dataset('./cifar-10-batches-py', split='test'),\n batch_size=args.batch_size,\n shuffle=True,\n 
num_workers=2) # testing set loader\n #############################\n # get the model definition\n model = Net(num_classes=num_classes)\n if args.resume:\n model.load_state_dict(torch.load(args.resume))\n lr = args.lr\n optimizer = torch.optim.SGD(\n model.parameters(),\n lr=args.lr,\n momentum=args.momentum,\n weight_decay=0.001,\n nesterov=True,\n )\n criterion = nn.CrossEntropyLoss()\n\n for epoch in range(1, args.epochs + 1):\n lr_schedule = (epoch >= 0.8 * (args.epochs))\n \"\"\"\n replace False with the condition for lr_schedule\n we want to reduce the learning rate at the 80% of the training process\n \"\"\"\n if lr_schedule:\n lr = lr * 0.1\n print(\"[Learning Rate] {}\".format(lr))\n \"\"\"\n schedule the learning rate used in optimizer\n \"\"\"\n\n train(args, model, train_loader, optimizer, criterion, epoch)\n test(args, model, test_loader, epoch=epoch)\n\n # saving the trained model and category names\n result = {\n 'state_dict': model.state_dict(),\n # 'classes': trainset.classes,\n # 'class_to_idx': trainset.class_to_idx,\n }\n torch.save(result, \"char_cnn.pt\")\n print(\"Trained Model saved to: {}\".format('./char_cnn.pt'))\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.load", "torch.manual_seed", "torch.no_grad", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ankeshanand/Rainbow
[ "cb89a0f2794381973daaa9b66cd8924111291b78" ]
[ "memory.py" ]
[ "from collections import namedtuple\nimport numpy as np\nimport torch\n\nTransition = namedtuple('Transition', ('timestep', 'state', 'action', 'reward', 'nonterminal'))\nblank_trans = Transition(0, torch.zeros(84, 84, dtype=torch.uint8), None, 0, False)\n\n\n# Segment tree data structure where parent node values are sum/max of children node values\nclass SegmentTree():\n def __init__(self, size):\n self.index = 0\n self.size = size\n self.full = False # Used to track actual capacity\n self.sum_tree = np.zeros((2 * size - 1,),\n dtype=np.float32) # Initialise fixed size tree with all (priority) zeros\n self.data = np.array([None] * size) # Wrap-around cyclic buffer\n self.max = 1 # Initial max value to return (1 = 1^ω)\n\n # Propagates value up tree given a tree index\n def _propagate(self, index, value):\n parent = (index - 1) // 2\n left, right = 2 * parent + 1, 2 * parent + 2\n self.sum_tree[parent] = self.sum_tree[left] + self.sum_tree[right]\n if parent != 0:\n self._propagate(parent, value)\n\n # Updates value given a tree index\n def update(self, index, value):\n self.sum_tree[index] = value # Set new value\n self._propagate(index, value) # Propagate value\n self.max = max(value, self.max)\n\n def append(self, data, value):\n self.data[self.index] = data # Store data in underlying data structure\n self.update(self.index + self.size - 1, value) # Update tree\n self.index = (self.index + 1) % self.size # Update index\n self.full = self.full or self.index == 0 # Save when capacity reached\n self.max = max(value, self.max)\n\n # Searches for the location of a value in sum tree\n def _retrieve(self, index, value):\n left, right = 2 * index + 1, 2 * index + 2\n if left >= len(self.sum_tree):\n return index\n elif value <= self.sum_tree[left]:\n return self._retrieve(left, value)\n else:\n return self._retrieve(right, value - self.sum_tree[left])\n\n # Searches for a value in sum tree and returns value, data index and tree index\n def find(self, value):\n index = self._retrieve(0, value) # Search for index of item from root\n data_index = index - self.size + 1\n return (self.sum_tree[index], data_index, index) # Return value, data index, tree index\n\n # Returns data given a data index\n def get(self, data_index):\n return self.data[data_index % self.size]\n\n def total(self):\n return self.sum_tree[0]\n\n\nclass ReplayMemory():\n def __init__(self, args, capacity):\n self.device = args.device\n self.capacity = capacity\n self.history = args.history_length\n self.discount = args.discount\n self.n = args.multi_step\n self.priority_weight = args.priority_weight # Initial importance sampling weight β, annealed to 1 over course of training\n self.priority_exponent = args.priority_exponent\n self.t = 0 # Internal episode timestep counter\n self.transitions = SegmentTree(\n capacity) # Store transitions in a wrap-around cyclic buffer within a sum tree for querying priorities\n\n # Adds state and action at time t, reward and terminal at time t + 1\n def append(self, state, action, reward, terminal):\n state = state[-1].mul(255).to(dtype=torch.uint8,\n device=torch.device('cpu')) # Only store last frame and discretise to save memory\n self.transitions.append(Transition(self.t, state, action, reward, not terminal),\n self.transitions.max) # Store new transition with maximum priority\n self.t = 0 if terminal else self.t + 1 # Start new episodes with t = 0\n\n # Returns a transition with blank states where appropriate\n def _get_transition(self, idx):\n transition = np.array([None] * (self.history + 
self.n))\n transition[self.history - 1] = self.transitions.get(idx)\n for t in range(self.history - 2, -1, -1): # e.g. 2 1 0\n if transition[t + 1].timestep == 0:\n transition[t] = blank_trans # If future frame has timestep 0\n else:\n transition[t] = self.transitions.get(idx - self.history + 1 + t)\n for t in range(self.history, self.history + self.n): # e.g. 4 5 6\n if transition[t - 1].nonterminal:\n transition[t] = self.transitions.get(idx - self.history + 1 + t)\n else:\n transition[t] = blank_trans # If prev (next) frame is terminal\n return transition\n\n def _get_sample_from_idx(self, idx):\n # Retrieve all required transition data (from t - h to t + n)\n transition = self._get_transition(idx)\n # Create un-discretised state and nth next state\n state = torch.stack([trans.state for trans in transition[:self.history]]).to(dtype=torch.float32,\n device=self.device).div_(255)\n next_state = torch.stack([trans.state for trans in transition[self.n:self.n + self.history]]).to(\n dtype=torch.float32, device=self.device).div_(255)\n # Discrete action to be used as index\n action = torch.tensor([transition[self.history - 1].action], dtype=torch.int64, device=self.device)\n # Calculate truncated n-step discounted return R^n = Σ_k=0->n-1 (γ^k)R_t+k+1 (note that invalid nth next states have reward 0)\n R = torch.tensor([sum(self.discount ** n * transition[self.history + n - 1].reward for n in range(self.n))],\n dtype=torch.float32, device=self.device)\n # Mask for non-terminal nth next states\n nonterminal = torch.tensor([transition[self.history + self.n - 1].nonterminal], dtype=torch.float32,\n device=self.device)\n\n return state, action, R, next_state, nonterminal\n\n # Returns a valid sample from a segment\n def _get_sample_from_segment(self, segment, i):\n valid = False\n while not valid:\n sample = np.random.uniform(i * segment,\n (i + 1) * segment) # Uniformly sample an element from within a segment\n prob, idx, tree_idx = self.transitions.find(\n sample) # Retrieve sample from tree with un-normalised probability\n # Resample if transition straddled current index or probablity 0\n if (self.transitions.index - idx) % self.capacity > self.n and (\n idx - self.transitions.index) % self.capacity >= self.history and prob != 0:\n valid = True # Note that conditions are valid but extra conservative around buffer index 0\n\n state, action, R, next_state, nonterminal = self._get_sample_from_idx(idx)\n return prob, idx, tree_idx, state, action, R, next_state, nonterminal\n\n def sample(self, batch_size):\n p_total = self.transitions.total() # Retrieve sum of all priorities (used to create a normalised probability distribution)\n segment = p_total / batch_size # Batch size number of segments, based on sum over all probabilities\n batch = [self._get_sample_from_segment(segment, i) for i in range(batch_size)] # Get batch of valid samples\n probs, idxs, tree_idxs, states, actions, returns, next_states, nonterminals = zip(*batch)\n states, next_states, = torch.stack(states), torch.stack(next_states)\n actions, returns, nonterminals = torch.cat(actions), torch.cat(returns), torch.stack(nonterminals)\n probs = np.array(probs, dtype=np.float32) / p_total # Calculate normalised probabilities\n capacity = self.capacity if self.transitions.full else self.transitions.index\n weights = (capacity * probs) ** -self.priority_weight # Compute importance-sampling weights w\n weights = torch.tensor(weights / weights.max(), dtype=torch.float32,\n device=self.device) # Normalise by max importance-sampling weight from 
batch\n return tree_idxs, states, actions, returns, next_states, nonterminals, weights\n\n def update_priorities(self, idxs, priorities):\n priorities = np.power(priorities, self.priority_exponent)\n [self.transitions.update(idx, priority) for idx, priority in zip(idxs, priorities)]\n\n # Set up internal state for iterator\n def __iter__(self):\n self.current_idx = 0\n return self\n\n # Return valid states for validation\n def __next__(self):\n if self.current_idx == self.capacity:\n raise StopIteration\n # Create stack of states\n state_stack = [None] * self.history\n state_stack[-1] = self.transitions.data[self.current_idx].state\n prev_timestep = self.transitions.data[self.current_idx].timestep\n for t in reversed(range(self.history - 1)):\n if prev_timestep == 0:\n state_stack[t] = blank_trans.state # If future frame has timestep 0\n else:\n state_stack[t] = self.transitions.data[self.current_idx + t - self.history + 1].state\n prev_timestep -= 1\n state = torch.stack(state_stack, 0).to(dtype=torch.float32, device=self.device).div_(\n 255) # Agent will turn into batch\n self.current_idx += 1\n return state\n\n def clear(self):\n self.transitions = SegmentTree(self.capacity)\n\n def get_all_transitions(self):\n batch = []\n for i in range(self.capacity):\n batch.append(self._get_sample_from_idx(i))\n states, actions, returns, next_states, nonterminals = zip(*batch)\n states, next_states, = torch.stack(states), torch.stack(next_states)\n actions, returns, nonterminals = torch.cat(actions), torch.cat(returns), torch.stack(nonterminals)\n return states, actions, returns, next_states, nonterminals\n" ]
[ [ "numpy.power", "torch.zeros", "torch.cat", "torch.tensor", "torch.stack", "numpy.random.uniform", "numpy.array", "numpy.zeros", "torch.device" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jni/useful-histories
[ "0c75003e4fa3a80d4bf7281314cdf6e363d3be56" ]
[ "climate-change-model-test.py" ]
[ "# IPython log file\n\n\nT = pd.read_csv('bundoora-temp.csv')\nT.head()\nT.rename(columns={'Mean maximum temperature (°C)':'Temperature'},\n inplace=True)\n \nT['Date'] = T['Year'] + (T['Month'] - 0.5) / 12\ndates = T['Date']\ntemps = T['Temperature']\ndef predicted_temperature(parameters, time):\n t0, w, A, omega, phi = parameters\n return t0 + w*time + A * np.sin(omega*time + phi)\ndef prediction_error(parameters, time, true_temperature):\n return true_temperature - predicted_temperature(parameters, time)\ndef predicted_temperature_null(parameters, time):\n t0, w, A, omega, phi = parameters\n return t0 + A * np.sin(omega*time + phi)\nt0 = np.mean(temps)\nw = 0\nA = np.max(temps) - np.min(temps)\nomega = np.pi * 2\nphi = np.pi / 2\n\nparams0 = [t0, w, A, omega, phi]\nparams, success = optimize.leastsq(prediction_error, params0,\n args=(dates, temps))\n \nfrom scipy import optimize\nparams, success = optimize.leastsq(prediction_error, params0,\n args=(dates, temps))\n \nsuccess\ndef prediction_error_null(parameters, time, true_temperature):\n return true_temperature - predicted_temperature_null(parameters, time)\n\nparamsnull, successnull = optimize.leastsq(prediction_error_null,\n params0,\n args=(dates, temps))\n \nsuccessnull\nfrom scipy import stats\npredicted = predicted_temperature(params, dates)\npredicted_null = predicted_temperature_null(params, dates)\nchisq1 = (temps - predicted)**2 / predicted\nchisq0 = (temps - predicted_null)**2 / predicted_null\nchisqdiff = chisq1 - chisq0\nchisqdiff\nchisq1 = np.sum((temps - predicted)**2 / predicted)\nchisq0 = np.sum((temps - predicted_null)**2 / predicted_null)\nchisqdiff = chisq1 - chisq0\nchisqdiff\nchisq_dof = len(temps)\nchisq_dof\nchisq1\nchisq2\nchisq0\nplt.plot(dates, predicted_null)\nimport statsmodels\nfrom statsmodels import stats\nnp.mean((temps - predicted)**2)\nplt.plot(dates, predicted)\nparams\nplt.plot(dates, temps)\ndef predicted_temperature_null(parameters, time):\n t0, A, omega, phi = parameters\n return t0 + A * np.sin(omega*time + phi)\ndef prediction_error_null(parameters, time, true_temperature):\n return true_temperature - predicted_temperature_null(parameters, time)\n\nparamsnull, successnull = optimize.leastsq(prediction_error_null,\n [params0[0]] + params0[2:],\n args=(dates, temps))\n \nsuccessnull\npredicted_null = predicted_temperature_null(paramsnull, dates)\nplt.plot(dates, temps)\nplt.plot(dates, predicted_null)\nnp.mean((temps - predicted_null)**2)\nnp.mean((temps - predicted)**2)\nssdiff = 401 * (_48 - _49)\nssdiff\nfrom scipy import stats\nstats.gamma\nstats.chi2\nget_ipython().magic('pinfo stats.chisquare')\nget_ipython().set_next_input('c2 = stats.chi2');get_ipython().magic('pinfo stats.chi2')\nc2 = stats.chi2.sf(ssdiff, 401)\nc2\nc2 = stats.chi2.sf(ssdiff, 4)\nc2\n" ]
[ [ "scipy.optimize.leastsq", "scipy.stats.chi2.sf" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Oktai15/NeMo
[ "5b6dd3850129898be47cf0d65587897ec45a5b59" ]
[ "nemo/collections/asr/models/ctc_models.py" ]
[ "# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport copy\nimport json\nimport os\nimport tempfile\nfrom math import ceil\nfrom typing import Dict, List, Optional, Union\n\nimport onnx\nimport torch\nfrom omegaconf import DictConfig, OmegaConf, open_dict\nfrom pytorch_lightning import Trainer\n\nfrom nemo.collections.asr.data import audio_to_text_dataset\nfrom nemo.collections.asr.data.audio_to_text_dali import DALIOutputs\nfrom nemo.collections.asr.losses.ctc import CTCLoss\nfrom nemo.collections.asr.metrics.wer import WER\nfrom nemo.collections.asr.models.asr_model import ASRModel\nfrom nemo.collections.asr.parts.perturb import process_augmentations\nfrom nemo.core.classes.common import PretrainedModelInfo, typecheck\nfrom nemo.core.classes.exportable import Exportable\nfrom nemo.core.neural_types import AudioSignal, LabelsType, LengthsType, LogprobsType, NeuralType, SpectrogramType\nfrom nemo.utils import logging\nfrom nemo.utils.export_utils import attach_onnx_to_onnx\n\n__all__ = ['EncDecCTCModel', 'JasperNet', 'QuartzNet']\n\n\nclass EncDecCTCModel(ASRModel, Exportable):\n \"\"\"Base class for encoder decoder CTC-based models.\"\"\"\n\n @classmethod\n def list_available_models(cls) -> Optional[PretrainedModelInfo]:\n \"\"\"\n This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud.\n\n Returns:\n List of available pre-trained models.\n \"\"\"\n result = []\n model = PretrainedModelInfo(\n pretrained_model_name=\"QuartzNet15x5Base-En\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/QuartzNet15x5Base-En.nemo\",\n description=\"QuartzNet15x5 model trained on six datasets: LibriSpeech, Mozilla Common Voice (validated clips from en_1488h_2019-12-10), WSJ, Fisher, Switchboard, and NSC Singapore English. It was trained with Apex/Amp optimization level O1 for 600 epochs. The model achieves a WER of 3.79% on LibriSpeech dev-clean, and a WER of 10.05% on dev-other.\",\n )\n result.append(model)\n\n model = PretrainedModelInfo(\n pretrained_model_name=\"QuartzNet15x5Base-Zh\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/QuartzNet15x5Base-Zh.nemo\",\n description=\"QuartzNet15x5 model trained on ai-shell2 Mandarin Chinese dataset.\",\n )\n result.append(model)\n\n model = PretrainedModelInfo(\n pretrained_model_name=\"QuartzNet5x5LS-En\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/QuartzNet5x5LS-En.nemo\",\n description=\"QuartzNet5x5 model trained on LibriSpeech dataset only. 
The model achieves a WER of 5.37% on LibriSpeech dev-clean, and a WER of 15.69% on dev-other.\",\n )\n result.append(model)\n\n model = PretrainedModelInfo(\n pretrained_model_name=\"QuartzNet15x5NR-En\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/QuartzNet15x5NR-En.nemo\",\n description=\"QuartzNet15x5Base-En was finetuned with RIR and noise augmentation to make it more robust to noise. This model should be preferred for noisy speech transcription. This model achieves a WER of 3.96% on LibriSpeech dev-clean and a WER of 10.14% on dev-other.\",\n )\n result.append(model)\n\n model = PretrainedModelInfo(\n pretrained_model_name=\"Jasper10x5Dr-En\",\n location=\"https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/Jasper10x5Dr-En.nemo\",\n description=\"JasperNet10x5Dr model trained on six datasets: LibriSpeech, Mozilla Common Voice (validated clips from en_1488h_2019-12-10), WSJ, Fisher, Switchboard, and NSC Singapore English. It was trained with Apex/Amp optimization level O1. The model achieves a WER of 3.37% on LibriSpeech dev-clean, 9.81% on dev-other.\",\n )\n result.append(model)\n return result\n\n def __init__(self, cfg: DictConfig, trainer: Trainer = None):\n # Get global rank and total number of GPU workers for IterableDataset partitioning, if applicable\n self.global_rank = 0\n self.world_size = 1\n self.local_rank = 0\n if trainer is not None:\n self.global_rank = (trainer.node_rank * trainer.num_gpus) + trainer.local_rank\n self.world_size = trainer.num_nodes * trainer.num_gpus\n self.local_rank = trainer.local_rank\n\n super().__init__(cfg=cfg, trainer=trainer)\n self.preprocessor = EncDecCTCModel.from_config_dict(self._cfg.preprocessor)\n self.encoder = EncDecCTCModel.from_config_dict(self._cfg.encoder)\n\n with open_dict(self._cfg):\n if \"params\" in self._cfg.decoder:\n if \"feat_in\" not in self._cfg.decoder.params or (\n not self._cfg.decoder.params.feat_in and hasattr(self.encoder, '_feat_out')\n ):\n self._cfg.decoder.params.feat_in = self.encoder._feat_out\n if \"feat_in\" not in self._cfg.decoder.params or not self._cfg.decoder.params.feat_in:\n raise ValueError(\"param feat_in of the decoder's config is not set!\")\n else:\n if \"feat_in\" not in self._cfg.decoder or (\n not self._cfg.decoder.feat_in and hasattr(self.encoder, '_feat_out')\n ):\n self._cfg.decoder.feat_in = self.encoder._feat_out\n if \"feat_in\" not in self._cfg.decoder or not self._cfg.decoder.feat_in:\n raise ValueError(\"param feat_in of the decoder's config is not set!\")\n\n self.decoder = EncDecCTCModel.from_config_dict(self._cfg.decoder)\n\n self.loss = CTCLoss(\n num_classes=self.decoder.num_classes_with_blank - 1,\n zero_infinity=True,\n reduction=self._cfg.get(\"ctc_reduction\", \"mean_batch\"),\n )\n\n if hasattr(self._cfg, 'spec_augment') and self._cfg.spec_augment is not None:\n self.spec_augmentation = EncDecCTCModel.from_config_dict(self._cfg.spec_augment)\n else:\n self.spec_augmentation = None\n\n # Setup metric objects\n self._wer = WER(\n vocabulary=self.decoder.vocabulary,\n batch_dim_index=0,\n use_cer=self._cfg.get('use_cer', False),\n ctc_decode=True,\n dist_sync_on_step=True,\n log_prediction=self._cfg.get(\"log_prediction\", False),\n )\n\n @torch.no_grad()\n def transcribe(self, paths2audio_files: List[str], batch_size: int = 4, logprobs=False) -> List[str]:\n \"\"\"\n Uses greedy decoding to transcribe audio files. 
Use this method for debugging and prototyping.\n\n Args:\n\n paths2audio_files: (a list) of paths to audio files. \\\n Recommended length per file is between 5 and 25 seconds. \\\n But it is possible to pass a few hours long file if enough GPU memory is available.\n batch_size: (int) batch size to use during inference. \\\n Bigger will result in better throughput performance but would use more memory.\n logprobs: (bool) pass True to get log probabilities instead of transcripts.\n\n Returns:\n\n A list of transcriptions (or raw log probabilities if logprobs is True) in the same order as paths2audio_files\n \"\"\"\n if paths2audio_files is None or len(paths2audio_files) == 0:\n return {}\n # We will store transcriptions here\n hypotheses = []\n # Model's mode and device\n mode = self.training\n device = next(self.parameters()).device\n dither_value = self.preprocessor.featurizer.dither\n pad_to_value = self.preprocessor.featurizer.pad_to\n\n try:\n self.preprocessor.featurizer.dither = 0.0\n self.preprocessor.featurizer.pad_to = 0\n # Switch model to evaluation mode\n self.eval()\n logging_level = logging.get_verbosity()\n logging.set_verbosity(logging.WARNING)\n # Work in tmp directory - will store manifest file there\n with tempfile.TemporaryDirectory() as tmpdir:\n with open(os.path.join(tmpdir, 'manifest.json'), 'w') as fp:\n for audio_file in paths2audio_files:\n entry = {'audio_filepath': audio_file, 'duration': 100000, 'text': 'nothing'}\n fp.write(json.dumps(entry) + '\\n')\n\n config = {'paths2audio_files': paths2audio_files, 'batch_size': batch_size, 'temp_dir': tmpdir}\n\n temporary_datalayer = self._setup_transcribe_dataloader(config)\n for test_batch in temporary_datalayer:\n logits, logits_len, greedy_predictions = self.forward(\n input_signal=test_batch[0].to(device), input_signal_length=test_batch[1].to(device)\n )\n if logprobs:\n # dump log probs per file\n for idx in range(logits.shape[0]):\n hypotheses.append(logits[idx][: logits_len[idx]])\n else:\n hypotheses += self._wer.ctc_decoder_predictions_tensor(greedy_predictions)\n del test_batch\n finally:\n # set mode back to its original value\n self.train(mode=mode)\n self.preprocessor.featurizer.dither = dither_value\n self.preprocessor.featurizer.pad_to = pad_to_value\n logging.set_verbosity(logging_level)\n return hypotheses\n\n def change_vocabulary(self, new_vocabulary: List[str]):\n \"\"\"\n Changes vocabulary used during CTC decoding process. Use this method when fine-tuning on from pre-trained model.\n This method changes only decoder and leaves encoder and pre-processing modules unchanged. For example, you would\n use it if you want to use pretrained encoder when fine-tuning on a data in another language, or when you'd need\n model to learn capitalization, punctuation and/or special characters.\n\n If new_vocabulary == self.decoder.vocabulary then nothing will be changed.\n\n Args:\n\n new_vocabulary: list with new vocabulary. Must contain at least 2 elements. Typically, \\\n this is target alphabet.\n\n Returns: None\n\n \"\"\"\n if self.decoder.vocabulary == new_vocabulary:\n logging.warning(f\"Old {self.decoder.vocabulary} and new {new_vocabulary} match. Not changing anything.\")\n else:\n if new_vocabulary is None or len(new_vocabulary) == 0:\n raise ValueError(f'New vocabulary must be non-empty list of chars. 
But I got: {new_vocabulary}')\n decoder_config = self.decoder.to_config_dict()\n new_decoder_config = copy.deepcopy(decoder_config)\n if 'vocabulary' in new_decoder_config:\n new_decoder_config['vocabulary'] = new_vocabulary\n new_decoder_config['num_classes'] = len(new_vocabulary)\n else:\n new_decoder_config['params']['vocabulary'] = new_vocabulary\n new_decoder_config['params']['num_classes'] = len(new_vocabulary)\n\n del self.decoder\n self.decoder = EncDecCTCModel.from_config_dict(new_decoder_config)\n del self.loss\n self.loss = CTCLoss(\n num_classes=self.decoder.num_classes_with_blank - 1,\n zero_infinity=True,\n reduction=self._cfg.get(\"ctc_reduction\", \"mean_batch\"),\n )\n self._wer = WER(\n vocabulary=self.decoder.vocabulary,\n batch_dim_index=0,\n use_cer=self._cfg.get('use_cer', False),\n ctc_decode=True,\n dist_sync_on_step=True,\n log_prediction=self._cfg.get(\"log_prediction\", False),\n )\n\n # Update config\n OmegaConf.set_struct(self._cfg.decoder, False)\n self._cfg.decoder = new_decoder_config\n OmegaConf.set_struct(self._cfg.decoder, True)\n\n logging.info(f\"Changed decoder to output to {self.decoder.vocabulary} vocabulary.\")\n\n def _setup_dataloader_from_config(self, config: Optional[Dict]):\n if 'augmentor' in config:\n augmentor = process_augmentations(config['augmentor'])\n else:\n augmentor = None\n\n shuffle = config['shuffle']\n device = 'gpu' if torch.cuda.is_available() else 'cpu'\n if config.get('use_dali', False):\n device_id = self.local_rank if device == 'gpu' else None\n dataset = audio_to_text_dataset.get_dali_char_dataset(\n config=config,\n shuffle=shuffle,\n device_id=device_id,\n global_rank=self.global_rank,\n world_size=self.world_size,\n preprocessor_cfg=self._cfg.preprocessor,\n )\n return dataset\n\n # Instantiate tarred dataset loader or normal dataset loader\n if config.get('is_tarred', False):\n if ('tarred_audio_filepaths' in config and config['tarred_audio_filepaths'] is None) or (\n 'manifest_filepath' in config and config['manifest_filepath'] is None\n ):\n logging.warning(\n \"Could not load dataset as `manifest_filepath` was None or \"\n f\"`tarred_audio_filepaths` is None. Provided config : {config}\"\n )\n return None\n\n shuffle_n = config.get('shuffle_n', 4 * config['batch_size']) if shuffle else 0\n dataset = audio_to_text_dataset.get_tarred_char_dataset(\n config=config,\n shuffle_n=shuffle_n,\n global_rank=self.global_rank,\n world_size=self.world_size,\n augmentor=augmentor,\n )\n shuffle = False\n else:\n if 'manifest_filepath' in config and config['manifest_filepath'] is None:\n logging.warning(f\"Could not load dataset as `manifest_filepath` was None. 
Provided config : {config}\")\n return None\n\n dataset = audio_to_text_dataset.get_char_dataset(config=config, augmentor=augmentor)\n\n return torch.utils.data.DataLoader(\n dataset=dataset,\n batch_size=config['batch_size'],\n collate_fn=dataset.collate_fn,\n drop_last=config.get('drop_last', False),\n shuffle=shuffle,\n num_workers=config.get('num_workers', 0),\n pin_memory=config.get('pin_memory', False),\n )\n\n def setup_training_data(self, train_data_config: Optional[Union[DictConfig, Dict]]):\n if 'shuffle' not in train_data_config:\n train_data_config['shuffle'] = True\n\n # preserve config\n self._update_dataset_config(dataset_name='train', config=train_data_config)\n\n self._train_dl = self._setup_dataloader_from_config(config=train_data_config)\n\n # Need to set this because if using an IterableDataset, the length of the dataloader is the total number\n # of samples rather than the number of batches, and this messes up the tqdm progress bar.\n # So we set the number of steps manually (to the correct number) to fix this.\n if 'is_tarred' in train_data_config and train_data_config['is_tarred']:\n # We also need to check if limit_train_batches is already set.\n # If it's an int, we assume that the user has set it to something sane, i.e. <= # training batches,\n # and don't change it. Otherwise, adjust batches accordingly if it's a float (including 1.0).\n if isinstance(self._trainer.limit_train_batches, float):\n self._trainer.limit_train_batches = int(\n self._trainer.limit_train_batches\n * ceil((len(self._train_dl.dataset) / self.world_size) / train_data_config['batch_size'])\n )\n\n def setup_validation_data(self, val_data_config: Optional[Union[DictConfig, Dict]]):\n if 'shuffle' not in val_data_config:\n val_data_config['shuffle'] = False\n\n # preserve config\n self._update_dataset_config(dataset_name='validation', config=val_data_config)\n\n self._validation_dl = self._setup_dataloader_from_config(config=val_data_config)\n\n def setup_test_data(self, test_data_config: Optional[Union[DictConfig, Dict]]):\n if 'shuffle' not in test_data_config:\n test_data_config['shuffle'] = False\n\n # preserve config\n self._update_dataset_config(dataset_name='test', config=test_data_config)\n\n self._test_dl = self._setup_dataloader_from_config(config=test_data_config)\n\n @property\n def input_types(self) -> Optional[Dict[str, NeuralType]]:\n if hasattr(self.preprocessor, '_sample_rate'):\n input_signal_eltype = AudioSignal(freq=self.preprocessor._sample_rate)\n else:\n input_signal_eltype = AudioSignal()\n return {\n \"input_signal\": NeuralType(('B', 'T'), input_signal_eltype, optional=True),\n \"input_signal_length\": NeuralType(tuple('B'), LengthsType(), optional=True),\n \"processed_signal\": NeuralType(('B', 'D', 'T'), SpectrogramType(), optional=True),\n \"processed_signal_length\": NeuralType(tuple('B'), LengthsType(), optional=True),\n }\n\n @property\n def output_types(self) -> Optional[Dict[str, NeuralType]]:\n return {\n \"outputs\": NeuralType(('B', 'T', 'D'), LogprobsType()),\n \"encoded_lengths\": NeuralType(tuple('B'), LengthsType()),\n \"greedy_predictions\": NeuralType(('B', 'T'), LabelsType()),\n }\n\n @typecheck()\n def forward(\n self, input_signal=None, input_signal_length=None, processed_signal=None, processed_signal_length=None\n ):\n has_input_signal = input_signal is not None and input_signal_length is not None\n has_processed_signal = processed_signal is not None and processed_signal_length is not None\n if (has_input_signal ^ has_processed_signal) == False:\n 
raise ValueError(\n f\"{self} Arguments ``input_signal`` and ``input_signal_length`` are mutually exclusive \"\n \" with ``processed_signal`` and ``processed_signal_len`` arguments.\"\n )\n\n if not has_processed_signal:\n processed_signal, processed_signal_length = self.preprocessor(\n input_signal=input_signal, length=input_signal_length,\n )\n\n if self.spec_augmentation is not None and self.training:\n processed_signal = self.spec_augmentation(input_spec=processed_signal)\n\n encoded, encoded_len = self.encoder(audio_signal=processed_signal, length=processed_signal_length)\n log_probs = self.decoder(encoder_output=encoded)\n greedy_predictions = log_probs.argmax(dim=-1, keepdim=False)\n return log_probs, encoded_len, greedy_predictions\n\n # PTL-specific methods\n def training_step(self, batch, batch_nb):\n signal, signal_len, transcript, transcript_len = batch\n if isinstance(batch, DALIOutputs) and batch.has_processed_signal:\n log_probs, encoded_len, predictions = self.forward(\n processed_signal=signal, processed_signal_length=signal_len\n )\n else:\n log_probs, encoded_len, predictions = self.forward(input_signal=signal, input_signal_length=signal_len)\n\n loss_value = self.loss(\n log_probs=log_probs, targets=transcript, input_lengths=encoded_len, target_lengths=transcript_len\n )\n\n tensorboard_logs = {'train_loss': loss_value, 'learning_rate': self._optimizer.param_groups[0]['lr']}\n\n if hasattr(self, '_trainer') and self._trainer is not None:\n log_every_n_steps = self._trainer.log_every_n_steps\n else:\n log_every_n_steps = 1\n\n if (batch_nb + 1) % log_every_n_steps == 0:\n self._wer.update(predictions, transcript, transcript_len)\n wer, _, _ = self._wer.compute()\n tensorboard_logs.update({'training_batch_wer': wer})\n\n return {'loss': loss_value, 'log': tensorboard_logs}\n\n def validation_step(self, batch, batch_idx, dataloader_idx=0):\n signal, signal_len, transcript, transcript_len = batch\n if isinstance(batch, DALIOutputs) and batch.has_processed_signal:\n log_probs, encoded_len, predictions = self.forward(\n processed_signal=signal, processed_signal_length=signal_len\n )\n else:\n log_probs, encoded_len, predictions = self.forward(input_signal=signal, input_signal_length=signal_len)\n\n loss_value = self.loss(\n log_probs=log_probs, targets=transcript, input_lengths=encoded_len, target_lengths=transcript_len\n )\n self._wer.update(predictions, transcript, transcript_len)\n wer, wer_num, wer_denom = self._wer.compute()\n return {\n 'val_loss': loss_value,\n 'val_wer_num': wer_num,\n 'val_wer_denom': wer_denom,\n 'val_wer': wer,\n }\n\n def test_step(self, batch, batch_idx, dataloader_idx=0):\n logs = self.validation_step(batch, batch_idx, dataloader_idx=dataloader_idx)\n test_logs = {\n 'test_loss': logs['val_loss'],\n 'test_wer_num': logs['val_wer_num'],\n 'test_wer_denom': logs['val_wer_denom'],\n 'test_wer': logs['val_wer'],\n }\n return test_logs\n\n def test_dataloader(self):\n if self._test_dl is not None:\n return self._test_dl\n\n def _setup_transcribe_dataloader(self, config: Dict) -> 'torch.utils.data.DataLoader':\n \"\"\"\n Setup function for a temporary data loader which wraps the provided audio file.\n\n Args:\n config: A python dictionary which contains the following keys:\n paths2audio_files: (a list) of paths to audio files. The files should be relatively short fragments. \\\n Recommended length per file is between 5 and 25 seconds.\n batch_size: (int) batch size to use during inference. 
\\\n Bigger will result in better throughput performance but would use more memory.\n temp_dir: (str) A temporary directory where the audio manifest is temporarily\n stored.\n\n Returns:\n A pytorch DataLoader for the given audio file(s).\n \"\"\"\n dl_config = {\n 'manifest_filepath': os.path.join(config['temp_dir'], 'manifest.json'),\n 'sample_rate': self.preprocessor._sample_rate,\n 'labels': self.decoder.vocabulary,\n 'batch_size': min(config['batch_size'], len(config['paths2audio_files'])),\n 'trim_silence': True,\n 'shuffle': False,\n }\n\n temporary_datalayer = self._setup_dataloader_from_config(config=DictConfig(dl_config))\n return temporary_datalayer\n\n def export(\n self,\n output: str,\n input_example=None,\n output_example=None,\n verbose=False,\n export_params=True,\n do_constant_folding=True,\n keep_initializers_as_inputs=False,\n onnx_opset_version: int = 12,\n try_script: bool = False,\n set_eval: bool = True,\n check_trace: bool = True,\n use_dynamic_axes: bool = True,\n ):\n if input_example is not None or output_example is not None:\n logging.warning(\n \"Passed input and output examples will be ignored and recomputed since\"\n \" EncDecCTCModel consists of two separate models (encoder and decoder) with different\"\n \" inputs and outputs.\"\n )\n\n qual_name = self.__module__ + '.' + self.__class__.__qualname__\n output1 = os.path.join(os.path.dirname(output), 'encoder_' + os.path.basename(output))\n output1_descr = qual_name + ' Encoder exported to ONNX'\n encoder_onnx = self.encoder.export(\n output1,\n None, # computed by input_example()\n None,\n verbose,\n export_params,\n do_constant_folding,\n keep_initializers_as_inputs,\n onnx_opset_version,\n try_script,\n set_eval,\n check_trace,\n use_dynamic_axes,\n )\n\n output2 = os.path.join(os.path.dirname(output), 'decoder_' + os.path.basename(output))\n output2_descr = qual_name + ' Decoder exported to ONNX'\n decoder_onnx = self.decoder.export(\n output2,\n None, # computed by input_example()\n None,\n verbose,\n export_params,\n do_constant_folding,\n keep_initializers_as_inputs,\n onnx_opset_version,\n try_script,\n set_eval,\n check_trace,\n use_dynamic_axes,\n )\n\n output_model = attach_onnx_to_onnx(encoder_onnx, decoder_onnx, \"DC\")\n output_descr = qual_name + ' Encoder+Decoder exported to ONNX'\n onnx.save(output_model, output)\n return ([output, output1, output2], [output_descr, output1_descr, output2_descr])\n\n\nclass JasperNet(EncDecCTCModel):\n pass\n\n\nclass QuartzNet(EncDecCTCModel):\n pass\n" ]
[ [ "torch.no_grad", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
localmonkey/rosalind
[ "dceab11d4938c1325075988be091abd3a5d25824" ]
[ "algorythmic_heights/bf/bf.py" ]
[ "import queue\nimport numpy as np\n\n\nclass myGrph:\n def __init__(self, vertex_quant, edges_quant):\n self.V_quant = vertex_quant\n self.E_quant = edges_quant\n self.edges_list = np.zeros((edges_quant, 3), dtype=np.int64)\n #self.edge_list = []\n\n def fill_connections(self, file_descriptor):\n counter = 0\n for line in file_descriptor:\n vertex, connected_vertex, weight = map(lambda x: int(x),\n line.strip().split())\n self.edges_list[counter] = [vertex - 1, connected_vertex - 1, weight]\n counter = counter + 1\n\n def f_con_comp(self, src_vertex):\n q = queue.Queue()\n q.put(src_vertex - 1)\n visited = [False]*self.V_quant\n while(not q.empty()):\n vrtxf = q.get()\n visited[vrtxf] = True\n for i in range(0, self.V_quant):\n if ((self.V_mat[vrtxf][i] != - 1) and visited[i] == False):\n q.put(i)\n return visited\n\n def find_shortest_paths(self, src_vertex):\n distance = np.full((1, self.V_quant), np.Inf).flatten()\n distance[src_vertex - 1] = 0\n for _ in range(0, self.V_quant - 1):\n for uu, vv, weight in self.edges_list:\n if (\n distance[uu] != np.Inf and\n (distance[vv] > (distance[uu] + weight))\n ):\n distance[vv] = distance[uu] + weight\n return distance\n\n\nif __name__ == \"__main__\":\n with open(\"rosalind_bf.txt\", \"r\") as f:\n vertexes_quant, edges_quant = map(lambda x: int(x),\n f.readline().strip().split())\n tst_grph = myGrph(vertexes_quant, edges_quant)\n tst_grph.fill_connections(f)\n res = tst_grph.find_shortest_paths(1)\n\n with open(\"bf_output.txt\", \"w\") as f:\n for i in res:\n if i == np.Inf:\n i = 'x'\n else:\n i = int(i)\n f.write(f\"{i} \")\n" ]
[ [ "numpy.zeros", "numpy.full" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Fieps1/p3-tennis
[ "29f3dab5810d7cd7f84120416a615956d266c256" ]
[ "deep_rl/component/envs.py" ]
[ "#######################################################################\n# Copyright (C) 2017 Shangtong Zhang([email protected]) #\n# Permission given to modify the code as long as you keep this #\n# declaration at the top #\n#######################################################################\n\nimport os\nimport gym\nimport numpy as np\nimport torch\nfrom gym.spaces.box import Box\nfrom gym.spaces.discrete import Discrete\n\nfrom baselines.common.atari_wrappers import make_atari, wrap_deepmind\nfrom baselines.common.atari_wrappers import FrameStack as FrameStack_\nfrom baselines.common.vec_env.subproc_vec_env import SubprocVecEnv, VecEnv\n\nfrom ..utils import *\n\ntry:\n import roboschool\nexcept ImportError:\n pass\n\n\n# adapted from https://github.com/ikostrikov/pytorch-a2c-ppo-acktr/blob/master/envs.py\ndef make_env(env_id, seed, rank, episode_life=True):\n def _thunk():\n # I think this is not needed\n # random_seed(seed)\n if env_id.startswith(\"dm\"):\n import dm_control2gym\n _, domain, task = env_id.split('-')\n env = dm_control2gym.make(domain_name=domain, task_name=task)\n else:\n # My code for reacher env:\n if env_id == 'reacher':\n env = make_reacher()\n elif env_id == 'tennis':\n env = make_tennis()\n else:\n env = gym.make(env_id)\n\n is_atari = hasattr(gym.envs, 'atari') and isinstance(\n env.unwrapped, gym.envs.atari.atari_env.AtariEnv)\n if is_atari:\n env = make_atari(env_id)\n env.seed(seed + rank)\n env = OriginalReturnWrapper(env)\n if is_atari:\n env = wrap_deepmind(env,\n episode_life=episode_life,\n clip_rewards=False,\n frame_stack=False,\n scale=False)\n obs_shape = env.observation_space.shape\n if len(obs_shape) == 3:\n env = TransposeImage(env)\n env = FrameStack(env, 4)\n\n return env\n\n return _thunk\n\n\nclass TennisVecEnv(gym.Env):\n\n reward_range = (-0.01, 0.1)\n\n def __init__(self):\n from unityagents import UnityEnvironment\n\n env = UnityEnvironment(file_name='Tennis_Linux/Tennis.x86_64')\n\n # get the default brain\n brain_name = env.brain_names[0]\n brain = env.brains[brain_name]\n\n self.train_mode = True\n\n # reset the environment\n env_info = env.reset(train_mode=self.train_mode)[brain_name]\n\n # number of agents\n num_agents = len(env_info.agents)\n print('Number of agents:', num_agents)\n\n # size of each action\n action_size = brain.vector_action_space_size\n print('Size of each action:', action_size)\n\n # examine the state space\n states = env_info.vector_observations\n state_size = states.shape[1]\n print('There are {} agents. 
Each agent observes state size: {}'.format(states.shape[0], state_size))\n print('The state for the first agent looks like:', states[0])\n\n self.unity_env = env\n self.brain_name = brain_name\n self.brain = brain\n\n # # action vector is between -1 and +1\n action_space = np.array(np.ones(action_size))\n # # 100 is a guess from me ;-)\n state_space = np.array(np.full(state_size, fill_value=100))\n\n # # Need to be set\n self.action_space = Box(-action_space, action_space, dtype=np.float32)\n self.observation_space = Box(-state_space, state_space, dtype=np.float32)\n\n self.last_step = None\n\n def step_both_agents(self, actions):\n env_info = self.unity_env.step(actions)[self.brain_name]\n state = env_info.vector_observations # get the current state\n reward = env_info.rewards # get the reward\n done = env_info.local_done # see if episode has finished\n\n return state, reward, done, {}\n\n def step(self, action):\n return self.step_both_agents(action)\n # raise NotImplementedError('Cannot step for a single agent!')\n\n def reset(self):\n env_info = self.unity_env.reset(train_mode=self.train_mode)[self.brain_name]\n return env_info.vector_observations # Return current state\n\n def close(self):\n self.unity_env.close()\n\n def render(self, mode='human'):\n # no-op\n raise NotImplementedError()\n\n def seed(self, seed=None):\n # no-op\n return [0]\n\n\n_tennis_vec_env_instance = None\n\n\ndef make_tennis():\n global _tennis_vec_env_instance\n if not _tennis_vec_env_instance:\n _tennis_vec_env_instance = TennisVecEnv()\n return _tennis_vec_env_instance\n\n\n_reacher_instance = None\n\n\ndef make_reacher():\n global _reacher_instance\n if not _reacher_instance:\n _reacher_instance = ReacherWrapper()\n return _reacher_instance\n\n\nclass ReacherWrapper(gym.Env):\n\n metadata = {\n # 'render.modes': ['human'],\n 'render.modes': [],\n }\n\n reward_range = (0., 0.1)\n\n def __init__(self):\n from unityagents import UnityEnvironment\n\n env = UnityEnvironment(file_name='Reacher_Linux/Reacher.x86_64')\n\n # get the default brain\n brain_name = env.brain_names[0]\n brain = env.brains[brain_name]\n\n self.train_mode = True\n\n # reset the environment\n env_info = env.reset(train_mode=self.train_mode)[brain_name]\n\n # number of agents\n num_agents = len(env_info.agents)\n #print('Number of agents:', num_agents)\n\n # size of each action\n action_size = brain.vector_action_space_size\n #print('Size of each action:', action_size)\n\n # examine the state space\n states = env_info.vector_observations\n state_size = states.shape[1]\n #print('There are {} agents. 
Each observes a state with length: {}'.format(states.shape[0], state_size))\n #print('The state for the first agent looks like:', states[0])\n\n self.unity_env = env\n self.brain_name = brain_name\n\n # action vector is between -1 and +1\n action_space = np.array([1] * brain.vector_action_space_size)\n # 100 is a guess from me ;-)\n state_space = np.array([100] * brain.vector_observation_space_size)\n\n # Need to be set\n self.action_space = Box(-action_space, action_space, dtype=np.float32)\n self.observation_space = Box(-state_space, state_space, dtype=np.float32)\n\n def step(self, action):\n env_info = self.unity_env.step(action)[self.brain_name]\n state = env_info.vector_observations[0] # get the current state\n reward = env_info.rewards[0] # get the reward\n done = env_info.local_done[0] # see if episode has finished\n\n return state, reward, done, {}\n\n def reset(self):\n env_info = self.unity_env.reset(train_mode=self.train_mode)[self.brain_name]\n return env_info.vector_observations[0] # Return current state\n\n def render(self, mode='human'):\n # no-op\n raise NotImplementedError()\n\n def seed(self, seed=None):\n # no-op\n return [0]\n\n def close(self):\n self.unity_env.close()\n\n\nclass OriginalReturnWrapper(gym.Wrapper):\n def __init__(self, env):\n gym.Wrapper.__init__(self, env)\n self.total_rewards = 0\n\n def step(self, action):\n obs, reward, done, info = self.env.step(action)\n self.total_rewards += sum(reward)\n if any(done):\n info['episodic_return'] = self.total_rewards\n self.total_rewards = 0\n obs = self.env.reset() # reset if any agent reports done\n else:\n info['episodic_return'] = None\n return obs, reward, done, (info, info)\n\n def reset(self):\n return self.env.reset()\n\n\nclass TransposeImage(gym.ObservationWrapper):\n def __init__(self, env=None):\n super(TransposeImage, self).__init__(env)\n obs_shape = self.observation_space.shape\n self.observation_space = Box(\n self.observation_space.low[0, 0, 0],\n self.observation_space.high[0, 0, 0],\n [obs_shape[2], obs_shape[1], obs_shape[0]],\n dtype=self.observation_space.dtype)\n\n def observation(self, observation):\n return observation.transpose(2, 0, 1)\n\n\n# The original LayzeFrames doesn't work well\nclass LazyFrames(object):\n def __init__(self, frames):\n \"\"\"This object ensures that common frames between the observations are only stored once.\n It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay\n buffers.\n\n This object should only be converted to numpy array before being passed to the model.\n\n You'd not believe how complex the previous solution was.\"\"\"\n self._frames = frames\n\n def __array__(self, dtype=None):\n out = np.concatenate(self._frames, axis=0)\n if dtype is not None:\n out = out.astype(dtype)\n return out\n\n def __len__(self):\n return len(self.__array__())\n\n def __getitem__(self, i):\n return self.__array__()[i]\n\n\nclass FrameStack(FrameStack_):\n def __init__(self, env, k):\n FrameStack_.__init__(self, env, k)\n\n def _get_ob(self):\n assert len(self.frames) == self.k\n return LazyFrames(list(self.frames))\n\n\n# The original one in baselines is really bad\nclass DummyVecEnv(VecEnv):\n def __init__(self, env_fns):\n self.envs = [fn() for fn in env_fns]\n env = self.envs[0]\n VecEnv.__init__(self, len(env_fns), env.observation_space, env.action_space)\n self.actions = None\n\n def step_async(self, actions):\n self.actions = actions\n\n def step_wait(self):\n data = []\n for i in range(self.num_envs):\n obs, rew, done, info = 
self.envs[i].step(self.actions[i])\n if done:\n obs = self.envs[i].reset()\n data.append([obs, rew, done, info])\n obs, rew, done, info = zip(*data)\n return obs, np.asarray(rew), np.asarray(done), info\n\n def reset(self):\n return [env.reset() for env in self.envs]\n\n def close(self):\n return\n\n\nclass Task:\n def __init__(self,\n name,\n num_envs=1,\n single_process=True,\n log_dir=None,\n episode_life=True,\n seed=np.random.randint(int(1e5))):\n if log_dir is not None:\n mkdir(log_dir)\n\n if name == 'tennis':\n self.env = OriginalReturnWrapper(TennisVecEnv())\n else:\n envs = [make_env(name, seed, i, episode_life) for i in range(num_envs)]\n if single_process:\n Wrapper = DummyVecEnv\n else:\n Wrapper = SubprocVecEnv\n self.env = Wrapper(envs)\n\n self.name = name\n self.observation_space = self.env.observation_space\n self.state_dim = int(np.prod(self.env.observation_space.shape))\n\n self.action_space = self.env.action_space\n if isinstance(self.action_space, Discrete):\n self.action_dim = self.action_space.n\n elif isinstance(self.action_space, Box):\n self.action_dim = self.action_space.shape[0]\n else:\n assert 'unknown action space'\n\n def reset(self):\n return self.env.reset()\n\n def step(self, actions):\n if isinstance(self.action_space, Box):\n actions = np.clip(actions, self.action_space.low, self.action_space.high)\n return self.env.step(actions)\n\n\nif __name__ == '__main__':\n task = Task('Hopper-v2', 5, single_process=False)\n state = task.reset()\n while True:\n action = np.random.rand(task.observation_space.shape[0])\n next_state, reward, done, _ = task.step(action)\n print(done)\n" ]
[ [ "numpy.clip", "numpy.asarray", "numpy.full", "numpy.concatenate", "numpy.ones", "numpy.random.rand", "numpy.prod", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
YOHNGGG/Deep-Learning-based-Spectrum-Sensing
[ "e3b17ea30020db5ccf753497ce00a0fdf3ffa342" ]
[ "Python/SignalLoader_Test.py" ]
[ "import torch\r\nimport os, glob\r\nimport random\r\nimport csv\r\nfrom torch.utils.data import Dataset,DataLoader\r\nfrom scipy.io import loadmat\r\n\r\nclass LoadSignal(Dataset):\r\n\r\n def __init__(self,root):\r\n super(LoadSignal, self).__init__()\r\n\r\n self.root = root\r\n\r\n self.name2label = {} #signal0\r\n for name in sorted(os.listdir(os.path.join(root))):\r\n if not os.path.isdir(os.path.join(root, name)):\r\n continue\r\n self.name2label[name] = len(self.name2label.keys())\r\n\r\n #print(self.name2label)\r\n\r\n # data,label\r\n self.signals,self.labels = self.load_csv('signal.csv')\r\n\r\n def load_csv(self,filename):\r\n\r\n if not os.path.exists(os.path.join(self.root,filename)):\r\n signals=[]\r\n for name in self.name2label.keys():\r\n signals += glob.glob(os.path.join(self.root,name,'*.mat'))\r\n\r\n #print(len(signals),signals)\r\n\r\n random.shuffle(signals)\r\n with open(os.path.join(self.root,filename),mode='w',newline='') as f:\r\n writer = csv.writer(f)\r\n for sig in signals:\r\n name = sig.split(os.sep)[-2]\r\n label = self.name2label[name]\r\n writer.writerow([sig,label])\r\n signals = []\r\n labels = []\r\n with open(os.path.join(self.root,filename)) as f:\r\n reader = csv.reader(f)\r\n for row in reader:\r\n sig,label = row\r\n label = int(label)\r\n signals.append(sig)\r\n labels.append(label)\r\n\r\n assert len(signals) == len(labels)\r\n\r\n return signals, labels\r\n\r\n\r\n def __len__(self):\r\n return len(self.signals)\r\n\r\n def __getitem__(self, idx):\r\n #img:root label:0/1\r\n sig,label = self.signals[idx],self.labels[idx]\r\n sig = torch.from_numpy(loadmat(sig)['feature'])\r\n sig = sig.type(torch.FloatTensor)\r\n sig = torch.unsqueeze(sig, dim=0)\r\n label = torch.tensor(label)\r\n\r\n return sig,label\r\n\r\ndef main():\r\n db = LoadSignal('dataset')\r\n train_loader = DataLoader(db, batch_size=16, shuffle=True,\r\n num_workers=1)\r\n\r\n a=0\r\n for x,y in train_loader:\r\n x = x.view(x.size(0), 64 * 2)\r\n print(x.shape,y.shape,y)\r\n l = len(y)\r\n for i in range(l):\r\n if y[i] == 1:\r\n input = x[i]\r\n input = input.view(64*2)\r\n print(input,input.shape)\r\n\r\n print(a)\r\n break\r\n\r\n\r\n\r\n #x,y = next(iter(train_loader))\r\n #print(x,x.shape,y,y.shape)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()" ]
[ [ "scipy.io.loadmat", "torch.utils.data.DataLoader", "torch.unsqueeze", "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
JungeAlexander/cocosco
[ "81ba561f6f16b43cfbd1b6d119e042bb640da23d" ]
[ "tests/ml/feature/test_glove.py" ]
[ "import numpy as np\n\nimport cocoscore.ml.feature.glove as glove\n\n\nclass TestClass(object):\n test_vec_file = 'tests/ml/feature/vectors.txt.gz'\n test_vocab_file = 'tests/ml/feature/vocab.txt.gz'\n\n def test_load_vector_array(self):\n w, w2i, i2w = glove.load_vector_array(self.test_vec_file, self.test_vocab_file)\n np.testing.assert_array_equal(w, np.array([[0.5, 0.5, 0.5, 0.5], [1, 0, 0, 0]]))\n assert w2i == {'a': 1, 'the': 0}\n assert i2w == {1: 'a', 0: 'the'}\n\n def test_load_pre_trained_vector_array(self):\n w, w2i, i2w = glove.load_pre_trained_vector_array(self.test_vec_file)\n np.testing.assert_array_equal(w, np.array([[1, 0, 0, 0], [0.5, 0.5, 0.5, 0.5]]))\n assert w2i == {'a': 0, 'the': 1}\n assert i2w == {0: 'a', 1: 'the'}\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SamanKhamesian/Music-Genre-Classification-of-Audio-Signals
[ "10b29b91738b25138cc9916ae174e4cd5027c759" ]
[ "Source/Classification.py" ]
[ "import joblib\nimport sklearn\n\nfrom Source.Utilities import *\nfrom config import Test, Model\n\nPATH = librosa.util.find_files(Test.DATA_PATH)\n\n\ndef main():\n songs = []\n\n # Load Test Files\n for p in PATH:\n song, sr = librosa.load(p, sr=SAMPLING_RATE, duration=5.0)\n songs.append(song)\n\n # Extract Features of Test Files and Save Them in Array\n data = numpy.array([extract_features(song) for song in songs])\n\n # Scale ALL Variables Between -1 to 1\n scaler = sklearn.preprocessing.MinMaxScaler(feature_range=(-1, 1))\n data = scaler.fit_transform(data)\n\n # Predict Genres\n svm = joblib.load(Model.NAME)\n predicts = svm.predict(data)\n\n print(predicts)\n" ]
[ [ "sklearn.preprocessing.MinMaxScaler" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
eliadl/textdistance
[ "bbba2eb9660aa0b360ad54ef7aa5a8348d9a9924" ]
[ "textdistance/algorithms/edit_based.py" ]
[ "# built-in\nfrom collections import defaultdict\n\n# app\nfrom .base import Base as _Base, BaseSimilarity as _BaseSimilarity\n\n\ntry:\n # python3\n from itertools import zip_longest\nexcept ImportError:\n # python2\n from itertools import izip_longest as zip_longest\ntry:\n import numpy\nexcept ImportError:\n numpy = None\n\n\n__all__ = [\n 'Hamming', 'MLIPNS',\n 'Levenshtein', 'DamerauLevenshtein',\n 'Jaro', 'JaroWinkler', 'StrCmp95',\n 'NeedlemanWunsch', 'Gotoh', 'SmithWaterman',\n\n 'hamming', 'mlipns',\n 'levenshtein', 'damerau_levenshtein',\n 'jaro', 'jaro_winkler', 'strcmp95',\n 'needleman_wunsch', 'gotoh', 'smith_waterman',\n]\n\n\nclass Hamming(_Base):\n \"\"\"\n Compute the Hamming distance between the two or more sequences.\n The Hamming distance is the number of differing items in ordered sequences.\n\n https://en.wikipedia.org/wiki/Hamming_distance\n \"\"\"\n def __init__(self, qval=1, test_func=None, truncate=False, external=True):\n self.qval = qval\n self.test_func = test_func or self._ident\n self.truncate = truncate\n self.external = external\n\n def __call__(self, *sequences):\n sequences = self._get_sequences(*sequences)\n\n result = self.quick_answer(*sequences)\n if result is not None:\n return result\n\n _zip = zip if self.truncate else zip_longest\n return sum([not self.test_func(*es) for es in _zip(*sequences)])\n\n\nclass Levenshtein(_Base):\n \"\"\"\n Compute the absolute Levenshtein distance between the two sequences.\n The Levenshtein distance is the minimum number of edit operations necessary\n for transforming one sequence into the other. The edit operations allowed are:\n\n * deletion: ABC -> BC, AC, AB\n * insertion: ABC -> ABCD, EABC, AEBC..\n * substitution: ABC -> ABE, ADC, FBC..\n\n https://en.wikipedia.org/wiki/Levenshtein_distance\n TODO: https://gist.github.com/kylebgorman/1081951/9b38b7743a3cb5167ab2c6608ac8eea7fc629dca\n \"\"\"\n def __init__(self, qval=1, test_func=None, external=True):\n self.qval = qval\n self.test_func = test_func or self._ident\n self.external = external\n\n def _recursive(self, s1, s2):\n # TODO: more than 2 sequences support\n if not s1 or not s2:\n return len(s1) + len(s2)\n\n if self.test_func(s1[-1], s2[-1]):\n return self(s1[:-1], s2[:-1])\n\n # deletion/insertion\n d = min(\n self(s1[:-1], s2),\n self(s1, s2[:-1]),\n )\n # substitution\n s = self(s1[:-1], s2[:-1])\n return min(d, s) + 1\n\n def _cicled(self, s1, s2):\n \"\"\"\n source:\n https://github.com/jamesturk/jellyfish/blob/master/jellyfish/_jellyfish.py#L18\n \"\"\"\n rows = len(s1) + 1\n cols = len(s2) + 1\n prev = None\n if numpy:\n cur = numpy.arange(cols)\n else:\n cur = range(cols)\n\n for r in range(1, rows):\n prev, cur = cur, [r] + [0] * (cols - 1)\n for c in range(1, cols):\n deletion = prev[c] + 1\n insertion = cur[c - 1] + 1\n dist = self.test_func(s1[r - 1], s2[c - 1])\n edit = prev[c - 1] + (not dist)\n cur[c] = min(edit, deletion, insertion)\n return cur[-1]\n\n def __call__(self, s1, s2):\n s1, s2 = self._get_sequences(s1, s2)\n\n result = self.quick_answer(s1, s2)\n if result is not None:\n return result\n\n return self._cicled(s1, s2)\n\n\nclass DamerauLevenshtein(_Base):\n \"\"\"\n Compute the absolute Damerau-Levenshtein distance between the two sequences.\n The Damerau-Levenshtein distance is the minimum number of edit operations necessary\n for transforming one sequence into the other. 
The edit operations allowed are:\n\n * deletion: ABC -> BC, AC, AB\n * insertion: ABC -> ABCD, EABC, AEBC..\n * substitution: ABC -> ABE, ADC, FBC..\n * transposition: ABC -> ACB, BAC\n\n https://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance\n \"\"\"\n def __init__(self, qval=1, test_func=None, external=True):\n self.qval = qval\n self.test_func = test_func or self._ident\n self.external = external\n\n def _numpy(self, s1, s2):\n # TODO: doesn't pass tests, need improve\n d = numpy.zeros([len(s1) + 1, len(s2) + 1], dtype=numpy.int)\n\n # matrix\n for i in range(-1, len(s1) + 1):\n d[i][-1] = i + 1\n for j in range(-1, len(s2) + 1):\n d[-1][j] = j + 1\n\n for i, cs1 in enumerate(s1):\n for j, cs2 in enumerate(s2):\n cost = int(not self.test_func(cs1, cs2))\n # ^ 0 if equal, 1 otherwise\n\n d[i][j] = min(\n d[i - 1][j] + 1, # deletion\n d[i][j - 1] + 1, # insertion\n d[i - 1][j - 1] + cost, # substitution\n )\n\n # transposition\n if not i or not j:\n continue\n if not self.test_func(cs1, s2[j - 1]):\n continue\n d[i][j] = min(\n d[i][j],\n d[i - 2][j - 2] + cost,\n )\n\n return d[len(s1) - 1][len(s2) - 1]\n\n def _pure_python(self, s1, s2):\n d = {}\n\n # matrix\n for i in range(-1, len(s1) + 1):\n d[i, -1] = i + 1\n for j in range(-1, len(s2) + 1):\n d[-1, j] = j + 1\n\n for i, cs1 in enumerate(s1):\n for j, cs2 in enumerate(s2):\n cost = int(not self.test_func(cs1, cs2))\n # ^ 0 if equal, 1 otherwise\n\n d[i, j] = min(\n d[i - 1, j] + 1, # deletion\n d[i, j - 1] + 1, # insertion\n d[i - 1, j - 1] + cost, # substitution\n )\n\n # transposition\n if not i or not j:\n continue\n if not self.test_func(cs1, s2[j - 1]):\n continue\n d[i, j] = min(\n d[i, j],\n d[i - 2, j - 2] + cost,\n )\n\n return d[len(s1) - 1, len(s2) - 1]\n\n def __call__(self, s1, s2):\n s1, s2 = self._get_sequences(s1, s2)\n\n result = self.quick_answer(s1, s2)\n if result is not None:\n return result\n\n # if numpy:\n # return self._numpy(s1, s2)\n # else:\n return self._pure_python(s1, s2)\n\n\nclass JaroWinkler(_BaseSimilarity):\n \"\"\"\n Computes the Jaro-Winkler measure between two strings.\n The Jaro-Winkler measure is designed to capture cases where two strings\n have a low Jaro score, but share a prefix.\n and thus are likely to match.\n\n https://en.wikipedia.org/wiki/Jaro%E2%80%93Winkler_distance\n https://github.com/Yomguithereal/talisman/blob/master/src/metrics/distance/jaro.js\n https://github.com/Yomguithereal/talisman/blob/master/src/metrics/distance/jaro-winkler.js\n \"\"\"\n def __init__(self, long_tolerance=False, winklerize=True, qval=1, external=True):\n self.qval = qval\n self.long_tolerance = long_tolerance\n self.winklerize = winklerize\n self.external = external\n\n def maximum(self, *sequences):\n return 1\n\n def __call__(self, s1, s2, prefix_weight=0.1):\n s1, s2 = self._get_sequences(s1, s2)\n\n result = self.quick_answer(s1, s2)\n if result is not None:\n return result\n\n s1_len = len(s1)\n s2_len = len(s2)\n\n if not s1_len or not s2_len:\n return 0.0\n\n min_len = max(s1_len, s2_len)\n search_range = (min_len // 2) - 1\n if search_range < 0:\n search_range = 0\n\n s1_flags = [False] * s1_len\n s2_flags = [False] * s2_len\n\n # looking only within search range, count & flag matched pairs\n common_chars = 0\n for i, s1_ch in enumerate(s1):\n low = max(0, i - search_range)\n hi = min(i + search_range, s2_len - 1)\n for j in range(low, hi + 1):\n if not s2_flags[j] and s2[j] == s1_ch:\n s1_flags[i] = s2_flags[j] = True\n common_chars += 1\n break\n\n # short circuit if no 
characters match\n if not common_chars:\n return 0.0\n\n # count transpositions\n k = trans_count = 0\n for i, s1_f in enumerate(s1_flags):\n if s1_f:\n for j in range(k, s2_len):\n if s2_flags[j]:\n k = j + 1\n break\n if s1[i] != s2[j]:\n trans_count += 1\n trans_count //= 2\n\n # adjust for similarities in nonmatched characters\n common_chars = float(common_chars)\n weight = common_chars / s1_len + common_chars / s2_len\n weight += (common_chars - trans_count) / common_chars\n weight /= 3\n\n # stop to boost if strings are not similar\n if not self.winklerize:\n return weight\n if weight <= 0.7 or s1_len <= 3 or s2_len <= 3:\n return weight\n\n # winkler modification\n # adjust for up to first 4 chars in common\n j = min(min_len, 4)\n i = 0\n while i < j and s1[i] == s2[i] and s1[i]:\n i += 1\n if i:\n weight += i * prefix_weight * (1.0 - weight)\n\n # optionally adjust for long strings\n # after agreeing beginning chars, at least two or more must agree and\n # agreed characters must be > half of remaining characters\n if not self.long_tolerance or min_len <= 4:\n return weight\n if common_chars <= i + 1 or 2 * common_chars < min_len + i:\n return weight\n tmp = float(common_chars - i - 1) / (s1_len + s2_len - i * 2 + 2)\n weight += (1.0 - weight) * tmp\n return weight\n\n\nclass Jaro(JaroWinkler):\n def __init__(self, long_tolerance=False, qval=1, external=True):\n super(Jaro, self).__init__(\n long_tolerance=long_tolerance,\n winklerize=False,\n qval=qval,\n external=external)\n\n\nclass NeedlemanWunsch(_BaseSimilarity):\n \"\"\"\n Computes the Needleman-Wunsch measure between two strings.\n The Needleman-Wunsch generalizes the Levenshtein distance and considers global\n alignment between two strings. Specifically, it is computed by assigning\n a score to each alignment between two input strings and choosing the\n score of the best alignment, that is, the maximal score.\n An alignment between two strings is a set of correspondences between the\n characters of between them, allowing for gaps.\n\n https://en.wikipedia.org/wiki/Needleman%E2%80%93Wunsch_algorithm\n \"\"\"\n positive = False\n\n def __init__(self, gap_cost=1.0, sim_func=None, qval=1, external=True):\n self.qval = qval\n self.gap_cost = gap_cost\n if sim_func:\n self.sim_func = sim_func\n else:\n self.sim_func = self._ident\n self.external = external\n\n def minimum(self, *sequences):\n return - max(map(len, sequences)) * self.gap_cost\n\n def maximum(self, *sequences):\n return max(map(len, sequences))\n\n def distance(self, *sequences):\n \"\"\"Get distance between sequences\n \"\"\"\n return -1 * self.similarity(*sequences)\n\n def normalized_distance(self, *sequences):\n \"\"\"Get distance from 0 to 1\n \"\"\"\n minimum = self.minimum(*sequences)\n maximum = self.maximum(*sequences)\n return float(self.distance(*sequences) - minimum) / (maximum * 2)\n\n def normalized_similarity(self, *sequences):\n \"\"\"Get distance from 0 to 1\n \"\"\"\n minimum = self.minimum(*sequences)\n maximum = self.maximum(*sequences)\n return float(self.similarity(*sequences) - minimum) / (maximum * 2)\n\n def __call__(self, s1, s2):\n if not numpy:\n raise ImportError('Please, install numpy for Needleman-Wunsch measure')\n\n s1, s2 = self._get_sequences(s1, s2)\n\n # result = self.quick_answer(s1, s2)\n # if result is not None:\n # return result * self.maximum(s1, s2)\n\n dist_mat = numpy.zeros(\n (len(s1) + 1, len(s2) + 1),\n dtype=numpy.float,\n )\n # DP initialization\n for i in range(len(s1) + 1):\n dist_mat[i, 0] = -(i * 
self.gap_cost)\n # DP initialization\n for j in range(len(s2) + 1):\n dist_mat[0, j] = -(j * self.gap_cost)\n # Needleman-Wunsch DP calculation\n for i in range(1, len(s1) + 1):\n for j in range(1, len(s2) + 1):\n match = dist_mat[i - 1, j - 1] + self.sim_func(s1[i - 1], s2[j - 1])\n delete = dist_mat[i - 1, j] - self.gap_cost\n insert = dist_mat[i, j - 1] - self.gap_cost\n dist_mat[i, j] = max(match, delete, insert)\n return dist_mat[dist_mat.shape[0] - 1, dist_mat.shape[1] - 1]\n\n\nclass SmithWaterman(_BaseSimilarity):\n \"\"\"\n Computes the Smith-Waterman measure between two strings.\n The Smith-Waterman algorithm performs local sequence alignment;\n that is, for determining similar regions between two strings.\n Instead of looking at the total sequence, the Smith-Waterman algorithm compares\n segments of all possible lengths and optimizes the similarity measure.\n\n https://en.wikipedia.org/wiki/Smith%E2%80%93Waterman_algorithm\n https://github.com/Yomguithereal/talisman/blob/master/src/metrics/distance/smith-waterman.js\n \"\"\"\n def __init__(self, gap_cost=1.0, sim_func=None, qval=1, external=True):\n self.qval = qval\n self.gap_cost = gap_cost\n self.sim_func = sim_func or self._ident\n self.external = external\n\n def maximum(self, *sequences):\n return min(map(len, sequences))\n\n def __call__(self, s1, s2):\n if not numpy:\n raise ImportError('Please, install numpy for Smith-Waterman measure')\n\n s1, s2 = self._get_sequences(s1, s2)\n\n result = self.quick_answer(s1, s2)\n if result is not None:\n return result\n\n dist_mat = numpy.zeros(\n (len(s1) + 1, len(s2) + 1),\n dtype=numpy.float,\n )\n for i, sc1 in enumerate(s1, start=1):\n for j, sc2 in enumerate(s2, start=1):\n # The score for substituting the letter a[i - 1] for b[j - 1].\n # Generally low for mismatch, high for match.\n match = dist_mat[i - 1, j - 1] + self.sim_func(sc1, sc2)\n # The scores for for introducing extra letters in one of the strings\n # (or by symmetry, deleting them from the other).\n delete = dist_mat[i - 1, j] - self.gap_cost\n insert = dist_mat[i, j - 1] - self.gap_cost\n dist_mat[i, j] = max(0, match, delete, insert)\n return dist_mat[dist_mat.shape[0] - 1, dist_mat.shape[1] - 1]\n\n\nclass Gotoh(NeedlemanWunsch):\n \"\"\"Gotoh score\n Gotoh's algorithm is essentially Needleman-Wunsch with affine gap\n penalties:\n https://www.cs.umd.edu/class/spring2003/cmsc838t/papers/gotoh1982.pdf\n \"\"\"\n def __init__(self, gap_open=1, gap_ext=0.4, sim_func=None, qval=1, external=True):\n self.qval = qval\n self.gap_open = gap_open\n self.gap_ext = gap_ext\n if sim_func:\n self.sim_func = sim_func\n else:\n self.sim_func = self._ident\n self.external = external\n\n def minimum(self, *sequences):\n return -min(map(len, sequences))\n\n def maximum(self, *sequences):\n return min(map(len, sequences))\n\n def __call__(self, s1, s2):\n if not numpy:\n raise ImportError('Please, install numpy for Gotoh measure')\n\n s1, s2 = self._get_sequences(s1, s2)\n\n # result = self.quick_answer(s1, s2)\n # if result is not None:\n # return result * self.maximum(s1, s2)\n\n len_s1 = len(s1)\n len_s2 = len(s2)\n d_mat = numpy.zeros((len_s1 + 1, len_s2 + 1), dtype=numpy.float)\n p_mat = numpy.zeros((len_s1 + 1, len_s2 + 1), dtype=numpy.float)\n q_mat = numpy.zeros((len_s1 + 1, len_s2 + 1), dtype=numpy.float)\n\n d_mat[0, 0] = 0\n p_mat[0, 0] = float('-inf')\n q_mat[0, 0] = float('-inf')\n for i in range(1, len_s1 + 1):\n d_mat[i, 0] = float('-inf')\n p_mat[i, 0] = -self.gap_open - self.gap_ext * (i - 1)\n q_mat[i, 0] = 
float('-inf')\n q_mat[i, 1] = -self.gap_open\n for j in range(1, len_s2 + 1):\n d_mat[0, j] = float('-inf')\n p_mat[0, j] = float('-inf')\n p_mat[1, j] = -self.gap_open\n q_mat[0, j] = -self.gap_open - self.gap_ext * (j - 1)\n\n for i, sc1 in enumerate(s1, start=1):\n for j, sc2 in enumerate(s2, start=1):\n sim_val = self.sim_func(sc1, sc2)\n d_mat[i, j] = max(\n d_mat[i - 1, j - 1] + sim_val,\n p_mat[i - 1, j - 1] + sim_val,\n q_mat[i - 1, j - 1] + sim_val,\n )\n p_mat[i, j] = max(\n d_mat[i - 1, j] - self.gap_open,\n p_mat[i - 1, j] - self.gap_ext,\n )\n q_mat[i, j] = max(\n d_mat[i, j - 1] - self.gap_open,\n q_mat[i, j - 1] - self.gap_ext,\n )\n\n i, j = (n - 1 for n in d_mat.shape)\n return max(d_mat[i, j], p_mat[i, j], q_mat[i, j])\n\n\nclass StrCmp95(_BaseSimilarity):\n \"\"\"strcmp95 similarity\n\n http://cpansearch.perl.org/src/SCW/Text-JaroWinkler-0.1/strcmp95.c\n \"\"\"\n sp_mx = (\n ('A', 'E'), ('A', 'I'), ('A', 'O'), ('A', 'U'), ('B', 'V'), ('E', 'I'),\n ('E', 'O'), ('E', 'U'), ('I', 'O'), ('I', 'U'), ('O', 'U'), ('I', 'Y'),\n ('E', 'Y'), ('C', 'G'), ('E', 'F'), ('W', 'U'), ('W', 'V'), ('X', 'K'),\n ('S', 'Z'), ('X', 'S'), ('Q', 'C'), ('U', 'V'), ('M', 'N'), ('L', 'I'),\n ('Q', 'O'), ('P', 'R'), ('I', 'J'), ('2', 'Z'), ('5', 'S'), ('8', 'B'),\n ('1', 'I'), ('1', 'L'), ('0', 'O'), ('0', 'Q'), ('C', 'K'), ('G', 'J'),\n )\n\n def __init__(self, long_strings=False, external=True):\n self.long_strings = long_strings\n self.external = external\n\n def maximum(self, *sequences):\n return 1\n\n @staticmethod\n def _in_range(char):\n return 0 < ord(char) < 91\n\n def __call__(self, s1, s2):\n s1 = s1.strip().upper()\n s2 = s2.strip().upper()\n\n result = self.quick_answer(s1, s2)\n if result is not None:\n return result\n\n len_s1 = len(s1)\n len_s2 = len(s2)\n\n adjwt = defaultdict(int)\n\n # Initialize the adjwt array on the first call to the function only.\n # The adjwt array is used to give partial credit for characters that\n # may be errors due to known phonetic or character recognition errors.\n # A typical example is to match the letter \"O\" with the number \"0\"\n for c1, c2 in self.sp_mx:\n adjwt[c1, c2] = 3\n adjwt[c2, c1] = 3\n\n if len_s1 > len_s2:\n search_range = len_s1\n minv = len_s2\n else:\n search_range = len_s2\n minv = len_s1\n\n # Blank out the flags\n s1_flag = [0] * search_range\n s2_flag = [0] * search_range\n search_range = max(0, search_range // 2 - 1)\n\n # Looking only within the search range, count and flag the matched pairs.\n num_com = 0\n yl1 = len_s2 - 1\n for i, sc1 in enumerate(s1):\n lowlim = max(i - search_range, 0)\n hilim = min(i + search_range, yl1)\n for j in range(lowlim, hilim + 1):\n if s2_flag[j] == 0 and s2[j] == sc1:\n s2_flag[j] = 1\n s1_flag[i] = 1\n num_com += 1\n break\n\n # If no characters in common - return\n if num_com == 0:\n return 0.0\n\n # Count the number of transpositions\n k = n_trans = 0\n for i, sc1 in enumerate(s1):\n if not s1_flag[i]:\n continue\n for j in range(k, len_s2):\n if s2_flag[j] != 0:\n k = j + 1\n break\n if sc1 != s2[j]:\n n_trans += 1\n n_trans = n_trans // 2\n\n # Adjust for similarities in unmatched characters\n n_simi = 0\n if minv > num_com:\n for i in range(len_s1):\n if s1_flag[i] != 0:\n continue\n if not self._in_range(s1[i]):\n continue\n for j in range(len_s2):\n if s2_flag[j] != 0:\n continue\n if not self._in_range(s2[j]):\n continue\n if (s1[i], s2[j]) not in adjwt:\n continue\n n_simi += adjwt[s1[i], s2[j]]\n s2_flag[j] = 2\n break\n num_sim = n_simi / 10.0 + num_com\n\n # Main weight 
computation\n weight = float(num_sim) / len_s1 + num_sim / len_s2\n weight += float(num_com - n_trans) / num_com\n weight = weight / 3.0\n\n # Continue to boost the weight if the strings are similar\n if weight <= 0.7:\n return weight\n\n # Adjust for having up to the first 4 characters in common\n j = min(minv, 4)\n i = 0\n for sc1, sc2 in zip(s1, s2):\n if i >= j:\n break\n if sc1 != sc2:\n break\n if sc1.isdigit():\n break\n i += 1\n if i:\n weight += i * 0.1 * (1.0 - weight)\n\n # Optionally adjust for long strings.\n\n # After agreeing beginning chars, at least two more must agree and\n # the agreeing characters must be > .5 of remaining characters.\n if not self.long_strings:\n return weight\n if minv <= 4:\n return weight\n if num_com <= i + 1 or 2 * num_com < minv + i:\n return weight\n if s1[0].isdigit():\n return weight\n res = (num_com - i - 1) / (len_s1 + len_s2 - i * 2 + 2)\n weight += (1.0 - weight) * res\n return weight\n\n\nclass MLIPNS(_BaseSimilarity):\n \"\"\"\n Compute the Hamming distance between the two or more sequences.\n The Hamming distance is the number of differing items in ordered sequences.\n\n http://www.sial.iias.spb.su/files/386-386-1-PB.pdf\n https://github.com/Yomguithereal/talisman/blob/master/src/metrics/distance/mlipns.js\n \"\"\"\n def __init__(self, threshold=0.25, maxmismatches=2, qval=1, external=True):\n self.qval = qval\n self.threshold = threshold\n self.maxmismatches = maxmismatches\n self.external = external\n\n def maximum(self, *sequences):\n return 1\n\n def __call__(self, *sequences):\n sequences = self._get_sequences(*sequences)\n\n result = self.quick_answer(*sequences)\n if result is not None:\n return result\n\n mismatches = 0\n ham = Hamming()(*sequences)\n maxlen = max(map(len, sequences))\n while all(sequences) and mismatches <= self.maxmismatches:\n if not maxlen:\n return 1\n if 1 - float(maxlen - ham) / maxlen <= self.threshold:\n return 1\n mismatches += 1\n ham -= 1\n maxlen -= 1\n\n if not maxlen:\n return 1\n return 0\n\n\nhamming = Hamming()\nlevenshtein = Levenshtein()\ndamerau = damerau_levenshtein = DamerauLevenshtein()\njaro = Jaro()\njaro_winkler = JaroWinkler()\nneedleman_wunsch = NeedlemanWunsch()\nsmith_waterman = SmithWaterman()\ngotoh = Gotoh()\nstrcmp95 = StrCmp95()\nmlipns = MLIPNS()\n" ]
[ [ "numpy.arange", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
StanfordASL/sensitivity_torch
[ "0601c30c21f6d3acfb5dea9ae4f98a42d8101cab" ]
[ "sensitivity_torch/utils.py" ]
[ "##^# ops import and utils ######################################################\nimport os, pickle, time as time_module, pdb, math\nfrom pprint import pprint\nfrom collections import OrderedDict as odict\nfrom operator import itemgetter\n\nimport torch, numpy as np\nfrom torch.utils.tensorboard import SummaryWriter\n\n\n##$#############################################################################\n##^# torch utils ###############################################################\ndef topts(A):\n return dict(device=A.device, dtype=A.dtype)\n\n\nss = lambda x, dim=(): torch.sum(x ** 2, dim=dim)\nt = lambda x: x.transpose(-1, -2)\ndiag = lambda x: x.diagonal(0, -1, -2)\nvec = lambda x: x.reshape(-1)\nidentity = lambda x: x\nis_equal = (\n lambda a, b: (type(a) == type(b))\n and (a.shape == b.shape)\n and (torch.norm(a - b) / math.sqrt(a.numel()) < 1e-7)\n)\n\n\ndef normalize(x, dim=-2, params=None, min_std=1e-3):\n if params is None:\n x_mu = torch.mean(x, dim, keepdim=True)\n x_std = torch.maximum(\n torch.std(x, dim, keepdim=True), torch.tensor(min_std, **topts(x))\n )\n else:\n x_mu, x_std = params\n return (x - x_mu) / x_std, (x_mu, x_std)\n\n\nunnormalize = lambda x, params: x * params[1] + params[0]\n\nonehot = lambda *args, **kwargs: torch.nn.functional.one_hot(\n args[0].to(int), *args[1:], **kwargs\n).to(args[0].dtype)\n\nt2n = (\n lambda x: np.copy(x.detach().cpu().clone().numpy().astype(np.float64))\n if isinstance(x, torch.Tensor)\n else x\n)\nn2t = lambda x, device=None, dtype=None: torch.as_tensor(\n x, device=device, dtype=dtype\n)\n\n##$#############################################################################\n##^# timing ####################################################################\ndef elapsed(name, t1, end=None):\n t2 = time_module.time()\n name = name if len(name) <= 20 else name[:17] + \"...\"\n msg = \"%20s took %9.4e ms\" % (name, (t2 - t1) * 1e3)\n if end is not None:\n print(msg, end=end)\n else:\n print(msg)\n\n\ntime = time_module.time\n##$#############################################################################\n##^# table printing utility class ##############################################\nclass TablePrinter:\n def __init__(self, names, fmts=None, prefix=\"\", use_writer=False):\n self.names = names\n self.fmts = fmts if fmts is not None else [\"%9.4e\" for _ in names]\n self.widths = [\n max(self.calc_width(fmt), len(name)) + 2\n for (fmt, name) in zip(fmts, names)\n ]\n self.prefix = prefix\n self.writer = None\n if use_writer:\n try:\n self.writer = SummaryWriter(flush_secs=1)\n self.iteration = 0\n except NameError:\n print(\"SummaryWriter not available, ignoring\")\n\n def calc_width(self, fmt):\n f = fmt[-1]\n width = None\n if f == \"f\" or f == \"e\" or f == \"d\" or f == \"i\":\n width = max(len(fmt % 1), len(fmt % (-1)))\n elif f == \"s\":\n width = len(fmt % \"\")\n else:\n raise ValueError(\"I can't recognized the [%s] print format\" % fmt)\n return width\n\n def pad_field(self, s, width, lj=True):\n # lj -> left justify\n assert len(s) <= width\n rem = width - len(s)\n if lj:\n return (\" \" * (rem // 2)) + s + (\" \" * ((rem // 2) + (rem % 2)))\n else:\n return (\" \" * ((rem // 2) + (rem % 2))) + s + (\" \" * (rem // 2))\n\n def make_row_sep(self):\n return \"+\" + \"\".join([(\"-\" * width) + \"+\" for width in self.widths])\n\n def make_header(self):\n s = self.prefix + self.make_row_sep() + \"\\n\"\n s += self.prefix\n for (name, width) in zip(self.names, self.widths):\n s += \"|\" + self.pad_field(\"%s\" % name, width, 
lj=True)\n s += \"|\\n\"\n return s + self.prefix + self.make_row_sep()\n\n def make_footer(self):\n return self.prefix + self.make_row_sep()\n\n def make_values(self, vals):\n assert len(vals) == len(self.fmts)\n s = self.prefix + \"\"\n for (val, fmt, width) in zip(vals, self.fmts, self.widths):\n s += \"|\" + self.pad_field(fmt % val, width, lj=False)\n s += \"|\"\n\n if self.writer is not None:\n for (name, val) in zip(self.names, vals):\n self.writer.add_scalar(name, val, self.iteration)\n self.iteration += 1\n\n return s\n\n def print_header(self):\n print(self.make_header())\n\n def print_footer(self):\n print(self.make_footer())\n\n def print_values(self, vals):\n print(self.make_values(vals))\n\n\n##$#############################################################################\n##^# solution caching decorator ################################################\ndef to_tuple_(arg):\n if isinstance(arg, np.ndarray):\n return arg.tobytes()\n elif isinstance(arg, torch.Tensor):\n return to_tuple_(arg.cpu().detach().numpy())\n else:\n return to_tuple_(np.array(arg))\n\n\ndef to_tuple(*args):\n return tuple(to_tuple_(arg) for arg in args)\n\n\ndef fn_with_sol_cache(fwd_fn, cache=None):\n def inner_decorator(fn):\n #def fn2(*args, **kwargs):\n # return fn(fwd_fn(*args), *args, **kwargs)\n #return fn2\n\n nonlocal cache\n cache = cache if cache is None else cache\n\n def fn_with_sol(*args, **kwargs):\n cache, sol_key = fn_with_sol.cache, to_tuple(*args)\n sol = fwd_fn(*args) if not sol_key in cache else cache[sol_key]\n cache.setdefault(sol_key, sol)\n return fn_with_sol.fn(sol, *args, **kwargs)\n\n fn_with_sol.cache = cache\n fn_with_sol.fn = fn\n return fn_with_sol\n\n return inner_decorator\n\n\n##$#############################################################################\n##^# GPU utils #################################################################\ndef print_gpu_mem_status(locals, globals):\n unit = 1e9 # GB\n\n def sz(x):\n return 4 if x.dtype == torch.float32 else 8\n\n def size_of(variables):\n return {k: z.numel() * sz(z) / unit for (k, z) in variables.items()}\n\n def print_variables(variables):\n for (k, z) in odict(\n sorted(size_of(variables).items(), key=itemgetter(1), reverse=True)\n ).items():\n print(\"%010s: %9.4e GB\" % (k, z))\n\n print(\"#\" * 80)\n # local variables first #########################################\n print(\"LOCAL VARIABLES:\")\n tensors = {k: z for (k, z) in locals.items() if isinstance(z, torch.Tensor)}\n print(\" requires grad:\")\n variables = {k: z for (k, z) in tensors.items() if z.requires_grad}\n print_variables(variables)\n print(\" Total: %9.4e\" % sum(size_of(variables).values()))\n print(\" does not require grad:\")\n variables = {k: z for (k, z) in tensors.items() if not z.requires_grad}\n print_variables(variables)\n print(\" Total: %9.4e\" % sum(size_of(variables).values()))\n print(\"Total: %9.4e\" % sum(size_of(tensors).values()))\n\n # global variables second #######################################\n print(\"GLOBAL VARIABLES:\")\n tensors = {\n k: z for (k, z) in globals.items() if isinstance(z, torch.Tensor)\n }\n print(\" requires grad:\")\n variables = {k: z for (k, z) in tensors.items() if z.requires_grad}\n print_variables(variables)\n print(\" Total: %9.4e\" % sum(size_of(variables).values()))\n print(\" does not require grad:\")\n variables = {k: z for (k, z) in tensors.items() if not z.requires_grad}\n print_variables(variables)\n print(\" Total: %9.4e\" % sum(size_of(variables).values()))\n print(\"Total: %9.4e\" % 
sum(size_of(tensors).values()))\n\n print(\"#\" * 80)\n\n return\n\n\n##$#############################################################################\n" ]
[ [ "torch.mean", "torch.norm", "torch.sum", "torch.std", "torch.utils.tensorboard.SummaryWriter", "numpy.array", "torch.as_tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vdutell/wavelet_stim
[ "5a0026220a9d72365983cd66b796bb1a32f3d326" ]
[ "utils/getstim.py" ]
[ "import numpy as np\n\nimport utils.imtools as imtools\nimport utils.fouriertools as ftools\nimport utils.wavelettools as wtools\nimport utils.imwritetools as imwtools\nimport pathlib\n\ndef step_stim_img(width_px, height_px, loc=0.5, stepdn=False, rescale=True, orient=1, contrast=1):\n '''\n Make a step function stimulus of a given size, orientation, and contrast\n \n Args:\n width_px (int): width in pixels of stimulus\n height_px (int): height in pixels of stimulus\n loc (float): location of split from 0 (left/top) to 1 (right/bottom)\n stepdn (bool): L-R; U-D; go from white to black (down) vs black to white (up)\n orient (int): orientation, 1=vertical, 0=horizontal\n contrast (float): float value between 0 and 1 of contrast for stimlulus\n \n Returns:\n stim (2d float): step function stimulus with values in [0,1]\n '''\n \n #vertical line step function\n if(orient==1):\n stim = np.hstack((np.zeros((height_px, width_px//2)),\n np.ones((height_px, width_px//2))))\n if(stepdn):\n stim = stim[:,::-1]\n \n #horizontal line step function\n elif(orient==0):\n stim = np.vstack((np.zeros((height_px//2, width_px)),\n np.ones((height_px//2, width_px))))\n if(stepdn):\n stim = stim[::-1,:]\n \n # Rescale to [0,255]\n if(rescale):\n stim = imtools.rescale_255(stim)\n \n #contrast\n stim = stim*contrast\n\n return(stim)\n\n\ndef step_stim(width_px, height_px, len_frames=1, stepdn=False, rescale=True, orient=1, contrast=1, stim_type='reverse_phase'):\n '''\n Make a step function stimulus of a given size, orientation, and contrast\n \n Args:\n width_px (int): width in pixels of stimulus\n height_px (int): height in pixels of stimulus\n stepdn (bool): L-R; U-D; go from white to black (down) vs black to white (up)\n orient (int): orientation, 1=vertical, 0=horizontal\n contrast (float): float value between 0 and 1 of contrast for stimlulus\n stim_type (string): type of stimulus we want. 
Options are:\n 'reverse_phase' - change phase once during the stim time\n 'reverse_phase_flicker' - reverse phase at frame rate\n \n Returns:\n stim (2d float): step function stimulus with values in [0,1]\n '''\n \n # if our step function is just a 1 frame image, return the image\n if(len_frames==1):\n stim = step_stim_img(width_px, height_px, loc=0.5, stepdn=False, rescale=True, orient=1, contrast=1)\n # otherwise, loop through the number of frames and make a moving edge.\n else:\n stim = np.zeros((len_frames, width_px, height_px))\n \n # reverse phase graing that changes once during stim (halfway through)\n if(stim_type=='reverse_phase'):\n halfpoint = len_frames//2\n stim[:halfpoint,:,:] = step_stim_img(width_px, height_px, loc=0.5, stepdn=False, rescale=True, orient=1, contrast=1)\n stim[halfpoint:,:,:] = step_stim_img(width_px, height_px, loc=0.5, stepdn=False, rescale=True, orient=1, contrast=1)\n \n #reverse_phase grating: make every other frame reverse.\n if(stim_type=='reverse_phase_flicker'):\n stim[::2,:,:] = step_stim_img(width_px, height_px, loc=0.5, stepdn=False, rescale=True, orient=1, contrast=1)\n stim[1::2,:,:] = step_stim_img(width_px, height_px, loc=0.5, stepdn=True, rescale=True, orient=1, contrast=1)\n \n \n return(stim)\n\n\n\ndef generate_spatial_filtered_stims(stim, stimdeg, cutoffs, filt='fourier_sharp', stim_type='stepfun'):\n \n '''\n Generate filtered stimlui at the given cuttoffos with a given filter\n mode (See PIL modes: https://pillow.readthedocs.io/en/3.1.x/handbook/concepts.html#modes)\n '''\n \n stim_outfolder = 'filtered_stims/spatial/'\n ft_outfolder = stim_outfolder+'fts/'\n \n stimpx_w, stimpx_h = np.shape(stim)\n \n #calc degrees and cpd\n stim_cpd = (stimpx_w/2)/stimdeg\n \n #save our raw stim\n stim_fname = f'{stim_outfolder}{stim_type}_{int(stim_cpd)}cpdc_raw.png'\n imwtools.writestim(stim, stim_fname)\n # create our raw stim's FT\n stim_ft = ftools.gen_azm_spatial_ft(stim, np.ones_like(stim), stim, stim_cpd, filt, int(stim_cpd))\n #save our raw stim's ft\n stim_ft_fname = f'{ft_outfolder}{stim_type}_raw_{int(stim_cpd)}cpd_ft.png'\n imwtools.writeplot(stim_ft, stim_ft_fname)\n print(f'Wrote {stim_ft_fname}')\n\n # loop through cuttoff frequencies and filter\n for cut in cutoffs:\n filt_stim, stim_mag, stim_phase, stim_filter, warn_flag = ftools.fft_lowpass(stim, cut, stim_cpd, filt)\n stim_filt_ft = ftools.gen_azm_spatial_ft(filt_stim, stim_filter, stim, stim_cpd, filt, cut)\n # if we had a warning during generating the image, reflect in image filename\n stim_fname = f'{stim_outfolder}{stim_type}_{filt}_{int(cut)}cpd'\n if(warn_flag):\n stim_fname = stim_fname + '_warn'\n # write to disk\n imwtools.writestim(filt_stim, f'{stim_fname}.png')\n imwtools.writeplot(stim_filt_ft, f'{stim_dir}/ft.png')\n print(f'Wrote {stim_fname}; {stim_ft_fname}')\n \n return()\n\n \n \ndef generate_temporal_filtered_stims(stim, stimfps, cutoffs, filt='fourier_sharp', stim_type='stepfun'):\n \n '''\n Generate filtered stimlui at the given cuttoffos with a given filter\n mode (See PIL modes: https://pillow.readthedocs.io/en/3.1.x/handbook/concepts.html#modes)\n '''\n \n stim_outfolder = 'filtered_stims/temporal/'\n ft_outfolder = stim_outfolder+'fts/'\n \n stimlen_frames, stimpx_w, stimpx_h = np.shape(stim)\n \n #calc degrees and cpd\n #stim_cpd = (stimpx_w/2)/stimdeg\n\n #save our raw stim\n stim_dir = f'{stim_outfolder}{stim_type}_{int(stimfps)}fps_raw/'\n pathlib.Path(stim_dir).mkdir(exist_ok=True)\n for i, frame in enumerate(stim):\n stim_fname = stim_dir + 
f'frame_{i+1}.png'\n imwtools.writestim(frame, stim_fname)\n # create our raw stim's FT\n stim_ft = ftools.gen_temporal_ft(stim, np.ones_like(stim), stim, stimfps, filt, int(stimfps))\n #save our raw stim's ft\n imwtools.writeplot(stim_ft, f'{stim_dir}ft.png')\n# print(f'Wrote {stim_ft_fname}')\n\n # loop through cuttoff frequencies and filter\n for cut in cutoffs:\n filt_stim, stim_mag, stim_phase, stim_filter, warn_flag = ftools.fft_spatiotemporal_lowpass(stim, cut, stimfps, filt)\n stim_filt_ft = ftools.gen_temporal_ft(filt_stim, stim_filter, stim, stimfps, filt, cut)\n # if we had a warning during generating the image, reflect in image filename\n stim_dir = f'{stim_outfolder}{stim_type}_{int(stimfps)}fps_{filt}_{int(cut)}fps'\n if(warn_flag):\n stim_dir = stim_dir + '_warn'\n #save our fitlered stim\n pathlib.Path(stim_dir).mkdir(exist_ok=True)\n for i, frame in enumerate(filt_stim):\n stim_fname = stim_dir + f'/frame_{i+1}.png'\n imwtools.writestim(frame, stim_fname)\n #save our fourier transform\n imwtools.writeplot(stim_filt_ft, f'{stim_dir}/ft.png')\n\n print(f'Wrote {stim_fname}')\n \n return()\n\ndef generate_spatiotemporal_filtered_stims(stim, stimcpd, spatial_cutoffs,\n stimfps, temporal_cutoffs, filt='fourier_sharp', stim_type='stepfun'):\n \n '''\n Generate filtered stimlui at the given cuttoffos with a given filter\n mode (See PIL modes: https://pillow.readthedocs.io/en/3.1.x/handbook/concepts.html#modes)\n '''\n \n stim_outfolder = 'filtered_stims/spatiotemporal/'\n stimlen_frames, stimpx_w, stimpx_h = np.shape(stim)\n\n #save our raw stim\n stim_dir = f'{stim_outfolder}{stim_type}_{int(stimcpd)}cpd_{int(stimfps)}fps_raw'\n pathlib.Path(stim_dir).mkdir(exist_ok=True)\n for i, frame in enumerate(stim):\n imwtools.writestim(frame, f'{stim_dir}/frame_{i+1}.png')\n # create our raw stim's FT\n fs = ftools.st_ft(stim)\n ftools.da_spatiotemporal_ft(fs, stimcpd, stimfps, 'raw', stimcpd, stimfps, f'{stim_dir}/ft_raw.png')\n\n # loop through cuttoff frequencies and filter\n for s_cut in spatial_cutoffs:\n for t_cut in temporal_cutoffs:\n filt_stim, stim_mag, stim_phase, stim_filter, warn_flag = ftools.fft_spatiotemporal_lowpass(stim, s_cut, t_cut, stimcpd, stimfps, filt)\n #create fourier transform of filtered stim\n #stim_filt_ft = ftools.gen_spatiotemporal_ft(filt_stim, stim_filter, stim, stimcpd, stimfps, filt, s_cut, t_cut)\n # if we had a warning during generating the image, reflect in image filename,\n stim_dir = f'{stim_outfolder}{stim_type}_{int(stimcpd)}cpd_{int(stimfps)}fps_{filt}_{int(s_cut)}cpd_{int(t_cut)}fps'\n if(warn_flag):\n stim_dir = stim_dir + '_warn'\n #save our fitlered stim\n pathlib.Path(stim_dir).mkdir(exist_ok=True)\n for i, frame in enumerate(filt_stim):\n stim_fname = stim_dir + f'/frame_{i+1}.png'\n imwtools.writestim(frame, stim_fname)\n #save our fourier transform\n fs = ftools.st_ft(filt_stim)\n ftools.da_spatiotemporal_ft(fs, stimcpd, stimfps, 'filtered_stim', s_cut, t_cut, f'{stim_dir}/ft_filtered_stim.png')\n ftools.da_spatiotemporal_ft(ftools.azm_avg_frames(stim_filter), stimcpd, stimfps, 'filter', s_cut, t_cut, f'{stim_dir}/ft_filt.png')\n\n print(f'Wrote {stim_fname}')\n \n return()" ]
[ [ "numpy.ones_like", "numpy.shape", "numpy.zeros", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kourtneyshort/healthcare
[ "7d3d4dc9deb3d31eab99035780ccb9a44f00b687" ]
[ "datathon/datathon_etl_pipelines/mimic_cxr/prepare_mimic_cxr.py" ]
[ "r\"\"\"Load the MIMIC CXR dataset onto GCP.\n\nThe raw MIMIC CXR dataset is originally hosted by Physionet as a collection of\ntar files which contain JPG images, and a gzip'd CSV file of labels.\n\nThe JPG images have paths of the form:\n(train|valid)/p([0-9]+)/s([0-9]+)/view([0-9]+)_(frontal|lateral|other)\\.jpg\nwhich lists the dataset, patient id, study id, image number and view for each\nimage.\n\nThe gzip'd CSV file contains columns:\n - path\n - view\n - No Finding\n - Enlarged Cardiomediastinum\n - Cardiomegaly\n - Airspace Opacity\n - Lung Lesion\n - Edema\n - Consolidation\n - Pneumonia\n - Atelectasis\n - Pneumothorax\n - Pleural Effusion\n - Pleural Other\n - Fracture\n - Support Devices\n\nSee https://physionet.org/works/MIMICCXR/files/ for more details and to download\nthis data.\n\nThis apache beam pipeline takes this set of files as input, and outputs a\nBigQuery table, two TFRecords (one for frontal and one for lateral images),\nand the untar'd jpg images. This delivers an ergonomic presentation of the\ndataset that enables high productivity for data scientists working with this\ndata at datathons.\n\nAll the provided paths must be absolute and may point to either local files\nor GCS blobs.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport csv\nimport inspect\nimport os\n\nimport apache_beam as beam\nfrom apache_beam.io.gcp.bigquery import BigQueryDisposition\nfrom apache_beam.io.gcp.bigquery import WriteToBigQuery\nfrom apache_beam.io.gcp.bigquery_tools import parse_table_schema_from_json\nfrom apache_beam.options.pipeline_options import PipelineOptions\nfrom apache_beam.options.pipeline_options import SetupOptions\nfrom datathon_etl_pipelines.dofns.read_tar_file import ReadTarFile\nfrom datathon_etl_pipelines.dofns.resize_image import ResizeImage\nfrom datathon_etl_pipelines.mimic_cxr.enum_encodings import ID_NAMES\nfrom datathon_etl_pipelines.mimic_cxr.enum_encodings import ids_to_path\nfrom datathon_etl_pipelines.mimic_cxr.enum_encodings import LABEL_NAMES\nfrom datathon_etl_pipelines.mimic_cxr.enum_encodings import LABEL_VALUES\nfrom datathon_etl_pipelines.mimic_cxr.enum_encodings import path_to_ids\nfrom datathon_etl_pipelines.mimic_cxr.enum_encodings import VIEW_VALUES\nfrom datathon_etl_pipelines.mimic_cxr.mimic_cxr_tfrecord_schema import build_features\nfrom datathon_etl_pipelines.utils import get_setup_file\nimport tensorflow as tf\n\n\nclass ChexpertConverter(object):\n \"\"\"Converts CheXpert codes into integers mapped by LABEL_VALUES.\"\"\"\n chexpert_code_to_index = {\n '': LABEL_VALUES['not_mentioned'],\n '1.0': LABEL_VALUES['positive'],\n '-1.0': LABEL_VALUES['uncertain'],\n '0.0': LABEL_VALUES['negative']\n }\n\n @classmethod\n def convert(cls, s):\n int_code = cls.chexpert_code_to_index.get(s, None)\n if int_code is None:\n raise ValueError('unrecognized chexpert encoding: {}'.format(s))\n else:\n return int_code\n\n\ndef parse_csv_row(line):\n row = next(csv.reader([line]))\n ids = path_to_ids(row[0])\n # row[1] is the view, which is already captured within the ids\n return ids, [ChexpertConverter.convert(s) for s in row[2:]]\n\n\ndef to_bigquery_json(ids_row):\n ids, row = ids_row\n bq = {col: val for col, val in zip(LABEL_NAMES, row)}\n bq.update(**dict(zip(ID_NAMES, ids)))\n bq['path'] = ids_to_path(ids)\n return bq\n\n\ndef write_jpg(element, path):\n ids, jpg_bytes = element\n file_path = path + ids_to_path(ids)\n 
tf.io.gfile.makedirs(file_path.rsplit('/', 1)[0])\n with tf.io.gfile.GFile(file_path, 'wb') as gf:\n gf.write(jpg_bytes)\n\n\ndef to_tf_example(element):\n \"\"\"Construct a tf.train.Example from an image joined with labels.\n\n Args:\n element (Tuple[Tuple, Dict[Union['jpgs', 'rows'], List[List[int]]]]): The\n set of ids for the image along with the joined image and labels.\n\n Returns:\n tf.train.Example: The labelled image.\n \"\"\"\n (ids, join) = element\n if len(join['jpgs']) != 1 or len(join['rows']) != 1:\n raise ValueError('{} JPG files matched to {} rows for {}'.format(\n len(join['jpgs']), len(join['rows']), ids_to_path(ids)))\n\n features = build_features(\n jpg_bytes=join['jpgs'][0], ids=ids, labels=join['rows'][0])\n\n example = tf.train.Example(features=tf.train.Features(feature=features))\n return example\n\n\ndef import_json_bq_schema():\n path = os.path.join(\n os.path.dirname(inspect.getfile(inspect.currentframe())),\n 'mimic_cxr_bigquery_labels_schema.json')\n\n with open(path) as fp:\n return parse_table_schema_from_json(fp.read())\n\n\ndef build_and_run_pipeline(pipeline_options,\n input_tars,\n input_csv,\n output_jpg_dir,\n output_bq_table,\n output_tfrecord_dir,\n output_image_shape=None):\n \"\"\"Construct and run the Apache Beam Pipeline.\n\n Args:\n pipeline_options (PipelineOptions): Passed to Apache Beam.\n input_tars (List[str]): A set of patterns specifying the paths to the input\n tar files.\n input_csv (str): The path to the (optionally compressed) CSV that contains\n the image labels.\n output_jpg_dir (str): The directory to output the JPG files to.\n output_bq_table (str): A string of the form `project:dataset.table_name`.\n This table will be overwritten if it already exists.\n output_tfrecord_dir (str): The directory to output the sharded TFRecords to.\n output_image_shape (Optional[Tuple]): The dimensions to resize the image to.\n Either HW or HWC. 
If this is None, then the images will not be resized.\n \"\"\"\n input_paths = []\n for pattern in input_tars:\n input_paths.extend(tf.io.gfile.glob(pattern))\n\n if not input_paths:\n raise ValueError('No matching tar files were found.')\n\n with beam.Pipeline(options=pipeline_options) as p:\n\n rows = (\n p\n | beam.io.ReadFromText(input_csv, skip_header_lines=1)\n | beam.Map(parse_csv_row))\n\n if output_bq_table is not None:\n _ = rows | beam.Map(to_bigquery_json) | WriteToBigQuery(\n table=output_bq_table,\n schema=import_json_bq_schema(),\n write_disposition=BigQueryDisposition.WRITE_TRUNCATE)\n\n if output_jpg_dir is not None or output_tfrecord_dir is not None:\n jpgs = p | beam.Create(input_paths) | beam.ParDo(ReadTarFile(),\n path_to_ids)\n\n if output_image_shape is not None:\n jpgs |= beam.ParDo(ResizeImage('jpg', *output_image_shape))\n\n if output_jpg_dir is not None:\n if not output_jpg_dir.endswith('/'):\n output_jpg_dir += '/'\n _ = jpgs | beam.Map(write_jpg, output_jpg_dir)\n\n joined = {'jpgs': jpgs, 'rows': rows} | beam.CoGroupByKey()\n\n frontal, lateral, _ = joined | 'Partition on view' >> beam.Partition(\n lambda kv, n_split: kv[0][ID_NAMES['view']], len(VIEW_VALUES))\n\n if output_tfrecord_dir is not None:\n if not output_tfrecord_dir.endswith('/'):\n output_tfrecord_dir += '/'\n for pcol, name in [(frontal, 'frontal'), (lateral, 'lateral')]:\n _ = (\n pcol\n | (name + '_to_tf_example') >> beam.Map(to_tf_example)\n | (name + '_write_tf_record') >> beam.io.WriteToTFRecord(\n output_tfrecord_dir + name,\n file_name_suffix='.tfrecord',\n coder=beam.coders.ProtoCoder(tf.train.Example)))\n\n\ndef main():\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument(\n '--input_tars',\n nargs='+',\n required=True,\n help='A set of patterns specifying the paths to the input tar files.')\n parser.add_argument(\n '--input_csv',\n required=True,\n help='The path to the (optionally compressed) CSV that contains the image'\n ' labels.')\n parser.add_argument(\n '--output_jpg_dir',\n required=False,\n help='The directory to output the JPG files to.')\n parser.add_argument(\n '--output_bq_table',\n required=False,\n help='A string of the form `project:dataset.table_name`. This table will '\n 'be overwritten if it already exists.')\n parser.add_argument(\n '--output_tfrecord_dir',\n required=False,\n help='The directory to output the sharded TFRecords to.')\n parser.add_argument(\n '--output_image_shape',\n nargs='+',\n type=int,\n required=False,\n help='The dimensions to resize the image to. Either HW or HWC. If this is'\n ' None, then the images will not be resized.')\n\n args, pipeline_args = parser.parse_known_args()\n beam_options = PipelineOptions(pipeline_args)\n beam_options.view_as(SetupOptions).save_main_session = True\n beam_options.view_as(SetupOptions).setup_file = get_setup_file()\n\n if args.output_image_shape is not None:\n if len(args.output_image_shape) not in (2, 3):\n parser.error('2 (HW) or 3 (HWC) integers are required for '\n 'output_image_shape')\n\n build_and_run_pipeline(\n pipeline_options=beam_options,\n input_tars=args.input_tars,\n input_csv=args.input_csv,\n output_jpg_dir=args.output_jpg_dir,\n output_bq_table=args.output_bq_table,\n output_tfrecord_dir=args.output_tfrecord_dir,\n output_image_shape=args.output_image_shape)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "tensorflow.io.gfile.GFile", "tensorflow.io.gfile.glob", "tensorflow.train.Features" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
OscarPalominoC/linearRegression
[ "8b601b173bf4785f076aa9f2e4ab9acde738ad24" ]
[ "linearRegression.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\n# Función para hallar b0 y b1\ndef estimate_b0_b1(x, y):\n n = np.size(x)\n \n # Obtenemos los promedios de X y de Y\n m_x, m_y = np.mean(x), np.mean(y)\n \n # Calculando la sumatoria de XY y sumatoria de X*Xprom\n sumatoria_XY = np.sum((x-m_x)*(y-m_y))\n sumatoria_XXprom = np.sum(x*(x-m_x))\n \n # Coeficientes de regresión\n b_1 = sumatoria_XY/sumatoria_XXprom\n b_0 = m_y - b_1*m_x\n \n return (b_0, b_1)\n\n# Función de graficado\ndef plot_regression(x, y, b):\n plt.scatter(x, y, color = 'g', marker = 'o', s=30)\n \n # Vector de prediciones\n y_pred = b[1]*x + b[0]\n plt.plot(x, y_pred, color='b')\n \n # Etiquetado\n plt.xlabel('X-Independiente')\n plt.ylabel('Y-Dependiente')\n \n plt.show()\n" ]
[ [ "matplotlib.pyplot.scatter", "matplotlib.pyplot.plot", "numpy.size", "numpy.mean", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "numpy.sum", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
TimRepke/openTSNE
[ "22c306c3ec087e4b4be364431bc3626190a85c86" ]
[ "openTSNE/callbacks.py" ]
[ "import logging\nimport time\nimport warnings\nfrom functools import partial\n\nimport numpy as np\nfrom scipy.sparse import csr_matrix\n\nfrom openTSNE import kl_divergence\nfrom openTSNE.tsne import TSNEEmbedding\n\nlog = logging.getLogger(__name__)\n\n\nclass Callback:\n def optimization_about_to_start(self):\n \"\"\"This is called at the beginning of the optimization procedure.\"\"\"\n\n def __call__(self, iteration, error, embedding):\n \"\"\"This is the main method called from the optimization.\n\n Parameters\n ----------\n iteration: int\n The current iteration number.\n\n error: float\n The current KL divergence of the given embedding.\n\n embedding: TSNEEmbedding\n The current t-SNE embedding.\n\n Returns\n -------\n stop_optimization: bool\n If this value is set to ``True``, the optimization will be\n interrupted.\n\n \"\"\"\n\n\nclass ErrorLogger(Callback):\n \"\"\"Basic error logger.\n\n This logger prints out basic information about the optimization. These\n include the iteration number, error and how much time has elapsed from the\n previous callback invocation.\n\n \"\"\"\n\n def __init__(self):\n warnings.warn(\n \"`ErrorLogger` will be removed in upcoming version. Please use the \"\n \"`verbose` flag instead.\",\n category=FutureWarning,\n )\n self.iter_count = 0\n self.last_log_time = None\n\n def optimization_about_to_start(self):\n self.last_log_time = time.time()\n self.iter_count = 0\n\n def __call__(self, iteration, error, embedding):\n now = time.time()\n duration = now - self.last_log_time\n self.last_log_time = now\n\n n_iters = iteration - self.iter_count\n self.iter_count = iteration\n\n print(\n \"Iteration % 4d, KL divergence % 6.4f, %d iterations in %.4f sec\"\n % (iteration, error, n_iters, duration)\n )\n\n\nclass VerifyExaggerationError(Callback):\n \"\"\"Used to verify that the exaggeration correction implemented in\n `gradient_descent` is correct.\"\"\"\n\n def __init__(self, embedding: TSNEEmbedding) -> None:\n self.embedding = embedding\n # Keep a copy of the unexaggerated affinity matrix\n self.P = self.embedding.affinities.P.copy()\n\n def __call__(\n self, iteration: int, corrected_error: float, embedding: TSNEEmbedding\n ):\n params = self.embedding.gradient_descent_params\n method = params[\"negative_gradient_method\"]\n\n if np.sum(embedding.affinities.P) <= 1:\n log.warning(\"Are you sure you are testing an exaggerated P matrix?\")\n\n if method == \"fft\":\n f = partial(\n kl_divergence.kl_divergence_approx_fft,\n n_interpolation_points=params[\"n_interpolation_points\"],\n min_num_intervals=params[\"min_num_intervals\"],\n ints_in_interval=params[\"ints_in_interval\"],\n dof=params[\"dof\"],\n )\n elif method == \"bh\":\n f = partial(\n kl_divergence.kl_divergence_approx_bh,\n theta=params[\"theta\"],\n dof=params[\"dof\"],\n )\n\n P = self.P\n\n true_error = f(P.indices, P.indptr, P.data, embedding)\n if abs(true_error - corrected_error) > 1e-8:\n raise RuntimeError(\"Correction term is wrong.\")\n else:\n log.info(\n \"Corrected: %.4f - True %.4f [eps %.4f]\"\n % (corrected_error, true_error, abs(true_error - corrected_error))\n )\n\n\nclass ErrorApproximations(Callback):\n \"\"\"Check how good the error approximations are. 
Of course, we use an\n approximation for P so this itself is an approximation.\"\"\"\n\n def __init__(self, P: csr_matrix):\n self.P = P.copy()\n self.exact_errors = []\n self.bh_errors = []\n self.fft_errors = []\n\n def __call__(self, iteration: int, error: float, embedding: TSNEEmbedding):\n exact_error = kl_divergence.kl_divergence_exact(self.P.toarray(), embedding)\n bh_error = kl_divergence.kl_divergence_approx_bh(\n self.P.indices, self.P.indptr, self.P.data, embedding\n )\n fft_error = kl_divergence.kl_divergence_approx_fft(\n self.P.indices, self.P.indptr, self.P.data, embedding\n )\n\n self.exact_errors.append(exact_error)\n self.bh_errors.append(bh_error)\n self.fft_errors.append(fft_error)\n\n def report(self):\n exact_errors = np.array(self.exact_errors)\n bh_errors = np.array(self.bh_errors)\n fft_errors = np.array(self.fft_errors)\n\n bh_diff = bh_errors - exact_errors\n print(\n \"Barnes-Hut: mean difference %.4f (±%.4f)\"\n % (np.mean(bh_diff), np.std(bh_diff))\n )\n\n fft_diff = fft_errors - exact_errors\n print(\n \"Interpolation: mean difference %.4f (±%.4f)\"\n % (np.mean(fft_diff), np.std(fft_diff))\n )\n" ]
[ [ "numpy.std", "numpy.array", "numpy.mean", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
georgenemo/Paddledy
[ "cc2d4e869d2bc045bf30cd3494df7e9dd689f0c6" ]
[ "python/paddle/nn/layer/conv.py" ]
[ "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# TODO: define classes of convolutional neural network\n\n__all__ = [\n 'Conv1D',\n 'Conv2D',\n 'Conv3D',\n 'Conv1DTranspose',\n 'Conv2DTranspose',\n 'Conv3DTranspose',\n]\n\nimport numpy as np\n\nfrom ...fluid import core\nfrom ...device import get_cudnn_version\nfrom ...fluid.dygraph import layers\nfrom ...fluid.initializer import Normal\nfrom .. import functional as F\nfrom ...fluid.layers import utils\nfrom ..functional.conv import _update_padding_nd\n\n\ndef _get_default_param_initializer(num_channels, filter_size):\n filter_elem_num = num_channels * np.prod(filter_size)\n std = (2.0 / filter_elem_num)**0.5\n return Normal(0.0, std, 0)\n\n\ndef _reverse_repeat_list(t, n):\n \"\"\"Reverse the order of `t` and repeat each element for `n` times.\n This can be used to translate padding arg used by Conv and Pooling modules\n to the ones used by `F.pad`.\n \"\"\"\n return list(x for x in reversed(t) for _ in range(n))\n\n\nclass _ConvNd(layers.Layer):\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n transposed,\n dims,\n stride=1,\n padding=0,\n padding_mode='zeros',\n output_padding=0,\n dilation=1,\n groups=1,\n weight_attr=None,\n bias_attr=None,\n data_format=\"NCHW\"):\n super(_ConvNd, self).__init__()\n assert weight_attr is not False, \"weight_attr should not be False in Conv.\"\n self._param_attr = weight_attr\n self._bias_attr = bias_attr\n self._groups = groups\n self._in_channels = in_channels\n self._out_channels = out_channels\n self._data_format = data_format\n\n valid_padding_modes = {'zeros', 'reflect', 'replicate', 'circular'}\n if padding_mode not in valid_padding_modes:\n raise ValueError(\n \"padding_mode must be one of {}, but got padding_mode='{}'\".\n format(valid_padding_modes, padding_mode))\n\n if padding_mode in {'reflect', 'replicate', 'circular'\n } and not isinstance(padding, np.int):\n raise TypeError(\n \"when padding_mode in ['reflect', 'replicate', 'circular'], type of padding must be int\"\n )\n\n channel_last = (data_format == \"NHWC\") or (data_format == \"NDHWC\") or (\n data_format == \"NLC\")\n if channel_last:\n self._channel_dim = len(data_format) - 1\n else:\n self._channel_dim = 1\n\n self._stride = utils.convert_to_list(stride, dims, 'stride')\n self._dilation = utils.convert_to_list(dilation, dims, 'dilation')\n self._kernel_size = utils.convert_to_list(kernel_size, dims,\n 'kernel_size')\n self._padding = padding\n self._padding_mode = padding_mode\n self.output_padding = output_padding\n if dims != 1:\n self._padding, self._padding_algorithm = _update_padding_nd(\n padding, channel_last, dims)\n\n if transposed:\n filter_shape = [self._in_channels, out_channels // groups\n ] + self._kernel_size\n self._padding, self._padding_algorithm = _update_padding_nd(\n padding, channel_last, dims)\n else:\n if in_channels % groups != 0:\n raise ValueError(\"in_channels must be divisible by groups.\")\n\n if 
padding_mode in {'reflect', 'replicate', 'circular'}:\n _paired_padding = utils.convert_to_list(padding, dims,\n 'padding')\n self._reversed_padding_repeated_twice = _reverse_repeat_list(\n _paired_padding, 2)\n\n self._padding, _ = _update_padding_nd(0, channel_last, dims)\n\n filter_shape = [out_channels, in_channels // groups\n ] + self._kernel_size\n\n def _get_default_param_initializer():\n if transposed:\n return None\n filter_elem_num = np.prod(self._kernel_size) * self._in_channels\n std = (2.0 / filter_elem_num)**0.5\n return Normal(0.0, std, 0)\n\n self.weight = self.create_parameter(\n shape=filter_shape,\n attr=self._param_attr,\n default_initializer=_get_default_param_initializer())\n self.bias = self.create_parameter(\n attr=self._bias_attr, shape=[self._out_channels], is_bias=True)\n\n cudnn_version = get_cudnn_version()\n\n self._use_cudnn = True if (core.is_compiled_with_cuda() and\n cudnn_version is not None) else False\n\n self._op_type = \"conv\" + str(dims) + 'd'\n if dims == 2 and (in_channels == groups and in_channels != 1 and\n out_channels % in_channels == 0):\n self.op_type = 'depthwise_conv2d'\n self._use_cudnn = False\n\n\nclass Conv1D(_ConvNd):\n r\"\"\"\n This interface is used to construct a callable object of the ``Conv1D`` class.\n For more details, refer to code examples.\n The convolution1D layer calculates the output based on the input, filter\n and stride, padding, dilation, groups parameters. Input and\n Output are in NCL format or NLC format, where N is batch size, C is the number of\n the feature map, L is the length of the feature map.\n Filter's shape is [MCK] , where M is the number of output feature map,\n C is the number of input feature map, K is the size of the kernel. \n If the groups is greater than 1, C will equal the number of input feature map divided by the groups.\n If bias attribution and activation type are provided, bias is added to the\n output of the convolution, and the corresponding activation function is\n applied to the final result.\n\n For each input :math:`X` , the equation is:\n\n .. math::\n\n Out = \\sigma (W \\\\ast X + b)\n\n Where:\n\n * :math:`X`: Input value, a ``Tensor`` with 'NCL' format or 'NLC' format.\n * :math:`W`: Filter value, a ``Tensor`` with shape [MCK] .\n * :math:`\\\\ast`: Convolution operation.\n * :math:`b`: Bias value, a 2-D ``Tensor`` with shape [M, 1].\n * :math:`\\\\sigma`: Activation function.\n * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.\n\n Example:\n\n - Input:\n\n Input shape: :math:`(N, C_{in}, L_{in})`\n\n Kernel shape: :math:`(C_{out}, C_{in}, K)`\n\n - Output:\n\n Output shape: :math:`(N, C_{out}, L_{out})`\n\n Where\n\n .. math::\n\n L_{out}&= \\\\frac{(L_{in} + 2 * padding - (dilation * (L_f - 1) + 1))}{stride} + 1\n\n Parameters:\n in_channels(int): The number of channels in the input image.\n out_channels(int): The number of filter. It is as same as the output\n feature map.\n kernel_size (int|tuple|list): The filter size. If kernel_size is a tuple,\n it must contain one integer, (kernel_size).\n stride (int|tuple|list, optional): The stride size. If stride is a tuple, it must\n contain one integer, (stride_size). Default: 1.\n padding(int|str|tuple|list, optional): The size of zeros to be padded. It must be in one of the following forms.\n 1. a string in ['valid', 'same'].\n 2. an int, which means the feature map is zero paded by size of `padding` on both sides.\n 3. 
a list[int] or tuple[int] whose length is 1, which means the feature map is zero paded by size of `padding[0]` on both sides.\n The default value is 0.\n dilation (int|tuple|list, optional): The dilation size. If dilation is a tuple, it must\n contain one integer, (dilation_size). Default: 1.\n groups (int, optional): The groups number of the conv2d Layer. According to grouped\n convolution in Alex Krizhevsky's Deep CNN paper: when group=2,\n the first half of the filters is only connected to the first half\n of the input channels, while the second half of the filters is only\n connected to the second half of the input channels. Default: 1.\n padding_mode(str, optional): Four modes: 'zeros', 'reflect', 'replicate', 'circular'.\n When in 'zeros' mode, this op uses zeros to pad the input tensor.\n When in 'reflect' mode, uses reflection of the input boundaries to pad the input tensor.\n When in 'replicate' mode, uses input boundaries to pad the input tensor.\n When in 'circular' mode, uses circular input to pad the input tensor.\n Default is 'zeros'.\n weight_attr (ParamAttr, optional): The parameter attribute for learnable weights(Parameter)\n of conv1d. If it is set to None or one attribute of ParamAttr, conv1d\n will create ParamAttr as param_attr. If the Initializer of the param_attr\n is not set, the parameter is initialized with :math:`Normal(0.0, std)`,\n and the :math:`std` is :math:`(\\\\frac{2.0 }{filter\\_elem\\_num})^{0.5}`. Default: None.\n bias_attr (ParamAttr or bool, optional): The attribute for the bias of conv1d.\n If it is set to False, no bias will be added to the output units.\n If it is set to None or one attribute of ParamAttr, conv1d\n will create ParamAttr as bias_attr. If the Initializer of the bias_attr\n is not set, the bias is initialized zero. Default: None.\n\n Attribute:\n **weight** (Parameter): the learnable weights of filter of this layer.\n **bias** (Parameter or None): the learnable bias of this layer.\n\n Shape:\n - x: 3-D tensor with shape: (batch, in_channels, length) or (batch, length, in_channels).\n - output: 3-D tensor with same shape as input x.\n \n Raises:\n None\n\n Examples:\n .. code-block:: python\n\n import paddle\n from paddle.nn import Conv1D\n import numpy as np\n x = np.array([[[4, 8, 1, 9],\n [7, 2, 0, 9],\n [6, 9, 2, 6]]]).astype(np.float32)\n w=np.array(\n [[[9, 3, 4],\n [0, 0, 7],\n [2, 5, 6]],\n [[0, 3, 4],\n [2, 9, 7],\n [5, 6, 8]]]).astype(np.float32)\n x_t = paddle.to_tensor(x)\n conv = Conv1D(3, 2, 3)\n conv.weight.set_value(w)\n y_t = conv(x_t)\n print(y_t)\n # [[[133. 238.]\n # [160. 
211.]]]\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n padding=0,\n dilation=1,\n groups=1,\n padding_mode='zeros',\n weight_attr=None,\n bias_attr=None,\n data_format=\"NCL\"):\n super(Conv1D, self).__init__(\n in_channels,\n out_channels,\n kernel_size,\n False,\n 1,\n stride=stride,\n padding=padding,\n padding_mode=padding_mode,\n dilation=dilation,\n groups=groups,\n weight_attr=weight_attr,\n bias_attr=bias_attr,\n data_format=data_format)\n\n def forward(self, x):\n padding = 0\n if self._padding_mode != \"zeros\":\n x = F.pad(x,\n self._reversed_padding_repeated_twice,\n mode=self._padding_mode,\n data_format=self._data_format)\n else:\n padding = self._padding\n\n out = F.conv1d(\n x,\n self.weight,\n bias=self.bias,\n padding=padding,\n stride=self._stride,\n dilation=self._dilation,\n groups=self._groups,\n data_format=self._data_format)\n return out\n\n\nclass Conv1DTranspose(_ConvNd):\n r\"\"\"\n This interface is used to construct a callable object of the ``Conv1DTranspose`` class.\n For more details, refer to code examples.\n The 1-D convolution transpose layer calculates the output based on the input,\n filter, and dilation, stride, padding. Input(Input) and output(Output)\n are in 'NCL' format or 'NLC' where N is batch size, C is the number of channels,\n L is the length of the feature. The details of convolution transpose\n layer, please refer to the following explanation and references\n `therein <https://arxiv.org/pdf/1603.07285.pdf>`_.\n If bias attribution and activation type are provided, bias is added to\n the output of the convolution, and the corresponding activation function\n is applied to the final result.\n\n For each input :math:`X`, the equation is:\n\n .. math::\n\n Out = \\sigma (W \\\\ast X + b)\n\n Where:\n\n * :math:`X`: Input value, a 3-D Tensor with 'NCL' format or 'NLC' format.\n * :math:`W`: Kernel value, a 3-D Tensor with 'MCK' format.\n * :math:`\\\\ast`: Convolution operation.\n * :math:`b`: Bias value, a 2-D Tensor with shape [M, 1].\n * :math:`\\\\sigma`: Activation function.\n * :math:`Out`: Output value, a 3-D Tensor with data format 'NCL' of 'NLC', the shape of :math:`Out` and :math:`X` may be different.\n\n Example:\n\n - Input:\n\n Input shape: :math:`(N, C_{in}, L_{in})`\n\n Filter shape: :math:`(C_{in}, C_{out}, L_f)`\n\n - Output:\n\n Output shape: :math:`(N, C_{out}, L_{out})`\n\n Where\n\n .. math::\n\n L^\\prime_{out} &= (L_{in} - 1) * stride - pad_top - pad_bottom + dilation * (L_f - 1) + 1 \\\\\\\\\n L_{out} &\\in [ L^\\prime_{out}, L^\\prime_{out} + stride ]\n\n Note:\n The conv1d_transpose can be seen as the backward of the conv1d. For conv1d,\n when stride > 1, conv1d maps multiple input shape to the same output shape,\n so for conv1d_transpose, when stride > 1, input shape maps multiple output shape.\n If output_size is None, :math:`L_{out} = L^\\prime_{out}`;\n else, the :math:`L_{out}` of the output size must between :math:`L^\\prime_{out}`\n and :math:`L^\\prime_{out} + stride`. conv1d_transpose can compute the kernel size automatically.\n\n Args:\n in_channels(int): The number of channels in the input image.\n out_channels(int): The number of the filter. It is as same as the output\n feature map.\n kernel_size(int|tuple|list, optional): The filter size. If kernel_size is a tuple,\n it must contain one integers, (kernel_size). None if\n use output size to calculate kernel_size. Default: None. 
kernel_size and\n output_size should not be None at the same time.\n stride(int|tuple|list, optional): The stride size. It means the stride in transposed convolution.\n If stride is a tuple, it must contain one integer, (stride_size).\n Default: stride = 1.\n padding(int|list|str|tuple, optional): The padding size. The padding argument effectively adds\n `dilation * (kernel - 1)` amount of zero-padding on both sides of input. If `padding` is a\n string, either 'VALID' or 'SAME' supported, which is the padding algorithm.\n If `padding` is a tuple or list, it could be in two forms:\n `[pad]` or `[pad_left, pad_right]`. Default: padding = 0.\n output_padding(int|list|tuple, optional): The count of zeros to be added to tail of each dimension.\n If it is a tuple, it must contain one integer. Default: 0.\n groups(int, optional): The groups number of the Conv2D transpose layer. Inspired by\n grouped convolution in Alex Krizhevsky's Deep CNN paper, in which\n when group=2, the first half of the filters is only connected to the\n first half of the input channels, while the second half of the\n filters is only connected to the second half of the input channels.\n Default: groups = 1.\n bias(bool, optional): Whether to use bias. Default: True.\n dilation(int|tuple|list, optional): The dilation size. It means the spacing between the kernel points.\n If dilation is a tuple, it must contain one integer, (dilation_size).\n Default: dilation = 1.\n weight_attr (ParamAttr, optional): The parameter attribute for learnable parameters/weights\n of conv1d_transpose. If it is set to None or one attribute of ParamAttr, conv1d_transpose\n will create ParamAttr as param_attr. If the Initializer of the param_attr\n is not set, the parameter is initialized with Xavier. Default: None.\n bias_attr (ParamAttr|bool, optional): The parameter attribute for the bias of conv1d_transpose.\n If it is set to False, no bias will be added to the output units.\n If it is set to None or one attribute of ParamAttr, conv1d_transpose\n will create ParamAttr as bias_attr. If the Initializer of the bias_attr\n is not set, the bias is initialized zero. Default: None.\n\n Attribute:\n **weight** (Parameter): the learnable weights of filters of this layer.\n **bias** (Parameter or None): the learnable bias of this layer.\n\n Shape:\n\n - x(Tensor): 3-D tensor with shape (batch, in_channels, length) when data_format is \"NCL\" or shape (batch, length, in_channels) when data_format is \"NLC\".\n - output_size(int|tuple|list, optional): The output image size. If output size is a tuple, it must contain one integer, (feature_length). None if use kernel_size, padding, output_padding and stride to calculate output_size. If output_size and kernel_size are specified at the same time, They should follow the formula above. Default: None. output_size and kernel_size should not be None at the same time.\n - output(Tensor): 3-D tensor with same shape as input x.\n\n Examples:\n .. code-block:: python\n\n import paddle\n from paddle.nn import Conv1DTranspose\n import numpy as np\n \n # shape: (1, 2, 4)\n x=np.array([[[4, 0, 9, 7],\n [8, 0, 9, 2]]]).astype(np.float32)\n # shape: (2, 1, 2)\n y=np.array([[[7, 0]],\n [[4, 2]]]).astype(np.float32)\n x_t = paddle.to_tensor(x)\n conv = Conv1DTranspose(2, 1, 2)\n conv.weight.set_value(y)\n y_t = conv(x_t)\n print(y_t)\n \n # [[[60. 16. 99. 75. 
4.]]]\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n padding=0,\n output_padding=0,\n groups=1,\n dilation=1,\n weight_attr=None,\n bias_attr=None,\n data_format=\"NCL\"):\n super(Conv1DTranspose, self).__init__(\n in_channels,\n out_channels,\n kernel_size,\n True,\n 1,\n stride=stride,\n padding=padding,\n dilation=dilation,\n output_padding=output_padding,\n groups=groups,\n weight_attr=weight_attr,\n bias_attr=bias_attr,\n data_format=data_format)\n\n def forward(self, x, output_size=None):\n out = F.conv1d_transpose(\n x,\n self.weight,\n bias=self.bias,\n output_size=output_size,\n output_padding=self.output_padding,\n padding=self._padding,\n stride=self._stride,\n dilation=self._dilation,\n groups=self._groups,\n data_format=self._data_format)\n return out\n\n\nclass Conv2D(_ConvNd):\n r\"\"\"\n This interface is used to construct a callable object of the ``Conv2D`` class.\n For more details, refer to code examples.\n The convolution2D layer calculates the output based on the input, filter\n and strides, paddings, dilations, groups parameters. Input and\n Output are in NCHW format, where N is batch size, C is the number of\n the feature map, H is the height of the feature map, and W is the width of the feature map.\n Filter's shape is [MCHW] , where M is the number of output feature map,\n C is the number of input feature map, H is the height of the filter,\n and W is the width of the filter. If the groups is greater than 1,\n C will equal the number of input feature map divided by the groups.\n Please refer to UFLDL's `convolution\n <http://ufldl.stanford.edu/tutorial/supervised/FeatureExtractionUsingConvolution/>`_\n for more details.\n If bias attribution and activation type are provided, bias is added to the\n output of the convolution, and the corresponding activation function is\n applied to the final result.\n For each input :math:`X`, the equation is:\n\n .. math::\n\n Out = \\sigma (W \\\\ast X + b)\n\n Where:\n\n * :math:`X`: Input value, a ``Tensor`` with NCHW format.\n * :math:`W`: Filter value, a ``Tensor`` with shape [MCHW] .\n * :math:`\\\\ast`: Convolution operation.\n * :math:`b`: Bias value, a 2-D ``Tensor`` with shape [M, 1].\n * :math:`\\\\sigma`: Activation function.\n * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.\n \n Parameters:\n in_channels(int): The number of input channels in the input image.\n out_channels(int): The number of output channels produced by the convolution.\n kernel_size(int|list|tuple, optional): The size of the convolving kernel.\n stride(int|list|tuple, optional): The stride size. If stride is a tuple, it must\n contain three integers, (stride_H, stride_W). Otherwise, the\n stride_H = stride_W = stride. The default value is 1.\n padding(int|str|tuple|list, optional): The padding size. Padding coule be in one of the following forms.\n 1. a string in ['valid', 'same'].\n 2. an int, which means each spartial dimension(depth, height, width) is zero paded by size of `padding` \n 3. a list[int] or tuple[int] whose length is the number of spartial dimensions, which contains the amount of padding on each side for each spartial dimension. It has the form [pad_d1, pad_d2, ...].\n 4. a list[int] or tuple[int] whose length is 2 * number of spartial dimensions. It has the form [pad_before, pad_after, pad_before, pad_after, ...] for all spartial dimensions.\n 5. a list or tuple of pairs of ints. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. 
Note that, the batch dimension and channel dimension are also included. Each pair of integers correspond to the amount of padding for a dimension of the input. Padding in batch dimension and channel dimension should be [0, 0] or (0, 0).\n The default value is 0.\n dilation(int|list|tuple, optional): The dilation size. If dilation is a tuple, it must\n contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the\n dilation_D = dilation_H = dilation_W = dilation. The default value is 1.\n groups(int, optional): The groups number of the Conv3D Layer. According to grouped\n convolution in Alex Krizhevsky's Deep CNN paper: when group=2,\n the first half of the filters is only connected to the first half\n of the input channels, while the second half of the filters is only\n connected to the second half of the input channels. The default value is 1.\n padding_mode(str, optional): ``'zeros'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. Default: ``'zeros'``.\n weight_attr(ParamAttr, optional): The parameter attribute for learnable parameters/weights\n of conv2d. If it is set to None or one attribute of ParamAttr, conv2d\n will create ParamAttr as param_attr. If it is set to None, the parameter\n is initialized with :math:`Normal(0.0, std)`, and the :math:`std` is\n :math:`(\\\\frac{2.0 }{filter\\_elem\\_num})^{0.5}`. The default value is None.\n bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of conv2d.\n If it is set to False, no bias will be added to the output units.\n If it is set to None or one attribute of ParamAttr, conv2d\n will create ParamAttr as bias_attr. If the Initializer of the bias_attr\n is not set, the bias is initialized zero. The default value is None.\n data_format(str, optional): Data format that specifies the layout of input.\n It can be \"NCHW\" or \"NHWC\". Default: \"NCHW\".\n\n Attribute:\n\n **weight** (Parameter): the learnable weights of filter of this layer.\n\n **bias** (Parameter or None): the learnable bias of this layer.\n\n Shape:\n\n - x: :math:`(N, C_{in}, H_{in}, W_{in})`\n\n - output: :math:`(N, C_{out}, H_{out}, W_{out})`\n\n Where\n\n .. math::\n\n H_{out}&= \\\\frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (kernel\\_size[0] - 1) + 1))}{strides[0]} + 1\n\n W_{out}&= \\\\frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (kernel\\_size[1] - 1) + 1))}{strides[1]} + 1\n\n Examples:\n\n .. 
code-block:: python\n\n import paddle\n import paddle.nn as nn\n \n paddle.disable_static()\n \n x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.)\n \n conv = nn.Conv2D(4, 6, (3, 3))\n y_var = conv(x_var)\n y_np = y_var.numpy()\n print(y_np.shape)\n # (2, 6, 6, 6)\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n padding=0,\n dilation=1,\n groups=1,\n padding_mode='zeros',\n weight_attr=None,\n bias_attr=None,\n data_format=\"NCHW\"):\n super(Conv2D, self).__init__(\n in_channels,\n out_channels,\n kernel_size,\n False,\n 2,\n stride=stride,\n padding=padding,\n padding_mode=padding_mode,\n dilation=dilation,\n groups=groups,\n weight_attr=weight_attr,\n bias_attr=bias_attr,\n data_format=data_format)\n\n def forward(self, x):\n if self._padding_mode != 'zeros':\n x = F.pad(x,\n self._reversed_padding_repeated_twice,\n mode=self._padding_mode,\n data_format=self._data_format)\n\n out = F.conv._conv_nd(\n x,\n self.weight,\n bias=self.bias,\n stride=self._stride,\n padding=self._padding,\n padding_algorithm=self._padding_algorithm,\n dilation=self._dilation,\n groups=self._groups,\n data_format=self._data_format,\n channel_dim=self._channel_dim,\n op_type=self._op_type,\n use_cudnn=self._use_cudnn)\n return out\n\n\nclass Conv2DTranspose(_ConvNd):\n r\"\"\"\n This interface is used to construct a callable object of the ``Conv2DTranspose`` class.\n For more details, refer to code examples.\n The convolution2D transpose layer calculates the output based on the input,\n filter, and dilations, strides, paddings. Input and output\n are in NCHW format. Where N is batch size, C is the number of feature map,\n H is the height of the feature map, and W is the width of the feature map.\n Filter's shape is [MCHW] , where M is the number of input feature map,\n C is the number of output feature map, H is the height of the filter,\n and W is the width of the filter. If the groups is greater than 1,\n C will equal the number of input feature map divided by the groups.\n If bias attribution and activation type are provided, bias is added to\n the output of the convolution, and the corresponding activation function\n is applied to the final result.\n The details of convolution transpose layer, please refer to the following explanation and references\n `conv2dtranspose <http://www.matthewzeiler.com/wp-content/uploads/2017/07/cvpr2010.pdf>`_ .\n For each input :math:`X`, the equation is:\n\n .. math::\n\n Out = \\sigma (W \\\\ast X + b)\n\n Where:\n\n * :math:`X`: Input value, a ``Tensor`` with NCHW format.\n * :math:`W`: Filter value, a ``Tensor`` with shape [MCHW] .\n * :math:`\\\\ast`: Convolution operation.\n * :math:`b`: Bias value, a 2-D ``Tensor`` with shape [M, 1].\n * :math:`\\\\sigma`: Activation function.\n * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.\n \n Parameters:\n in_channels(int): The number of channels in the input image.\n out_channels(int): The number of channels produced by the convolution.\n kernel_size(int|list|uple): The kernel size. If kernel_size is a tuple,\n it must contain two integers, (kernel_size_H, kernel_size_W).\n Otherwise, the kernel will be a square.\n stride(int|list|tuple, optional): The stride size. If stride is a tuple, it must\n contain two integers, (stride_H, stride_W). Otherwise, the\n stride_H = stride_W = stride. Default: 1.\n padding(int|str|tuple|list, optional): The padding size. Padding coule be in one of the following forms.\n 1. 
a string in ['valid', 'same'].\n 2. an int, which means each spartial dimension(depth, height, width) is zero paded by size of `padding` on both sides \n 3. a list[int] or tuple[int] whose length is the number of spartial dimensions, which contains the amount of padding on each side for each spartial dimension. It has the form [pad_d1, pad_d2, ...].\n 4. a list[int] or tuple[int] whose length is 2 * number of spartial dimensions. It has the form [pad_before, pad_after, pad_before, pad_after, ...] for all spartial dimensions.\n 5. a list or tuple of pairs of ints. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that, the batch dimension and channel dimension are also included. Each pair of integers correspond to the amount of padding for a dimension of the input. Padding in batch dimension and channel dimension should be [0, 0] or (0, 0).\n The default value is 0.\n output_padding(int|list|tuple, optional): Additional size added to one side\n of each dimension in the output shape. Default: 0.\n dilation(int|list|tuple, optional): The dilation size. If dilation is a tuple, it must\n contain two integers, (dilation_H, dilation_W). Otherwise, the\n dilation_H = dilation_W = dilation. Default: 1.\n groups(int, optional): The groups number of the Conv2D transpose layer. Inspired by\n grouped convolution in Alex Krizhevsky's Deep CNN paper, in which\n when group=2, the first half of the filters is only connected to the\n first half of the input channels, while the second half of the\n filters is only connected to the second half of the input channels.\n Default: 1.\n weight_attr(ParamAttr, optional): The parameter attribute for learnable weights(Parameter)\n of conv2d_transpose. If it is set to None or one attribute of ParamAttr, conv2d_transpose\n will create ParamAttr as param_attr. If the Initializer of the param_attr\n is not set, the parameter is initialized with Xavier. Default: None.\n bias_attr(ParamAttr|bool, optional): The attribute for the bias of conv2d_transpose.\n If it is set to False, no bias will be added to the output units.\n If it is set to None or one attribute of ParamAttr, conv2d_transpose\n will create ParamAttr as bias_attr. If the Initializer of the bias_attr\n is not set, the bias is initialized zero. Default: None.\n data_format(str, optional): Data format that specifies the layout of input.\n It can be \"NCHW\" or \"NHWC\". Default: \"NCHW\".\n\n Attribute:\n\n **weight** (Parameter): the learnable weights of filters of this layer.\n\n **bias** (Parameter or None): the learnable bias of this layer.\n\n Shape:\n\n - x: :math:`(N, C_{in}, H_{in}, W_{in})`\n\n - output: :math:`(N, C_{out}, H_{out}, W_{out})`\n\n Where\n\n .. math::\n\n H^\\prime_{out} &= (H_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (kernel\\_size[0] - 1) + 1\n\n W^\\prime_{out} &= (W_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (kernel\\_size[1] - 1) + 1\n\n H_{out} &\\in [ H^\\prime_{out}, H^\\prime_{out} + strides[0] )\n\n W_{out} &\\in [ W^\\prime_{out}, W^\\prime_{out} + strides[1] )\n\n Examples:\n\n .. 
code-block:: python\n\n import paddle\n import paddle.nn as nn\n \n paddle.disable_static()\n\n x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.)\n\n conv = nn.Conv2DTranspose(4, 6, (3, 3))\n y_var = conv(x_var)\n y_np = y_var.numpy()\n print(y_np.shape)\n # (2, 6, 10, 10)\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n padding=0,\n output_padding=0,\n dilation=1,\n groups=1,\n weight_attr=None,\n bias_attr=None,\n data_format=\"NCHW\"):\n super(Conv2DTranspose, self).__init__(\n in_channels,\n out_channels,\n kernel_size,\n True,\n 2,\n stride=stride,\n padding=padding,\n dilation=dilation,\n output_padding=output_padding,\n groups=groups,\n weight_attr=weight_attr,\n bias_attr=bias_attr,\n data_format=data_format)\n\n def forward(self, x, output_size=None):\n if output_size is None:\n output_padding = self.output_padding\n else:\n output_padding = 0\n\n out = F.conv2d_transpose(\n x,\n self.weight,\n bias=self.bias,\n padding=self._padding,\n output_padding=output_padding,\n stride=self._stride,\n dilation=self._dilation,\n groups=self._groups,\n output_size=output_size,\n data_format=self._data_format)\n return out\n\n\nclass Conv3D(_ConvNd):\n r\"\"\"\n **Convlution3d Layer**\n The convolution3d layer calculates the output based on the input, filter\n and strides, paddings, dilations, groups parameters. Input(Input) and\n Output(Output) are multidimensional tensors with a shape of \n :math:`[N, C, D, H, W]` . Where N is batch size, C is the number of\n channels, D is the depth of the feature, H is the height of the feature,\n and W is the width of the feature. Convlution3D is similar with Convlution2D\n but adds one dimension(depth). If bias attribution and activation type are\n provided, bias is added to the output of the convolution, and the\n corresponding activation function is applied to the final result.\n For each input :math:`X`, the equation is:\n\n .. math::\n\n Out = \\sigma (W \\\\ast X + b)\n\n In the above equation:\n\n * :math:`X`: Input value, a tensor with NCDHW or NDHWC format.\n * :math:`W`: Filter value, a tensor with MCDHW format.\n * :math:`\\\\ast`: Convolution operation.\n * :math:`b`: Bias value, a 2-D tensor with shape [M, 1].\n * :math:`\\\\sigma`: Activation function.\n * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.\n\n Parameters:\n in_channels(int): The number of input channels in the input image.\n out_channels(int): The number of output channels produced by the convolution.\n kernel_size(int|list|tuple, optional): The size of the convolving kernel.\n stride(int|list|tuple, optional): The stride size. If stride is a tuple, it must\n contain three integers, (stride_D, stride_H, stride_W). Otherwise, the\n stride_D = stride_H = stride_W = stride. The default value is 1.\n padding(int|str|tuple|list, optional): The padding size. Padding coule be in one of the following forms.\n 1. a string in ['valid', 'same'].\n 2. an int, which means each spartial dimension(depth, height, width) is zero paded by size of `padding` \n 3. a list[int] or tuple[int] whose length is the number of spartial dimensions, which contains the amount of padding on each side for each spartial dimension. It has the form [pad_d1, pad_d2, ...].\n 4. a list[int] or tuple[int] whose length is 2 * number of spartial dimensions. It has the form [pad_before, pad_after, pad_before, pad_after, ...] for all spartial dimensions.\n 5. a list or tuple of pairs of ints. 
It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that, the batch dimension and channel dimension are also included. Each pair of integers correspond to the amount of padding for a dimension of the input. Padding in batch dimension and channel dimension should be [0, 0] or (0, 0).\n The default value is 0.\n dilation(int|list|tuple, optional): The dilation size. If dilation is a tuple, it must\n contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the\n dilation_D = dilation_H = dilation_W = dilation. The default value is 1.\n groups(int, optional): The groups number of the Conv3D Layer. According to grouped\n convolution in Alex Krizhevsky's Deep CNN paper: when group=2,\n the first half of the filters is only connected to the first half\n of the input channels, while the second half of the filters is only\n connected to the second half of the input channels. The default value is 1.\n padding_mode(str, optional): ``'zeros'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. Default: ``'zeros'``.\n weight_attr(ParamAttr, optional): The parameter attribute for learnable parameters/weights\n of conv3d. If it is set to None or one attribute of ParamAttr, conv3d\n will create ParamAttr as param_attr. If it is set to None, the parameter\n is initialized with :math:`Normal(0.0, std)`, and the :math:`std` is\n :math:`(\\\\frac{2.0 }{filter\\_elem\\_num})^{0.5}`. The default value is None.\n bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of conv3d.\n If it is set to False, no bias will be added to the output units.\n If it is set to None or one attribute of ParamAttr, conv3d\n will create ParamAttr as bias_attr. If the Initializer of the bias_attr\n is not set, the bias is initialized zero. The default value is None.\n data_format(str, optional): Data format that specifies the layout of input.\n It can be \"NCDHW\" or \"NDHWC\". Default: \"NCDHW\".\n\n Attribute:\n\n **weight** (Parameter): the learnable weights of filters of this layer.\n\n **bias** (Parameter): the learnable bias of this layer.\n\n Shape:\n\n - x: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`\n\n - output: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`\n\n Where\n\n .. math::\n\n D_{out}&= \\\\frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (kernel\\_size[0] - 1) + 1))}{strides[0]} + 1\n\n H_{out}&= \\\\frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (kernel\\_size[1] - 1) + 1))}{strides[1]} + 1\n\n W_{out}&= \\\\frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (kernel\\_size[2] - 1) + 1))}{strides[2]} + 1\n\n Raises:\n ValueError: If the shapes of input, filter_size, stride, padding and\n groups mismatch.\n\n Examples:\n\n .. 
code-block:: python\n\n import paddle\n import paddle.nn as nn\n \n paddle.disable_static()\n\n x_var = paddle.uniform((2, 4, 8, 8, 8), dtype='float32', min=-1., max=1.)\n \n conv = nn.Conv3D(4, 6, (3, 3, 3))\n y_var = conv(x_var)\n y_np = y_var.numpy()\n print(y_np.shape)\n # (2, 6, 6, 6, 6)\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n padding=0,\n dilation=1,\n groups=1,\n padding_mode='zeros',\n weight_attr=None,\n bias_attr=None,\n data_format=\"NCDHW\"):\n super(Conv3D, self).__init__(\n in_channels,\n out_channels,\n kernel_size,\n False,\n 3,\n stride=stride,\n padding=padding,\n padding_mode=padding_mode,\n dilation=dilation,\n groups=groups,\n weight_attr=weight_attr,\n bias_attr=bias_attr,\n data_format=data_format)\n\n def forward(self, x):\n if self._padding_mode != 'zeros':\n x = F.pad(x,\n self._reversed_padding_repeated_twice,\n mode=self._padding_mode,\n data_format=self._data_format)\n\n out = F.conv._conv_nd(\n x,\n self.weight,\n bias=self.bias,\n stride=self._stride,\n padding=self._padding,\n padding_algorithm=self._padding_algorithm,\n dilation=self._dilation,\n groups=self._groups,\n data_format=self._data_format,\n channel_dim=self._channel_dim,\n op_type=self._op_type,\n use_cudnn=self._use_cudnn)\n return out\n\n\nclass Conv3DTranspose(_ConvNd):\n r\"\"\"\n **Convlution3D transpose layer**\n The convolution3D transpose layer calculates the output based on the input,\n filter, and dilations, strides, paddings. Input(Input) and output(Output)\n are in NCDHW format. Where N is batch size, C is the number of channels,\n D is the depth of the feature, H is the height of the feature, and W\n is the width of the feature. Parameters(dilations, strides, paddings) are\n two elements. These two elements represent height and width, respectively.\n The details of convolution transpose layer, please refer to the following\n explanation and references `therein <http://www.matthewzeiler.com/wp-content/uploads/2017/07/cvpr2010.pdf>`_.\n If bias attribution and activation type are provided, bias is added to\n the output of the convolution, and the corresponding activation function\n is applied to the final result.\n For each input :math:`X`, the equation is:\n \n .. math::\n\n Out = \\sigma (W \\\\ast X + b)\n\n In the above equation:\n\n * :math:`X`: Input value, a tensor with NCDHW format.\n * :math:`W`: Filter value, a tensor with MCDHW format.\n * :math:`\\\\ast`: Convolution operation.\n * :math:`b`: Bias value, a 2-D tensor with shape [M, 1].\n * :math:`\\\\sigma`: Activation function.\n * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.\n\n **Note**:\n\n The conv3d_transpose can be seen as the backward of the conv3d. 
For conv3d,\n when stride > 1, conv3d maps multiple input shape to the same output shape, \n so for conv3d_transpose, when stride > 1, input shape maps multiple output shape.\n If output_size is None, :math:`H_{out} = H^\\prime_{out}, :math:`H_{out} = \\\n H^\\prime_{out}, W_{out} = W^\\prime_{out}`; else, the :math:`D_{out}` of the output \n size must between :math:`D^\\prime_{out}` and :math:`D^\\prime_{out} + strides[0]`, \n the :math:`H_{out}` of the output size must between :math:`H^\\prime_{out}` \n and :math:`H^\\prime_{out} + strides[1]`, and the :math:`W_{out}` of the output size must \n between :math:`W^\\prime_{out}` and :math:`W^\\prime_{out} + strides[2]`, \n conv3d_transpose can compute the kernel size automatically.\n\n Parameters:\n in_channels(int): The number of channels in the input image.\n out_channels(int): The number of channels produced by the convolution.\n kernel_size(int|list|tuple): The kernel size. If kernel_size is a tuple,\n it must contain three integers, (kernel_size_D, kernel_size_H, kernel_size_W).\n Otherwise, the kernel will be a square.\n stride(int|list|tuple, optional): The stride size. It means the stride in transposed convolution. \n If stride is a tuple, it must contain three integers, (stride_depth, stride_height, \n stride_width). Otherwise, stride_depth = stride_height = stride_width = stride. \n The default value is 1.\n padding(int|str|tuple|list, optional): The padding size. Padding coule be in one of the following forms.\n 1. a string in ['valid', 'same'].\n 2. an int, which means each spartial dimension(depth, height, width) is zero paded by size of `padding` \n 3. a list[int] or tuple[int] whose length is the number of spartial dimensions, which contains the amount of padding on each side for each spartial dimension. It has the form [pad_d1, pad_d2, ...].\n 4. a list[int] or tuple[int] whose length is 2 * number of spartial dimensions. It has the form [pad_before, pad_after, pad_before, pad_after, ...] for all spartial dimensions.\n 5. a list or tuple of pairs of ints. It has the form [[pad_before, pad_after], [pad_before, pad_after], ...]. Note that, the batch dimension and channel dimension are also included. Each pair of integers correspond to the amount of padding for a dimension of the input. Padding in batch dimension and channel dimension should be [0, 0] or (0, 0).\n The default value is 0.\n output_padding(int|list|tuple, optional): Additional size added to one side\n of each dimension in the output shape. Default: 0.\n dilation(int|list|tuple, optional): The dilation size. If dilation is a tuple, it must\n contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the\n dilation_D = dilation_H = dilation_W = dilation. The default value is 1.\n groups(int, optional): The groups number of the Conv3D transpose layer. Inspired by\n grouped convolution in Alex Krizhevsky's Deep CNN paper, in which\n when group=2, the first half of the filters is only connected to the\n first half of the input channels, while the second half of the\n filters is only connected to the second half of the input channels.\n The default value is 1.\n weight_attr(ParamAttr, optional): The parameter attribute for learnable parameters/weights\n of conv3d_transpose. If it is set to None or one attribute of ParamAttr, conv3d_transpose\n will create ParamAttr as param_attr. If the Initializer of the param_attr\n is not set, the parameter is initialized with Xavier. 
The default value is None.\n bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of conv3d_transpose.\n If it is set to False, no bias will be added to the output units.\n If it is set to None or one attribute of ParamAttr, conv3d_transpose\n will create ParamAttr as bias_attr. If the Initializer of the bias_attr\n is not set, the bias is initialized zero. The default value is None.\n output_size(int|list|tuple, optional): The output image size. If output size is a\n tuple, it must contain two integers, (image_H, image_W). None if use\n filter_size, padding, and stride to calculate output_size.\n if output_size and filter_size are specified at the same time, They\n should follow the formula above. Default: None.\n data_format(str, optional): Data format that specifies the layout of input.\n It can be \"NCDHW\" or \"NDHWC\". Default: \"NCDHW\".\n\n Attribute:\n\n **weight** (Parameter): the learnable weights of filters of this layer.\n\n **bias** (Parameter): the learnable bias of this layer.\n\n Shape:\n\n - x: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`\n\n - output: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`\n\n Where\n\n .. math::\n\n D^\\prime_{out} &= (D_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (kernel\\_size[0] - 1) + 1\n \n H^\\prime_{out} &= (H_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (kernel\\_size[1] - 1) + 1\n \n W^\\prime_{out} &= (W_{in} - 1) * strides[2] - 2 * paddings[2] + dilations[2] * (kernel\\_size[2] - 1) + 1\n \n Raises:\n ValueError: If the shapes of input, filter_size, stride, padding and\n groups mismatch.\n Examples:\n\n .. code-block:: python\n\n import paddle\n import paddle.nn as nn\n \n paddle.disable_static()\n\n x_var = paddle.uniform((2, 4, 8, 8, 8), dtype='float32', min=-1., max=1.)\n \n conv = nn.Conv3DTranspose(4, 6, (3, 3, 3))\n y_var = conv(x_var)\n y_np = y_var.numpy()\n print(y_np.shape)\n # (2, 6, 10, 10, 10)\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n padding=0,\n output_padding=0,\n dilation=1,\n groups=1,\n weight_attr=None,\n bias_attr=None,\n data_format=\"NCDHW\"):\n super(Conv3DTranspose, self).__init__(\n in_channels,\n out_channels,\n kernel_size,\n True,\n 3,\n stride=stride,\n padding=padding,\n dilation=dilation,\n output_padding=output_padding,\n groups=groups,\n weight_attr=weight_attr,\n bias_attr=bias_attr,\n data_format=data_format)\n\n def forward(self, x, output_size=None):\n if output_size is None:\n output_padding = self.output_padding\n else:\n output_padding = 0\n\n out = F.conv3d_transpose(\n x,\n self.weight,\n bias=self.bias,\n padding=self._padding,\n output_padding=output_padding,\n stride=self._stride,\n dilation=self._dilation,\n groups=self._groups,\n output_size=output_size,\n data_format=self._data_format)\n return out\n" ]
[ [ "numpy.prod" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Kunind/building-controls-simulator
[ "7d3b74539233cfe5e2983f2f7554afd1bbbacbe9" ]
[ "src/python/BuildingControlsSimulator/DataClients/GCSDYDSource.py" ]
[ "# created by Tom Stesco [email protected]\n\nimport logging\n\nimport attr\nimport pandas as pd\nimport numpy as np\nimport gcsfs\n\nfrom BuildingControlsSimulator.DataClients.GCSDataSource import GCSDataSource\nfrom BuildingControlsSimulator.DataClients.DataSpec import DonateYourDataSpec\n\n\nlogger = logging.getLogger(__name__)\ngcsfs_logger = logging.getLogger(\"gcsfs\")\ngcsfs_logger.setLevel(logging.WARN)\n\n\[email protected](kw_only=True)\nclass GCSDYDSource(GCSDataSource):\n\n data_spec = attr.ib(factory=DonateYourDataSpec)\n file_extension = attr.ib(default=\"csv.zip\")\n source_name = attr.ib(default=\"GCSDYD\")\n meta_gcs_uri = attr.ib(default=None)\n\n def get_metadata(self):\n if self.meta_gcs_uri:\n _fs = gcsfs.GCSFileSystem(\n project=self.gcp_project,\n token=self.gcs_token,\n access=\"read_only\",\n )\n with _fs.open(self.meta_gcs_uri) as _file:\n _df = pd.read_csv(_file).drop_duplicates(subset=[\"Identifier\"])\n\n else:\n raise ValueError(\"Must supply `meta_gcs_uri` to dataclient.\")\n\n return _df\n\n def get_gcs_uri(self, sim_config):\n # first cast to utc timestamp\n # DYD uses UTC\n start_utc = pd.to_datetime(\n sim_config[\"start_utc\"], utc=True, infer_datetime_format=True\n )\n end_utc = pd.to_datetime(\n sim_config[\"end_utc\"], utc=True, infer_datetime_format=True\n )\n\n if isinstance(start_utc, pd.Timestamp):\n start_year = start_utc.year\n else:\n start_year = start_utc.dt.year\n\n if isinstance(end_utc, pd.Timestamp):\n end_year = end_utc.year\n else:\n end_year = end_utc.dt.year\n\n # supporting cross year simulations would require loading both years\n if np.any(end_year != start_year):\n raise ValueError(\"start_utc must be in same year as end_utc.\")\n\n years_supported = [2016, 2017, 2018, 2019]\n if start_year not in years_supported:\n raise ValueError(\n f\"start_utc must be in supported years: {years_supported}\"\n )\n\n return (\n self.gcs_uri_base\n + \"/\"\n + str(start_year)\n + \"/\"\n + sim_config[\"identifier\"]\n + \".\"\n + self.file_extension\n )\n" ]
[ [ "pandas.read_csv", "pandas.to_datetime", "numpy.any" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
ah4d1/anoapycore
[ "b530a7fd97e0f97659b3936733db8bc906efaa3e" ]
[ "src/anoapycore/data/__init__.py" ]
[ "from . import column\nfrom . import load\nfrom . import null\nfrom . import row\nfrom . import save\nfrom . import series\nfrom . import stat\nfrom . import value\n\nimport pandas as __pd\nfrom sklearn.preprocessing import MinMaxScaler as __minmax\nfrom sklearn.preprocessing import StandardScaler as __standard\nfrom imblearn.over_sampling import SMOTE as __smote\n\nimport anoapycore as __ap\n\ndef array_to_df (a_array,b_as_column='') :\n \"\"\"\n This will convert array to pandas dataframe\n use [] for b_as_column\n \"\"\"\n if b_as_column == '' :\n loc_result = __pd.DataFrame(data=a_array)\n else :\n loc_result = __pd.DataFrame(data=a_array,columns=b_as_column)\n return loc_result\n\ndef array_to_str (a_array,b_delimiter=' ') :\n return b_delimiter.join(a_array)\n\ndef copy (a_data) :\n \"\"\"\n This function is aimed to copy one dataframe to another dataframe.\n This will prevent a dataframe to be affected by another dataframe.\n \"\"\"\n return a_data.copy()\n\ndef dict_to_array (a_dict) :\n return list(a_dict.items())\n\ndef df_to_array (a_data) :\n return a_data.to_numpy()\n\ndef deselect (a_data,a_column) :\n \"\"\"\n Not to select a_column in a_data\n Get remaining columns\n Use [] in a_column\n \"\"\"\n loc_data = a_data.drop(a_column,axis = 1) \n return loc_data\n\ndef dimension (a_data) :\n print (str(row.count(a_data)) + ' rows x ' + str(column.count(a_data)) + ' columns')\n \ndef groupby (a_data,a_column,b_method='count') :\n if b_method == 'count' :\n loc_result = a_data.groupby(a_column).count() \n elif b_method == 'mean' :\n loc_result = a_data.groupby(a_column).mean() \n return loc_result\n # for future dev : \n # from collections import Counter\n # print(sorted(Counter(a_data[a_column]).items()))\n \ndef info (a_data) :\n return a_data.info()\n \ndef map (a_data,a_column,a_old,a_new) :\n \"\"\"\n Map value a_old of a_column in a_data with a_new\n Use [] in a_old and a_new\n a_new must match in length with a_old\n \"\"\"\n loc_new_data = a_data\n a_data[a_column].replace(a_old,a_new,inplace=True)\n\ndef merge (*a_data) :\n \"\"\"\n Merge dataframes by index\n \"\"\"\n i = 0\n for loc_data in a_data :\n i += 1\n if i == 1 :\n loc_new_df = loc_data\n else :\n loc_new_df = __pd.merge(loc_new_df,loc_data,left_index=True,right_index=True)\n return loc_new_df\n\ndef normalize (a_data,a_column,b_method='MinMax') :\n \"\"\"\n This function is aimed to normalize data.\n Use [] when passing parameter to a_column.\n Options for b_method = 'MinMax' (default),'Standard'\n Return directly to a_data[a_column]\n \"\"\"\n if b_method == 'MinMax' :\n loc_scaler = __minmax()\n a_data[a_column] = loc_scaler.fit_transform(a_data[a_column])\n elif b_method == 'Standard' :\n loc_scaler = __standard()\n a_data[a_column] = loc_scaler.fit_transform(a_data[a_column])\n \ndef unique (a_data,a_column) :\n \"\"\"\n Get unique value of a_column in a_data (for int or float data type only)\n \"\"\"\n return list(__np.unique(a_data[a_column]))\n \ndef sample (a_data,a_row=5) :\n return a_data.head(a_row)\n\ndef select (a_data,a_column) :\n \"\"\"\n Select a_column in a_data\n Use [] in a_column\n \"\"\"\n return a_data[a_column]\n \ndef show (a_data,a_index_begin,a_index_end) :\n x = 0\n for i in range(0,len(a_data)) :\n if i >= a_index_begin and i <= a_index_end :\n x += 1\n loc_this_df = __ap.data.row.index(a_data=a_data,a_index=i)\n if x == 1 :\n loc_new_data = loc_this_df\n else :\n loc_new_data = __ap.data.union(loc_new_data,loc_this_df)\n return loc_new_data\n \ndef smote (a_x,a_y) :\n loc_smote = 
__smote()\n loc_x = __ap.data.df_to_array(a_x)\n loc_y = __ap.data.df_to_array(a_y)\n loc_x_smote,loc_y_smote = loc_smote.fit_resample(loc_x,loc_y)\n loc_x_smote = __ap.data.array_to_df(loc_x_smote)\n loc_y_smote = __ap.data.array_to_df(loc_y_smote)\n return loc_x_smote,loc_y_smote\n \ndef union (*a_data) :\n x = 0\n for loc_data in a_data :\n x += 1\n if x == 1 :\n loc_new_data = loc_data\n else :\n loc_new_data = __pd.concat([loc_new_data,loc_data])\n return loc_new_data\n \n \n \n \n \n" ]
[ [ "pandas.merge", "pandas.concat", "pandas.DataFrame", "sklearn.preprocessing.StandardScaler", "sklearn.preprocessing.MinMaxScaler" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
lwhluvdemo/PaddleDetection
[ "99c7e3a75a955b4a4cb038679c8f88e170bb3b44" ]
[ "ppdet/modeling/layers.py" ]
[ "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport math\nimport six\nimport numpy as np\nfrom numbers import Integral\n\nimport paddle\nimport paddle.nn as nn\nfrom paddle import ParamAttr\nfrom paddle import to_tensor\nfrom paddle.nn import Conv2D, BatchNorm2D, GroupNorm\nimport paddle.nn.functional as F\nfrom paddle.nn.initializer import Normal, Constant, XavierUniform\nfrom paddle.regularizer import L2Decay\n\nfrom ppdet.core.workspace import register, serializable\nfrom ppdet.modeling.bbox_utils import delta2bbox\nfrom . import ops\n\nfrom paddle.vision.ops import DeformConv2D\n\n\ndef _to_list(l):\n if isinstance(l, (list, tuple)):\n return list(l)\n return [l]\n\n\nclass DeformableConvV2(nn.Layer):\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n padding=0,\n dilation=1,\n groups=1,\n weight_attr=None,\n bias_attr=None,\n lr_scale=1,\n regularizer=None,\n skip_quant=False,\n dcn_bias_regularizer=L2Decay(0.),\n dcn_bias_lr_scale=2.):\n super(DeformableConvV2, self).__init__()\n self.offset_channel = 2 * kernel_size**2\n self.mask_channel = kernel_size**2\n\n if lr_scale == 1 and regularizer is None:\n offset_bias_attr = ParamAttr(initializer=Constant(0.))\n else:\n offset_bias_attr = ParamAttr(\n initializer=Constant(0.),\n learning_rate=lr_scale,\n regularizer=regularizer)\n self.conv_offset = nn.Conv2D(\n in_channels,\n 3 * kernel_size**2,\n kernel_size,\n stride=stride,\n padding=(kernel_size - 1) // 2,\n weight_attr=ParamAttr(initializer=Constant(0.0)),\n bias_attr=offset_bias_attr)\n if skip_quant:\n self.conv_offset.skip_quant = True\n\n if bias_attr:\n # in FCOS-DCN head, specifically need learning_rate and regularizer\n dcn_bias_attr = ParamAttr(\n initializer=Constant(value=0),\n regularizer=dcn_bias_regularizer,\n learning_rate=dcn_bias_lr_scale)\n else:\n # in ResNet backbone, do not need bias\n dcn_bias_attr = False\n self.conv_dcn = DeformConv2D(\n in_channels,\n out_channels,\n kernel_size,\n stride=stride,\n padding=(kernel_size - 1) // 2 * dilation,\n dilation=dilation,\n groups=groups,\n weight_attr=weight_attr,\n bias_attr=dcn_bias_attr)\n\n def forward(self, x):\n offset_mask = self.conv_offset(x)\n offset, mask = paddle.split(\n offset_mask,\n num_or_sections=[self.offset_channel, self.mask_channel],\n axis=1)\n mask = F.sigmoid(mask)\n y = self.conv_dcn(x, offset, mask=mask)\n return y\n\n\nclass ConvNormLayer(nn.Layer):\n def __init__(self,\n ch_in,\n ch_out,\n filter_size,\n stride,\n groups=1,\n norm_type='bn',\n norm_decay=0.,\n norm_groups=32,\n use_dcn=False,\n bias_on=False,\n lr_scale=1.,\n freeze_norm=False,\n initializer=Normal(\n mean=0., std=0.01),\n skip_quant=False,\n dcn_lr_scale=2.,\n dcn_regularizer=L2Decay(0.)):\n super(ConvNormLayer, self).__init__()\n assert norm_type in ['bn', 'sync_bn', 'gn']\n\n if bias_on:\n bias_attr = ParamAttr(\n initializer=Constant(value=0.), learning_rate=lr_scale)\n else:\n bias_attr = False\n\n if not use_dcn:\n 
self.conv = nn.Conv2D(\n in_channels=ch_in,\n out_channels=ch_out,\n kernel_size=filter_size,\n stride=stride,\n padding=(filter_size - 1) // 2,\n groups=groups,\n weight_attr=ParamAttr(\n initializer=initializer, learning_rate=1.),\n bias_attr=bias_attr)\n if skip_quant:\n self.conv.skip_quant = True\n else:\n # in FCOS-DCN head, specifically need learning_rate and regularizer\n self.conv = DeformableConvV2(\n in_channels=ch_in,\n out_channels=ch_out,\n kernel_size=filter_size,\n stride=stride,\n padding=(filter_size - 1) // 2,\n groups=groups,\n weight_attr=ParamAttr(\n initializer=initializer, learning_rate=1.),\n bias_attr=True,\n lr_scale=dcn_lr_scale,\n regularizer=dcn_regularizer,\n skip_quant=skip_quant)\n\n norm_lr = 0. if freeze_norm else 1.\n param_attr = ParamAttr(\n learning_rate=norm_lr,\n regularizer=L2Decay(norm_decay) if norm_decay is not None else None)\n bias_attr = ParamAttr(\n learning_rate=norm_lr,\n regularizer=L2Decay(norm_decay) if norm_decay is not None else None)\n if norm_type == 'bn':\n self.norm = nn.BatchNorm2D(\n ch_out, weight_attr=param_attr, bias_attr=bias_attr)\n elif norm_type == 'sync_bn':\n self.norm = nn.SyncBatchNorm(\n ch_out, weight_attr=param_attr, bias_attr=bias_attr)\n elif norm_type == 'gn':\n self.norm = nn.GroupNorm(\n num_groups=norm_groups,\n num_channels=ch_out,\n weight_attr=param_attr,\n bias_attr=bias_attr)\n\n def forward(self, inputs):\n out = self.conv(inputs)\n out = self.norm(out)\n return out\n\n\nclass LiteConv(nn.Layer):\n def __init__(self,\n in_channels,\n out_channels,\n stride=1,\n with_act=True,\n norm_type='sync_bn',\n name=None):\n super(LiteConv, self).__init__()\n self.lite_conv = nn.Sequential()\n conv1 = ConvNormLayer(\n in_channels,\n in_channels,\n filter_size=5,\n stride=stride,\n groups=in_channels,\n norm_type=norm_type,\n initializer=XavierUniform())\n conv2 = ConvNormLayer(\n in_channels,\n out_channels,\n filter_size=1,\n stride=stride,\n norm_type=norm_type,\n initializer=XavierUniform())\n conv3 = ConvNormLayer(\n out_channels,\n out_channels,\n filter_size=1,\n stride=stride,\n norm_type=norm_type,\n initializer=XavierUniform())\n conv4 = ConvNormLayer(\n out_channels,\n out_channels,\n filter_size=5,\n stride=stride,\n groups=out_channels,\n norm_type=norm_type,\n initializer=XavierUniform())\n conv_list = [conv1, conv2, conv3, conv4]\n self.lite_conv.add_sublayer('conv1', conv1)\n self.lite_conv.add_sublayer('relu6_1', nn.ReLU6())\n self.lite_conv.add_sublayer('conv2', conv2)\n if with_act:\n self.lite_conv.add_sublayer('relu6_2', nn.ReLU6())\n self.lite_conv.add_sublayer('conv3', conv3)\n self.lite_conv.add_sublayer('relu6_3', nn.ReLU6())\n self.lite_conv.add_sublayer('conv4', conv4)\n if with_act:\n self.lite_conv.add_sublayer('relu6_4', nn.ReLU6())\n\n def forward(self, inputs):\n out = self.lite_conv(inputs)\n return out\n\n\n@register\n@serializable\nclass AnchorGeneratorSSD(object):\n def __init__(self,\n steps=[8, 16, 32, 64, 100, 300],\n aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],\n min_ratio=15,\n max_ratio=90,\n base_size=300,\n min_sizes=[30.0, 60.0, 111.0, 162.0, 213.0, 264.0],\n max_sizes=[60.0, 111.0, 162.0, 213.0, 264.0, 315.0],\n offset=0.5,\n flip=True,\n clip=False,\n min_max_aspect_ratios_order=False):\n self.steps = steps\n self.aspect_ratios = aspect_ratios\n self.min_ratio = min_ratio\n self.max_ratio = max_ratio\n self.base_size = base_size\n self.min_sizes = min_sizes\n self.max_sizes = max_sizes\n self.offset = offset\n self.flip = flip\n self.clip = clip\n 
self.min_max_aspect_ratios_order = min_max_aspect_ratios_order\n\n if self.min_sizes == [] and self.max_sizes == []:\n num_layer = len(aspect_ratios)\n step = int(\n math.floor(((self.max_ratio - self.min_ratio)) / (num_layer - 2\n )))\n for ratio in six.moves.range(self.min_ratio, self.max_ratio + 1,\n step):\n self.min_sizes.append(self.base_size * ratio / 100.)\n self.max_sizes.append(self.base_size * (ratio + step) / 100.)\n self.min_sizes = [self.base_size * .10] + self.min_sizes\n self.max_sizes = [self.base_size * .20] + self.max_sizes\n\n self.num_priors = []\n for aspect_ratio, min_size, max_size in zip(\n aspect_ratios, self.min_sizes, self.max_sizes):\n if isinstance(min_size, (list, tuple)):\n self.num_priors.append(\n len(_to_list(min_size)) + len(_to_list(max_size)))\n else:\n self.num_priors.append((len(aspect_ratio) * 2 + 1) * len(\n _to_list(min_size)) + len(_to_list(max_size)))\n\n def __call__(self, inputs, image):\n boxes = []\n for input, min_size, max_size, aspect_ratio, step in zip(\n inputs, self.min_sizes, self.max_sizes, self.aspect_ratios,\n self.steps):\n box, _ = ops.prior_box(\n input=input,\n image=image,\n min_sizes=_to_list(min_size),\n max_sizes=_to_list(max_size),\n aspect_ratios=aspect_ratio,\n flip=self.flip,\n clip=self.clip,\n steps=[step, step],\n offset=self.offset,\n min_max_aspect_ratios_order=self.min_max_aspect_ratios_order)\n boxes.append(paddle.reshape(box, [-1, 4]))\n return boxes\n\n\n@register\n@serializable\nclass RCNNBox(object):\n __shared__ = ['num_classes']\n\n def __init__(self,\n prior_box_var=[10., 10., 5., 5.],\n code_type=\"decode_center_size\",\n box_normalized=False,\n num_classes=80):\n super(RCNNBox, self).__init__()\n self.prior_box_var = prior_box_var\n self.code_type = code_type\n self.box_normalized = box_normalized\n self.num_classes = num_classes\n\n def __call__(self, bbox_head_out, rois, im_shape, scale_factor):\n bbox_pred = bbox_head_out[0]\n cls_prob = bbox_head_out[1]\n roi = rois[0]\n rois_num = rois[1]\n\n origin_shape = paddle.floor(im_shape / scale_factor + 0.5)\n scale_list = []\n origin_shape_list = []\n for idx, roi_per_im in enumerate(roi):\n rois_num_per_im = rois_num[idx]\n expand_im_shape = paddle.expand(im_shape[idx, :],\n [rois_num_per_im, 2])\n origin_shape_list.append(expand_im_shape)\n\n origin_shape = paddle.concat(origin_shape_list)\n\n # bbox_pred.shape: [N, C*4]\n # C=num_classes in faster/mask rcnn(bbox_head), C=1 in cascade rcnn(cascade_head)\n bbox = paddle.concat(roi)\n if bbox.shape[0] == 0:\n bbox = paddle.zeros([0, bbox_pred.shape[1]], dtype='float32')\n else:\n bbox = delta2bbox(bbox_pred, bbox, self.prior_box_var)\n scores = cls_prob[:, :-1]\n\n # bbox.shape: [N, C, 4]\n # bbox.shape[1] must be equal to scores.shape[1]\n bbox_num_class = bbox.shape[1]\n if bbox_num_class == 1:\n bbox = paddle.tile(bbox, [1, self.num_classes, 1])\n\n origin_h = paddle.unsqueeze(origin_shape[:, 0], axis=1)\n origin_w = paddle.unsqueeze(origin_shape[:, 1], axis=1)\n zeros = paddle.zeros_like(origin_h)\n x1 = paddle.maximum(paddle.minimum(bbox[:, :, 0], origin_w), zeros)\n y1 = paddle.maximum(paddle.minimum(bbox[:, :, 1], origin_h), zeros)\n x2 = paddle.maximum(paddle.minimum(bbox[:, :, 2], origin_w), zeros)\n y2 = paddle.maximum(paddle.minimum(bbox[:, :, 3], origin_h), zeros)\n bbox = paddle.stack([x1, y1, x2, y2], axis=-1)\n bboxes = (bbox, rois_num)\n return bboxes, scores\n\n\n@register\n@serializable\nclass MultiClassNMS(object):\n def __init__(self,\n score_threshold=.05,\n nms_top_k=-1,\n 
keep_top_k=100,\n nms_threshold=.5,\n normalized=True,\n nms_eta=1.0,\n return_index=False,\n return_rois_num=True):\n super(MultiClassNMS, self).__init__()\n self.score_threshold = score_threshold\n self.nms_top_k = nms_top_k\n self.keep_top_k = keep_top_k\n self.nms_threshold = nms_threshold\n self.normalized = normalized\n self.nms_eta = nms_eta\n self.return_index = return_index\n self.return_rois_num = return_rois_num\n\n def __call__(self, bboxes, score, background_label=-1):\n \"\"\"\n bboxes (Tensor|List[Tensor]): 1. (Tensor) Predicted bboxes with shape \n [N, M, 4], N is the batch size and M\n is the number of bboxes\n 2. (List[Tensor]) bboxes and bbox_num,\n bboxes have shape of [M, C, 4], C\n is the class number and bbox_num means\n the number of bboxes of each batch with\n shape [N,] \n score (Tensor): Predicted scores with shape [N, C, M] or [M, C]\n background_label (int): Ignore the background label; For example, RCNN\n is num_classes and YOLO is -1. \n \"\"\"\n kwargs = self.__dict__.copy()\n if isinstance(bboxes, tuple):\n bboxes, bbox_num = bboxes\n kwargs.update({'rois_num': bbox_num})\n if background_label > -1:\n kwargs.update({'background_label': background_label})\n return ops.multiclass_nms(bboxes, score, **kwargs)\n\n\n@register\n@serializable\nclass MatrixNMS(object):\n __append_doc__ = True\n\n def __init__(self,\n score_threshold=.05,\n post_threshold=.05,\n nms_top_k=-1,\n keep_top_k=100,\n use_gaussian=False,\n gaussian_sigma=2.,\n normalized=False,\n background_label=0):\n super(MatrixNMS, self).__init__()\n self.score_threshold = score_threshold\n self.post_threshold = post_threshold\n self.nms_top_k = nms_top_k\n self.keep_top_k = keep_top_k\n self.normalized = normalized\n self.use_gaussian = use_gaussian\n self.gaussian_sigma = gaussian_sigma\n self.background_label = background_label\n\n def __call__(self, bbox, score, *args):\n return ops.matrix_nms(\n bboxes=bbox,\n scores=score,\n score_threshold=self.score_threshold,\n post_threshold=self.post_threshold,\n nms_top_k=self.nms_top_k,\n keep_top_k=self.keep_top_k,\n use_gaussian=self.use_gaussian,\n gaussian_sigma=self.gaussian_sigma,\n background_label=self.background_label,\n normalized=self.normalized)\n\n\n@register\n@serializable\nclass YOLOBox(object):\n __shared__ = ['num_classes']\n\n def __init__(self,\n num_classes=80,\n conf_thresh=0.005,\n downsample_ratio=32,\n clip_bbox=True,\n scale_x_y=1.):\n self.num_classes = num_classes\n self.conf_thresh = conf_thresh\n self.downsample_ratio = downsample_ratio\n self.clip_bbox = clip_bbox\n self.scale_x_y = scale_x_y\n\n def __call__(self,\n yolo_head_out,\n anchors,\n im_shape,\n scale_factor,\n var_weight=None):\n boxes_list = []\n scores_list = []\n origin_shape = im_shape / scale_factor\n origin_shape = paddle.cast(origin_shape, 'int32')\n for i, head_out in enumerate(yolo_head_out):\n boxes, scores = ops.yolo_box(head_out, origin_shape, anchors[i],\n self.num_classes, self.conf_thresh,\n self.downsample_ratio // 2**i,\n self.clip_bbox, self.scale_x_y)\n boxes_list.append(boxes)\n scores_list.append(paddle.transpose(scores, perm=[0, 2, 1]))\n yolo_boxes = paddle.concat(boxes_list, axis=1)\n yolo_scores = paddle.concat(scores_list, axis=2)\n return yolo_boxes, yolo_scores\n\n\n@register\n@serializable\nclass SSDBox(object):\n def __init__(self, is_normalized=True):\n self.is_normalized = is_normalized\n self.norm_delta = float(not self.is_normalized)\n\n def __call__(self,\n preds,\n prior_boxes,\n im_shape,\n scale_factor,\n var_weight=None):\n 
boxes, scores = preds\n outputs = []\n for box, score, prior_box in zip(boxes, scores, prior_boxes):\n pb_w = prior_box[:, 2] - prior_box[:, 0] + self.norm_delta\n pb_h = prior_box[:, 3] - prior_box[:, 1] + self.norm_delta\n pb_x = prior_box[:, 0] + pb_w * 0.5\n pb_y = prior_box[:, 1] + pb_h * 0.5\n out_x = pb_x + box[:, :, 0] * pb_w * 0.1\n out_y = pb_y + box[:, :, 1] * pb_h * 0.1\n out_w = paddle.exp(box[:, :, 2] * 0.2) * pb_w\n out_h = paddle.exp(box[:, :, 3] * 0.2) * pb_h\n\n if self.is_normalized:\n h = paddle.unsqueeze(\n im_shape[:, 0] / scale_factor[:, 0], axis=-1)\n w = paddle.unsqueeze(\n im_shape[:, 1] / scale_factor[:, 1], axis=-1)\n output = paddle.stack(\n [(out_x - out_w / 2.) * w, (out_y - out_h / 2.) * h,\n (out_x + out_w / 2.) * w, (out_y + out_h / 2.) * h],\n axis=-1)\n else:\n output = paddle.stack(\n [\n out_x - out_w / 2., out_y - out_h / 2.,\n out_x + out_w / 2. - 1., out_y + out_h / 2. - 1.\n ],\n axis=-1)\n outputs.append(output)\n boxes = paddle.concat(outputs, axis=1)\n\n scores = F.softmax(paddle.concat(scores, axis=1))\n scores = paddle.transpose(scores, [0, 2, 1])\n\n return boxes, scores\n\n\n@register\n@serializable\nclass AnchorGrid(object):\n \"\"\"Generate anchor grid\n\n Args:\n image_size (int or list): input image size, may be a single integer or\n list of [h, w]. Default: 512\n min_level (int): min level of the feature pyramid. Default: 3\n max_level (int): max level of the feature pyramid. Default: 7\n anchor_base_scale: base anchor scale. Default: 4\n num_scales: number of anchor scales. Default: 3\n aspect_ratios: aspect ratios. default: [[1, 1], [1.4, 0.7], [0.7, 1.4]]\n \"\"\"\n\n def __init__(self,\n image_size=512,\n min_level=3,\n max_level=7,\n anchor_base_scale=4,\n num_scales=3,\n aspect_ratios=[[1, 1], [1.4, 0.7], [0.7, 1.4]]):\n super(AnchorGrid, self).__init__()\n if isinstance(image_size, Integral):\n self.image_size = [image_size, image_size]\n else:\n self.image_size = image_size\n for dim in self.image_size:\n assert dim % 2 ** max_level == 0, \\\n \"image size should be multiple of the max level stride\"\n self.min_level = min_level\n self.max_level = max_level\n self.anchor_base_scale = anchor_base_scale\n self.num_scales = num_scales\n self.aspect_ratios = aspect_ratios\n\n @property\n def base_cell(self):\n if not hasattr(self, '_base_cell'):\n self._base_cell = self.make_cell()\n return self._base_cell\n\n def make_cell(self):\n scales = [2**(i / self.num_scales) for i in range(self.num_scales)]\n scales = np.array(scales)\n ratios = np.array(self.aspect_ratios)\n ws = np.outer(scales, ratios[:, 0]).reshape(-1, 1)\n hs = np.outer(scales, ratios[:, 1]).reshape(-1, 1)\n anchors = np.hstack((-0.5 * ws, -0.5 * hs, 0.5 * ws, 0.5 * hs))\n return anchors\n\n def make_grid(self, stride):\n cell = self.base_cell * stride * self.anchor_base_scale\n x_steps = np.arange(stride // 2, self.image_size[1], stride)\n y_steps = np.arange(stride // 2, self.image_size[0], stride)\n offset_x, offset_y = np.meshgrid(x_steps, y_steps)\n offset_x = offset_x.flatten()\n offset_y = offset_y.flatten()\n offsets = np.stack((offset_x, offset_y, offset_x, offset_y), axis=-1)\n offsets = offsets[:, np.newaxis, :]\n return (cell + offsets).reshape(-1, 4)\n\n def generate(self):\n return [\n self.make_grid(2**l)\n for l in range(self.min_level, self.max_level + 1)\n ]\n\n def __call__(self):\n if not hasattr(self, '_anchor_vars'):\n anchor_vars = []\n helper = LayerHelper('anchor_grid')\n for idx, l in enumerate(range(self.min_level, self.max_level + 1)):\n 
stride = 2**l\n anchors = self.make_grid(stride)\n var = helper.create_parameter(\n attr=ParamAttr(name='anchors_{}'.format(idx)),\n shape=anchors.shape,\n dtype='float32',\n stop_gradient=True,\n default_initializer=NumpyArrayInitializer(anchors))\n anchor_vars.append(var)\n var.persistable = True\n self._anchor_vars = anchor_vars\n\n return self._anchor_vars\n\n\n@register\n@serializable\nclass FCOSBox(object):\n __shared__ = ['num_classes']\n\n def __init__(self, num_classes=80):\n super(FCOSBox, self).__init__()\n self.num_classes = num_classes\n\n def _merge_hw(self, inputs, ch_type=\"channel_first\"):\n \"\"\"\n Merge h and w of the feature map into one dimension.\n Args:\n inputs (Tensor): Tensor of the input feature map\n ch_type (str): \"channel_first\" or \"channel_last\" style\n Return:\n new_shape (Tensor): The new shape after h and w merged\n \"\"\"\n shape_ = paddle.shape(inputs)\n bs, ch, hi, wi = shape_[0], shape_[1], shape_[2], shape_[3]\n img_size = hi * wi\n img_size.stop_gradient = True\n if ch_type == \"channel_first\":\n new_shape = paddle.concat([bs, ch, img_size])\n elif ch_type == \"channel_last\":\n new_shape = paddle.concat([bs, img_size, ch])\n else:\n raise KeyError(\"Wrong ch_type %s\" % ch_type)\n new_shape.stop_gradient = True\n return new_shape\n\n def _postprocessing_by_level(self, locations, box_cls, box_reg, box_ctn,\n scale_factor):\n \"\"\"\n Postprocess each layer of the output with corresponding locations.\n Args:\n locations (Tensor): anchor points for current layer, [H*W, 2]\n box_cls (Tensor): categories prediction, [N, C, H, W], \n C is the number of classes\n box_reg (Tensor): bounding box prediction, [N, 4, H, W]\n box_ctn (Tensor): centerness prediction, [N, 1, H, W]\n scale_factor (Tensor): [h_scale, w_scale] for input images\n Return:\n box_cls_ch_last (Tensor): score for each category, in [N, C, M]\n C is the number of classes and M is the number of anchor points\n box_reg_decoding (Tensor): decoded bounding box, in [N, M, 4]\n last dimension is [x1, y1, x2, y2]\n \"\"\"\n act_shape_cls = self._merge_hw(box_cls)\n box_cls_ch_last = paddle.reshape(x=box_cls, shape=act_shape_cls)\n box_cls_ch_last = F.sigmoid(box_cls_ch_last)\n\n act_shape_reg = self._merge_hw(box_reg)\n box_reg_ch_last = paddle.reshape(x=box_reg, shape=act_shape_reg)\n box_reg_ch_last = paddle.transpose(box_reg_ch_last, perm=[0, 2, 1])\n box_reg_decoding = paddle.stack(\n [\n locations[:, 0] - box_reg_ch_last[:, :, 0],\n locations[:, 1] - box_reg_ch_last[:, :, 1],\n locations[:, 0] + box_reg_ch_last[:, :, 2],\n locations[:, 1] + box_reg_ch_last[:, :, 3]\n ],\n axis=1)\n box_reg_decoding = paddle.transpose(box_reg_decoding, perm=[0, 2, 1])\n\n act_shape_ctn = self._merge_hw(box_ctn)\n box_ctn_ch_last = paddle.reshape(x=box_ctn, shape=act_shape_ctn)\n box_ctn_ch_last = F.sigmoid(box_ctn_ch_last)\n\n # recover the location to original image\n im_scale = paddle.concat([scale_factor, scale_factor], axis=1)\n box_reg_decoding = box_reg_decoding / im_scale\n box_cls_ch_last = box_cls_ch_last * box_ctn_ch_last\n return box_cls_ch_last, box_reg_decoding\n\n def __call__(self, locations, cls_logits, bboxes_reg, centerness,\n scale_factor):\n pred_boxes_ = []\n pred_scores_ = []\n for pts, cls, box, ctn in zip(locations, cls_logits, bboxes_reg,\n centerness):\n pred_scores_lvl, pred_boxes_lvl = self._postprocessing_by_level(\n pts, cls, box, ctn, scale_factor)\n pred_boxes_.append(pred_boxes_lvl)\n pred_scores_.append(pred_scores_lvl)\n pred_boxes = paddle.concat(pred_boxes_, axis=1)\n 
pred_scores = paddle.concat(pred_scores_, axis=2)\n return pred_boxes, pred_scores\n\n\n@register\nclass TTFBox(object):\n __shared__ = ['down_ratio']\n\n def __init__(self, max_per_img=100, score_thresh=0.01, down_ratio=4):\n super(TTFBox, self).__init__()\n self.max_per_img = max_per_img\n self.score_thresh = score_thresh\n self.down_ratio = down_ratio\n\n def _simple_nms(self, heat, kernel=3):\n \"\"\"\n Use maxpool to filter the max score, get local peaks.\n \"\"\"\n pad = (kernel - 1) // 2\n hmax = F.max_pool2d(heat, kernel, stride=1, padding=pad)\n keep = paddle.cast(hmax == heat, 'float32')\n return heat * keep\n\n def _topk(self, scores):\n \"\"\"\n Select top k scores and decode to get xy coordinates.\n \"\"\"\n k = self.max_per_img\n shape_fm = paddle.shape(scores)\n shape_fm.stop_gradient = True\n cat, height, width = shape_fm[1], shape_fm[2], shape_fm[3]\n # batch size is 1\n scores_r = paddle.reshape(scores, [cat, -1])\n topk_scores, topk_inds = paddle.topk(scores_r, k)\n topk_scores, topk_inds = paddle.topk(scores_r, k)\n topk_ys = topk_inds // width\n topk_xs = topk_inds % width\n\n topk_score_r = paddle.reshape(topk_scores, [-1])\n topk_score, topk_ind = paddle.topk(topk_score_r, k)\n k_t = paddle.full(paddle.shape(topk_ind), k, dtype='int64')\n topk_clses = paddle.cast(paddle.floor_divide(topk_ind, k_t), 'float32')\n\n topk_inds = paddle.reshape(topk_inds, [-1])\n topk_ys = paddle.reshape(topk_ys, [-1, 1])\n topk_xs = paddle.reshape(topk_xs, [-1, 1])\n topk_inds = paddle.gather(topk_inds, topk_ind)\n topk_ys = paddle.gather(topk_ys, topk_ind)\n topk_xs = paddle.gather(topk_xs, topk_ind)\n\n return topk_score, topk_inds, topk_clses, topk_ys, topk_xs\n\n def __call__(self, hm, wh, im_shape, scale_factor):\n heatmap = F.sigmoid(hm)\n heat = self._simple_nms(heatmap)\n scores, inds, clses, ys, xs = self._topk(heat)\n ys = paddle.cast(ys, 'float32') * self.down_ratio\n xs = paddle.cast(xs, 'float32') * self.down_ratio\n scores = paddle.tensor.unsqueeze(scores, [1])\n clses = paddle.tensor.unsqueeze(clses, [1])\n\n wh_t = paddle.transpose(wh, [0, 2, 3, 1])\n wh = paddle.reshape(wh_t, [-1, paddle.shape(wh_t)[-1]])\n wh = paddle.gather(wh, inds)\n\n x1 = xs - wh[:, 0:1]\n y1 = ys - wh[:, 1:2]\n x2 = xs + wh[:, 2:3]\n y2 = ys + wh[:, 3:4]\n\n bboxes = paddle.concat([x1, y1, x2, y2], axis=1)\n\n scale_y = scale_factor[:, 0:1]\n scale_x = scale_factor[:, 1:2]\n scale_expand = paddle.concat(\n [scale_x, scale_y, scale_x, scale_y], axis=1)\n boxes_shape = paddle.shape(bboxes)\n boxes_shape.stop_gradient = True\n scale_expand = paddle.expand(scale_expand, shape=boxes_shape)\n bboxes = paddle.divide(bboxes, scale_expand)\n results = paddle.concat([clses, scores, bboxes], axis=1)\n # hack: append result with cls=-1 and score=1. 
to avoid all scores\n # are less than score_thresh which may cause error in gather.\n fill_r = paddle.to_tensor(np.array([[-1, 1, 0, 0, 0, 0]]))\n fill_r = paddle.cast(fill_r, results.dtype)\n results = paddle.concat([results, fill_r])\n scores = results[:, 1]\n valid_ind = paddle.nonzero(scores > self.score_thresh)\n results = paddle.gather(results, valid_ind)\n return results, paddle.shape(results)[0:1]\n\n\n@register\n@serializable\nclass JDEBox(object):\n __shared__ = ['num_classes']\n\n def __init__(self, num_classes=1, conf_thresh=0.3, downsample_ratio=32):\n self.num_classes = num_classes\n self.conf_thresh = conf_thresh\n self.downsample_ratio = downsample_ratio\n\n def generate_anchor(self, nGh, nGw, anchor_wh):\n nA = len(anchor_wh)\n yv, xv = paddle.meshgrid([paddle.arange(nGh), paddle.arange(nGw)])\n mesh = paddle.stack(\n (xv, yv), axis=0).cast(dtype='float32') # 2 x nGh x nGw\n meshs = paddle.tile(mesh, [nA, 1, 1, 1])\n\n anchor_offset_mesh = anchor_wh[:, :, None][:, :, :, None].repeat(\n int(nGh), axis=-2).repeat(\n int(nGw), axis=-1)\n anchor_offset_mesh = paddle.to_tensor(\n anchor_offset_mesh.astype(np.float32))\n # nA x 2 x nGh x nGw\n\n anchor_mesh = paddle.concat([meshs, anchor_offset_mesh], axis=1)\n anchor_mesh = paddle.transpose(anchor_mesh,\n [0, 2, 3, 1]) # (nA x nGh x nGw) x 4\n return anchor_mesh\n\n def decode_delta(self, delta, fg_anchor_list):\n px, py, pw, ph = fg_anchor_list[:, 0], fg_anchor_list[:,1], \\\n fg_anchor_list[:, 2], fg_anchor_list[:,3]\n dx, dy, dw, dh = delta[:, 0], delta[:, 1], delta[:, 2], delta[:, 3]\n gx = pw * dx + px\n gy = ph * dy + py\n gw = pw * paddle.exp(dw)\n gh = ph * paddle.exp(dh)\n gx1 = gx - gw * 0.5\n gy1 = gy - gh * 0.5\n gx2 = gx + gw * 0.5\n gy2 = gy + gh * 0.5\n return paddle.stack([gx1, gy1, gx2, gy2], axis=1)\n\n def decode_delta_map(self, delta_map, anchors):\n delta_map_shape = paddle.shape(delta_map)\n delta_map_shape.stop_gradient = True\n nB, nA, nGh, nGw, _ = delta_map_shape[:]\n anchor_mesh = self.generate_anchor(nGh, nGw, anchors)\n # only support bs=1\n anchor_mesh = paddle.unsqueeze(anchor_mesh, 0)\n\n pred_list = self.decode_delta(\n paddle.reshape(\n delta_map, shape=[-1, 4]),\n paddle.reshape(\n anchor_mesh, shape=[-1, 4]))\n pred_map = paddle.reshape(pred_list, shape=[nB, -1, 4])\n return pred_map\n\n def __call__(self, yolo_head_out, anchors):\n bbox_pred_list = []\n for i, head_out in enumerate(yolo_head_out):\n stride = self.downsample_ratio // 2**i\n anc_w, anc_h = anchors[i][0::2], anchors[i][1::2]\n anchor_vec = np.stack((anc_w, anc_h), axis=1) / stride\n nA = len(anc_w)\n boxes_shape = paddle.shape(head_out)\n boxes_shape.stop_gradient = True\n nB, nGh, nGw = boxes_shape[0], boxes_shape[-2], boxes_shape[-1]\n\n p = head_out.reshape((nB, nA, self.num_classes + 5, nGh, nGw))\n p = paddle.transpose(p, perm=[0, 1, 3, 4, 2]) # [nB, 4, 19, 34, 6]\n p_box = p[:, :, :, :, :4] # [nB, 4, 19, 34, 4]\n boxes = self.decode_delta_map(p_box, anchor_vec) # [nB, 4*19*34, 4]\n boxes = boxes * stride\n\n p_conf = paddle.transpose(\n p[:, :, :, :, 4:6], perm=[0, 4, 1, 2, 3]) # [nB, 2, 4, 19, 34]\n p_conf = F.softmax(\n p_conf,\n axis=1)[:, 1, :, :, :].unsqueeze(-1) # [nB, 4, 19, 34, 1]\n scores = paddle.reshape(p_conf, shape=[nB, -1, 1])\n\n bbox_pred_list.append(paddle.concat([boxes, scores], axis=-1))\n\n yolo_boxes_pred = paddle.concat(bbox_pred_list, axis=1)\n boxes_idx = paddle.nonzero(yolo_boxes_pred[:, :, -1] > self.conf_thresh)\n boxes_idx.stop_gradient = True\n if boxes_idx.shape[0] == 0: # TODO: deploy\n 
boxes_idx = paddle.to_tensor(np.array([[0]], dtype='int64'))\n yolo_boxes_out = paddle.to_tensor(\n np.array(\n [[[0.0, 0.0, 0.0, 0.0]]], dtype='float32'))\n yolo_scores_out = paddle.to_tensor(\n np.array(\n [[[0.0]]], dtype='float32'))\n return boxes_idx, yolo_boxes_out, yolo_scores_out\n\n yolo_boxes = paddle.gather_nd(yolo_boxes_pred, boxes_idx)\n yolo_boxes_out = paddle.reshape(yolo_boxes[:, :4], shape=[nB, -1, 4])\n yolo_scores_out = paddle.reshape(yolo_boxes[:, 4:5], shape=[nB, 1, -1])\n boxes_idx = boxes_idx[:, 1:]\n return boxes_idx, yolo_boxes_out, yolo_scores_out # [163], [1, 163, 4], [1, 1, 163]\n\n\n@register\n@serializable\nclass MaskMatrixNMS(object):\n \"\"\"\n Matrix NMS for multi-class masks.\n Args:\n update_threshold (float): Updated threshold of categroy score in second time.\n pre_nms_top_n (int): Number of total instance to be kept per image before NMS\n post_nms_top_n (int): Number of total instance to be kept per image after NMS.\n kernel (str): 'linear' or 'gaussian'.\n sigma (float): std in gaussian method.\n Input:\n seg_preds (Variable): shape (n, h, w), segmentation feature maps\n seg_masks (Variable): shape (n, h, w), segmentation feature maps\n cate_labels (Variable): shape (n), mask labels in descending order\n cate_scores (Variable): shape (n), mask scores in descending order\n sum_masks (Variable): a float tensor of the sum of seg_masks\n Returns:\n Variable: cate_scores, tensors of shape (n)\n \"\"\"\n\n def __init__(self,\n update_threshold=0.05,\n pre_nms_top_n=500,\n post_nms_top_n=100,\n kernel='gaussian',\n sigma=2.0):\n super(MaskMatrixNMS, self).__init__()\n self.update_threshold = update_threshold\n self.pre_nms_top_n = pre_nms_top_n\n self.post_nms_top_n = post_nms_top_n\n self.kernel = kernel\n self.sigma = sigma\n\n def _sort_score(self, scores, top_num):\n if paddle.shape(scores)[0] > top_num:\n return paddle.topk(scores, top_num)[1]\n else:\n return paddle.argsort(scores, descending=True)\n\n def __call__(self,\n seg_preds,\n seg_masks,\n cate_labels,\n cate_scores,\n sum_masks=None):\n # sort and keep top nms_pre\n sort_inds = self._sort_score(cate_scores, self.pre_nms_top_n)\n seg_masks = paddle.gather(seg_masks, index=sort_inds)\n seg_preds = paddle.gather(seg_preds, index=sort_inds)\n sum_masks = paddle.gather(sum_masks, index=sort_inds)\n cate_scores = paddle.gather(cate_scores, index=sort_inds)\n cate_labels = paddle.gather(cate_labels, index=sort_inds)\n\n seg_masks = paddle.flatten(seg_masks, start_axis=1, stop_axis=-1)\n # inter.\n inter_matrix = paddle.mm(seg_masks, paddle.transpose(seg_masks, [1, 0]))\n n_samples = paddle.shape(cate_labels)\n # union.\n sum_masks_x = paddle.expand(sum_masks, shape=[n_samples, n_samples])\n # iou.\n iou_matrix = (inter_matrix / (\n sum_masks_x + paddle.transpose(sum_masks_x, [1, 0]) - inter_matrix))\n iou_matrix = paddle.triu(iou_matrix, diagonal=1)\n # label_specific matrix.\n cate_labels_x = paddle.expand(cate_labels, shape=[n_samples, n_samples])\n label_matrix = paddle.cast(\n (cate_labels_x == paddle.transpose(cate_labels_x, [1, 0])),\n 'float32')\n label_matrix = paddle.triu(label_matrix, diagonal=1)\n\n # IoU compensation\n compensate_iou = paddle.max((iou_matrix * label_matrix), axis=0)\n compensate_iou = paddle.expand(\n compensate_iou, shape=[n_samples, n_samples])\n compensate_iou = paddle.transpose(compensate_iou, [1, 0])\n\n # IoU decay\n decay_iou = iou_matrix * label_matrix\n\n # matrix nms\n if self.kernel == 'gaussian':\n decay_matrix = paddle.exp(-1 * self.sigma * (decay_iou**2))\n 
compensate_matrix = paddle.exp(-1 * self.sigma *\n (compensate_iou**2))\n decay_coefficient = paddle.min(decay_matrix / compensate_matrix,\n axis=0)\n elif self.kernel == 'linear':\n decay_matrix = (1 - decay_iou) / (1 - compensate_iou)\n decay_coefficient = paddle.min(decay_matrix, axis=0)\n else:\n raise NotImplementedError\n\n # update the score.\n cate_scores = cate_scores * decay_coefficient\n y = paddle.zeros(shape=paddle.shape(cate_scores), dtype='float32')\n keep = paddle.where(cate_scores >= self.update_threshold, cate_scores,\n y)\n keep = paddle.nonzero(keep)\n keep = paddle.squeeze(keep, axis=[1])\n # Prevent empty and increase fake data\n keep = paddle.concat(\n [keep, paddle.cast(paddle.shape(cate_scores)[0] - 1, 'int64')])\n\n seg_preds = paddle.gather(seg_preds, index=keep)\n cate_scores = paddle.gather(cate_scores, index=keep)\n cate_labels = paddle.gather(cate_labels, index=keep)\n\n # sort and keep top_k\n sort_inds = self._sort_score(cate_scores, self.post_nms_top_n)\n seg_preds = paddle.gather(seg_preds, index=sort_inds)\n cate_scores = paddle.gather(cate_scores, index=sort_inds)\n cate_labels = paddle.gather(cate_labels, index=sort_inds)\n return seg_preds, cate_scores, cate_labels\n\n\ndef Conv2d(in_channels,\n out_channels,\n kernel_size,\n stride=1,\n padding=0,\n dilation=1,\n groups=1,\n bias=True,\n weight_init=Normal(std=0.001),\n bias_init=Constant(0.)):\n weight_attr = paddle.framework.ParamAttr(initializer=weight_init)\n if bias:\n bias_attr = paddle.framework.ParamAttr(initializer=bias_init)\n else:\n bias_attr = False\n conv = nn.Conv2D(\n in_channels,\n out_channels,\n kernel_size,\n stride,\n padding,\n dilation,\n groups,\n weight_attr=weight_attr,\n bias_attr=bias_attr)\n return conv\n\n\ndef ConvTranspose2d(in_channels,\n out_channels,\n kernel_size,\n stride=1,\n padding=0,\n output_padding=0,\n groups=1,\n bias=True,\n dilation=1,\n weight_init=Normal(std=0.001),\n bias_init=Constant(0.)):\n weight_attr = paddle.framework.ParamAttr(initializer=weight_init)\n if bias:\n bias_attr = paddle.framework.ParamAttr(initializer=bias_init)\n else:\n bias_attr = False\n conv = nn.Conv2DTranspose(\n in_channels,\n out_channels,\n kernel_size,\n stride,\n padding,\n output_padding,\n dilation,\n groups,\n weight_attr=weight_attr,\n bias_attr=bias_attr)\n return conv\n\n\ndef BatchNorm2d(num_features, eps=1e-05, momentum=0.9, affine=True):\n if not affine:\n weight_attr = False\n bias_attr = False\n else:\n weight_attr = None\n bias_attr = None\n batchnorm = nn.BatchNorm2D(\n num_features,\n momentum,\n eps,\n weight_attr=weight_attr,\n bias_attr=bias_attr)\n return batchnorm\n\n\ndef ReLU():\n return nn.ReLU()\n\n\ndef Upsample(scale_factor=None, mode='nearest', align_corners=False):\n return nn.Upsample(None, scale_factor, mode, align_corners)\n\n\ndef MaxPool(kernel_size, stride, padding, ceil_mode=False):\n return nn.MaxPool2D(kernel_size, stride, padding, ceil_mode=ceil_mode)\n\n\nclass Concat(nn.Layer):\n def __init__(self, dim=0):\n super(Concat, self).__init__()\n self.dim = dim\n\n def forward(self, inputs):\n return paddle.concat(inputs, axis=self.dim)\n\n def extra_repr(self):\n return 'dim={}'.format(self.dim)\n" ]
[ [ "numpy.hstack", "numpy.meshgrid", "numpy.arange", "numpy.stack", "numpy.outer", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
foxlf823/NCRFpp
[ "bea99df2951b729fabb96ccdc36edc2272847d50" ]
[ "model/charbigru.py" ]
[ "# -*- coding: utf-8 -*-\n# @Author: Jie Yang\n# @Date: 2017-10-17 16:47:32\n# @Last Modified by: Jie Yang, Contact: [email protected]\n# @Last Modified time: 2018-10-18 11:12:13\nfrom __future__ import print_function\nimport torch\nimport torch.nn as nn\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\nimport numpy as np\n\nclass CharBiGRU(nn.Module):\n def __init__(self, alphabet_size, pretrain_char_embedding, embedding_dim, hidden_dim, dropout, gpu, bidirect_flag = True):\n super(CharBiGRU, self).__init__()\n print(\"build char sequence feature extractor: GRU ...\")\n self.gpu = gpu\n self.hidden_dim = hidden_dim\n if bidirect_flag:\n self.hidden_dim = hidden_dim // 2\n self.char_drop = nn.Dropout(dropout)\n self.char_embeddings = nn.Embedding(alphabet_size, embedding_dim)\n if pretrain_char_embedding is not None:\n self.char_embeddings.weight.data.copy_(torch.from_numpy(pretrain_char_embedding))\n else:\n self.char_embeddings.weight.data.copy_(torch.from_numpy(self.random_embedding(alphabet_size, embedding_dim)))\n self.char_lstm = nn.GRU(embedding_dim, self.hidden_dim, num_layers=1, batch_first=True, bidirectional=bidirect_flag)\n if self.gpu >= 0 and torch.cuda.is_available():\n self.char_drop = self.char_drop.cuda(self.gpu)\n self.char_embeddings = self.char_embeddings.cuda(self.gpu)\n self.char_lstm = self.char_lstm.cuda(self.gpu)\n\n\n def random_embedding(self, vocab_size, embedding_dim):\n pretrain_emb = np.empty([vocab_size, embedding_dim])\n scale = np.sqrt(3.0 / embedding_dim)\n for index in range(vocab_size):\n pretrain_emb[index,:] = np.random.uniform(-scale, scale, [1, embedding_dim])\n return pretrain_emb\n\n\n def get_last_hiddens(self, input, seq_lengths):\n \"\"\"\n input:\n input: Variable(batch_size, word_length)\n seq_lengths: numpy array (batch_size, 1)\n output:\n Variable(batch_size, char_hidden_dim)\n Note it only accepts ordered (length) variable, length size is recorded in seq_lengths\n \"\"\"\n batch_size = input.size(0)\n char_embeds = self.char_drop(self.char_embeddings(input))\n char_hidden = None\n pack_input = pack_padded_sequence(char_embeds, seq_lengths, True)\n char_rnn_out, char_hidden = self.char_lstm(pack_input, char_hidden)\n # char_rnn_out, _ = pad_packed_sequence(char_rnn_out)\n return char_hidden.transpose(1,0).contiguous().view(batch_size,-1)\n\n def get_all_hiddens(self, input, seq_lengths):\n \"\"\"\n input:\n input: Variable(batch_size, word_length)\n seq_lengths: numpy array (batch_size, 1)\n output:\n Variable(batch_size, word_length, char_hidden_dim)\n Note it only accepts ordered (length) variable, length size is recorded in seq_lengths\n \"\"\"\n batch_size = input.size(0)\n char_embeds = self.char_drop(self.char_embeddings(input))\n char_hidden = None\n pack_input = pack_padded_sequence(char_embeds, seq_lengths, True)\n char_rnn_out, char_hidden = self.char_lstm(pack_input, char_hidden)\n char_rnn_out, _ = pad_packed_sequence(char_rnn_out)\n return char_rnn_out.transpose(1,0)\n\n\n def forward(self, input, seq_lengths):\n return self.get_all_hiddens(input, seq_lengths)\n" ]
[ [ "torch.nn.Dropout", "numpy.sqrt", "torch.nn.GRU", "torch.from_numpy", "torch.nn.Embedding", "torch.nn.utils.rnn.pack_padded_sequence", "torch.nn.utils.rnn.pad_packed_sequence", "torch.cuda.is_available", "numpy.random.uniform", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Manfred-Hyt/meshpy
[ "9381d91b259dff9fb2404cffe23f27f88ec0ccb4" ]
[ "meshpy/mesh_creation_functions/beam_fibers_in_rectangle.py" ]
[ "# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# MeshPy: A beam finite element input generator\n#\n# MIT License\n#\n# Copyright (c) 2021 Ivo Steinbrecher\n# Institute for Mathematics and Computer-Based Simulation\n# Universitaet der Bundeswehr Muenchen\n# https://www.unibw.de/imcs-en\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n# -----------------------------------------------------------------------------\n\"\"\"\nThis file has functions to create a honeycomb beam mesh.\n\"\"\"\n\n# Python packages.\nimport numpy as np\n\n# Meshpy modules.\nfrom .. import mpy, GeometrySet\nfrom ..container import GeometryName\nfrom ..utility import check_node_by_coordinate\nfrom .beam_basic_geometry import create_beam_mesh_line\n\n\ndef _intersect_line_with_rectangle(length, width, start_line, direction_line,\n fail_if_no_intersection=True):\n \"\"\"\n Calculate the intersection points between a line and a rectangle.\n\n Args\n ----\n length: scalar\n Rectangle length in x direction.\n width: scalar\n Rectangle width in y direction.\n start_line: 2d-list\n Point on the line.\n direction_line: 2d-list\n Direction of the line.\n fail_if_no_intersection: bool\n If this is true and no intersections are found, an error will be\n thrown.\n\n Return\n ----\n (start, end, projection_found)\n start: 2D vector\n Start point of intersected line.\n end: 2D vector\n End point of intersected line.\n projection_found: bool\n True if intersection is valid.\n \"\"\"\n\n # Convert the input values to np.arrays.\n start_line = np.array(start_line)\n direction_line = np.array(direction_line)\n\n # Set definition for the boundary lines of the rectangle. 
The director is\n # chosen in a way, that the values [0, 1] for the line parameters alpha are\n # valid.\n # boundary_lines = [..., [start, dir], ...]\n boundary_lines = [\n [[0, 0], [length, 0]],\n [[0, 0], [0, width]],\n [[0, width], [length, 0]],\n [[length, 0], [0, width]]\n ]\n # Convert fo numpy arrays.\n boundary_lines = [[np.array(item) for item in boundary] for boundary\n in boundary_lines]\n\n # Loop over the boundaries.\n alpha_list = []\n for start_boundary, direction_boundary in boundary_lines:\n # Set up the linear system to solve the intersection problem.\n A = np.transpose(np.array([direction_line, -direction_boundary]))\n\n # Check if the system is solvable.\n if np.abs(np.linalg.det(A)) > 1e-10:\n alpha = np.linalg.solve(A, start_boundary - start_line)\n if (0 <= alpha[1] and alpha[1] <= 1):\n alpha_list.append(alpha[0])\n\n # Check that intersections were found.\n if len(alpha_list) < 2:\n if fail_if_no_intersection:\n raise ValueError('No intersections found!')\n return (None, None, False)\n\n # Return the start and end point on the line.\n return (\n start_line + min(alpha_list) * direction_line,\n start_line + max(alpha_list) * direction_line,\n True\n )\n\n\ndef create_fibers_in_rectangle(mesh, beam_object, material, length, width,\n angle, fiber_distance, fiber_element_length, *, offset=0.0):\n \"\"\"\n Create multiple fibers in a rectangle.\n\n Args\n ----\n mesh: Mesh\n Mesh that the fibers will be added to.\n beam_object: Beam\n Object that will be used to create the beam elements.\n material: Material\n Material for the beam.\n length: float\n Length of the rectangle in x direction (starting at x=0)\n width: float\n Width of the rectangle in y direction (starting at y=0)\n angle: float\n Angle of the fibers in degree.\n fiber_distance: float\n Perpendicular distance between the fibers.\n fiber_element_length: float\n Length of a single beam element. In general it will not be possible to\n exactly achieve this length. 
If a line at a corner is shorter than this\n value, it will not be meshed.\n offset: double\n Fibers will be offset by this value from the symmetric layout.\n \"\"\"\n\n if offset < 0:\n raise ValueError('The offset has to be positive!')\n if np.abs(offset) >= fiber_distance:\n raise ValueError('The offset has to smaller than the fiber distance!')\n\n # Get the fiber angle in rad.\n fiber_angle = angle * np.pi / 180.\n sin = np.sin(fiber_angle)\n cos = np.cos(fiber_angle)\n\n # The cosinus has to be positive for the algorithm to work.\n if cos < 0:\n cos = -cos\n sin = -sin\n\n # Direction and normal vector of the fibers.\n fiber_direction = np.array([cos, sin])\n fiber_normal = np.array([-sin, cos])\n\n # Get starting point for the creation of the fibers.\n if sin >= 0:\n fiber_start_point = np.array([length, 0])\n plate_diagonal = np.array([-length, width])\n else:\n fiber_start_point = np.array([0, 0])\n plate_diagonal = np.array([length, width])\n\n # Get the number of fibers in this layer.\n fiber_diagonal_distance = np.dot(fiber_normal, plate_diagonal)\n fiber_n = int(fiber_diagonal_distance // fiber_distance)\n\n # Calculate the offset, so the fibers are placed in the 'middle' of the\n # diagonal.\n fiber_offset = ((fiber_diagonal_distance / fiber_distance - fiber_n)\n * fiber_distance * 0.5 * fiber_normal)\n\n # Loop over each fiber and create the beam element.\n for n in range(-1, fiber_n + 1):\n\n # Get the start and end point of the line.\n start, end, projection_found = _intersect_line_with_rectangle(\n length, width,\n (fiber_offset + fiber_start_point\n + (n * fiber_distance + offset) * fiber_normal),\n fiber_direction,\n fail_if_no_intersection=False)\n\n if projection_found:\n # Calculate the length of the line.\n fiber_length = np.linalg.norm(end - start)\n\n # Create the beams if the length is not smaller than the fiber\n # distance.\n if fiber_length > fiber_distance:\n\n # Calculate the number of elements in this fiber.\n fiber_nel = int(fiber_length // fiber_element_length)\n fiber_nel = np.max([fiber_nel, 1])\n create_beam_mesh_line(mesh, beam_object, material,\n np.append(start, 0.),\n np.append(end, 0.),\n n_el=fiber_nel)\n\n return_set = GeometryName()\n return_set['north'] = GeometrySet(mpy.geo.point,\n nodes=mesh.get_nodes_by_function(check_node_by_coordinate, 1, width))\n return_set['east'] = GeometrySet(mpy.geo.point,\n nodes=mesh.get_nodes_by_function(check_node_by_coordinate, 0, length))\n return_set['south'] = GeometrySet(mpy.geo.point,\n nodes=mesh.get_nodes_by_function(check_node_by_coordinate, 1, 0))\n return_set['west'] = GeometrySet(mpy.geo.point,\n nodes=mesh.get_nodes_by_function(check_node_by_coordinate, 0, 0))\n return_set['all'] = GeometrySet(mpy.geo.line,\n nodes=mesh.get_global_nodes())\n return return_set\n" ]
[ [ "numpy.dot", "numpy.linalg.solve", "numpy.abs", "numpy.cos", "numpy.linalg.norm", "numpy.sin", "numpy.linalg.det", "numpy.max", "numpy.append", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
GeunYoung2/learn
[ "7e4c0e0ac0825d81cf8a2a5ce9aa00cd2e02fdd4" ]
[ "Data_Analysis.py" ]
[ "## 영상 처리 및 데이터 분석 툴\r\nfrom tkinter import *;\r\nimport os.path;\r\nimport math\r\nfrom tkinter.filedialog import *\r\nfrom tkinter.simpledialog import *\r\nimport struct;\r\nimport threading\r\nimport matplotlib.pyplot as plt\r\nimport xlwt\r\nimport xlsxwriter\r\nimport pymysql\r\nimport csv\r\nimport sqlite3\r\nimport xlrd\r\nimport xlwt\r\n\r\n## 함수 선언부\r\n###################Menu 1 (파일메뉴)############################\r\ndef loadImage(fname):\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n fsize = os.path.getsize(fname)\r\n inH = inW = int(math.sqrt(fsize))\r\n inImage = []\r\n tmpList = []\r\n for i in range(inH):\r\n tmpList = []\r\n for k in range(inW):\r\n tmpList.append(0)\r\n inImage.append(tmpList)\r\n\r\n fp = open(fname, 'rb')\r\n for i in range(inH):\r\n for k in range(inW):\r\n inImage[i][k] = int(ord(fp.read(1)))\r\n fp.close()\r\n\r\n\r\ndef openFile():\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n filename = askopenfilename(parent=window,\r\n filetypes=((\"RAW파일\", \"*.raw\"), (\"모든파일\", \"*.*\")))\r\n loadImage(filename)\r\n equal()\r\n\r\n\r\ndef saveFile():\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n saveFp = asksaveasfile(parent=window, mode='wb',\r\n defaultextension=\"*.raw\", filetypes=((\"RAW파일\", \"*.raw\"), (\"모든파일\", \"*.*\")))\r\n for i in range(outW):\r\n for k in range(outH):\r\n saveFp.write(struct.pack('B', outImage[i][k]))\r\n\r\n saveFp.close()\r\n\r\n\r\ndef exitFile():\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n pass\r\n\r\n\r\ndef display():\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n\r\n if canvas != None:\r\n canvas.destroy()\r\n\r\n VIEW_X, VIEW_Y = 256, 256\r\n if VIEW_X >= outW or VIEW_Y >= outH:\r\n VIEW_X = outW\r\n VIEW_Y = outH\r\n step = 1\r\n else:\r\n step = int(outW / VIEW_X)\r\n\r\n window.geometry(str(VIEW_X * 2) + 'x' + str(VIEW_Y * 2))\r\n canvas = Canvas(window, width=VIEW_X, height=VIEW_Y)\r\n paper = PhotoImage(width=VIEW_X, height=VIEW_Y)\r\n canvas.create_image((VIEW_X / 2, VIEW_X / 2), image=paper, state='normal')\r\n\r\n def putPixel():\r\n for i in range(0, outH, step):\r\n for k in range(0, outW, step):\r\n data = outImage[i][k]\r\n paper.put('#%02x%02x%02x' % (data, data, data),\r\n (int(k / step), int(i / step)))\r\n\r\n threading.Thread(target=putPixel).start()\r\n canvas.pack(expand=1, anchor=CENTER)\r\n status.configure(text='이미지 정보:' + str(outW) + 'x' + str(outH))\r\n\r\n#################################################################################\r\n\r\n\r\n#####################Menu2(화소점처리)############################################################\r\ndef equal(): # 동일 영상 알고리즘\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n outW = inW;\r\n outH = inH;\r\n outImage = [];\r\n tmpList = []\r\n for i in range(outH):\r\n tmpList = []\r\n for k in range(outW):\r\n tmpList.append(0)\r\n outImage.append(tmpList)\r\n\r\n for i in range(inH):\r\n for k in range(inW):\r\n outImage[i][k] = inImage[i][k]\r\n\r\n display()\r\n\r\n\r\ndef addImage(): # 밝게하기\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n outW = inW;\r\n outH = inH;\r\n outImage = [];\r\n tmpList = []\r\n for i in range(outH):\r\n tmpList = []\r\n for k in range(outW):\r\n tmpList.append(0)\r\n outImage.append(tmpList)\r\n value = askinteger('밝게하기', '밝게할 값-->', minvalue=1, 
maxvalue=255)\r\n for i in range(inH):\r\n for k in range(inW):\r\n if inImage[i][k] + value > 255:\r\n outImage[i][k] = 255\r\n else:\r\n outImage[i][k] = inImage[i][k] + value\r\n display()\r\n\r\ndef multiplyImage() : # 영상 곱하기 알고리즘\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n outW = inW; outH = inH;\r\n for i in range(outH) :\r\n tmpList=[]\r\n for k in range(outW) :\r\n tmpList.append(0)\r\n outImage.append(tmpList)\r\n value = askinteger('밝게 곱하기', '밝게할 값-->', minvalue=1, maxvalue=255)\r\n for i in range(inH) :\r\n for k in range(inW):\r\n if inImage [i][k] * value > 255 :\r\n outImage[i][k] = 255\r\n elif inImage[i][k] * value < 0 :\r\n outImage[i][k] = 0\r\n else :\r\n outImage[i][k] = inImage[i][k] * value\r\n display()\r\n\r\ndef decreaseImage() :\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n outW = inW; outH = inH;\r\n for i in range(outH) :\r\n tmpList=[]\r\n for k in range(outW) :\r\n tmpList.append(0)\r\n outImage.append(tmpList)\r\n\r\n value = askinteger('어둡게빼기', '어둡게할 값-->', minvalue=1, maxvalue=255)\r\n for i in range(inH) :\r\n for k in range(inW):\r\n if inImage[i][k] - value < 0 :\r\n outImage[i][k] = 0\r\n else :\r\n outImage[i][k] = inImage[i][k] - value\r\n\r\ndef divisionImage() : # 영상 나누기 알고리즘\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n outW = inW; outH = inH;\r\n for i in range(outH) :\r\n tmpList=[]\r\n for k in range(outW) :\r\n tmpList.append(0)\r\n outImage.append(tmpList)\r\n\r\n value = askinteger('어둡게나누기', '어둡게할 값-->', minvalue=1, maxvalue=255)\r\n for i in range(inH) :\r\n for k in range(inW):\r\n\r\n if inImage[i][k] // value > 255:\r\n outImage[i][k] = 255\r\n elif inImage[i][k] // value < 0:\r\n outImage[i][k] = 0\r\n else:\r\n outImage[i][k] = inImage[i][k] // value\r\n\r\n display()\r\n\r\ndef morphing(): # 합성하기\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n\r\n outW = inW\r\n outH = inH\r\n\r\n filename2 = askopenfilename(parent=window,\r\n filetypes=((\"RAW파일\", \"*.raw\"), (\"모든파일\", \"*.*\")))\r\n if filename2 == '' or filename2 == None:\r\n return\r\n inImage2 = []\r\n fsize2 = os.path.getsize(filename2)\r\n inH2 = inW2 = int(math.sqrt(fsize2))\r\n if inH2 != inH:\r\n return\r\n fp2 = open(filename2, 'rb')\r\n for i in range(inH2):\r\n tmpList = []\r\n for k in range(inW2):\r\n data = int(ord(fp2.read(1)))\r\n tmpList.append(data)\r\n inImage2.append(tmpList)\r\n fp2.close()\r\n\r\n outImage = [];\r\n tmpList = []\r\n for i in range(outH):\r\n tmpList = []\r\n for k in range(outW):\r\n tmpList.append(0)\r\n outImage.append(tmpList)\r\n\r\n value = askinteger('합성비율', '두번째 영상의 가중치%-->', minvalue=1, maxvalue=99)\r\n w1 = 1 - (value / 100)\r\n w2 = 1 - w1\r\n for i in range(inH):\r\n for k in range(inW):\r\n data = int(inImage[i][k] * w1 + inImage2[i][k] * w2)\r\n if data > 255:\r\n data = 255\r\n elif data < 0:\r\n data = 0\r\n outImage[i][k] = data\r\n\r\n display()\r\n##################################################################\r\n\r\n#####################Menu2(화소영역처리 )############################################################\r\ndef embossing(): # 화소영역 - 엠보싱 알고리즘\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n\r\n outW = inW;outH = inH;outImage = []; tmpList = []\r\n for i in range(outH):\r\n tmpList = []\r\n for k in range(outW):\r\n tmpList.append(0)\r\n outImage.append(tmpList)\r\n MSIZE = 3\r\n mask = [[-1, 0, 0], [0, 0, 0], [0, 
0, 1]]\r\n tmpInImage = []\r\n for i in range(inH + 2):\r\n tmpList = []\r\n for k in range(inW + 2):\r\n tmpList.append(128)\r\n tmpInImage.append(tmpList)\r\n tmpOutImage = []\r\n for i in range(outH):\r\n tmpList = []\r\n for k in range(outW):\r\n tmpList.append(0)\r\n tmpOutImage.append(tmpList)\r\n\r\n for i in range(inH):\r\n for k in range(inW):\r\n tmpInImage[i + 1][k + 1] = inImage[i][k]\r\n\r\n for i in range(1, inH):\r\n for k in range(1, inW):\r\n\r\n S = 0.0\r\n for m in range(0, MSIZE):\r\n for n in range(0, MSIZE):\r\n S += mask[m][n] * tmpInImage[i + (m - 1)][k + (n - 1)]\r\n tmpOutImage[i - 1][k - 1] = S\r\n\r\n for i in range(outW):\r\n for k in range(outH):\r\n tmpOutImage[i][k] += 127\r\n\r\n for i in range(outW):\r\n for k in range(outH):\r\n value = int(tmpOutImage[i][k])\r\n if value > 255:\r\n value = 255\r\n elif value < 0:\r\n value = 0\r\n outImage[i][k] = value\r\n\r\n display()\r\n\r\ndef blurr() :\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n\r\n outW = inW;outH = inH;outImage = [];tmpList = []\r\n for i in range(outH):\r\n tmpList = []\r\n for k in range(outW):\r\n tmpList.append(0)\r\n outImage.append(tmpList)\r\n\r\n mSize = 3\r\n mask = [[1/9, 1/9, 1/9], [1/9, 1/9, 1/9], [1/9, 1/9, 1/9]]\r\n\r\n tmpInImage = []\r\n for i in range(0, inW + 2):\r\n tmpList = []\r\n for k in range(0, inH + 2):\r\n tmpList.append(127)\r\n tmpInImage.append(tmpList)\r\n\r\n tmpOutImage = []\r\n for i in range(0, outW):\r\n tmpList = []\r\n for k in range(0, outH):\r\n tmpList.append(0)\r\n tmpOutImage.append(tmpList)\r\n\r\n for i in range(0, inW):\r\n for k in range(0, inH):\r\n tmpInImage[i + 1][k + 1] = inImage[i][k]\r\n\r\n for i in range(1, inW):\r\n for k in range(1, inH):\r\n\r\n s = 0.0\r\n for m in range(0, mSize):\r\n for n in range(0, mSize):\r\n s += mask[m][n] * tmpInImage[i + m][k + n]\r\n tmpOutImage[i - 1][k - 1] = s\r\n\r\n\r\n for i in range(0, outW):\r\n for k in range(0, outH):\r\n if tmpOutImage[i][k] < 0:\r\n outImage[i][k] = 0\r\n elif tmpOutImage[i][k] > 255:\r\n outImage[i][k] = 255\r\n else:\r\n outImage[i][k] = int(tmpOutImage[i][k])\r\n\r\n display()\r\n\r\n\r\n\r\ndef sharp() :\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n\r\n outW = inW;outH = inH;outImage = [];tmpList = []\r\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\r\n tmpList = []\r\n for k in range(outW):\r\n tmpList.append(0)\r\n outImage.append(tmpList)\r\n\r\n mSize = 3\r\n mask = [[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]]\r\n mask = [[0, -1, 0], [-1, 5, -1], [0, -1, 0]]\r\n\r\n tmpInImage = []\r\n for i in range(0, inW + 2):\r\n tmpList = []\r\n for k in range(0, inH + 2):\r\n tmpList.append(127)\r\n tmpInImage.append(tmpList)\r\n\r\n tmpOutImage = []\r\n for i in range(0, outW):\r\n tmpList = []\r\n for k in range(0, outH):\r\n tmpList.append(0)\r\n tmpOutImage.append(tmpList)\r\n\r\n for i in range(0, inW):\r\n for k in range(0, inH):\r\n tmpInImage[i + 1][k + 1] = inImage[i][k]\r\n\r\n for i in range(1, inW):\r\n for k in range(1, inH):\r\n s = 0.0\r\n for m in range(0, mSize):\r\n for n in range(0, mSize):\r\n s += mask[m][n] * tmpInImage[i + m][k + n]\r\n tmpOutImage[i - 1][k - 1] = s\r\n\r\n for i in range(0, outW):\r\n for k in range(0, outH):\r\n if tmpOutImage[i][k] < 0:\r\n outImage[i][k] = 0\r\n elif tmpOutImage[i][k] > 255:\r\n outImage[i][k] = 255\r\n else:\r\n outImage[i][k] = int(tmpOutImage[i][k])\r\n\r\n display()\r\n\r\n\r\ndef edge1() :\r\n global window, canvas, paper, filename, inImage, 
outImage, inW, inH, outW, outH\r\n\r\n # 중요! 출력메모리의 크기를 결정\r\n outW = inW;\r\n outH = inH;\r\n outImage = [];\r\n tmpList = []\r\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\r\n tmpList = []\r\n for k in range(outW):\r\n tmpList.append(0)\r\n outImage.append(tmpList)\r\n #############################\r\n # 진짜 영상처리 알고리즘을 구현\r\n ############################\r\n mSize = 3\r\n mask = [[-1/9, -1/9, -1/9], [-1/9, 8/9, -1/9], [-1/9, -1/9, -1/9]]\r\n #####################\r\n # 임시 입력 영상 + 2\r\n tmpInImage = []\r\n for i in range(0, inW + 2):\r\n tmpList = []\r\n for k in range(0, inH + 2):\r\n tmpList.append(127)\r\n tmpInImage.append(tmpList)\r\n # 임시 출력 영상\r\n tmpOutImage = []\r\n for i in range(0, outW):\r\n tmpList = []\r\n for k in range(0, outH):\r\n tmpList.append(0)\r\n tmpOutImage.append(tmpList)\r\n # 입력 ==> 임시 입력\r\n for i in range(0, inW):\r\n for k in range(0, inH):\r\n tmpInImage[i + 1][k + 1] = inImage[i][k]\r\n # 회선 연산.\r\n for i in range(1, inW):\r\n for k in range(1, inH):\r\n # 1점에 대해서 3x3마스크 연산 --> 모두 곱해서 더하기.\r\n s = 0.0\r\n for m in range(0, mSize):\r\n for n in range(0, mSize):\r\n s += mask[m][n] * tmpInImage[i + m][k + n]\r\n tmpOutImage[i - 1][k - 1] = s\r\n\r\n # 결과값 처리 (0<, 255>, mask합계가 0이면 어두워)\r\n # for i in range(0, outW):\r\n # for k in range(0, outH):\r\n # tmpOutImage[i][k] += 127.0\r\n\r\n # 임시 출력 --> 출력\r\n for i in range(0, outW):\r\n for k in range(0, outH):\r\n if tmpOutImage[i][k] < 0:\r\n outImage[i][k] = 0\r\n elif tmpOutImage[i][k] > 255:\r\n outImage[i][k] = 255\r\n else:\r\n outImage[i][k] = int(tmpOutImage[i][k])\r\n\r\n display()\r\n\r\n\r\ndef edge2() :\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n\r\n outW = inW;outH = inH;outImage = [];\r\n tmpList = []\r\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\r\n tmpList = []\r\n for k in range(outW):\r\n tmpList.append(0)\r\n outImage.append(tmpList)\r\n\r\n mSize = 3\r\n mask = [[0, 0, 0], [-1, 1, 0], [0, 0, 0]]\r\n\r\n tmpInImage = []\r\n for i in range(0, inW + 2):\r\n tmpList = []\r\n for k in range(0, inH + 2):\r\n tmpList.append(127)\r\n tmpInImage.append(tmpList)\r\n # 임시 출력 영상\r\n tmpOutImage = []\r\n for i in range(0, outW):\r\n tmpList = []\r\n for k in range(0, outH):\r\n tmpList.append(0)\r\n tmpOutImage.append(tmpList)\r\n # 입력 ==> 임시 입력\r\n for i in range(0, inW):\r\n for k in range(0, inH):\r\n tmpInImage[i + 1][k + 1] = inImage[i][k]\r\n # 회선 연산.\r\n for i in range(1, inW):\r\n for k in range(1, inH):\r\n\r\n s = 0.0\r\n for m in range(0, mSize):\r\n for n in range(0, mSize):\r\n s += mask[m][n] * tmpInImage[i + m][k + n]\r\n tmpOutImage[i - 1][k - 1] = s\r\n\r\n for i in range(0, outW):\r\n for k in range(0, outH):\r\n if tmpOutImage[i][k] < 0:\r\n outImage[i][k] = 0\r\n elif tmpOutImage[i][k] > 255:\r\n outImage[i][k] = 255\r\n else:\r\n outImage[i][k] = int(tmpOutImage[i][k])\r\n\r\n display()\r\n\r\n\r\ndef edge3() :\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n\r\n # 중요! 
출력메모리의 크기를 결정\r\n outW = inW; outH = inH;outImage = [];tmpList = []\r\n for i in range(outH):\r\n tmpList = []\r\n for k in range(outW):\r\n tmpList.append(0)\r\n outImage.append(tmpList)\r\n\r\n mSize = 3\r\n mask = [[0, -1, 0], [0, 1, 0], [0, 0, 0]]\r\n\r\n tmpInImage = []\r\n for i in range(0, inW + 2):\r\n tmpList = []\r\n for k in range(0, inH + 2):\r\n tmpList.append(127)\r\n tmpInImage.append(tmpList)\r\n\r\n tmpOutImage = []\r\n for i in range(0, outW):\r\n tmpList = []\r\n for k in range(0, outH):\r\n tmpList.append(0)\r\n tmpOutImage.append(tmpList)\r\n\r\n for i in range(0, inW):\r\n for k in range(0, inH):\r\n tmpInImage[i + 1][k + 1] = inImage[i][k]\r\n\r\n for i in range(1, inW):\r\n for k in range(1, inH):\r\n\r\n s = 0.0\r\n for m in range(0, mSize):\r\n for n in range(0, mSize):\r\n s += mask[m][n] * tmpInImage[i + m][k + n]\r\n tmpOutImage[i - 1][k - 1] = s\r\n\r\n\r\n for i in range(0, outW):\r\n for k in range(0, outH):\r\n if tmpOutImage[i][k] < 0:\r\n outImage[i][k] = 0\r\n elif tmpOutImage[i][k] > 255:\r\n outImage[i][k] = 255\r\n else:\r\n outImage[i][k] = int(tmpOutImage[i][k])\r\n\r\n display()\r\n######################Menu3 기하학처리##############################\r\n\r\ndef upDown(): # 상하 반전 알고리즘\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n outW = inW;outH = inH;outImage = [];tmpList = []\r\n for i in range(outH):\r\n tmpList = []\r\n for k in range(outW):\r\n tmpList.append(0)\r\n outImage.append(tmpList)\r\n\r\n for i in range(inH):\r\n for k in range(inW):\r\n outImage[outW - 1 - i][k] = inImage[i][k]\r\n\r\n display()\r\n\r\ndef leftRight() : #좌우 반전 알고리즘\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n outW = inW; outH = inH;\r\n for i in range(outH):\r\n tmpList = []\r\n for k in range(outW):\r\n tmpList.append(0)\r\n outImage.append(tmpList)\r\n for i in range(inH) :\r\n for k in range(inW) :\r\n outImage[i][outW-1-k] = inImage[i][k]\r\n display()\r\n\r\ndef panImage():\r\n global panYN\r\n panYN = True\r\n\r\n\r\ndef mouseClick(event):\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n global sx, sy, ex, ey, panYN\r\n if not panYN:\r\n return\r\n sx = event.x;\r\n sy = event.y;\r\n\r\n\r\ndef mouseDrop(event):\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n global sx, sy, ex, ey, panYN\r\n if not panYN:\r\n return\r\n ex = event.x;\r\n ey = event.y;\r\n my = sx - ex;\r\n mx = sy - ey\r\n\r\n outW = inW;\r\n outH = inH;\r\n outImage = [];\r\n tmpList = []\r\n for i in range(outH):\r\n tmpList = []\r\n for k in range(outW):\r\n tmpList.append(0)\r\n outImage.append(tmpList)\r\n for i in range(inH):\r\n for k in range(inW):\r\n if 0 <= i - mx < outH and 0 <= k - my < outW:\r\n outImage[i - mx][k - my] = inImage[i][k]\r\n panYN = False\r\n display()\r\n\r\n\r\ndef zoomOut(): # 축소하기 알고리즘\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n # 중요! 
출력메모리의 크기를 결정\r\n scale = askinteger('축소하기', '축소할 배수-->', minvalue=2, maxvalue=32)\r\n outW = int(inW / scale);outH = int(inH / scale); outImage = [];tmpList = []\r\n for i in range(outH):\r\n tmpList = []\r\n for k in range(outW):\r\n tmpList.append(0)\r\n outImage.append(tmpList)\r\n\r\n for i in range(inH):\r\n for k in range(inW):\r\n outImage[int(i / scale)][int(k / scale)] = inImage[i][k]\r\n display()\r\n\r\ndef zoomIn() : # 확대하기 알고리즘\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n scale = askinteger('확대하기', '확대할 배수-->', minvalue=2, maxvalue=32)\r\n outW = int(inW*scale); outH = int(inH*scale);\r\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\r\n tmpList = []\r\n for k in range(outW):\r\n tmpList.append(0)\r\n outImage.append(tmpList)\r\n\r\n for i in range(inH) :\r\n for k in range(inW) :\r\n outImage[int(i*scale)][int(k*scale)] = inImage[i][k]\r\n\r\n display()\r\n\r\ndef rotate1():\r\n global inImage, outImage, inH, inW, outH, outW, window, canvas, paper, filename\r\n degree = askinteger('각도', '값 입력', minvalue=0, maxvalue=360)\r\n # 출력 파일의 크기 결정.\r\n outW = inW;\r\n outH = inH\r\n # 출력 영상 메모리 확보\r\n outImage = []\r\n for i in range(0, inW):\r\n tmpList = []\r\n for k in range(0, inH):\r\n tmpList.append(0)\r\n outImage.append(tmpList)\r\n ### 진짜 영상 처리 알고리즘 ###\r\n radian = degree * 3.141592 / 180.0\r\n for i in range(0, inW):\r\n for k in range(0, inH):\r\n xs = i;\r\n ys = k\r\n xd = int(math.cos(radian) * xs - math.sin(radian) * ys)\r\n yd = int(math.sin(radian) * xs + math.cos(radian) * ys)\r\n if 0 <= xd < outW and 0 <= yd < outH:\r\n outImage[xd][yd] = inImage[xs][ys]\r\n ###############################\r\n display()\r\n\r\n\r\ndef rotate2():\r\n global inImage, outImage, inH, inW, outH, outW, window, canvas, paper, filename\r\n degree = askinteger('각도', '값 입력', minvalue=0, maxvalue=360)\r\n # 출력 파일의 크기 결정.\r\n outW = inW;\r\n outH = inH\r\n # 출력 영상 메모리 확보\r\n outImage = []\r\n for i in range(0, inW):\r\n tmpList = []\r\n for k in range(0, inH):\r\n tmpList.append(0)\r\n outImage.append(tmpList)\r\n ### 진짜 영상 처리 알고리즘 ###\r\n radian = degree * 3.141592 / 180.0\r\n cx = int(inW / 2);\r\n cy = int(inH / 2)\r\n for i in range(0, outW):\r\n for k in range(0, outH):\r\n xs = i;\r\n ys = k\r\n xd = int(math.cos(radian) * (xs - cx)\r\n - math.sin(radian) * (ys - cy)) + cx\r\n yd = int(math.sin(radian) * (xs - cx)\r\n + math.cos(radian) * (ys - cy)) + cy\r\n if 0 <= xd < outW and 0 <= yd < outH:\r\n outImage[xs][ys] = inImage[xd][yd]\r\n else:\r\n outImage[xs][ys] = 255\r\n display()\r\n\r\n\r\ndef rotate3():\r\n global inImage, outImage, inH, inW, outH, outW, window, canvas, paper, filename\r\n degree = askinteger('각도', '값 입력', minvalue=0, maxvalue=360)\r\n\r\n radian90 = (90 - degree) * 3.141592 / 180.0\r\n radian = degree * 3.141592 / 180.0\r\n\r\n outW = int(inH * math.cos(radian90) + inW * math.cos(radian))\r\n outH = int(inH * math.cos(radian) + inW * math.cos(radian90))\r\n\r\n outImage = []\r\n for i in range(0, outW):\r\n tmpList = []\r\n for k in range(0, outH):\r\n tmpList.append(0)\r\n outImage.append(tmpList)\r\n\r\n inImage2 = []\r\n for i in range(0, outW):\r\n tmpList = []\r\n for k in range(0, outH):\r\n tmpList.append(255)\r\n inImage2.append(tmpList)\r\n\r\n # inImage --> inImage2의 중앙으로\r\n gap = int((outW - inW) / 2)\r\n for i in range(0, inW):\r\n for k in range(0, inH):\r\n inImage2[i + gap][k + gap] = inImage[i][k]\r\n\r\n\r\n cx = int(outW / 2);\r\n cy = int(outH / 2)\r\n\r\n for i in range(0, outW):\r\n for k in range(0, 
outH):\r\n xs = i;\r\n ys = k\r\n xd = int(math.cos(radian) * (xs - cx)\r\n - math.sin(radian) * (ys - cy)) + cx\r\n yd = int(math.sin(radian) * (xs - cx)\r\n + math.cos(radian) * (ys - cy)) + cy\r\n\r\n if 0 <= xd < outW and 0 <= yd < outH:\r\n outImage[xs][ys] = inImage2[xd][yd]\r\n else:\r\n outImage[xs][ys] = 255\r\n ###############################\r\n display()\r\n\r\n\r\n\r\n########################################\r\n\r\n\r\n############MEnu(데이터분석)###########\r\ndef a_average(): # 입출력 영상의 평균값\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n rawSum = 0\r\n for i in range(inH):\r\n for k in range(inW):\r\n rawSum += inImage[i][k]\r\n inRawAvg = int(rawSum / (inH * inW))\r\n rawSum = 0\r\n for i in range(outH):\r\n for k in range(outW):\r\n rawSum += outImage[i][k]\r\n outRawAvg = int(rawSum / (outH * outW))\r\n subWindow = Toplevel(window)\r\n subWindow.geometry('200x100')\r\n label1 = Label(subWindow, text='입력영상 평균값 -->' + str(inRawAvg));\r\n label1.pack()\r\n label2 = Label(subWindow, text='출력영상 평균값 -->' + str(outRawAvg));\r\n label2.pack()\r\n subWindow.mainloop()\r\n\r\n\r\ndef a_histogram(): # 히스토 그램\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n countList = [0] * 256;\r\n normalList = [0] * 256\r\n\r\n for i in range(outH):\r\n for k in range(outW):\r\n value = outImage[i][k]\r\n countList[value] += 1\r\n\r\n # 정규화된값 = (카운트값 - 최소값) * High / (최대값 - 최소값)\r\n maxVal = max(countList);\r\n minVal = min(countList)\r\n for i in range(len(countList)):\r\n normalList[i] = (countList[i] - minVal) * 256 / (maxVal - minVal)\r\n\r\n # 화면 출력\r\n subWindow = Toplevel(window)\r\n subWindow.geometry('256x256')\r\n subCanvas = Canvas(subWindow, width=256, height=256)\r\n subPaper = PhotoImage(width=256, height=256)\r\n subCanvas.create_image((256 / 2, 256 / 2), image=subPaper, state='normal')\r\n\r\n for i in range(0, 256):\r\n for k in range(0, int(normalList[i])):\r\n data = 0\r\n subPaper.put('#%02x%02x%02x' % (data, data, data), (i, 255 - k))\r\n\r\n subCanvas.pack(expand=1, anchor=CENTER)\r\n subWindow.mainloop()\r\n\r\n\r\ndef a_histogram2(): # 히스토 그램\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n countList = [0] * 256\r\n\r\n for i in range(outH):\r\n for k in range(outW):\r\n value = outImage[i][k]\r\n countList[value] += 1\r\n\r\n plt.plot(countList)\r\n plt.show()\r\n\r\n\r\ndef a_histoStretch(): # 히스토그램 스트래칭 알고리즘\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n # 중요! 출력메모리의 크기를 결정\r\n outW = inW;\r\n outH = inH;\r\n outImage = [];\r\n tmpList = []\r\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\r\n tmpList = []\r\n for k in range(outW):\r\n tmpList.append(0)\r\n outImage.append(tmpList)\r\n\r\n maxVal, minVal, HIGH = 0, 255, 255\r\n for i in range(inH):\r\n for k in range(inW):\r\n data = inImage[i][k]\r\n if data > maxVal:\r\n maxVal = data\r\n if data < minVal:\r\n minVal = data\r\n # 히스토그램 스트래칭\r\n # OUT = (IN - 최소값) * HIGH / (최대값 - 최소값)\r\n for i in range(inH):\r\n for k in range(inW):\r\n value = int((inImage[i][k] - minVal) * HIGH / (maxVal - minVal))\r\n if value < 0:\r\n value = 0\r\n elif value > 255:\r\n value = 255\r\n outImage[i][k] = value\r\n\r\n display()\r\n\r\n\r\ndef a_endInSearch(): # 엔드-인 탐색 알고리즘\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n # 중요! 
출력메모리의 크기를 결정\r\n outW = inW;\r\n outH = inH;\r\n outImage = [];\r\n tmpList = []\r\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\r\n tmpList = []\r\n for k in range(outW):\r\n tmpList.append(0)\r\n outImage.append(tmpList)\r\n\r\n maxVal, minVal, HIGH = 0, 255, 255\r\n for i in range(inH):\r\n for k in range(inW):\r\n data = inImage[i][k]\r\n if data > maxVal:\r\n maxVal = data\r\n if data < minVal:\r\n minVal = data\r\n limit = askinteger('엔드인', '상하 범위:', minvalue=1, maxvalue=127)\r\n maxVal -= limit\r\n minVal += limit\r\n # 히스토그램 스트래칭\r\n # OUT = (IN - 최소값) * HIGH / (최대값 - 최소값)\r\n for i in range(inH):\r\n for k in range(inW):\r\n value = int((inImage[i][k] - minVal) * HIGH / (maxVal - minVal))\r\n if value < 0:\r\n value = 0\r\n elif value > 255:\r\n value = 255\r\n outImage[i][k] = value\r\n\r\n display()\r\n\r\n\r\ndef a_histoEqual(): # 히스토그램 평활화 알고리즘\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n # 중요! 출력메모리의 크기를 결정\r\n outW = inW;\r\n outH = inH;\r\n outImage = [];\r\n tmpList = []\r\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\r\n tmpList = []\r\n for k in range(outW):\r\n tmpList.append(0)\r\n outImage.append(tmpList)\r\n\r\n histo = [0] * 255;\r\n sumHisto = [0] * 255;\r\n normalHisto = [0] * 255\r\n HIGH = 255\r\n # 히스토그램 작성\r\n for i in range(inH):\r\n for k in range(inW):\r\n value = inImage[i][k]\r\n histo[value] += 1\r\n # 누적 히스토그램 작성\r\n sVal = 0\r\n for i in range(len(histo)):\r\n sVal += histo[i]\r\n sumHisto[i] = sVal\r\n # 정규화된 누적 히스토그램 : (누적합 / (행개수*열개수)) * HIGH\r\n for i in range(len(sumHisto)):\r\n normalHisto[i] = int(sumHisto[i] / (outW * outH) * HIGH)\r\n\r\n # 정규화된 값으로 출력하기\r\n for i in range(inH):\r\n for k in range(inW):\r\n index = inImage[i][k]\r\n outImage[i][k] = normalHisto[index]\r\n display()\r\n\r\n\r\n########################################\r\n##########menu(4)\r\ndef drawSheet(cList) :\r\n global cellList, input_file\r\n print(cellList)\r\n if cellList == None or cellList == [] :\r\n pass\r\n else :\r\n for row in cellList:\r\n for col in row:\r\n col.destroy()\r\n\r\n rowNum = len(cList)\r\n colNum = len(cList[0])\r\n cellList = []\r\n\r\n for i in range(0, rowNum):\r\n tmpList = []\r\n for k in range(0, colNum):\r\n ent = Entry(window, text='')\r\n tmpList.append(ent)\r\n ent.grid(row=i, column=k)\r\n cellList.append(tmpList)\r\n\r\n for i in range(0, rowNum):\r\n for k in range(0, colNum):\r\n cellList[i][k].insert(0, cList[i][k])\r\n\r\ndef saveCSV():\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n output_file = asksaveasfile(parent=window, mode='w',\r\n defaultextension=\"*.csv\", filetypes=((\"CSV파일\", \"*.csv\"), (\"모든파일\", \"*.*\")))\r\n output_file = output_file.name\r\n\r\n header = ['Column', 'Row', 'Value']\r\n with open(output_file, 'w', newline='') as filewriter:\r\n csvWriter = csv.writer(filewriter)\r\n csvWriter.writerow(header)\r\n for row in range(outW):\r\n for col in range(outH):\r\n data = outImage[row][col]\r\n row_list = [row, col, data]\r\n csvWriter.writerow(row_list)\r\n\r\n print('OK!')\r\n\r\n\r\ndef saveShuffleCSV():\r\n pass\r\n\r\n\r\ndef loadCSV(fname):\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n fsize = -1\r\n fp = open(fname, 'r')\r\n for f in fp:\r\n fsize += 1\r\n fp.close()\r\n inH = inW = int(math.sqrt(fsize))\r\n inImage = [];\r\n tmpList = []\r\n for i in range(inH):\r\n tmpList = []\r\n for k in range(inW):\r\n tmpList.append(0)\r\n inImage.append(tmpList)\r\n\r\n fp = open(fname, 
'r')\r\n csvFP = csv.reader(fp)\r\n next(csvFP)\r\n for row_list in csvFP:\r\n row = int(row_list[0])\r\n col = int(row_list[1])\r\n value = int(row_list[2])\r\n inImage[row][col] = value\r\n fp.close()\r\n\r\n\r\ndef openCSV():\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n filename = askopenfilename(parent=window,\r\n filetypes=((\"CSV파일\", \"*.csv\"), (\"모든파일\", \"*.*\")))\r\n loadCSV(filename)\r\n equal()\r\n\r\n\r\n\r\ndef saveSQLite():\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n global csvList, input_file\r\n con = sqlite3.connect('imageDB')\r\n cur = con.cursor()\r\n colList = []\r\n fname = os.path.basename(filename).split(\".\")[0]\r\n try:\r\n sql = \"CREATE TABLE imageTable( filename CHAR(20), resolution smallint\" + \\\r\n \", row smallint, col smallint, value smallint)\"\r\n cur.execute(sql)\r\n except:\r\n pass\r\n\r\n for i in range(inW):\r\n for k in range(inH):\r\n sql = \"INSERT INTO imageTable VALUES('\" + fname + \"',\" + str(inW) + \\\r\n \",\" + str(i) + \",\" + str(k) + \",\" + str(inImage[i][k]) + \")\"\r\n cur.execute(sql)\r\n\r\n con.commit()\r\n\r\n cur.close()\r\n con.close()\r\n print('Ok!')\r\n\r\ndef openSQLite():\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH,inImage,outImage\r\n global csvList, input_file\r\n con = sqlite3.connect('imageDB')\r\n cur = con.cursor()\r\n try:\r\n sql = \"SELECT DISTINCT filename, resolution FROM imageTable\"\r\n cur.execute(sql)\r\n tableNameList = []\r\n while True:\r\n row = cur.fetchone()\r\n if row == None:\r\n break\r\n tableNameList.append(row[0] + ':' + str(row[1]))\r\n\r\n ######## 내부 함수 (Inner Function) : 함수 안의 함수,지역함수 #######\r\n def selectTable():\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n selectedIndex = listbox.curselection()[0]\r\n subWindow.destroy()\r\n fname, res = tableNameList[selectedIndex].split(':')\r\n filename = fname\r\n sql = \"SELECT row, col, value FROM imageTable WHERE filename='\" + \\\r\n fname + \"' AND resolution=\" + res\r\n print(sql)\r\n cur.execute(sql)\r\n\r\n inH = inW = int(res)\r\n inImage = []\r\n tmpList = []\r\n for i in range(inH): # 입력메모리 확보(0으로 초기화)\r\n tmpList = []\r\n for k in range(inW):\r\n tmpList.append(0)\r\n inImage.append(tmpList)\r\n\r\n while True:\r\n row_tuple = cur.fetchone()\r\n if row_tuple == None:\r\n break\r\n row, col, value = row_tuple\r\n inImage[row][col] = value\r\n\r\n\r\n cur.close()\r\n con.close()\r\n equal()\r\n print(\"Ok! openSQLite\")\r\n\r\n ################################################################\r\n\r\n subWindow = Toplevel(window)\r\n listbox = Listbox(subWindow)\r\n button = Button(subWindow, text='선택', command=selectTable)\r\n listbox.pack();\r\n button.pack()\r\n for sName in tableNameList:\r\n listbox.insert(END, sName)\r\n subWindow.lift()\r\n\r\n except:\r\n cur.close()\r\n con.close()\r\n print(\"Error! 
openSQLite\")\r\n\r\n\r\n\r\ndef saveMySQL():\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n global csvList, input_file\r\n con = pymysql.connect(host='192.168.174.129', user='root',\r\n password='1234', db='imageDB', charset='utf8') # 데이터베이스 지정(또는 연결)\r\n cur = con.cursor() # 연결 통로 생성 (쿼리문을 날릴 통로)\r\n # 열이름 리스트 만들기\r\n colList = []\r\n fname = os.path.basename(filename).split(\".\")[0]\r\n try:\r\n sql = \"CREATE TABLE imageTable( filename CHAR(20), resolution smallint\" + \\\r\n \", row smallint, col smallint, value smallint)\"\r\n cur.execute(sql)\r\n except:\r\n pass\r\n\r\n try:\r\n sql = \"DELETE FROM imageTable WHERE filename='\" + \\\r\n fname + \"' AND resolution=\" + str(outW)\r\n cur.execute(sql)\r\n con.commit()\r\n except:\r\n pass\r\n\r\n for i in range(inW):\r\n for k in range(inH):\r\n sql = \"INSERT INTO imageTable VALUES('\" + fname + \"',\" + str(outW) + \\\r\n \",\" + str(i) + \",\" + str(k) + \",\" + str(outImage[i][k]) + \")\"\r\n cur.execute(sql)\r\n\r\n con.commit()\r\n\r\n cur.close()\r\n con.close() # 데이터베이스 연결 종료\r\n print('Ok! saveMySQL')\r\n\r\n\r\ndef openMySQL():\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n global csvList, input_file\r\n con = pymysql.connect(host='192.168.174.129', user='root',\r\n password='1234', db='imageDB', charset='utf8') # 데이터베이스 지정(또는 연결)\r\n cur = con.cursor() # 연결 통로 생성 (쿼리문을 날릴 통로)\r\n try:\r\n sql = \"SELECT DISTINCT filename, resolution FROM imageTable\"\r\n cur.execute(sql)\r\n tableNameList = [] # ['강아지:128', '강아지:512' ....]\r\n while True:\r\n row = cur.fetchone()\r\n if row == None:\r\n break\r\n tableNameList.append(row[0] + ':' + str(row[1]))\r\n\r\n ######## 내부 함수 (Inner Function) : 함수 안의 함수,지역함수 #######\r\n def selectTable():\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n selectedIndex = listbox.curselection()[0]\r\n subWindow.destroy()\r\n fname, res = tableNameList[selectedIndex].split(':')\r\n filename = fname\r\n sql = \"SELECT row, col, value FROM imageTable WHERE filename='\" + \\\r\n fname + \"' AND resolution=\" + res\r\n print(sql)\r\n cur.execute(sql)\r\n\r\n inH = inW = int(res)\r\n inImage = [];\r\n tmpList = []\r\n for i in range(inH): # 입력메모리 확보(0으로 초기화)\r\n tmpList = []\r\n for k in range(inW):\r\n tmpList.append(0)\r\n inImage.append(tmpList)\r\n while True:\r\n row_tuple = cur.fetchone()\r\n if row_tuple == None:\r\n break\r\n row, col, value = row_tuple\r\n inImage[row][col] = value\r\n\r\n cur.close()\r\n con.close()\r\n equal()\r\n print(\"Ok! openMySQL\")\r\n\r\n ################################################################\r\n\r\n subWindow = Toplevel(window)\r\n listbox = Listbox(subWindow)\r\n button = Button(subWindow, text='선택', command=selectTable)\r\n listbox.pack();\r\n button.pack()\r\n for sName in tableNameList:\r\n listbox.insert(END, sName)\r\n subWindow.lift()\r\n\r\n\r\n\r\n except:\r\n cur.close()\r\n con.close()\r\n print(\"Error! 
openMySQL\")\r\n\r\n\r\n\r\n\r\n## Excel\r\ndef saveExcel1():\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n output_file = asksaveasfile(parent=window, mode='w',\r\n defaultextension=\"*.xls\", filetypes=((\"XLS파일\", \"*.xls\"), (\"모든파일\", \"*.*\")))\r\n output_file = output_file.name\r\n\r\n sheetName = os.path.basename(output_file).split(\".\")[0]\r\n wb = xlwt.Workbook()\r\n ws = wb.add_sheet(sheetName)\r\n\r\n for rowNum in range(outH):\r\n for colNum in range(outW):\r\n data = outImage[rowNum][colNum]\r\n ws.write(rowNum, colNum, data)\r\n\r\n wb.save(output_file)\r\n print('OK! saveExcel1')\r\n\r\n\r\n\r\ndef saveExcel2():\r\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\r\n output_file = asksaveasfile(parent=window, mode='w',\r\n defaultextension=\"*.xlsx\", filetypes=((\"XLSX파일\", \"*.xls\"), (\"모든파일\", \"*.*\")))\r\n output_file = output_file.name\r\n\r\n sheetName = os.path.basename(output_file).split(\".\")[0]\r\n wb = xlsxwriter.Workbook(output_file)\r\n ws = wb.add_worksheet(sheetName)\r\n\r\n ws.set_column(0, outW, 1.0) # 약 0.34 쯤\r\n for r in range(outH):\r\n ws.set_row(r, 9.5) # 약 0.35 쯤\r\n for rowNum in range(outW):\r\n for colNum in range(outH):\r\n data = outImage[rowNum][colNum]\r\n # data 값으로 셀의 배경색을 조절 #000000~#FFFFFF\r\n if data > 15:\r\n hexStr = '#' + (hex(data)[2:]) * 3\r\n else:\r\n hexStr = '#' + ('0' + hex(data)[2:]) * 3\r\n\r\n # 셀의 포맷을 준비\r\n cell_format = wb.add_format()\r\n cell_format.set_bg_color(hexStr)\r\n\r\n ws.write(rowNum, colNum, '', cell_format)\r\n\r\n wb.close()\r\n print('OK! saveExcel2')\r\n\r\ndef excelData() :\r\n global csvList, input_file\r\n csvList = []\r\n input_file = askopenfilename(parent=window,\r\n filetypes=((\"엑셀파일\", \"*.xls;*.xlsx\"), (\"모든파일\", \"*.*\")))\r\n workbook = xlrd.open_workbook(input_file)\r\n sheetCount = workbook.nsheets # 속성\r\n for worksheet in workbook.sheets():\r\n sheetName = worksheet.name\r\n sRow = worksheet.nrows\r\n sCol = worksheet.ncols\r\n print(sheetName, sRow, sCol)\r\n\r\n\r\ndef exDataone() :\r\n global csvList, input_file\r\n csvList = []\r\n input_file = askopenfilename(parent=window,\r\n filetypes=((\"엑셀파일\", \"*.xls;*.xlsx\"), (\"모든파일\", \"*.*\")))\r\n workbook = xlrd.open_workbook(input_file)\r\n sheetCount = workbook.nsheets # 속성\r\n sheet1 = workbook.sheets()[0]\r\n sheetName = sheet1.name\r\n sRow = sheet1.nrows\r\n sCol = sheet1.ncols\r\n #print(sheetName, sRow, sCol)\r\n # Worksheet --> csvList\r\n for i in range(sRow) :\r\n tmpList = []\r\n for k in range(sCol) :\r\n value = sheet1.cell_value(i,k)\r\n tmpList.append(value)\r\n csvList.append(tmpList)\r\n\r\n drawSheet(csvList)\r\n\r\ndef exDataAll() :\r\n global csvList, input_file\r\n csvList = []\r\n input_file = askopenfilename(parent=window,\r\n filetypes=((\"엑셀파일\", \"*.xls;*.xlsx\"), (\"모든파일\", \"*.*\")))\r\n workbook = xlrd.open_workbook(input_file)\r\n sheetCount = workbook.nsheets # 속성\r\n for worksheet in workbook.sheets():\r\n sRow = worksheet.nrows\r\n sCol = worksheet.ncols\r\n # Worksheet --> csvList\r\n for i in range(sRow) :\r\n tmpList = []\r\n for k in range(sCol) :\r\n value = worksheet.cell_value(i,k)\r\n tmpList.append(value)\r\n csvList.append(tmpList)\r\n\r\n drawSheet(csvList)\r\n\r\ndef exDataSelect() :\r\n global csvList, input_file\r\n csvList = []\r\n input_file = askopenfilename(parent=window,\r\n filetypes=((\"엑셀파일\", \"*.xls;*.xlsx\"), (\"모든파일\", \"*.*\")))\r\n workbook = xlrd.open_workbook(input_file)\r\n sheetNameList = []\r\n 
for worksheet in workbook.sheets():\r\n sheetNameList.append(worksheet.name)\r\n\r\n ##################################\r\n def selectSheet() :\r\n selectedIndex = listbox.curselection()[0]\r\n subWindow.destroy()\r\n sheet1 = workbook.sheets()[selectedIndex]\r\n sRow = sheet1.nrows\r\n sCol = sheet1.ncols\r\n for i in range(sRow):\r\n tmpList = []\r\n for k in range(sCol):\r\n value = sheet1.cell_value(i, k)\r\n tmpList.append(value)\r\n csvList.append(tmpList)\r\n drawSheet(csvList)\r\n\r\n subWindow = Toplevel(window) # window의 하위로 지정\r\n listbox = Listbox(subWindow)\r\n button = Button(subWindow, text='선택', command=selectSheet)\r\n listbox.pack(); button.pack()\r\n for sName in sheetNameList :\r\n listbox.insert(END, sName)\r\n subWindow.lift()\r\n\r\n\r\n\r\n\r\n\r\n\r\n## 전역 변수부\r\nwindow, canvas, paper, filename = [None] * 4\r\ninImage, outImage = [], [];\r\ninW, inH, outW, outH = [0] * 4\r\npanYN = False;\r\nsx, sy, ex, ey = [0] * 4\r\nVIEW_X, VIEW_Y = 128, 128\r\nstatus = None\r\n\r\n## 메인 코드부\r\nwindow = Tk();\r\nwindow.geometry('400x400');\r\nwindow.title('영상 처리&데이터 분석 Ver 1.0 (Beta 2)')\r\nwindow.bind(\"<Button-1>\", mouseClick)\r\nwindow.bind(\"<ButtonRelease-1>\", mouseDrop)\r\n\r\nstatus = Label(window, text='이미지 정보:', bd=1, relief=SUNKEN, anchor=W)\r\nstatus.pack(side=BOTTOM, fill=X)\r\n\r\nmainMenu = Menu(window);\r\nwindow.config(menu=mainMenu)\r\nfileMenu = Menu(mainMenu);\r\nmainMenu.add_cascade(label='파일', menu=fileMenu)\r\nfileMenu.add_command(label='열기', command=openFile)\r\nfileMenu.add_command(label='저장', command=saveFile)\r\nfileMenu.add_separator()\r\nfileMenu.add_command(label='종료', command=exitFile)\r\n\r\npixelMenu = Menu(mainMenu);\r\nmainMenu.add_cascade(label='화소점처리', menu=pixelMenu)\r\npixelMenu.add_command(label='동일영상', command=equal)\r\npixelMenu.add_command(label='밝게하기', command=addImage)\r\npixelMenu.add_command(label='밝게곱하기',command=multiplyImage)\r\npixelMenu.add_command(label='어둡게빼기',command= decreaseImage)\r\npixelMenu.add_command(label='어둡게나누기',command=multiplyImage)\r\npixelMenu.add_separator()\r\npixelMenu.add_command(label='영상합성', command=morphing)\r\n\r\nareaMenu = Menu(mainMenu);\r\nmainMenu.add_cascade(label='화소영역처리', menu=areaMenu)\r\nareaMenu.add_command(label='엠보싱', command=embossing)\r\nareaMenu.add_command(label='블러링', command=blurr)\r\nareaMenu.add_command(label='샤프닝', command=sharp)\r\nareaMenu.add_command(label='경계선추출(고주파)', command=edge1)\r\nareaMenu.add_command(label='경계선추출(수직에지)', command=edge2)\r\nareaMenu.add_command(label='경계선추출(수평에지)', command=edge3)\r\n\r\ngeoMenu = Menu(mainMenu);\r\nmainMenu.add_cascade(label='기하학 처리', menu=geoMenu)\r\ngeoMenu.add_command(label='상하반전', command=upDown)\r\ngeoMenu.add_command(label='화면이동', command=panImage)\r\ngeoMenu.add_command(label='화면축소', command=zoomOut)\r\ngeoMenu.add_command(label='좌우반전',command=leftRight)\r\ngeoMenu.add_command(label='확대하기',command=zoomIn)\r\ngeoMenu.add_separator()\r\ngeoMenu.add_command(label='영상회전(포워딩)', command=rotate1)\r\ngeoMenu.add_command(label='영상회전(백워딩 및 중앙)', command=rotate2)\r\ngeoMenu.add_command(label='영상회전(확대)', command=rotate3)\r\n\r\n\r\nanalyzeMenu = Menu(mainMenu);\r\nmainMenu.add_cascade(label='데이터분석', menu=analyzeMenu)\r\nanalyzeMenu.add_command(label='평균값', command=a_average)\r\nanalyzeMenu.add_command(label='히스토그램', command=a_histogram)\r\nanalyzeMenu.add_command(label='히스토그램(matplotlib)', command=a_histogram2)\r\nanalyzeMenu.add_separator()\r\nanalyzeMenu.add_command(label='히스토그램 스트래칭', 
command=a_histoStretch)\r\nanalyzeMenu.add_command(label='엔드-인 탐색', command=a_endInSearch)\r\nanalyzeMenu.add_command(label='히스토그램 평활화', command=a_histoEqual)\r\n\r\nCSVMenu = Menu(mainMenu);\r\nmainMenu.add_cascade(label='CSV 처리', menu=CSVMenu)\r\nCSVMenu.add_command(label='CSV로 내보내기', command=saveCSV)\r\nCSVMenu.add_command(label='CSV(셔플)로 내보내기', command=saveShuffleCSV)\r\nCSVMenu.add_command(label='CSV 불러오기', command=openCSV)\r\n\r\nSQLiteMenu = Menu(mainMenu);\r\nmainMenu.add_cascade(label='SQLite 처리', menu=SQLiteMenu)\r\nSQLiteMenu.add_command(label='SQLite로 내보내기', command=saveSQLite)\r\nSQLiteMenu.add_command(label='SQLite에서 가져오기', command=openSQLite)\r\n\r\n\r\nMySQLMenu = Menu(mainMenu);\r\nmainMenu.add_cascade(label='MYSQL 처리', menu=MySQLMenu)\r\nMySQLMenu.add_command(label='MySQL로 내보내기', command=saveMySQL)\r\nMySQLMenu.add_command(label='MySQL에서 가져오기', command=openMySQL)\r\n\r\nExcelMenu = Menu(mainMenu);\r\nmainMenu.add_cascade(label='Excel 처리', menu=ExcelMenu)\r\nExcelMenu.add_command(label='Excel로 내보내기(숫자)', command=saveExcel1)\r\nExcelMenu.add_command(label='Excel로 내보내기(음영)', command=saveExcel2)\r\nanalyzeMenu.add_separator()\r\nExcelMenu.add_command(label='Excel정보 보기', command=excelData)\r\nExcelMenu.add_command(label='Excel내용 보기 - 1st', command=exDataone )\r\nExcelMenu.add_command(label='Excel내용 보기 - All', command=exDataAll)\r\nExcelMenu.add_command(label='Excel내용 보기 - Select', command=exDataSelect)\r\nwindow.mainloop()" ]
[ [ "matplotlib.pyplot.plot", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
devforfu/catalyst
[ "0ec665981135b264120cb5c4c04d56034f1c831c" ]
[ "examples/atari/src/atari_wrappers.py" ]
[ "from collections import deque\nimport numpy as np\nimport gym\nfrom gym import spaces\nimport cv2\n\ncv2.ocl.setUseOpenCL(False)\n\n\nclass TimeLimit(gym.Wrapper):\n def __init__(self, env, max_episode_steps=None):\n super().__init__(env)\n self._max_episode_steps = max_episode_steps\n self._elapsed_steps = 0\n\n def step(self, ac):\n observation, reward, done, info = self.env.step(ac)\n self._elapsed_steps += 1\n if self._elapsed_steps >= self._max_episode_steps:\n done = True\n info[\"TimeLimit.truncated\"] = True\n return observation, reward, done, info\n\n def reset(self, **kwargs):\n self._elapsed_steps = 0\n return self.env.reset(**kwargs)\n\n\nclass TransposeObs(gym.ObservationWrapper):\n def __init__(self, env=None):\n \"\"\"\n Transpose observation space (base class)\n \"\"\"\n super().__init__(env)\n\n\nclass TransposeImage(TransposeObs):\n def __init__(self, env=None, op=[2, 0, 1]):\n \"\"\"\n Transpose observation space for images\n \"\"\"\n super().__init__(env)\n assert len(op) == 3, f\"Error: Operation, {str(op)}, must be dim3\"\n self.op = op\n obs_shape = self.observation_space.shape\n self.observation_space = gym.spaces.Box(\n self.observation_space.low[0, 0, 0],\n self.observation_space.high[0, 0, 0], [\n obs_shape[self.op[0]], obs_shape[self.op[1]],\n obs_shape[self.op[2]]\n ],\n dtype=self.observation_space.dtype\n )\n\n def observation(self, ob):\n return ob.transpose(self.op[0], self.op[1], self.op[2])\n\n\nclass NoopResetEnv(gym.Wrapper):\n def __init__(self, env, noop_max=30):\n \"\"\"\n Sample initial states by taking random number of no-ops on reset.\n No-op is assumed to be action 0.\n \"\"\"\n gym.Wrapper.__init__(self, env)\n self.noop_max = noop_max\n self.override_num_noops = None\n self.noop_action = 0\n assert env.unwrapped.get_action_meanings()[0] == \"NOOP\"\n\n def reset(self, **kwargs):\n \"\"\" Do no-op action for a number of steps in [1, noop_max].\"\"\"\n self.env.reset(**kwargs)\n if self.override_num_noops is not None:\n noops = self.override_num_noops\n else:\n noops = self.unwrapped.np_random.randint(1, self.noop_max + 1)\n assert noops > 0\n obs = None\n for _ in range(noops):\n obs, _, done, _ = self.env.step(self.noop_action)\n if done:\n obs = self.env.reset(**kwargs)\n return obs\n\n def step(self, ac):\n return self.env.step(ac)\n\n\nclass FireResetEnv(gym.Wrapper):\n def __init__(self, env):\n \"\"\"\n Take action on reset for environments that are fixed until firing.\n \"\"\"\n gym.Wrapper.__init__(self, env)\n assert env.unwrapped.get_action_meanings()[1] == \"FIRE\"\n assert len(env.unwrapped.get_action_meanings()) >= 3\n\n def reset(self, **kwargs):\n self.env.reset(**kwargs)\n obs, _, done, _ = self.env.step(1)\n if done:\n self.env.reset(**kwargs)\n obs, _, done, _ = self.env.step(2)\n if done:\n self.env.reset(**kwargs)\n return obs\n\n def step(self, ac):\n return self.env.step(ac)\n\n\nclass EpisodicLifeEnv(gym.Wrapper):\n def __init__(self, env):\n \"\"\"\n Make end-of-life == end-of-episode, but only reset on true game over.\n Done by DeepMind for the DQN and co. 
since it helps value estimation.\n \"\"\"\n gym.Wrapper.__init__(self, env)\n self.lives = 0\n self.was_real_done = True\n\n def step(self, action):\n obs, reward, done, info = self.env.step(action)\n self.was_real_done = done\n # check current lives, make loss of life terminal,\n # then update lives to handle bonus lives\n lives = self.env.unwrapped.ale.lives()\n if lives < self.lives and lives > 0:\n # for Qbert sometimes we stay in lives == 0\n # condition for a few frames\n # so it\"s important to keep lives > 0, so that we only reset once\n # the environment advertises done.\n done = True\n self.lives = lives\n return obs, reward, done, info\n\n def reset(self, **kwargs):\n \"\"\"Reset only when lives are exhausted.\n This way all states are still reachable even though lives are episodic,\n and the learner need not know about any of this behind-the-scenes.\n \"\"\"\n if self.was_real_done:\n obs = self.env.reset(**kwargs)\n else:\n # no-op step to advance from terminal/lost life state\n obs, _, _, _ = self.env.step(0)\n self.lives = self.env.unwrapped.ale.lives()\n return obs\n\n\nclass MaxAndSkipEnv(gym.Wrapper):\n def __init__(self, env, skip=4):\n \"\"\"Return only every `skip`-th frame\"\"\"\n gym.Wrapper.__init__(self, env)\n # most recent raw observations (for max pooling across time steps)\n self._obs_buffer = np.zeros(\n (2, ) + env.observation_space.shape, dtype=np.uint8\n )\n self._skip = skip\n\n def step(self, action):\n \"\"\"Repeat action, sum reward, and max over last observations.\"\"\"\n total_reward = 0.0\n done = None\n for i in range(self._skip):\n obs, reward, done, info = self.env.step(action)\n if i == self._skip - 2:\n self._obs_buffer[0] = obs\n if i == self._skip - 1:\n self._obs_buffer[1] = obs\n total_reward += reward\n if done:\n break\n # Note that the observation on the done=True frame\n # doesn\"t matter\n max_frame = self._obs_buffer.max(axis=0)\n\n return max_frame, total_reward, done, info\n\n def reset(self, **kwargs):\n return self.env.reset(**kwargs)\n\n\nclass ClipRewardEnv(gym.RewardWrapper):\n def __init__(self, env):\n gym.RewardWrapper.__init__(self, env)\n\n def reward(self, reward):\n \"\"\"Bin reward to {+1, 0, -1} by its sign.\"\"\"\n return np.sign(reward)\n\n\nclass WarpFrame(gym.ObservationWrapper):\n def __init__(self, env, width=84, height=84, grayscale=True):\n \"\"\"Warp frames to 84x84 as done in the Nature paper and later work.\"\"\"\n gym.ObservationWrapper.__init__(self, env)\n self.width = width\n self.height = height\n self.grayscale = grayscale\n if self.grayscale:\n self.observation_space = spaces.Box(\n low=0,\n high=255,\n shape=(self.height, self.width, 1),\n dtype=np.uint8\n )\n else:\n self.observation_space = spaces.Box(\n low=0,\n high=255,\n shape=(self.height, self.width, 3),\n dtype=np.uint8\n )\n\n def observation(self, frame):\n if self.grayscale:\n frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\n frame = cv2.resize(\n frame, (self.width, self.height), interpolation=cv2.INTER_AREA\n )\n if self.grayscale:\n frame = np.expand_dims(frame, -1)\n return frame\n\n\nclass FrameStack(gym.Wrapper):\n def __init__(self, env, k):\n \"\"\"Stack k last frames.\n\n Returns lazy array, which is much more memory efficient.\n\n See Also\n --------\n baselines.common.atari_wrappers.LazyFrames\n \"\"\"\n gym.Wrapper.__init__(self, env)\n self.k = k\n self.frames = deque([], maxlen=k)\n shp = env.observation_space.shape\n self.observation_space = spaces.Box(\n low=0,\n high=255,\n shape=(shp[:-1] + (shp[-1] * k, )),\n 
dtype=env.observation_space.dtype\n )\n\n def reset(self):\n ob = self.env.reset()\n for _ in range(self.k):\n self.frames.append(ob)\n return self._get_ob()\n\n def step(self, action):\n ob, reward, done, info = self.env.step(action)\n self.frames.append(ob)\n return self._get_ob(), reward, done, info\n\n def _get_ob(self):\n assert len(self.frames) == self.k\n return LazyFrames(list(self.frames))\n\n\nclass ScaledFloatFrame(gym.ObservationWrapper):\n def __init__(self, env):\n gym.ObservationWrapper.__init__(self, env)\n self.observation_space = gym.spaces.Box(\n low=0, high=1, shape=env.observation_space.shape, dtype=np.float32\n )\n\n def observation(self, observation):\n # careful! This undoes the memory optimization, use\n # with smaller replay buffers only.\n return np.array(observation).astype(np.float32) / 255.0\n\n\nclass LazyFrames(object):\n def __init__(self, frames):\n \"\"\"\n This object ensures that common frames\n between the observations are only stored once.\n It exists purely to optimize memory usage\n which can be huge for DQN\"s 1M frames replay buffers.\n\n This object should only be converted to numpy array\n before being passed to the model.\n\n You\"d not believe how complex the previous solution was.\"\"\"\n self._frames = frames\n self._out = None\n\n def _force(self):\n if self._out is None:\n self._out = np.concatenate(self._frames, axis=-1)\n self._frames = None\n return self._out\n\n def __array__(self, dtype=None):\n out = self._force()\n if dtype is not None:\n out = out.astype(dtype)\n return out\n\n def __len__(self):\n return len(self._force())\n\n def __getitem__(self, i):\n return self._force()[..., i]\n\n\ndef make_atari(env_id, max_episode_steps=None):\n env = gym.make(env_id)\n assert \"NoFrameskip\" in env.spec.id\n env = NoopResetEnv(env, noop_max=30)\n # env = MaxAndSkipEnv(env, skip=4)\n if max_episode_steps is not None:\n env = TimeLimit(env, max_episode_steps=max_episode_steps)\n return env\n\n\ndef wrap_deepmind(\n env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False\n):\n \"\"\"Configure environment for DeepMind-style Atari.\n \"\"\"\n if episode_life:\n env = EpisodicLifeEnv(env)\n if \"FIRE\" in env.unwrapped.get_action_meanings():\n env = FireResetEnv(env)\n env = WarpFrame(env)\n if scale:\n env = ScaledFloatFrame(env)\n if clip_rewards:\n env = ClipRewardEnv(env)\n if frame_stack:\n env = FrameStack(env, 4)\n return env\n\n\ndef make_atari_env(\n env_id,\n max_episode_steps=None,\n episode_life=True,\n clip_rewards=False,\n frame_stack=False,\n scale=False\n):\n env = gym.make(env_id)\n assert \"NoFrameskip\" in env.spec.id\n env = NoopResetEnv(env, noop_max=30)\n # env = MaxAndSkipEnv(env, skip=4)\n if max_episode_steps is not None:\n env = TimeLimit(env, max_episode_steps=max_episode_steps)\n if episode_life:\n env = EpisodicLifeEnv(env)\n if \"FIRE\" in env.unwrapped.get_action_meanings():\n env = FireResetEnv(env)\n env = WarpFrame(env)\n if scale:\n env = ScaledFloatFrame(env)\n if clip_rewards:\n env = ClipRewardEnv(env)\n if frame_stack:\n env = FrameStack(env, 4)\n\n env = TransposeImage(env, op=[2, 0, 1])\n return env\n" ]
[ [ "numpy.expand_dims", "numpy.sign", "numpy.concatenate", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jiecaoyu/FBGEMM
[ "2c547924deafa1839483d31096de800078c35711" ]
[ "fbgemm_gpu/bench/bench_utils.py" ]
[ "# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport itertools\nimport logging\nimport statistics\nimport time\nfrom typing import Callable, List, Optional, Tuple\n\nimport numpy as np\nimport torch\nfrom fbgemm_gpu.split_table_batched_embeddings_ops import SparseType\nfrom numpy.random import default_rng\nfrom torch import Tensor\n\nlogging.basicConfig(level=logging.DEBUG)\n\n\ndef benchmark_torch_function(\n # pyre-fixme[2]: Parameter must be annotated.\n f,\n # pyre-fixme[2]: Parameter must be annotated.\n args,\n flush_gpu_cache_size_mb: int = 40,\n iters: int = 10,\n num_warmups: int = 2,\n) -> Tuple[float, Tensor]:\n for _ in range(num_warmups):\n output = f(*args)\n\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n start_event = torch.cuda.Event(enable_timing=True)\n end_event = torch.cuda.Event(enable_timing=True)\n # flush the cache\n if flush_gpu_cache_size_mb:\n _ = torch.rand(\n flush_gpu_cache_size_mb * 1024 * 1024 // 4, dtype=torch.float\n )\n torch.cuda.synchronize()\n start_event.record()\n for _ in range(iters):\n output = f(*args)\n end_event.record()\n torch.cuda.synchronize()\n elapsed_time = start_event.elapsed_time(end_event) * 1.0e-3\n else:\n start_time = time.time()\n for _ in range(iters):\n output = f(*args)\n elapsed_time = time.time() - start_time\n\n # pyre-fixme[61]: `output` is undefined, or not always defined.\n return float(elapsed_time) / iters, output\n\n\ndef round_up(a: int, b: int) -> int:\n return int((a + b - 1) // b) * b\n\n\ndef get_device() -> torch.device:\n return (\n torch.cuda.current_device()\n if torch.cuda.is_available()\n else torch.device(\"cpu\")\n )\n\n\n# Merged indices with shape (T, B, L) -> (flattened indices with shape\n# (T * B * L), offsets with shape (T * B + 1))\ndef get_table_batched_offsets_from_dense(\n merged_indices: Tensor,\n) -> Tuple[Tensor, Tensor]:\n (T, B, L) = merged_indices.size()\n lengths = np.ones((T, B)) * L\n flat_lengths = lengths.flatten()\n return (\n merged_indices.long().contiguous().view(-1).to(get_device()),\n torch.tensor(([0] + np.cumsum(flat_lengths).tolist())).long().to(get_device()),\n )\n\n\ndef get_offsets_from_dense(indices: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n (B, L) = indices.size()\n return (\n indices.contiguous().view(-1),\n torch.tensor(\n np.cumsum(np.asarray([0] + [L for _ in range(B)])[:-1]).astype(np.int64)\n ),\n )\n\n\ndef b_indices(\n b: Callable[..., torch.Tensor],\n x: torch.Tensor,\n per_sample_weights: Optional[torch.Tensor] = None,\n use_cpu: bool = False,\n do_pooling: bool = True,\n) -> torch.Tensor:\n (indices, offsets) = get_offsets_from_dense(x)\n if do_pooling:\n return b(\n indices.cuda(),\n offsets.cuda(),\n per_sample_weights=per_sample_weights,\n )\n else:\n return b(indices.cuda())\n\n\ndef generate_requests(\n iters: int,\n B: int,\n T: int,\n L: int,\n E: int,\n # inter-batch indices reuse rate\n reuse: float = 0.0,\n # alpha <= 1.0: use uniform distribution\n # alpha > 1.0: use zipf distribution\n alpha: float = 1.0,\n weights_precision: SparseType = SparseType.FP32,\n weighted: bool = False,\n requests_data_file: Optional[str] = None,\n # Comma-separated list of table numbers\n tables: Optional[str] = None,\n) -> List[Tuple[torch.IntTensor, torch.IntTensor, Optional[Tensor]]]:\n if requests_data_file is not None:\n indices_tensor, offsets_tensor, lengths_tensor = 
torch.load(requests_data_file)\n\n average_L = 0\n if tables is not None:\n emb_tables = tuple(int(x) for x in tables.split(\",\"))\n indices = torch.zeros(0, dtype=indices_tensor.dtype)\n offsets = torch.zeros(1, dtype=offsets_tensor.dtype)\n total_L = 0\n for t in emb_tables:\n t_offsets = offsets_tensor[B * t : B * (t + 1) + 1]\n total_L += t_offsets[-1] - t_offsets[0]\n indices = torch.cat(\n (indices, indices_tensor[t_offsets[0] : t_offsets[-1]])\n )\n offsets = torch.cat(\n (\n offsets,\n t_offsets[1:] - t_offsets[0] + offsets[-1],\n )\n )\n indices_tensor = indices\n offsets_tensor = offsets\n average_L = int(total_L / B)\n\n assert np.prod(offsets_tensor.size()) - 1 == np.prod((T, B)), (\n f\"Requested tables: {emb_tables} \"\n f\"does not conform to inputs (T, B) = ({T}, {B}).\"\n )\n logging.warning(\n f\"Using (indices = {indices_tensor.size()}, offsets = {offsets_tensor.size()}) based \"\n f\"on tables: {emb_tables}\"\n )\n else:\n average_L = int((offsets_tensor[-1] - offsets_tensor[0]) / B)\n assert (np.prod(offsets_tensor.size()) - 1) == np.prod((T, B)), (\n f\"Data file (indices = {indices_tensor.size()}, \"\n f\"offsets = {offsets_tensor.size()}, lengths = {lengths_tensor.size()}) \"\n f\"does not conform to inputs (T, B) = ({T}, {B}).\"\n )\n\n assert (\n L == average_L\n ), f\"Requested L does not align with provided data file ({L} vs. {average_L})\"\n assert E > max(indices_tensor), (\n f\"Number of embeddings is not enough to support maximum index \"\n f\"provided by data file {E} vs. {max(indices_tensor)}\"\n )\n\n weights_tensor = (\n None\n if not weighted\n else torch.randn(indices_tensor.size(), device=get_device())\n )\n rs = []\n for _ in range(iters):\n rs.append(\n (\n indices_tensor.to(get_device()),\n offsets_tensor.to(get_device()),\n weights_tensor,\n )\n )\n return rs\n\n if alpha <= 1.0:\n all_indices = torch.randint(\n low=0,\n high=E,\n size=(iters, T, B, L),\n device=get_device(),\n dtype=torch.int32,\n )\n # each bag is usually sorted\n (all_indices, _) = torch.sort(all_indices)\n all_indices = all_indices.reshape(iters, T, B * L)\n else:\n assert E >= L, \"num-embeddings must be greater than equal to bag-size\"\n # oversample and then remove duplicates to obtain sampling without\n # replacement\n all_indices = (np.random.zipf(a=alpha, size=(iters, T, B, 3 * L)) - 1) % E\n for index_tuple in itertools.product(range(iters), range(T), range(B)):\n # sample without replacement from\n # https://stats.stackexchange.com/questions/20590/how-do-i-sample-without-replacement-using-a-sampling-with-replacement-function\n r = set()\n for x in all_indices[index_tuple]:\n if x not in r:\n r.add(x)\n if len(r) == L:\n break\n assert (len(r)) == L, \"too skewed distribution (alpha too big)\"\n all_indices[index_tuple][:L] = list(r)\n # shuffle indices so we don't have unintended spatial locality\n all_indices = torch.as_tensor(all_indices[:, :, :, :L])\n rng = default_rng()\n permutation = torch.as_tensor(\n rng.choice(E, size=all_indices.max().item() + 1, replace=False)\n )\n all_indices = permutation.gather(0, all_indices.flatten())\n all_indices = all_indices.to(get_device()).int().reshape(iters, T, B * L)\n for it in range(iters - 1):\n for t in range(T):\n reused_indices = torch.randperm(B * L, device=get_device())[\n : int(B * L * reuse)\n ]\n all_indices[it + 1, t, reused_indices] = all_indices[it, t, reused_indices]\n\n rs = []\n for it in range(iters):\n weights_tensor = (\n None if not weighted else torch.randn(T * B * L, device=get_device())\n )\n 
rs.append(\n get_table_batched_offsets_from_dense(all_indices[it].view(T, B, L))\n + (weights_tensor,)\n )\n return rs\n\n\ndef benchmark_requests(\n requests: List[Tuple[torch.IntTensor, torch.IntTensor, Optional[Tensor]]],\n func: Callable[[Tensor, Tensor, Optional[Tensor]], Tensor],\n flush_gpu_cache_size_mb: int = 0,\n check_median: bool = False,\n) -> float:\n times = []\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n start_event = torch.cuda.Event(enable_timing=True)\n end_event = torch.cuda.Event(enable_timing=True)\n for (indices, offsets, weights) in requests:\n start_time = time.time()\n if torch.cuda.is_available():\n if flush_gpu_cache_size_mb:\n _ = torch.rand(\n flush_gpu_cache_size_mb * 1024 * 1024 // 4, dtype=torch.float\n )\n torch.cuda.synchronize()\n start_event.record()\n func(indices, offsets, weights)\n if torch.cuda.is_available():\n end_event.record()\n torch.cuda.synchronize()\n it_time = start_event.elapsed_time(end_event) * 1.0e-3\n times.append(it_time)\n else:\n it_time = time.time() - start_time\n times.append(it_time)\n avg_time = sum(times) / len(requests)\n median_time = statistics.median(times)\n return median_time if check_median else avg_time\n\n\ndef benchmark_requests_refer(\n requests: List[Tuple[torch.IntTensor, torch.IntTensor, Optional[Tensor]]],\n T: int,\n B: int,\n L: int,\n E: int,\n D: int,\n pooling_mode: str,\n weighted: bool,\n flush_gpu_cache_size_mb: int = 0,\n check_median: bool = False,\n) -> float:\n do_pooling = pooling_mode in [\"sum\", \"mean\"]\n if do_pooling:\n nn_embedding_list = [\n torch.nn.EmbeddingBag(E, D, mode=pooling_mode, sparse=True).cuda()\n ] * T\n else:\n nn_embedding_list = [torch.nn.Embedding(E, D, sparse=True).cuda()] * T\n\n times = []\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n start_event = torch.cuda.Event(enable_timing=True)\n end_event = torch.cuda.Event(enable_timing=True)\n for (indices, _, weights) in requests:\n indices_list = indices.view(T, B, L).split(1)\n\n if weighted:\n assert weights is not None\n weights_list = weights.view(T, B, L).split(1)\n\n start_time = time.time()\n if torch.cuda.is_available():\n if flush_gpu_cache_size_mb:\n _ = torch.rand(\n flush_gpu_cache_size_mb * 1024 * 1024 // 4, dtype=torch.float\n )\n torch.cuda.synchronize()\n start_event.record()\n\n nn_embedding_output = (\n [\n b_indices(nn_embedding, x, use_cpu=False, do_pooling=do_pooling)\n for (nn_embedding, x) in zip(nn_embedding_list, indices_list)\n ]\n if not weighted\n else [\n b_indices(\n nn_embedding,\n x,\n per_sample_weights=xw.view(-1),\n use_cpu=False,\n do_pooling=do_pooling,\n )\n for (nn_embedding, x, xw) in zip(\n nn_embedding_list,\n indices_list,\n # pyre-fixme[61]: `weights_list` is undefined, or not always\n # defined.\n weights_list,\n )\n ]\n )\n if do_pooling:\n final_output = torch.cat(\n [f.view(B, -1) for f in nn_embedding_output], dim=1\n )\n else:\n final_output = torch.cat(nn_embedding_output, dim=0).view(-1, D)\n\n if torch.cuda.is_available():\n end_event.record()\n torch.cuda.synchronize()\n it_time = start_event.elapsed_time(end_event) * 1.0e-3\n times.append(it_time)\n else:\n it_time = time.time() - start_time\n times.append(it_time)\n avg_time = sum(times) / len(requests)\n median_time = statistics.median(times)\n return median_time if check_median else avg_time\n\n\ndef benchmark_pipelined_requests(\n requests: List[Tuple[torch.IntTensor, torch.IntTensor, Optional[Tensor]]],\n func1: Callable[[Tensor, Tensor, Optional[Tensor]], None],\n func2: 
Callable[[Tensor, Tensor, Optional[Tensor]], None],\n flush_gpu_cache_size_mb: int = 0,\n check_median: bool = False,\n) -> Tuple[float, float]:\n torch.cuda.synchronize()\n start_events = [\n (torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True))\n for _ in requests\n ]\n end_events = [\n (torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True))\n for _ in requests\n ]\n for ((indices, offsets, indices_weights), start_event, end_event) in zip(\n requests, start_events, end_events\n ):\n if flush_gpu_cache_size_mb:\n _ = torch.rand(\n flush_gpu_cache_size_mb * 1024 * 1024 // 4, dtype=torch.float\n )\n torch.cuda.synchronize()\n start_event[0].record()\n func1(indices, offsets, indices_weights)\n end_event[0].record()\n start_event[1].record()\n func2(indices, offsets, indices_weights)\n end_event[1].record()\n torch.cuda.synchronize()\n avg_time = (\n sum(\n start_event[0].elapsed_time(end_event[0]) * 1.0e-3\n for start_event, end_event in zip(start_events, end_events)\n )\n / len(requests),\n sum(\n start_event[1].elapsed_time(end_event[1]) * 1.0e-3\n for start_event, end_event in zip(start_events, end_events)\n )\n / len(requests),\n )\n median_time = (\n statistics.median(\n start_event[0].elapsed_time(end_event[0]) * 1.0e-3\n for start_event, end_event in zip(start_events, end_events)\n ),\n statistics.median(\n start_event[1].elapsed_time(end_event[1]) * 1.0e-3\n for start_event, end_event in zip(start_events, end_events)\n ),\n )\n return median_time if check_median else avg_time\n" ]
[ [ "torch.cuda.synchronize", "torch.cuda.current_device", "torch.load", "torch.zeros", "torch.cat", "torch.cuda.Event", "torch.nn.EmbeddingBag", "numpy.cumsum", "numpy.random.zipf", "numpy.ones", "torch.nn.Embedding", "torch.sort", "torch.cuda.is_available", "torch.rand", "torch.device", "numpy.prod", "numpy.random.default_rng", "torch.as_tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
anubhabPanda/TSAI-EVA5
[ "d16d83c796240632e120ba51cff2d10349ffee34" ]
[ "Week10/Picasso/models/model11.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom models.resnet import BasicBlock\n\n\nclass Residual_Block(nn.Module):\n def __init__(self, in_channels, out_channels, basic_block = None):\n super(Residual_Block, self).__init__()\n self.conv1 = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False),\n nn.MaxPool2d(2, 2),\n nn.BatchNorm2d(out_channels),\n nn.ReLU()\n )\n\n if basic_block is not None:\n self.res_block = basic_block\n else:\n self.res_block = None\n\n def forward(self, x):\n x = self.conv1(x)\n if self.res_block is not None:\n r1 = self.res_block(x)\n x = x + r1\n\n return x\n\nclass Modified_Resnet(nn.Module):\n def __init__(self, residual_block, resnet_base):\n super(Modified_Resnet, self).__init__()\n #Prep Layer\n self.prep = nn.Sequential(\n nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(64),\n nn.ReLU()\n )\n\n self.layer1 = residual_block(64, 128, basic_block=resnet_base(128, 128))\n self.layer2 = residual_block(128, 256)\n self.layer3 = residual_block(256, 512, basic_block=resnet_base(512, 512))\n self.pool1 = nn.MaxPool2d(4, 4)\n self.fc = nn.Linear(512, 10)\n \n def forward(self, x):\n x = self.prep(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.pool1(x)\n x = x.view(x.shape[0], -1)\n x = self.fc(x)\n x = F.log_softmax(x, dim=1) \n return x\n\ndef call_model():\n return Modified_Resnet(Residual_Block, BasicBlock) \n" ]
[ [ "torch.nn.functional.log_softmax", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.MaxPool2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mimacom/whtobu-ml
[ "d17f1ea704d8c347de51aaa385de0e6f381a1f36" ]
[ "api.py" ]
[ "#!whtobu/bin/python\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport os\nimport tensorflow as tf\nimport time\nimport uuid\nfrom flask import Flask, jsonify, make_response, request, abort\nfrom werkzeug.utils import secure_filename\n\napp = Flask(__name__)\n\nUPLOAD_FOLDER = '/tmp'\n\n\[email protected](404)\ndef not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)\n\n\ndef load_graph(model_file):\n graph = tf.Graph()\n graph_def = tf.GraphDef()\n\n with open(model_file, \"rb\") as f:\n graph_def.ParseFromString(f.read())\n with graph.as_default():\n tf.import_graph_def(graph_def)\n\n return graph\n\n\ndef read_tensor_from_image_file(file_name, input_height=299, input_width=299,\n input_mean=0, input_std=255):\n input_name = \"file_reader\"\n output_name = \"normalized\"\n file_reader = tf.read_file(file_name, input_name)\n if file_name.endswith(\".png\"):\n image_reader = tf.image.decode_png(file_reader, channels=3,\n name='png_reader')\n elif file_name.endswith(\".gif\"):\n image_reader = tf.squeeze(tf.image.decode_gif(file_reader,\n name='gif_reader'))\n elif file_name.endswith(\".bmp\"):\n image_reader = tf.image.decode_bmp(file_reader, name='bmp_reader')\n else:\n image_reader = tf.image.decode_jpeg(file_reader, channels=3,\n name='jpeg_reader')\n float_caster = tf.cast(image_reader, tf.float32)\n dims_expander = tf.expand_dims(float_caster, 0);\n resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])\n normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])\n sess = tf.Session()\n result = sess.run(normalized)\n\n return result\n\n\ndef load_labels(label_file):\n label = []\n proto_as_ascii_lines = tf.gfile.GFile(label_file).readlines()\n for l in proto_as_ascii_lines:\n label.append(l.rstrip())\n return label\n\n\[email protected]('/analyze', methods=['POST'])\ndef analyze():\n print(str(request.files))\n\n if 'file' not in request.files:\n print('No file part')\n abort(400)\n\n file = request.files['file']\n\n if file.filename == '':\n print('No selected file')\n abort(400)\n\n file = request.files['file']\n filename = secure_filename(file.filename)\n\n print(app.config['UPLOAD_FOLDER'] + \"/\" + filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n\n file_name = app.config['UPLOAD_FOLDER'] + \"/\" + filename\n model_file = \"graph.pb\"\n label_file = \"labels.txt\"\n input_height = 224\n input_width = 224\n input_mean = 128\n input_std = 128\n input_layer = \"input\"\n output_layer = \"final_result\"\n\n graph = load_graph(model_file)\n t = read_tensor_from_image_file(\n file_name,\n input_height=input_height,\n input_width=input_width,\n input_mean=input_mean,\n input_std=input_std\n )\n\n input_name = \"import/\" + input_layer\n output_name = \"import/\" + output_layer\n input_operation = graph.get_operation_by_name(input_name)\n output_operation = graph.get_operation_by_name(output_name)\n\n with tf.Session(graph=graph) as sess:\n start = time.time()\n results = sess.run(\n output_operation.outputs[0],\n {input_operation.outputs[0]: t}\n )\n\n end = time.time()\n\n results = np.squeeze(results)\n\n top_k = results.argsort()[-5:][::-1]\n labels = load_labels(label_file)\n\n print('\\nEvaluation time (1-image): {:.3f}s\\n'.format(end - start))\n\n for i in top_k:\n print(labels[i], results[i])\n\n return jsonify([\n {\n 'ASIN': 'ABCDF'\n }\n ])\n\n\nif __name__ == '__main__':\n app.secret_key = 
str(uuid.uuid4())\n app.config['SESSION_TYPE'] = 'filesystem'\n app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024\n app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n # sess.init_app(app)\n\n app.debug = True\n app.run()\n" ]
[ [ "tensorflow.Graph", "tensorflow.image.resize_bilinear", "tensorflow.import_graph_def", "tensorflow.read_file", "tensorflow.gfile.GFile", "numpy.squeeze", "tensorflow.cast", "tensorflow.image.decode_png", "tensorflow.expand_dims", "tensorflow.image.decode_bmp", "tensorflow.subtract", "tensorflow.image.decode_gif", "tensorflow.Session", "tensorflow.GraphDef", "tensorflow.image.decode_jpeg" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
233-puchi/mindspore
[ "e9d2684cdb7668eac48169feeff778eeffbfa70e" ]
[ "mindspore/explainer/_image_classification_runner.py" ]
[ "# Copyright 2020-2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Image Classification Runner.\"\"\"\nimport os\nimport re\nimport json\nfrom time import time\n\nimport numpy as np\nfrom scipy.stats import beta\nfrom PIL import Image\n\nimport mindspore as ms\nfrom mindspore import context\nfrom mindspore import log\nimport mindspore.dataset as ds\nfrom mindspore.dataset import Dataset\nfrom mindspore.nn import Cell, SequentialCell\nfrom mindspore.ops.operations import ExpandDims\nfrom mindspore.train._utils import check_value_type\nfrom mindspore.train.summary._summary_adapter import _convert_image_format\nfrom mindspore.train.summary.summary_record import SummaryRecord\nfrom mindspore.train.summary_pb2 import Explain\nfrom mindspore.nn.probability.toolbox.uncertainty_evaluation import UncertaintyEvaluation\nfrom mindspore.explainer.benchmark import Localization\nfrom mindspore.explainer.benchmark._attribution.metric import AttributionMetric\nfrom mindspore.explainer.benchmark._attribution.metric import LabelSensitiveMetric\nfrom mindspore.explainer.benchmark._attribution.metric import LabelAgnosticMetric\nfrom mindspore.explainer.explanation import RISE\nfrom mindspore.explainer.explanation._attribution.attribution import Attribution\nfrom mindspore.explainer.explanation._counterfactual import hierarchical_occlusion as hoc\n\n\n_EXPAND_DIMS = ExpandDims()\n\n\ndef _normalize(img_np):\n \"\"\"Normalize the numpy image to the range of [0, 1]. \"\"\"\n max_ = img_np.max()\n min_ = img_np.min()\n normed = (img_np - min_) / (max_ - min_).clip(min=1e-10)\n return normed\n\n\ndef _np_to_image(img_np, mode):\n \"\"\"Convert numpy array to PIL image.\"\"\"\n return Image.fromarray(np.uint8(img_np * 255), mode=mode)\n\n\nclass _VerifyFlag:\n \"\"\"Verification flags of dataset and settings of ImageClassificationRunner.\"\"\"\n ALL = 0xFFFFFFFF\n REGISTRATION = 1\n DATA_N_NETWORK = 1 << 1\n SALIENCY = 1 << 2\n HOC = 1 << 3\n ENVIRONMENT = 1 << 4\n\n\nclass ImageClassificationRunner:\n \"\"\"\n A high-level API for users to generate and store results of the explanation methods and the evaluation methods.\n\n Update in 2020.11: Adjust the storage structure and format of the data. Summary files generated by previous version\n will be deprecated and will not be supported in MindInsight of current version.\n\n Args:\n summary_dir (str): The directory path to save the summary files which store the generated results.\n data (tuple[Dataset, list[str]]): Tuple of dataset and the corresponding class label list. The dataset\n should provides [images], [images, labels] or [images, labels, bboxes] as columns. The label list must\n share the exact same length and order of the network outputs.\n network (Cell): The network(with logit outputs) to be explained.\n activation_fn (Cell): The activation layer that transforms logits to prediction probabilities. 
For\n single label classification tasks, `nn.Softmax` is usually applied. As for multi-label classification\n tasks, `nn.Sigmoid` is usually be applied. Users can also pass their own customized `activation_fn` as long\n as when combining this function with network, the final output is the probability of the input.\n\n Examples:\n >>> from mindspore.explainer import ImageClassificationRunner\n >>> from mindspore.explainer.explanation import GuidedBackprop, Gradient\n >>> from mindspore.explainer.benchmark import Faithfulness\n >>> from mindspore.nn import Softmax\n >>> from mindspore.train.serialization import load_checkpoint, load_param_into_net\n >>>\n >>> # The detail of AlexNet is shown in model_zoo.official.cv.alexnet.src.alexnet.py\n >>> net = AlexNet(10)\n >>> # Load the checkpoint\n >>> param_dict = load_checkpoint(\"/path/to/checkpoint\")\n >>> load_param_into_net(net, param_dict)\n []\n >>>\n >>> # Prepare the dataset for explaining and evaluation.\n >>> # The detail of create_dataset_cifar10 method is shown in model_zoo.official.cv.alexnet.src.dataset.py\n >>>\n >>> dataset = create_dataset_cifar10(\"/path/to/cifar/dataset\", 1)\n >>> labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\n >>>\n >>> activation_fn = Softmax()\n >>> gbp = GuidedBackprop(net)\n >>> gradient = Gradient(net)\n >>> explainers = [gbp, gradient]\n >>> faithfulness = Faithfulness(len(labels), activation_fn, \"NaiveFaithfulness\")\n >>> benchmarkers = [faithfulness]\n >>>\n >>> runner = ImageClassificationRunner(\"./summary_dir\", (dataset, labels), net, activation_fn)\n >>> runner.register_saliency(explainers=explainers, benchmarkers=benchmarkers)\n >>> runner.run()\n \"\"\"\n\n # datafile directory names\n _DATAFILE_DIRNAME_PREFIX = \"_explain_\"\n _ORIGINAL_IMAGE_DIRNAME = \"origin_images\"\n _HEATMAP_DIRNAME = \"heatmap\"\n # specfial filenames\n _MANIFEST_FILENAME = \"manifest.json\"\n # max. no. 
of sample per directory\n _SAMPLE_PER_DIR = 1000\n # seed for fixing the iterating order of the dataset\n _DATASET_SEED = 58\n # printing spacer\n _SPACER = \"{:120}\\r\"\n # datafile directory's permission\n _DIR_MODE = 0o750\n # datafile's permission\n _FILE_MODE = 0o600\n\n def __init__(self,\n summary_dir,\n data,\n network,\n activation_fn):\n\n check_value_type(\"data\", data, tuple)\n if len(data) != 2:\n raise ValueError(\"Argument data is not a tuple with 2 elements\")\n check_value_type(\"data[0]\", data[0], Dataset)\n check_value_type(\"data[1]\", data[1], list)\n if not all(isinstance(ele, str) for ele in data[1]):\n raise ValueError(\"Argument data[1] is not list of str.\")\n\n check_value_type(\"summary_dir\", summary_dir, str)\n check_value_type(\"network\", network, Cell)\n check_value_type(\"activation_fn\", activation_fn, Cell)\n\n self._summary_dir = summary_dir\n self._dataset = data[0]\n self._labels = data[1]\n self._network = network\n self._explainers = None\n self._benchmarkers = None\n self._uncertainty = None\n self._hoc_searcher = None\n self._summary_timestamp = None\n self._sample_index = -1\n self._manifest = None\n\n self._full_network = SequentialCell([self._network, activation_fn])\n self._full_network.set_train(False)\n\n self._verify(_VerifyFlag.DATA_N_NETWORK | _VerifyFlag.ENVIRONMENT)\n\n def register_saliency(self,\n explainers,\n benchmarkers=None):\n \"\"\"\n Register saliency explanation instances.\n\n Note:\n This function can not be invoked more than once on each runner.\n\n Args:\n explainers (list[Attribution]): The explainers to be evaluated,\n see `mindspore.explainer.explanation`. All explainers' class must be distinct and their network\n must be the exact same instance of the runner's network.\n benchmarkers (list[AttributionMetric], optional): The benchmarkers for scoring the explainers,\n see `mindspore.explainer.benchmark`. All benchmarkers' class must be distinct.\n\n Raises:\n ValueError: Be raised for any data or settings' value problem.\n TypeError: Be raised for any data or settings' type problem.\n RuntimeError: Be raised if this function was invoked before.\n \"\"\"\n check_value_type(\"explainers\", explainers, list)\n if not all(isinstance(ele, Attribution) for ele in explainers):\n raise TypeError(\"Argument explainers is not list of mindspore.explainer.explanation .\")\n\n if not explainers:\n raise ValueError(\"Argument explainers is empty.\")\n\n if benchmarkers is not None:\n check_value_type(\"benchmarkers\", benchmarkers, list)\n if not all(isinstance(ele, AttributionMetric) for ele in benchmarkers):\n raise TypeError(\"Argument benchmarkers is not list of mindspore.explainer.benchmark .\")\n\n if self._explainers is not None:\n raise RuntimeError(\"Function register_saliency() was invoked already.\")\n\n self._explainers = explainers\n self._benchmarkers = benchmarkers\n\n try:\n self._verify(_VerifyFlag.SALIENCY | _VerifyFlag.ENVIRONMENT)\n except (ValueError, TypeError):\n self._explainers = None\n self._benchmarkers = None\n raise\n\n def register_hierarchical_occlusion(self):\n \"\"\"\n Register hierarchical occlusion instances.\n\n Notes:\n Input images are required to be in 3 channels formats and the length of side short must be equals to or\n greater than 56 pixels. 
This function can not be invoked more than once on each runner.\n\n Raises:\n ValueError: Be raised for any data or settings' value problem.\n RuntimeError: Be raised if the function was called already.\n \"\"\"\n if self._hoc_searcher is not None:\n raise RuntimeError(\"Function register_hierarchical_occlusion() was invoked already.\")\n\n self._hoc_searcher = hoc.Searcher(self._full_network)\n\n try:\n self._verify(_VerifyFlag.HOC | _VerifyFlag.ENVIRONMENT)\n except ValueError:\n self._hoc_searcher = None\n raise\n\n def register_uncertainty(self):\n \"\"\"\n Register uncertainty instance to compute the epistemic uncertainty base on the Bayes' theorem.\n\n Note:\n Please refer to the documentation of mindspore.nn.probability.toolbox.uncertainty_evaluation for the\n details. The actual output is standard deviation of the classification predictions and the corresponding\n 95% confidence intervals. Users have to invoke register_saliency() as well for the uncertainty results are\n going to be shown on the saliency map page in MindInsight. This function can not be invoked more then once\n on each runner.\n\n Raises:\n RuntimeError: Be raised if the function was called already.\n \"\"\"\n if self._uncertainty is not None:\n raise RuntimeError(\"Function register_uncertainty() was invoked already.\")\n\n self._uncertainty = UncertaintyEvaluation(model=self._full_network,\n train_dataset=None,\n task_type='classification',\n num_classes=len(self._labels))\n\n def run(self):\n \"\"\"\n Run the explain job and save the result as a summary in summary_dir.\n\n Note:\n User should call register_saliency() once before running this function.\n\n Raises:\n ValueError: Be raised for any data or settings' value problem.\n TypeError: Be raised for any data or settings' type problem.\n RuntimeError: Be raised for any runtime problem.\n \"\"\"\n self._verify(_VerifyFlag.ALL)\n self._manifest = {\"saliency_map\": False,\n \"benchmark\": False,\n \"uncertainty\": False,\n \"hierarchical_occlusion\": False}\n with SummaryRecord(self._summary_dir, raise_exception=True) as summary:\n print(\"Start running and writing......\")\n begin = time()\n\n self._summary_timestamp = self._extract_timestamp(summary.file_info['file_name'])\n if self._summary_timestamp is None:\n raise RuntimeError(\"Cannot extract timestamp from summary filename!\"\n \" It should contains a timestamp after 'summary.' .\")\n\n self._save_metadata(summary)\n\n imageid_labels = self._run_inference(summary)\n sample_count = self._sample_index\n if self._is_saliency_registered:\n self._run_saliency(summary, imageid_labels)\n if not self._manifest[\"saliency_map\"]:\n raise RuntimeError(\n f\"No saliency map was generated in {sample_count} samples. \"\n f\"Please make sure the dataset, labels, activation function and network are properly trained \"\n f\"and configured.\")\n\n if self._is_hoc_registered and not self._manifest[\"hierarchical_occlusion\"]:\n raise RuntimeError(\n f\"No Hierarchical Occlusion result was found in {sample_count} samples. \"\n f\"Please make sure the dataset, labels, activation function and network are properly trained \"\n f\"and configured.\")\n\n self._save_manifest()\n\n print(\"Finish running and writing. 
Total time elapsed: {:.3f} s\".format(time() - begin))\n\n @property\n def _is_hoc_registered(self):\n \"\"\"Check if HOC module is registered.\"\"\"\n return self._hoc_searcher is not None\n\n @property\n def _is_saliency_registered(self):\n \"\"\"Check if saliency module is registered.\"\"\"\n return bool(self._explainers)\n\n @property\n def _is_uncertainty_registered(self):\n \"\"\"Check if uncertainty module is registered.\"\"\"\n return self._uncertainty is not None\n\n def _save_metadata(self, summary):\n \"\"\"Save metadata of the explain job to summary.\"\"\"\n print(\"Start writing metadata......\")\n\n explain = Explain()\n explain.metadata.label.extend(self._labels)\n\n if self._is_saliency_registered:\n exp_names = [exp.__class__.__name__ for exp in self._explainers]\n explain.metadata.explain_method.extend(exp_names)\n if self._benchmarkers is not None:\n bench_names = [bench.__class__.__name__ for bench in self._benchmarkers]\n explain.metadata.benchmark_method.extend(bench_names)\n\n summary.add_value(\"explainer\", \"metadata\", explain)\n summary.record(1)\n\n print(\"Finish writing metadata.\")\n\n def _run_inference(self, summary, threshold=0.5):\n \"\"\"\n Run inference for the dataset and write the inference related data into summary.\n\n Args:\n summary (SummaryRecord): The summary object to store the data.\n threshold (float): The threshold for prediction.\n\n Returns:\n dict, The map of sample d to the union of its ground truth and predicted labels.\n \"\"\"\n sample_id_labels = {}\n self._sample_index = 0\n ds.config.set_seed(self._DATASET_SEED)\n for j, next_element in enumerate(self._dataset):\n now = time()\n self._run_sample(summary, next_element, sample_id_labels, threshold)\n self._sample_index += 1\n self._spaced_print(\"Finish running and writing {}-th batch inference data.\"\n \" Time elapsed: {:.3f} s\".format(j, time() - now))\n return sample_id_labels\n\n def _run_sample(self, summary, next_element, sample_id_labels, threshold):\n \"\"\"\n Run inference for a sample.\n\n Args:\n summary (SummaryRecord): The summary object to store the data.\n next_element (tuple): The next dataset sample.\n sample_id_labels (dict): The sample id to labels dictionary.\n threshold (float): The threshold for prediction.\n \"\"\"\n inputs, labels, _ = self._unpack_next_element(next_element)\n prob = self._full_network(inputs).asnumpy()\n\n if self._uncertainty is not None:\n prob_var = self._uncertainty.eval_epistemic_uncertainty(inputs)\n else:\n prob_var = None\n\n for idx, inp in enumerate(inputs):\n gt_labels = labels[idx]\n gt_probs = [float(prob[idx][i]) for i in gt_labels]\n\n if prob_var is not None:\n gt_prob_vars = [float(prob_var[idx][i]) for i in gt_labels]\n gt_itl_lows, gt_itl_his, gt_prob_sds = \\\n self._calc_beta_intervals(gt_probs, gt_prob_vars)\n\n data_np = _convert_image_format(np.expand_dims(inp.asnumpy(), 0), 'NCHW')\n original_image = _np_to_image(_normalize(data_np), mode='RGB')\n original_image_path = self._save_original_image(self._sample_index, original_image)\n\n predicted_labels = [int(i) for i in (prob[idx] > threshold).nonzero()[0]]\n predicted_probs = [float(prob[idx][i]) for i in predicted_labels]\n\n if prob_var is not None:\n predicted_prob_vars = [float(prob_var[idx][i]) for i in predicted_labels]\n predicted_itl_lows, predicted_itl_his, predicted_prob_sds = \\\n self._calc_beta_intervals(predicted_probs, predicted_prob_vars)\n\n union_labs = list(set(gt_labels + predicted_labels))\n sample_id_labels[str(self._sample_index)] = 
union_labs\n\n explain = Explain()\n explain.sample_id = self._sample_index\n explain.image_path = original_image_path\n summary.add_value(\"explainer\", \"sample\", explain)\n\n explain = Explain()\n explain.sample_id = self._sample_index\n explain.ground_truth_label.extend(gt_labels)\n explain.inference.ground_truth_prob.extend(gt_probs)\n explain.inference.predicted_label.extend(predicted_labels)\n explain.inference.predicted_prob.extend(predicted_probs)\n\n if prob_var is not None:\n explain.inference.ground_truth_prob_sd.extend(gt_prob_sds)\n explain.inference.ground_truth_prob_itl95_low.extend(gt_itl_lows)\n explain.inference.ground_truth_prob_itl95_hi.extend(gt_itl_his)\n explain.inference.predicted_prob_sd.extend(predicted_prob_sds)\n explain.inference.predicted_prob_itl95_low.extend(predicted_itl_lows)\n explain.inference.predicted_prob_itl95_hi.extend(predicted_itl_his)\n\n self._manifest[\"uncertainty\"] = True\n\n summary.add_value(\"explainer\", \"inference\", explain)\n summary.record(1)\n\n if self._is_hoc_registered:\n self._run_hoc(summary, self._sample_index, inputs[idx], prob[idx])\n\n def _run_explainer(self, summary, sample_id_labels, explainer):\n \"\"\"\n Run the explainer.\n\n Args:\n summary (SummaryRecord): The summary object to store the data.\n sample_id_labels (dict): A dict that maps the sample id and its union labels.\n explainer (_Attribution): An Attribution object to generate saliency maps.\n \"\"\"\n for idx, next_element in enumerate(self._dataset):\n now = time()\n self._spaced_print(\"Start running {}-th explanation data for {}......\".format(\n idx, explainer.__class__.__name__))\n saliency_dict_lst = self._run_exp_step(next_element, explainer, sample_id_labels, summary)\n self._spaced_print(\n \"Finish writing {}-th batch explanation data for {}. Time elapsed: {:.3f} s\".format(\n idx, explainer.__class__.__name__, time() - now))\n\n if not self._benchmarkers:\n continue\n\n for bench in self._benchmarkers:\n now = time()\n self._spaced_print(\n \"Start running {}-th batch {} data for {}......\".format(\n idx, bench.__class__.__name__, explainer.__class__.__name__))\n self._run_exp_benchmark_step(next_element, explainer, bench, saliency_dict_lst)\n self._spaced_print(\n \"Finish running {}-th batch {} data for {}. Time elapsed: {:.3f} s\".format(\n idx, bench.__class__.__name__, explainer.__class__.__name__, time() - now))\n\n def _run_saliency(self, summary, sample_id_labels):\n \"\"\"Run the saliency explanations.\"\"\"\n\n for explainer in self._explainers:\n explain = Explain()\n if self._benchmarkers:\n for bench in self._benchmarkers:\n bench.reset()\n print(f\"Start running and writing explanation for {explainer.__class__.__name__}......\")\n self._sample_index = 0\n start = time()\n ds.config.set_seed(self._DATASET_SEED)\n self._run_explainer(summary, sample_id_labels, explainer)\n\n if not self._benchmarkers:\n continue\n\n for bench in self._benchmarkers:\n benchmark = explain.benchmark.add()\n benchmark.explain_method = explainer.__class__.__name__\n benchmark.benchmark_method = bench.__class__.__name__\n\n benchmark.total_score = bench.performance\n if isinstance(bench, LabelSensitiveMetric):\n benchmark.label_score.extend(bench.class_performances)\n\n self._spaced_print(\"Finish running and writing explanation and benchmark data for {}. 
\"\n \"Time elapsed: {:.3f} s\".format(explainer.__class__.__name__, time() - start))\n summary.add_value('explainer', 'benchmark', explain)\n summary.record(1)\n\n def _run_hoc(self, summary, sample_id, sample_input, prob):\n \"\"\"\n Run HOC search for a sample image, and then save the result to summary.\n\n Args:\n summary (SummaryRecord): The summary object to store the data.\n sample_id (int): The sample ID.\n sample_input (Union[Tensor, np.ndarray]): Sample image tensor in CHW or NCWH(N=1).\n prob (Union[Tensor, np.ndarray]): List of sample's classification prediction output, HOC will run for\n labels with prediction output strictly larger then HOC searcher's threshold(0.5 by default).\n \"\"\"\n if isinstance(sample_input, ms.Tensor):\n sample_input = sample_input.asnumpy()\n if len(sample_input.shape) == 3:\n sample_input = np.expand_dims(sample_input, axis=0)\n\n explain = None\n str_mask = hoc.auto_str_mask(sample_input)\n compiled_mask = None\n\n for label_idx, label_prob in enumerate(prob):\n if label_prob <= self._hoc_searcher.threshold:\n continue\n if compiled_mask is None:\n compiled_mask = hoc.compile_mask(str_mask, sample_input)\n try:\n edit_tree, layer_outputs = self._hoc_searcher.search(sample_input, label_idx, compiled_mask)\n except hoc.NoValidResultError:\n log.warning(f\"No Hierarchical Occlusion result was found in sample#{sample_id} \"\n f\"label:{self._labels[label_idx]}, skipped.\")\n continue\n\n if explain is None:\n explain = Explain()\n explain.sample_id = sample_id\n\n self._add_hoc_result_to_explain(label_idx, str_mask, edit_tree, layer_outputs, explain)\n\n if explain is not None:\n summary.add_value(\"explainer\", \"hoc\", explain)\n summary.record(1)\n self._manifest['hierarchical_occlusion'] = True\n\n @staticmethod\n def _add_hoc_result_to_explain(label_idx, str_mask, edit_tree, layer_outputs, explain):\n \"\"\"\n Add HOC result to Explain record.\n\n Args:\n label_idx (int): The label index.\n str_mask (str): The mask string.\n edit_tree (EditStep): The result HOC edit tree.\n layer_outputs (list[float]): The network output confident of each layer.\n explain (Explain): The Explain record.\n \"\"\"\n hoc_rec = explain.hoc.add()\n hoc_rec.label = label_idx\n hoc_rec.mask = str_mask\n layer_count = edit_tree.max_layer + 1\n for layer in range(layer_count):\n steps = edit_tree.get_layer_or_leaf_steps(layer)\n layer_output = layer_outputs[layer]\n hoc_layer = hoc_rec.layer.add()\n hoc_layer.prob = layer_output\n for step in steps:\n hoc_layer.box.extend(list(step.box))\n\n def _add_exp_step_samples(self, explainer, sample_label_sets, batch_saliency_full, summary):\n \"\"\"\n Add explanation results of samples to summary record.\n\n Args:\n explainer (Attribution): The explainer to be run.\n sample_label_sets (list[list[int]]): The label sets of samples.\n batch_saliency_full (Tensor): The saliency output from explainer.\n summary (SummaryRecord): The summary record.\n \"\"\"\n saliency_dict_lst = []\n has_saliency_rec = False\n for idx, label_set in enumerate(sample_label_sets):\n saliency_dict = {}\n explain = Explain()\n explain.sample_id = self._sample_index\n for k, lab in enumerate(label_set):\n saliency = batch_saliency_full[idx:idx + 1, k:k + 1]\n saliency_dict[lab] = saliency\n\n saliency_np = _normalize(saliency.asnumpy().squeeze())\n saliency_image = _np_to_image(saliency_np, mode='L')\n heatmap_path = self._save_heatmap(explainer.__class__.__name__, lab,\n self._sample_index, saliency_image)\n\n explanation = explain.explanation.add()\n 
explanation.explain_method = explainer.__class__.__name__\n explanation.heatmap_path = heatmap_path\n explanation.label = lab\n\n has_saliency_rec = True\n\n summary.add_value(\"explainer\", \"explanation\", explain)\n summary.record(1)\n\n self._sample_index += 1\n saliency_dict_lst.append(saliency_dict)\n\n return saliency_dict_lst, has_saliency_rec\n\n def _run_exp_step(self, next_element, explainer, sample_id_labels, summary):\n \"\"\"\n Run the explanation for each step and write explanation results into summary.\n\n Args:\n next_element (Tuple): Data of one step\n explainer (_Attribution): An Attribution object to generate saliency maps.\n sample_id_labels (dict): A dict that maps the sample id and its union labels.\n summary (SummaryRecord): The summary object to store the data.\n\n Returns:\n list, List of dict that maps label to its corresponding saliency map.\n \"\"\"\n inputs, labels, _ = self._unpack_next_element(next_element)\n sample_index = self._sample_index\n sample_label_sets = []\n for _ in range(len(labels)):\n sample_label_sets.append(sample_id_labels[str(sample_index)])\n sample_index += 1\n\n batch_label_sets = self._make_label_batch(sample_label_sets)\n\n if isinstance(explainer, RISE):\n batch_saliency_full = explainer(inputs, batch_label_sets)\n else:\n batch_saliency_full = []\n for i in range(len(batch_label_sets[0])):\n batch_saliency = explainer(inputs, batch_label_sets[:, i])\n batch_saliency_full.append(batch_saliency)\n concat = ms.ops.operations.Concat(1)\n batch_saliency_full = concat(tuple(batch_saliency_full))\n\n saliency_dict_lst, has_saliency_rec = \\\n self._add_exp_step_samples(explainer, sample_label_sets, batch_saliency_full, summary)\n\n if has_saliency_rec:\n self._manifest['saliency_map'] = True\n\n return saliency_dict_lst\n\n def _run_exp_benchmark_step(self, next_element, explainer, benchmarker, saliency_dict_lst):\n \"\"\"Run the explanation and evaluation for each step and write explanation results into summary.\"\"\"\n inputs, labels, _ = self._unpack_next_element(next_element)\n for idx, inp in enumerate(inputs):\n inp = _EXPAND_DIMS(inp, 0)\n self._manifest['benchmark'] = True\n if isinstance(benchmarker, LabelAgnosticMetric):\n res = benchmarker.evaluate(explainer, inp)\n benchmarker.aggregate(res)\n continue\n saliency_dict = saliency_dict_lst[idx]\n for label, saliency in saliency_dict.items():\n if isinstance(benchmarker, Localization):\n _, _, bboxes = self._unpack_next_element(next_element, True)\n if label in labels[idx]:\n res = benchmarker.evaluate(explainer, inp, targets=label, mask=bboxes[idx][label],\n saliency=saliency)\n benchmarker.aggregate(res, label)\n elif isinstance(benchmarker, LabelSensitiveMetric):\n res = benchmarker.evaluate(explainer, inp, targets=label, saliency=saliency)\n benchmarker.aggregate(res, label)\n else:\n raise TypeError('Benchmarker must be one of LabelSensitiveMetric or LabelAgnosticMetric, but'\n 'receive {}'.format(type(benchmarker)))\n\n @staticmethod\n def _calc_beta_intervals(means, variances, prob=0.95):\n \"\"\"Calculate confidence interval of beta distributions.\"\"\"\n if not isinstance(means, np.ndarray):\n means = np.array(means)\n if not isinstance(variances, np.ndarray):\n variances = np.array(variances)\n with np.errstate(divide='ignore'):\n coef_a = ((means ** 2) * (1 - means) / variances) - means\n coef_b = (coef_a * (1 - means)) / means\n itl_lows, itl_his = beta.interval(prob, coef_a, coef_b)\n sds = np.sqrt(variances)\n for i in range(itl_lows.shape[0]):\n if not 
np.isfinite(sds[i]) or not np.isfinite(itl_lows[i]) or not np.isfinite(itl_his[i]):\n itl_lows[i] = means[i]\n itl_his[i] = means[i]\n sds[i] = 0\n return itl_lows, itl_his, sds\n\n def _verify(self, flags):\n \"\"\"\n Verify datasets and settings.\n\n Args:\n flags (int): Verification flags, use bitwise or '|' to combine multiple flags.\n Possible bitwise flags are shown as follow.\n\n - _VerifyFlag.ALL: Verify everything.\n - _VerifyFlag.REGISTRATION: Verify explainer module registration.\n - _VerifyFlag.DATA_N_NETWORK: Verify dataset and network.\n - _VerifyFlag.SALIENCY: Verify saliency related settings.\n - _VerifyFlag.HOC: Verify HOC related settings.\n - _VerifyFlag.ENVIRONMENT: Verify the runtime environment.\n\n Raises:\n ValueError: Be raised for any data or settings' value problem.\n TypeError: Be raised for any data or settings' type problem.\n RuntimeError: Be raised for any runtime problem.\n \"\"\"\n if flags & _VerifyFlag.ENVIRONMENT:\n device_target = context.get_context('device_target')\n if device_target not in (\"Ascend\", \"GPU\"):\n raise RuntimeError(f\"Unsupported device_target: '{device_target}', \"\n f\"only 'Ascend' or 'GPU' is supported. \"\n f\"Please call context.set_context(device_target='Ascend') or \"\n f\"context.set_context(device_target='GPU').\")\n if flags & (_VerifyFlag.ENVIRONMENT | _VerifyFlag.SALIENCY):\n if self._is_saliency_registered:\n mode = context.get_context('mode')\n if mode != context.PYNATIVE_MODE:\n raise RuntimeError(\"Context mode: GRAPH_MODE is not supported, \"\n \"please call context.set_context(mode=context.PYNATIVE_MODE).\")\n\n if flags & _VerifyFlag.REGISTRATION:\n if self._is_uncertainty_registered and not self._is_saliency_registered:\n raise ValueError(\"Function register_uncertainty() is called but register_saliency() is not.\")\n if not self._is_saliency_registered and not self._is_hoc_registered:\n raise ValueError(\n \"No explanation module was registered, user should at least call register_saliency() \"\n \"or register_hierarchical_occlusion() once with proper arguments.\")\n\n if flags & (_VerifyFlag.DATA_N_NETWORK | _VerifyFlag.SALIENCY | _VerifyFlag.HOC):\n self._verify_data()\n\n if flags & _VerifyFlag.DATA_N_NETWORK:\n self._verify_network()\n\n if flags & _VerifyFlag.SALIENCY:\n self._verify_saliency()\n\n def _verify_labels(self):\n \"\"\"Verify labels.\"\"\"\n label_set = set()\n if not self._labels:\n raise ValueError(f\"The label list provided is empty.\")\n for i, label in enumerate(self._labels):\n if label.strip() == \"\":\n raise ValueError(f\"Label [{i}] is all whitespaces or empty. Please make sure there is \"\n f\"no empty label.\")\n if label in label_set:\n raise ValueError(f\"Duplicated label:{label}! Please make sure all labels are unique.\")\n label_set.add(label)\n\n def _verify_ds_inputs_shape(self, sample, inputs, labels):\n \"\"\"Verify a dataset sample's input shape.\"\"\"\n\n if len(inputs.shape) > 4 or len(inputs.shape) < 3 or inputs.shape[-3] not in [1, 3, 4]:\n raise ValueError(\n \"Image shape {} is unrecognizable: the dimension of image can only be CHW or NCHW.\".format(\n inputs.shape))\n if len(inputs.shape) == 3:\n log.warning(\n \"Image shape {} is 3-dimensional. 
All the data will be automatically unsqueezed at the 0-th\"\n \" dimension as batch data.\".format(inputs.shape))\n if len(sample) > 1:\n if len(labels.shape) > 2 and (np.array(labels.shape[1:]) > 1).sum() > 1:\n raise ValueError(\n \"Labels shape {} is unrecognizable: outputs should not have more than two dimensions\"\n \" with length greater than 1.\".format(labels.shape))\n\n if self._is_hoc_registered:\n if inputs.shape[-3] != 3:\n raise ValueError(\n \"Hierarchical occlusion is registered, images must be in 3 channels format, but \"\n \"{} channel(s) is(are) encountered.\".format(inputs.shape[-3]))\n short_side = min(inputs.shape[-2:])\n if short_side < hoc.AUTO_IMAGE_SHORT_SIDE_MIN:\n raise ValueError(\n \"Hierarchical occlusion is registered, images' short side must be equals to or greater then \"\n \"{}, but {} is encountered.\".format(hoc.AUTO_IMAGE_SHORT_SIDE_MIN, short_side))\n\n def _verify_ds_sample(self, sample):\n \"\"\"Verify a dataset sample.\"\"\"\n if len(sample) not in [1, 2, 3]:\n raise ValueError(\"The dataset should provide [images] or [images, labels], [images, labels, bboxes]\"\n \" as columns.\")\n\n if len(sample) == 3:\n inputs, labels, bboxes = sample\n if bboxes.shape[-1] != 4:\n raise ValueError(\"The third element of dataset should be bounding boxes with shape of \"\n \"[batch_size, num_ground_truth, 4].\")\n else:\n if self._benchmarkers is not None:\n if any([isinstance(bench, Localization) for bench in self._benchmarkers]):\n raise ValueError(\"The dataset must provide bboxes if Localization is to be computed.\")\n\n if len(sample) == 2:\n inputs, labels = sample\n if len(sample) == 1:\n inputs = sample[0]\n\n self._verify_ds_inputs_shape(sample, inputs, labels)\n\n def _verify_data(self):\n \"\"\"Verify dataset and labels.\"\"\"\n self._verify_labels()\n\n try:\n sample = next(self._dataset.create_tuple_iterator())\n except StopIteration:\n raise ValueError(\"The dataset provided is empty.\")\n\n self._verify_ds_sample(sample)\n\n def _verify_network(self):\n \"\"\"Verify the network.\"\"\"\n next_element = next(self._dataset.create_tuple_iterator())\n inputs, _, _ = self._unpack_next_element(next_element)\n prop_test = self._full_network(inputs)\n check_value_type(\"output of network in explainer\", prop_test, ms.Tensor)\n if prop_test.shape[1] != len(self._labels):\n raise ValueError(\"The dimension of network output does not match the no. of classes. Please \"\n \"check labels or the network in the explainer again.\")\n\n def _verify_saliency(self):\n \"\"\"Verify the saliency settings.\"\"\"\n if self._explainers:\n explainer_classes = []\n for explainer in self._explainers:\n if explainer.__class__ in explainer_classes:\n raise ValueError(f\"Repeated {explainer.__class__.__name__} explainer! \"\n \"Please make sure all explainers' class is distinct.\")\n if explainer.network is not self._network:\n raise ValueError(f\"The network of {explainer.__class__.__name__} explainer is different \"\n \"instance from network of runner. Please make sure they are the same \"\n \"instance.\")\n explainer_classes.append(explainer.__class__)\n if self._benchmarkers:\n benchmarker_classes = []\n for benchmarker in self._benchmarkers:\n if benchmarker.__class__ in benchmarker_classes:\n raise ValueError(f\"Repeated {benchmarker.__class__.__name__} benchmarker! 
\"\n \"Please make sure all benchmarkers' class is distinct.\")\n if isinstance(benchmarker, LabelSensitiveMetric) and benchmarker.num_labels != len(self._labels):\n raise ValueError(f\"The num_labels of {benchmarker.__class__.__name__} benchmarker is different \"\n \"from no. of labels of runner. Please make them are the same.\")\n benchmarker_classes.append(benchmarker.__class__)\n\n def _transform_bboxes(self, inputs, labels, bboxes, ifbbox):\n \"\"\"\n Transform the bounding boxes.\n Args:\n inputs (Tensor): the image data\n labels (Tensor): the labels\n bboxes (Tensor): the boudnding boxes data\n ifbbox (bool): whether to preprocess bboxes. If True, a dictionary that indicates bounding boxes w.r.t\n label id will be returned. If False, the returned bboxes is the the parsed bboxes.\n\n Returns:\n bboxes (Union[list[dict], None, Tensor]): the bounding boxes\n \"\"\"\n input_len = len(inputs)\n if bboxes is not None and ifbbox:\n bboxes = ms.Tensor(bboxes, ms.int32)\n masks_lst = []\n labels = labels.asnumpy().reshape([input_len, -1])\n bboxes = bboxes.asnumpy().reshape([input_len, -1, 4])\n for idx, label in enumerate(labels):\n height, width = inputs[idx].shape[-2], inputs[idx].shape[-1]\n masks = {}\n for j, label_item in enumerate(label):\n target = int(label_item)\n if -1 < target < len(self._labels):\n if target not in masks:\n mask = np.zeros((1, 1, height, width))\n else:\n mask = masks[target]\n x_min, y_min, x_len, y_len = bboxes[idx][j].astype(int)\n mask[:, :, x_min:x_min + x_len, y_min:y_min + y_len] = 1\n masks[target] = mask\n masks_lst.append(masks)\n bboxes = masks_lst\n return bboxes\n\n def _transform_data(self, inputs, labels, bboxes, ifbbox):\n \"\"\"\n Transform the data from one iteration of dataset to a unifying form for the follow-up operations.\n\n Args:\n inputs (Tensor): the image data\n labels (Tensor): the labels\n bboxes (Tensor): the boudnding boxes data\n ifbbox (bool): whether to preprocess bboxes. If True, a dictionary that indicates bounding boxes w.r.t\n label id will be returned. 
If False, the returned bboxes is the the parsed bboxes.\n\n Returns:\n inputs (Tensor): the image data, unified to a 4D Tensor.\n labels (list[list[int]]): the ground truth labels.\n bboxes (Union[list[dict], None, Tensor]): the bounding boxes\n \"\"\"\n inputs = ms.Tensor(inputs, ms.float32)\n if len(inputs.shape) == 3:\n inputs = _EXPAND_DIMS(inputs, 0)\n if isinstance(labels, ms.Tensor):\n labels = ms.Tensor(labels, ms.int32)\n labels = _EXPAND_DIMS(labels, 0)\n if isinstance(bboxes, ms.Tensor):\n bboxes = ms.Tensor(bboxes, ms.int32)\n bboxes = _EXPAND_DIMS(bboxes, 0)\n\n bboxes = self._transform_bboxes(inputs, labels, bboxes, ifbbox)\n\n labels = ms.Tensor(labels, ms.int32)\n if len(labels.shape) == 1:\n labels_lst = [[int(i)] for i in labels.asnumpy()]\n else:\n labels = labels.asnumpy().reshape([len(inputs), -1])\n labels_lst = []\n for item in labels:\n labels_lst.append(list(set(int(i) for i in item if -1 < int(i) < len(self._labels))))\n labels = labels_lst\n return inputs, labels, bboxes\n\n def _unpack_next_element(self, next_element, ifbbox=False):\n \"\"\"\n Unpack a single iteration of dataset.\n\n Args:\n next_element (Tuple): a single element iterated from dataset object.\n ifbbox (bool): whether to preprocess bboxes in self._transform_data.\n\n Returns:\n tuple, a unified Tuple contains image_data, labels, and bounding boxes.\n \"\"\"\n if len(next_element) == 3:\n inputs, labels, bboxes = next_element\n elif len(next_element) == 2:\n inputs, labels = next_element\n bboxes = None\n else:\n inputs = next_element[0]\n labels = [[] for _ in inputs]\n bboxes = None\n inputs, labels, bboxes = self._transform_data(inputs, labels, bboxes, ifbbox)\n return inputs, labels, bboxes\n\n @staticmethod\n def _make_label_batch(labels):\n \"\"\"\n Unify a List of List of labels to be a 2D Tensor with shape (b, m), where b = len(labels) and m is the max\n length of all the rows in labels.\n\n Args:\n labels (List[List]): the union labels of a data batch.\n\n Returns:\n 2D Tensor.\n \"\"\"\n max_len = max([len(label) for label in labels])\n batch_labels = np.zeros((len(labels), max_len))\n\n for idx, _ in enumerate(batch_labels):\n length = len(labels[idx])\n batch_labels[idx, :length] = np.array(labels[idx])\n\n return ms.Tensor(batch_labels, ms.int32)\n\n def _save_manifest(self):\n \"\"\"Save manifest.json underneath datafile directory.\"\"\"\n if self._manifest is None:\n raise RuntimeError(\"Manifest not yet be initialized.\")\n path_tokens = [self._summary_dir,\n self._DATAFILE_DIRNAME_PREFIX + str(self._summary_timestamp)]\n abs_dir_path = self._create_subdir(*path_tokens)\n save_path = os.path.join(abs_dir_path, self._MANIFEST_FILENAME)\n fd = os.open(save_path, os.O_WRONLY | os.O_CREAT)\n file = os.fdopen(fd, \"w\")\n try:\n json.dump(self._manifest, file, indent=4)\n except IOError:\n log.error(f\"Failed to save manifest as {save_path}!\")\n raise\n finally:\n file.close()\n os.chmod(save_path, self._FILE_MODE)\n\n def _save_original_image(self, sample_id, image):\n \"\"\"Save an image to summary directory.\"\"\"\n id_dirname = self._get_sample_dirname(sample_id)\n path_tokens = [self._summary_dir,\n self._DATAFILE_DIRNAME_PREFIX + str(self._summary_timestamp),\n self._ORIGINAL_IMAGE_DIRNAME,\n id_dirname]\n\n abs_dir_path = self._create_subdir(*path_tokens)\n filename = f\"{sample_id}.jpg\"\n save_path = os.path.join(abs_dir_path, filename)\n image.save(save_path)\n os.chmod(save_path, self._FILE_MODE)\n return os.path.join(*path_tokens[1:], filename)\n\n def _save_heatmap(self, 
explain_method, class_id, sample_id, image):\n \"\"\"Save heatmap image to summary directory.\"\"\"\n id_dirname = self._get_sample_dirname(sample_id)\n path_tokens = [self._summary_dir,\n self._DATAFILE_DIRNAME_PREFIX + str(self._summary_timestamp),\n self._HEATMAP_DIRNAME,\n explain_method,\n id_dirname]\n\n abs_dir_path = self._create_subdir(*path_tokens)\n filename = f\"{sample_id}_{class_id}.jpg\"\n save_path = os.path.join(abs_dir_path, filename)\n image.save(save_path, optimize=True)\n os.chmod(save_path, self._FILE_MODE)\n return os.path.join(*path_tokens[1:], filename)\n\n def _create_subdir(self, *args):\n \"\"\"Recursively create subdirectories.\"\"\"\n abs_path = None\n for token in args:\n if abs_path is None:\n abs_path = os.path.realpath(token)\n else:\n abs_path = os.path.join(abs_path, token)\n # os.makedirs() don't set intermediate dir permission properly, we mkdir() one by one\n try:\n os.mkdir(abs_path, mode=self._DIR_MODE)\n # In some platform, mode may be ignored in os.mkdir(), we have to chmod() again to make sure\n os.chmod(abs_path, mode=self._DIR_MODE)\n except FileExistsError:\n pass\n return abs_path\n\n @classmethod\n def _get_sample_dirname(cls, sample_id):\n \"\"\"Get the name of parent directory of the image id.\"\"\"\n return str(int(sample_id / cls._SAMPLE_PER_DIR) * cls._SAMPLE_PER_DIR)\n\n @staticmethod\n def _extract_timestamp(filename):\n \"\"\"Extract timestamp from summary filename.\"\"\"\n matched = re.search(r\"summary\\.(\\d+)\", filename)\n if matched:\n return int(matched.group(1))\n return None\n\n @classmethod\n def _spaced_print(cls, message):\n \"\"\"Spaced message printing.\"\"\"\n # workaround to print logs starting new line in case line width mismatch.\n print(cls._SPACER.format(message))\n" ]
[ [ "numpy.expand_dims", "numpy.sqrt", "numpy.isfinite", "scipy.stats.beta.interval", "numpy.uint8", "numpy.errstate", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yewon-lee/Crazyflie_New
[ "500f350f665fb5588a8b522e5894c710cfc51487" ]
[ "rotors_control/src/nodes/position_controller_node_ChihChun_flocking.py" ]
[ "#!/usr/bin/env python2\n\n\"\"\"\nROS interface for controlling up to four Cf2.0's and running the flocking algorithm.\n\nThis ROS node subscribes to the following topics:\n/crazyflie2_id/odometry\n\nThis ROS node publishes to the following topics:\n/crazyflie2_id/command/motor_speed\n/crazyflie2_id/goal\n\nWhere id is a number from 0 to 4\n\n\"\"\"\n\nfrom __future__ import division, print_function, absolute_import\n\n# Import ROS libraries\nimport roslib\nimport rospy\nimport numpy as np\nimport math\nimport time\nimport random\n\nimport csv\nimport sys\nimport os\n\n# Import class that computes the desired positions\nfrom tf.transformations import euler_from_quaternion, quaternion_from_euler\nfrom position_controller import PositionController\nfrom std_msgs.msg import Empty, String\n\n# Import classes - addendum\n\n#from trajectory_msgs.msg import MultiDOFJointTrajectory, MultiDOFJointTrajectoryPoint # subscribe\nfrom geometry_msgs.msg import PoseStamped # publish\nfrom nav_msgs.msg import Odometry # subscribe\nfrom mav_msgs.msg import Actuators # publish\nfrom roll_pitch_yawrate_thrust_crazyflie import roll_pitch_yawrate_thrust_crazyflie # change?\n\n\nclass position_controller_flock_node(object):\n \"\"\"ROS interface for controlling up to four Cf2.0's in Gazebo and running flocking algorithm.\"\"\"\n\n\n def __init__(self, uav_ids, init, fin, vx_ds, vy_ds, vz_ds, yaw_ds):\n\n self.cf_ids = uav_ids\n self.number_of_agents = np.shape(uav_ids)[0]\n self.takoff_alt = 1.0 # change?\n self._pos = {}\n self._vel = {}\n self._quat = {}\n self._dist_to_goal = {}\n\n self._euler_angles = {}\n self._euler_angular_rates = {}\n self._z_old = {}\n self._z_oold = {}\n self.yaw_old = {}\n\n self.radius = 0.15\n self.d_star = self.radius\n self.MaxVelo = 1.0\n # Tune these\n self.c1 = 7*4.5\n self.c2 = 9*4.5\n self.RepulsiveGradient = 7.5*100\n \n self.previous_time = 0.0\n self.change_time = -100.0\n\n self.controller = PositionController()\n self.init_time = rospy.get_time()\n \n\n ### Publish ###\n # waypoint messages\n self.goal_msg_0, self.goal_msg_1, self.goal_msg_2, self.goal_msg_3 = PoseStamped(), PoseStamped(), PoseStamped(), PoseStamped()\n self.goal_msgs = {\n '0' : self.goal_msg_0,\n '1' : self.goal_msg_1,\n '2' : self.goal_msg_2,\n '3' : self.goal_msg_3\n } \n\n # rotor velocities messages\n # self.cmdV_msg_0, self.cmdV_msg_1, self.cmdV_msg_2, self.cmdV_msg_3 = Actuators(), Actuators(), Actuators(), Actuators()\n # self.rotor_vel_msgs = {\n # '0' : self.cmdV_msg_0,\n # '1' : self.cmdV_msg_1,\n # '2' : self.cmdV_msg_2,\n # '3' : self.cmdV_msg_3\n # } \n\n ### Subscribe ###\n # odometry messages\n self._currpos_callbacks = {\n '0' : self._currpos_callback_0,\n '1' : self._currpos_callback_1,\n '2' : self._currpos_callback_2,\n '3' : self._currpos_callback_3\n }\n\n # set parameters\n self.initials, self.finals = {}, {}\n self.goal_pubs, self.cmdVtemp_pubs, self.cmdV_pubs, self.takeoffs, self.lands = {}, {}, {}, {}, {}\n for index, cf_id in enumerate(self.cf_ids):\n self.initials[str(cf_id)] = init[index][:]\n self._z_old[str(cf_id)] = 0.0\n self._z_oold[str(cf_id)] = 0.0\n self.yaw_old[str(cf_id)] = 0.0\n self.finals[str(cf_id)] = fin[index][:]\n self.goal_pubs[str(cf_id)] = rospy.Publisher('/crazyflie2_' + str(cf_id) +'/goal', PoseStamped, queue_size=1)\n self.cmdV_pubs[str(cf_id)] = rospy.Publisher('/crazyflie2_' + str(cf_id) +'/command/motor_speed', Actuators, queue_size=1)\n rospy.Subscriber(\"/crazyflie2_\" + str(cf_id) + \"/odometry\", Odometry, 
self._currpos_callbacks[str(cf_id)]) \n self.vx_d = vx_ds\n self.vy_d = vy_ds\n self.vz_d = vz_ds\n self.yaw_d = yaw_ds\n\n\n self.takeoffed = False\n self.reached_1st = False\n\n self.flag = {\n 'flying' : 0,\n 'landed' : 0,\n 'preland': 0\n }\n\n# self.vstate = {\n# 'takeoff' : self.do_takeoff,\n# 'wpnav' : self.do_wpnav,\n# 'land' : self.do_land\n# }\n\n # Position controller\n self.controller = PositionController()\n\n\n def set_rotor_vel(self, pitch_c, roll_c, yaw_rate_c, p, q, r, roll, pitch, yaw, thrust): # change arg names?\n\n # get conversions\n rotorvel_converter = roll_pitch_yawrate_thrust_crazyflie(pitch_c, roll_c, yaw_rate_c, p, q, r, roll, pitch, yaw, thrust)\n rotor_velocities = rotorvel_converter.CalculateRotorVelocities()\n\n # publish rotor velocities to Actuator\n msg = Actuators()\n msg.angular_velocities = rotor_velocities \n msg.header.stamp = self._currpos_msg.header.stamp\n self.pub_rotor_vel.publish(msg)\n\n def _currpos_callback(self, msg):\n self._currpos_msg = msg\n \n def get_data(self,id):\n self._z_oold[str(id)] = self._z_old[str(id)]\n self._z_old[str(id)] = self._pos[str(id)].z # current z\n print(\"self._euler_angles[str(id)]: \", self._euler_angles[str(id)])\n self.yaw_old[str(id)] = self._euler_angles[str(id)][2] # current yaw\n\n def _currpos_callback_0(self, data):\n self._pos['0'] = data.pose.pose.position\n self._vel['0'] = data.twist.twist.linear\n self._quat['0'] = np.array([data.pose.pose.orientation.x,\n data.pose.pose.orientation.y, \n data.pose.pose.orientation.z, \n data.pose.pose.orientation.w]) # gives quaternion object\n #print(\"quat 0: \", self._quat['0'])\n self._euler_angles['0'] = euler_from_quaternion(self._quat['0']) # gives roll, pitch, yaw\n #print(\"euler angles 0: \", self._euler_angles['0'])\n self._euler_angular_rates['0'] = np.asarray(data.twist.twist.angular) # gives p, q, r\n\n def _currpos_callback_1(self, data):\n self._pos['1'] = data.pose.pose.position\n self._vel['1'] = data.twist.twist.linear\n self._quat['1'] = np.array([data.pose.pose.orientation.x,\n data.pose.pose.orientation.y, \n data.pose.pose.orientation.z, \n data.pose.pose.orientation.w]) # gives quaternion object\n self._euler_angles['1'] = euler_from_quaternion(self._quat['1']) # gives roll, pitch, yaw\n #print(self._euler_angles['1'], type(self._euler_angles['1']))\n self._euler_angular_rates['1'] = np.asarray(data.twist.twist.angular) # gives p, q, r\n\n def _currpos_callback_2(self, data):\n self._pos['2'] = data.pose.pose.position\n self._vel['2'] = data.twist.twist.linear\n self._quat['2'] = np.array([data.pose.pose.orientation.x,\n data.pose.pose.orientation.y, \n data.pose.pose.orientation.z, \n data.pose.pose.orientation.w]) # gives quaternion object\n self._euler_angles['2'] = euler_from_quaternion(self._quat['2']) # gives roll, pitch, yaw\n self._euler_angular_rates['2'] = np.asarray(data.twist.twist.angular) # gives p, q, r\n\n def _currpos_callback_3(self, data):\n self._pos['3'] = data.pose.pose.position\n self._vel['3'] = data.twist.twist.linear\n self._quat['3'] = np.array([data.pose.pose.orientation.x,\n data.pose.pose.orientation.y, \n data.pose.pose.orientation.z, \n data.pose.pose.orientation.w]) # gives quaternion object\n self._euler_angles['3'] = euler_from_quaternion(self._quat['3']) # gives roll, pitch, yaw\n self._euler_angular_rates['3'] = np.asarray(data.twist.twist.angular) # gives p, q, r\n\n\n def update_pos(self, id, pos):\n self.goal_msgs[id].header.seq += 1\n self.goal_msgs[id].header.frame_id = '/world' # change?\n 
self.goal_msgs[id].header.stamp = rospy.Time.now()\n\n self.goal_msgs[id].pose.position.x = pos[0]\n self.goal_msgs[id].pose.position.y = pos[1]\n self.goal_msgs[id].pose.position.z = pos[2]\n\n self.goal_msgs[id].pose.orientation.x = 0\n self.goal_msgs[id].pose.orientation.y = 0 \n self.goal_msgs[id].pose.orientation.z = 0\n self.goal_msgs[id].pose.orientation.w = 1\n \n def update_rotor_vels(self, id):\n # this function takes in the UAV's id and computes+publishes the rotor velocities to that UAV\n\n # get z_oold, z_old, and desired position/yaw\n self.get_data(id) \n\n # get roll/pitch/yawrate/thrust commands from position controller\n roll_c, pitch_c, z_dot_c, yaw_dot_c = self.controller.get_command(\n self._pos[str(id)].x, self._pos[str(id)].y, self._pos[str(id)].z, # x,y,z\n self._euler_angles[str(id)][0], self._euler_angles[str(id)][1], self._euler_angles[str(id)][0][2], # change? roll, pitch, yaw\n self.initials[str(id)][0], self.initials[str(id)][1], self.initials[str(id)][2], # change? xd, yd, zd\n self.vx_d[int(id)], self.vy_d[int(id)], self.vz_d[int(id)], self.yaw_d[int(id)])\n\n # obtain p,q,r/roll,pitch,yaw for UAV id from odometry subscription\n p = self._euler_angular_rates[str(id)][0]\n q = self._euler_angular_rates[str(id)][1]\n r = self._euler_angular_rates[str(id)][2]\n roll = self._euler_angles[str(id)][0]\n pitch = self._euler_angles[str(id)][1]\n yaw = self._euler_angles[str(id)][2]\n\n # convert above commands to rotor velocity commands\n rotorvel_converter = roll_pitch_yawrate_thrust_crazyflie(pitch_c, roll_c, yaw_dot_c, p, q, r, roll, pitch, yaw, z_dot_c)\n rotor_velocities = rotorvel_converter.CalculateRotorVelocities() # this yields a 4-element list\n\n # publish rotor velocities to Actuator\n rotorvel_msg = Actuators()\n rotorvel_msg.angular_velocities = rotor_velocities\n #rotorvel_msg.header.stamp = self._currpos_msg.header.stamp\n self.cmdV_pubs[str(cf_id)].publish(rotorvel_msg)\n\n def publish_msg(self):\n for cf_id in self.cf_ids:\n self.goal_pubs[str(cf_id)].publish(self.goal_msgs[str(cf_id)])\n\n def do_wpnav(self):\n print('Navigating!!')\n # Check all cfs reached assigened takeoff alt\n if not self.takeoffed:\n count = 0\n for cf_id in self.cf_ids:\n if abs(self._pos[str(cf_id)][2] - self.takoff_alt) < 0.05:\n count += 1\n position_d = np.array([self._pos[str(cf_id)][0], self._pos[str(cf_id)][1], self.takoff_alt])\n self.update_pos(str(cf_id), position_d)\n if count == self.number_of_agents:\n self.takeoffed = True\n \n # Check all cfs reached their initial points\n elif not self.reached_1st:\n count = 0\n for cf_id in self.cf_ids:\n if np.linalg.norm(self._pos[str(cf_id)] - self.initials[str(cf_id)]) < 0.1:\n count += 1\n position_d = np.array([self.initials[str(cf_id)][0], self.initials[str(cf_id)][1], self.initials[str(cf_id)][2]])\n self.update_pos(str(cf_id), position_d)\n if count == self.number_of_agents:\n self.reached_1st = True\n print('Initial points reached!!')\n raw_input(\"Press Enter to continue...\") # use input() for python3\n\n # flocking path planning\n else:\n # call flocking and get the desired position of next timestep\n for cf_id in self.cf_ids:\n other_cfs = self.cf_ids[:]\n other_cfs.remove(cf_id)\n self._dist_to_goal[str(cf_id)] = np.linalg.norm(self._pos[str(cf_id)] - self.finals[str(cf_id)])\n force = -self.c1*self._vel[str(cf_id)] - self.c2*(self._pos[str(cf_id)] - self.finals[str(cf_id)])\n for other_cf in other_cfs:\n dist_v = self._pos[str(other_cf)] - self._pos[str(cf_id)]\n dist = np.linalg.norm(dist_v)\n d = 
2*self.radius + self.d_star\n CommunicationRadious = np.cbrt(3*np.square(self.MaxVelo)/(2*self.RepulsiveGradient)) + d\n if dist < CommunicationRadious:\n ForceComponent = -self.RepulsiveGradient * np.square(dist - CommunicationRadious)\n force += ForceComponent * (dist_v)/dist \n velocity_d = self._vel[str(cf_id)] + force * self.dt\n position_d = self._pos[str(cf_id)] + velocity_d*self.dt\n position_d[2] = 1.0 # for 2D sim\n self.update_pos(str(cf_id), position_d)\n\n # Check all cfs reached their final points\n if self._dist_to_goal[max(self._dist_to_goal)] < 0.2:\n self.flag['preland'] = 1\n\n def iteration(self, event):\n\n # publish goal messages for each uav\n self.publish_msg()\n\n # publish rotor velocities for each uav\n for index, cf_id in enumerate(self.cf_ids):\n self.update_rotor_vels(cf_id)\n\n\nif __name__ == '__main__':\n # write code to create PositionControllerNode_ChihChun\n\n rospy.init_node('position_controller_node_ChihChun_flocking', disable_signals=True)\n ids = [0,1]\n initials = np.array([[0,0,0],[1,1,0]])\n finals = np.array([[1,1,1],[0,0,1]])\n vx_ds = [0.0,0.0]# desired vx\n vy_ds = [0.0,0.0]# desired vy\n vz_ds = [0.0,0.0]# desired vz\n yaw_ds = [0.0,0.0]# desired yaw\n dt = 1.0/15\n cf_flocking = position_controller_flock_node(ids, initials, finals, vx_ds, vy_ds, vz_ds, yaw_ds)\n rospy.Timer(rospy.Duration(dt), cf_flocking.iteration)\n rospy.spin()\n\n" ]
[ [ "numpy.square", "numpy.asarray", "numpy.linalg.norm", "numpy.shape", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ndalsanto/PDE-DNN
[ "1d4e612660e90546c6d97f4a6c8c1f498e5bfdf9" ]
[ "fem_data_generation.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 5 16:07:28 2019\n\n@author: Niccolo' Dal Santo\n@email : [email protected]\n\"\"\"\n\nimport random\nimport numpy as np\n\n# generate the coordinates randomly or in a tensorial way (if the mesh is structured and the fem dofs are ordered in a certain way)\n\ndef generate_fem_coordinates( number_of_fem_coordinates, min_coord, max_coord, sampling='random', dof_per_direction=0, \\\n possible_coordinates=[] ):\n \n if sampling == 'random':\n\n fem_locations = np.zeros( number_of_fem_coordinates )\n \n if len(possible_coordinates)==0 :\n for iCoord in range( number_of_fem_coordinates ):\n random.seed(9011 * (iCoord + 1) + iCoord + 2)\n fem_locations[ iCoord ] = random.randint(min_coord,max_coord)\n elif len(possible_coordinates) > 0:\n for iCoord in range( number_of_fem_coordinates ):\n random.seed(9011 * (iCoord + 1) + iCoord + 2)\n fem_locations[ iCoord ] = possible_coordinates[random.randint(0,possible_coordinates.shape[0]-1)]\n \n elif sampling == 'tensorial':\n \n fem_locations = np.zeros( number_of_fem_coordinates[0] * number_of_fem_coordinates[1] )\n\n jump_x = np.ceil( float(dof_per_direction) / float( number_of_fem_coordinates[0] + 1 ) )\n jump_from_border_x = np.floor( ( float(dof_per_direction) - jump_x * float( number_of_fem_coordinates[0] + 1 ) ) / 2. )\n\n print( jump_from_border_x )\n\n jump_y = np.ceil( float(dof_per_direction) / float( number_of_fem_coordinates[1] + 1 ) )\n jump_from_border_y = np.floor( ( float(dof_per_direction) - jump_y * float( number_of_fem_coordinates[1] + 1 ) ) / 2. )\n \n print('Choosing tensorial grid selection, with jumps %f, %f and jumps from border %f, %f' \\\n % (jump_x, jump_y, jump_from_border_x, jump_from_border_y) )\n \n fem_location_counter = 0;\n \n for iX in range( number_of_fem_coordinates[0] ):\n for iY in range( number_of_fem_coordinates[1] ):\n \n fem_locations[fem_location_counter] = jump_from_border_x + dof_per_direction * jump_from_border_y \\\n + (iY+1) * jump_y * dof_per_direction \\\n + (iX+1) * jump_x\n \n fem_location_counter = fem_location_counter + 1\n \n fem_locations = np.sort( fem_locations )\n fem_locations = np.unique( fem_locations )\n \n return fem_locations.astype( int )\n\n\n\ndef generate_fem_training_data( ns, fem_coordinates, fem_output_coordinates, snapshot_collector ):\n\n num_locations = fem_coordinates.shape[0]\n num_output_locations = fem_output_coordinates.shape[0]\n\n y_output = np.zeros( ( ns, num_output_locations ) )\n\n # measurements of the solution, should they be noised?\n u_ex_locations = np.zeros( (ns, num_locations) )\n\n for iNs in range( ns ):\n u_ex_locations[iNs, :] = snapshot_collector.get_snapshot_function( iNs, fem_coordinates )\n y_output[iNs, :] = snapshot_collector.get_snapshot_function( iNs, fem_output_coordinates )\n\n return u_ex_locations, y_output\n\n\n\n\n" ]
[ [ "numpy.zeros", "numpy.sort", "numpy.unique" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fabianegli/biopython
[ "132851bb572c0f6a22cde788d3a56e87ee1f48d2" ]
[ "Tests/test_seq.py" ]
[ "# This code is part of the Biopython distribution and governed by its\n# license. Please see the LICENSE file that should have been included\n# as part of this package.\n\n\"\"\"Tests for seq module.\"\"\"\n\nimport array\nimport copy\nimport unittest\nimport warnings\n\ntry:\n import numpy\nexcept ImportError:\n numpy = None\n\nfrom Bio import BiopythonWarning, BiopythonDeprecationWarning\nfrom Bio import Seq\nfrom Bio.Data.IUPACData import (\n ambiguous_dna_complement,\n ambiguous_rna_complement,\n ambiguous_dna_values,\n ambiguous_rna_values,\n)\nfrom Bio.Data.CodonTable import TranslationError, standard_dna_table\n\ntest_seqs = [\n Seq.Seq(\"TCAAAAGGATGCATCATG\"),\n Seq.Seq(\"T\"),\n Seq.Seq(\"ATGAAACTG\"),\n Seq.Seq(\"ATGAARCTG\"),\n Seq.Seq(\"AWGAARCKG\"), # Note no U or T\n Seq.Seq(\"\".join(ambiguous_rna_values)),\n Seq.Seq(\"\".join(ambiguous_dna_values)),\n Seq.Seq(\"AWGAARCKG\"),\n Seq.Seq(\"AUGAAACUG\"),\n Seq.Seq(\"ATGAAA-CTG\"),\n Seq.Seq(\"ATGAAACTGWN\"),\n Seq.Seq(\"AUGAAA==CUG\"),\n Seq.Seq(\"AUGAAACUGWN\"),\n Seq.Seq(\"AUGAAACTG\"), # U and T\n Seq.MutableSeq(\"ATGAAACTG\"),\n Seq.MutableSeq(\"AUGaaaCUG\"),\n Seq.Seq(\"ACTGTCGTCT\"),\n]\nprotein_seqs = [\n Seq.Seq(\"ATCGPK\"),\n Seq.Seq(\"T.CGPK\"),\n Seq.Seq(\"T-CGPK\"),\n Seq.Seq(\"MEDG-KRXR*\"),\n Seq.MutableSeq(\"ME-K-DRXR*XU\"),\n Seq.Seq(\"MEDG-KRXR@\"),\n Seq.Seq(\"ME-KR@\"),\n Seq.Seq(\"MEDG.KRXR@\"),\n]\n\n\nclass TestSeq(unittest.TestCase):\n def setUp(self):\n self.s = Seq.Seq(\"TCAAAAGGATGCATCATG\")\n\n def test_as_string(self):\n \"\"\"Test converting Seq to string.\"\"\"\n self.assertEqual(\"TCAAAAGGATGCATCATG\", self.s)\n\n def test_seq_construction(self):\n \"\"\"Test Seq object initialization.\"\"\"\n sequence = bytes(self.s)\n s = Seq.Seq(sequence)\n self.assertIsInstance(s, Seq.Seq, \"Creating MutableSeq using bytes\")\n self.assertEqual(s, self.s)\n s = Seq.Seq(bytearray(sequence))\n self.assertIsInstance(s, Seq.Seq, \"Creating MutableSeq using bytearray\")\n self.assertEqual(s, self.s)\n s = Seq.Seq(sequence.decode(\"ASCII\"))\n self.assertIsInstance(s, Seq.Seq, \"Creating MutableSeq using str\")\n self.assertEqual(s, self.s)\n s = Seq.Seq(self.s)\n self.assertIsInstance(s, Seq.Seq, \"Creating MutableSeq using Seq\")\n self.assertEqual(s, self.s)\n s = Seq.Seq(Seq.MutableSeq(sequence))\n self.assertIsInstance(s, Seq.Seq, \"Creating MutableSeq using MutableSeq\")\n self.assertEqual(s, self.s)\n self.assertRaises(\n UnicodeEncodeError, Seq.Seq, \"ÄþÇÐ\"\n ) # All are Latin-1 characters\n self.assertRaises(UnicodeEncodeError, Seq.Seq, \"あいうえお\") # These are not\n\n def test_repr(self):\n \"\"\"Test representation of Seq object.\"\"\"\n self.assertEqual(\"Seq('TCAAAAGGATGCATCATG')\", repr(self.s))\n\n def test_truncated_repr(self):\n seq = \"TCAAAAGGATGCATCATGTCAAAAGGATGCATCATGTCAAAAGGATGCATCATGTCAAAAGGA\"\n expected = \"Seq('TCAAAAGGATGCATCATGTCAAAAGGATGCATCATGTCAAAAGGATGCATCATG...GGA')\"\n self.assertEqual(expected, repr(Seq.Seq(seq)))\n\n def test_length(self):\n \"\"\"Test len method on Seq object.\"\"\"\n self.assertEqual(18, len(self.s))\n\n def test_first_nucleotide(self):\n \"\"\"Test getting first nucleotide of Seq.\"\"\"\n self.assertEqual(\"T\", self.s[0])\n\n def test_last_nucleotide(self):\n \"\"\"Test getting last nucleotide of Seq.\"\"\"\n self.assertEqual(\"G\", self.s[-1])\n\n def test_slicing(self):\n \"\"\"Test slicing of Seq.\"\"\"\n self.assertEqual(\"AA\", self.s[3:5])\n\n def test_reverse(self):\n \"\"\"Test reverse using -1 stride.\"\"\"\n 
self.assertEqual(\"GTACTACGTAGGAAAACT\", self.s[::-1])\n\n def test_extract_third_nucleotide(self):\n \"\"\"Test extracting every third nucleotide (slicing with stride 3).\"\"\"\n self.assertEqual(\"TAGTAA\", self.s[0::3])\n self.assertEqual(\"CAGGTT\", self.s[1::3])\n self.assertEqual(\"AAACCG\", self.s[2::3])\n\n def test_concatenation_of_seq(self):\n t = Seq.Seq(\"T\")\n u = self.s + t\n self.assertEqual(str(self.s) + \"T\", u)\n self.assertEqual(self.s + Seq.Seq(\"T\"), \"TCAAAAGGATGCATCATGT\")\n\n def test_replace(self):\n self.assertEqual(\"ATCCCA\", Seq.Seq(\"ATC-CCA\").replace(\"-\", \"\"))\n\n\nclass TestSeqStringMethods(unittest.TestCase):\n def setUp(self):\n self.s = Seq.Seq(\"TCAAAAGGATGCATCATG\")\n self.dna = [\n Seq.Seq(\"ATCG\"),\n Seq.Seq(\"gtca\"),\n Seq.MutableSeq(\"GGTCA\"),\n Seq.Seq(\"CTG-CA\"),\n ]\n self.rna = [\n Seq.Seq(\"AUUUCG\"),\n Seq.MutableSeq(\"AUUCG\"),\n Seq.Seq(\"uCAg\"),\n Seq.MutableSeq(\"UC-AG\"),\n Seq.Seq(\"U.CAG\"),\n ]\n self.nuc = [Seq.Seq(\"ATCG\")]\n self.protein = [\n Seq.Seq(\"ATCGPK\"),\n Seq.Seq(\"atcGPK\"),\n Seq.Seq(\"T.CGPK\"),\n Seq.Seq(\"T-CGPK\"),\n Seq.Seq(\"MEDG-KRXR*\"),\n Seq.MutableSeq(\"ME-K-DRXR*XU\"),\n Seq.Seq(\"MEDG-KRXR@\"),\n Seq.Seq(\"ME-KR@\"),\n Seq.Seq(\"MEDG.KRXR@\"),\n ]\n self.test_chars = [\"-\", Seq.Seq(\"-\"), Seq.Seq(\"*\"), \"-X@\"]\n\n def test_string_methods(self):\n for a in self.dna + self.rna + self.nuc + self.protein:\n self.assertEqual(a.lower(), str(a).lower())\n self.assertEqual(a.upper(), str(a).upper())\n self.assertEqual(a.strip(), str(a).strip())\n self.assertEqual(a.lstrip(), str(a).lstrip())\n self.assertEqual(a.rstrip(), str(a).rstrip())\n\n def test_mutableseq_upper_lower(self):\n seq = Seq.MutableSeq(\"ACgt\")\n lseq = seq.lower()\n self.assertEqual(lseq, \"acgt\")\n self.assertEqual(seq, \"ACgt\")\n lseq = seq.lower(inplace=False)\n self.assertEqual(lseq, \"acgt\")\n self.assertEqual(seq, \"ACgt\")\n lseq = seq.lower(inplace=True)\n self.assertEqual(lseq, \"acgt\")\n self.assertIs(lseq, seq)\n seq = Seq.MutableSeq(\"ACgt\")\n useq = seq.upper()\n self.assertEqual(useq, \"ACGT\")\n self.assertEqual(seq, \"ACgt\")\n useq = seq.upper(inplace=False)\n self.assertEqual(useq, \"ACGT\")\n self.assertEqual(seq, \"ACgt\")\n useq = seq.upper(inplace=True)\n self.assertEqual(useq, \"ACGT\")\n self.assertIs(useq, seq)\n\n def test_hash(self):\n with warnings.catch_warnings(record=True):\n hash(self.s)\n\n def test_not_equal_comparsion(self):\n \"\"\"Test __ne__ comparison method.\"\"\"\n self.assertNotEqual(Seq.Seq(\"TCAAA\"), Seq.Seq(\"TCAAAA\"))\n\n def test_less_than_comparison(self):\n \"\"\"Test __lt__ comparison method.\"\"\"\n self.assertLess(self.s[:-1], self.s)\n\n def test_less_than_comparison_of_incompatible_types(self):\n \"\"\"Test incompatible types __lt__ comparison method.\"\"\"\n with self.assertRaises(TypeError):\n self.s < 1\n\n def test_less_than_or_equal_comparison(self):\n \"\"\"Test __le__ comparison method.\"\"\"\n self.assertLessEqual(self.s, self.s)\n\n def test_less_than_or_equal_comparison_of_incompatible_types(self):\n \"\"\"Test incompatible types __le__ comparison method.\"\"\"\n with self.assertRaises(TypeError):\n self.s <= 1\n\n def test_greater_than_comparison(self):\n \"\"\"Test __gt__ comparison method.\"\"\"\n self.assertGreater(self.s, self.s[:-1])\n\n def test_greater_than_comparison_of_incompatible_types(self):\n \"\"\"Test incompatible types __gt__ comparison method.\"\"\"\n with self.assertRaises(TypeError):\n self.s > 1\n\n def 
test_greater_than_or_equal_comparison(self):\n \"\"\"Test __ge__ comparison method.\"\"\"\n self.assertGreaterEqual(self.s, self.s)\n\n def test_greater_than_or_equal_comparison_of_incompatible_types(self):\n \"\"\"Test incompatible types __ge__ comparison method.\"\"\"\n with self.assertRaises(TypeError):\n self.s >= 1\n\n def test_add_method_using_wrong_object(self):\n with self.assertRaises(TypeError):\n self.s + {}\n\n def test_radd_method_using_wrong_object(self):\n self.assertEqual(self.s.__radd__({}), NotImplemented)\n\n def test_contains_method(self):\n self.assertIn(\"AAAA\", self.s)\n\n def test_startswith(self):\n self.assertTrue(self.s.startswith(\"TCA\"))\n self.assertTrue(self.s.startswith((\"CAA\", \"CTA\"), 1))\n\n def test_endswith(self):\n self.assertTrue(self.s.endswith(\"ATG\"))\n self.assertTrue(self.s.endswith((\"ATG\", \"CTA\")))\n\n def test_append_nucleotides(self):\n self.test_chars.append(Seq.Seq(\"A\"))\n self.assertEqual(5, len(self.test_chars))\n\n def test_append_proteins(self):\n self.test_chars.append(Seq.Seq(\"K\"))\n self.test_chars.append(Seq.Seq(\"K-\"))\n self.test_chars.append(Seq.Seq(\"K@\"))\n\n self.assertEqual(7, len(self.test_chars))\n\n def test_stripping_characters(self):\n for a in self.dna + self.rna + self.nuc + self.protein:\n for char in self.test_chars:\n str_char = str(char)\n self.assertEqual(a.strip(char), str(a).strip(str_char))\n self.assertEqual(a.lstrip(char), str(a).lstrip(str_char))\n self.assertEqual(a.rstrip(char), str(a).rstrip(str_char))\n\n def test_finding_characters(self):\n for a in self.dna + self.rna + self.nuc + self.protein:\n for char in self.test_chars:\n str_char = str(char)\n self.assertEqual(a.find(char), str(a).find(str_char))\n self.assertEqual(a.find(char, 2, -2), str(a).find(str_char, 2, -2))\n self.assertEqual(a.rfind(char), str(a).rfind(str_char))\n self.assertEqual(a.rfind(char, 2, -2), str(a).rfind(str_char, 2, -2))\n\n def test_counting_characters(self):\n for a in self.dna + self.rna + self.nuc + self.protein:\n for char in self.test_chars:\n str_char = str(char)\n self.assertEqual(a.count(char), str(a).count(str_char))\n self.assertEqual(a.count(char, 2, -2), str(a).count(str_char, 2, -2))\n\n def test_splits(self):\n for a in self.dna + self.rna + self.nuc + self.protein:\n for char in self.test_chars:\n str_char = str(char)\n self.assertEqual(a.split(char), str(a).split(str_char))\n self.assertEqual(a.rsplit(char), str(a).rsplit(str_char))\n\n for max_sep in [0, 1, 2, 999]:\n self.assertEqual(\n a.split(char, max_sep), str(a).split(str_char, max_sep)\n )\n\n\nclass TestSeqAddition(unittest.TestCase):\n def setUp(self):\n self.dna = [\n Seq.Seq(\"ATCG\"),\n Seq.Seq(\"gtca\"),\n Seq.MutableSeq(\"GGTCA\"),\n Seq.Seq(\"CTG-CA\"),\n \"TGGTCA\",\n ]\n self.rna = [\n Seq.Seq(\"AUUUCG\"),\n Seq.MutableSeq(\"AUUCG\"),\n Seq.Seq(\"uCAg\"),\n Seq.MutableSeq(\"UC-AG\"),\n Seq.Seq(\"U.CAG\"),\n \"UGCAU\",\n ]\n self.nuc = [Seq.Seq(\"ATCG\"), \"UUUTTTACG\"]\n self.protein = [\n Seq.Seq(\"ATCGPK\"),\n Seq.Seq(\"atcGPK\"),\n Seq.Seq(\"T.CGPK\"),\n Seq.Seq(\"T-CGPK\"),\n Seq.Seq(\"MEDG-KRXR*\"),\n Seq.MutableSeq(\"ME-K-DRXR*XU\"),\n \"TEDDF\",\n ]\n\n def test_addition_dna_rna_with_generic_nucleotides(self):\n for a in self.dna + self.rna:\n for b in self.nuc:\n c = a + b\n self.assertEqual(c, str(a) + str(b))\n\n def test_addition_dna_rna_with_generic_nucleotides_inplace(self):\n for a in self.dna + self.rna:\n for b in self.nuc:\n c = b + a\n b += a # can't change 'a' as need value next iteration\n 
self.assertEqual(c, b)\n\n def test_addition_rna_with_rna(self):\n self.rna.pop(3)\n for a in self.rna:\n for b in self.rna:\n c = a + b\n self.assertEqual(c, str(a) + str(b))\n\n def test_addition_rna_with_rna_inplace(self):\n self.rna.pop(3)\n for a in self.rna:\n for b in self.rna:\n c = b + a\n b += a\n self.assertEqual(c, b)\n\n def test_addition_dna_with_dna(self):\n for a in self.dna:\n for b in self.dna:\n c = a + b\n self.assertEqual(c, str(a) + str(b))\n\n def test_addition_dna_with_dna_inplace(self):\n for a in self.dna:\n for b in self.dna:\n c = b + a\n b += a\n self.assertEqual(c, b)\n\n def test_addition_dna_with_rna(self):\n self.dna.pop(4)\n self.rna.pop(5)\n for a in self.dna:\n for b in self.rna:\n self.assertEqual(str(a) + str(b), a + b)\n self.assertEqual(str(b) + str(a), b + a)\n # Check in place works\n c = a\n c += b\n self.assertEqual(c, str(a) + str(b))\n c = b\n c += a\n self.assertEqual(c, str(b) + str(a))\n\n def test_addition_proteins(self):\n self.protein.pop(2)\n for a in self.protein:\n for b in self.protein:\n c = a + b\n self.assertEqual(c, str(a) + str(b))\n\n def test_addition_proteins_inplace(self):\n self.protein.pop(2)\n for a in self.protein:\n for b in self.protein:\n c = b + a\n b += a\n self.assertEqual(c, b)\n\n def test_adding_protein_with_nucleotides(self):\n for a in self.protein[0:5]:\n for b in self.dna[0:3] + self.rna[0:4]:\n self.assertEqual(str(a) + str(b), a + b)\n a += b\n\n def test_adding_generic_nucleotide_with_other_nucleotides(self):\n for a in self.nuc:\n for b in self.dna + self.rna + self.nuc:\n c = a + b\n self.assertEqual(c, str(a) + str(b))\n\n def test_adding_generic_nucleotide_with_other_nucleotides_inplace(self):\n for a in self.nuc:\n for b in self.dna + self.rna + self.nuc:\n c = b + a\n b += a\n self.assertEqual(c, b)\n\n\nclass TestSeqMultiplication(unittest.TestCase):\n def test_mul_method(self):\n \"\"\"Test mul method; relies on addition method.\"\"\"\n for seq in test_seqs + protein_seqs:\n self.assertEqual(seq * 3, seq + seq + seq)\n if numpy is not None:\n factor = numpy.intc(3) # numpy integer\n for seq in test_seqs + protein_seqs:\n self.assertEqual(seq * factor, seq + seq + seq)\n\n def test_mul_method_exceptions(self):\n \"\"\"Test mul method exceptions.\"\"\"\n for seq in test_seqs + protein_seqs:\n with self.assertRaises(TypeError):\n seq * 3.0\n with self.assertRaises(TypeError):\n seq * \"\"\n\n def test_rmul_method(self):\n \"\"\"Test rmul method; relies on addition method.\"\"\"\n for seq in test_seqs + protein_seqs:\n self.assertEqual(3 * seq, seq + seq + seq)\n if numpy is not None:\n factor = numpy.intc(3) # numpy integer\n for seq in test_seqs + protein_seqs:\n self.assertEqual(factor * seq, seq + seq + seq)\n\n def test_rmul_method_exceptions(self):\n \"\"\"Test rmul method exceptions.\"\"\"\n for seq in test_seqs + protein_seqs:\n with self.assertRaises(TypeError):\n 3.0 * seq\n with self.assertRaises(TypeError):\n \"\" * seq\n\n def test_imul_method(self):\n \"\"\"Test imul method; relies on addition and mull methods.\"\"\"\n for seq in test_seqs + protein_seqs:\n original_seq = seq * 1 # make a copy\n seq *= 3\n self.assertEqual(seq, original_seq + original_seq + original_seq)\n if numpy is not None:\n factor = numpy.intc(3) # numpy integer\n for seq in test_seqs + protein_seqs:\n original_seq = seq * 1 # make a copy\n seq *= factor\n self.assertEqual(seq, original_seq + original_seq + original_seq)\n\n def test_imul_method_exceptions(self):\n \"\"\"Test imul method exceptions.\"\"\"\n for seq 
in test_seqs + protein_seqs:\n with self.assertRaises(TypeError):\n seq *= 3.0\n with self.assertRaises(TypeError):\n seq *= \"\"\n\n\nclass TestMutableSeq(unittest.TestCase):\n def setUp(self):\n sequence = b\"TCAAAAGGATGCATCATG\"\n self.s = Seq.Seq(sequence)\n self.mutable_s = Seq.MutableSeq(sequence)\n\n def test_mutableseq_construction(self):\n \"\"\"Test MutableSeq object initialization.\"\"\"\n sequence = bytes(self.s)\n mutable_s = Seq.MutableSeq(sequence)\n self.assertIsInstance(\n mutable_s, Seq.MutableSeq, \"Initializing MutableSeq from bytes\"\n )\n self.assertEqual(mutable_s, self.s)\n mutable_s = Seq.MutableSeq(bytearray(sequence))\n self.assertIsInstance(\n mutable_s, Seq.MutableSeq, \"Initializing MutableSeq from bytearray\"\n )\n self.assertEqual(mutable_s, self.s)\n mutable_s = Seq.MutableSeq(sequence.decode(\"ASCII\"))\n self.assertIsInstance(\n mutable_s, Seq.MutableSeq, \"Initializing MutableSeq from str\"\n )\n self.assertEqual(mutable_s, self.s)\n mutable_s = Seq.MutableSeq(self.s)\n self.assertIsInstance(\n mutable_s, Seq.MutableSeq, \"Initializing MutableSeq from Seq\"\n )\n self.assertEqual(mutable_s, self.s)\n mutable_s = Seq.MutableSeq(Seq.MutableSeq(sequence))\n self.assertEqual(mutable_s, self.s)\n self.assertIsInstance(\n mutable_s, Seq.MutableSeq, \"Initializing MutableSeq from MutableSeq\"\n )\n # Deprecated:\n with self.assertWarns(BiopythonDeprecationWarning):\n mutable_s = Seq.MutableSeq(array.array(\"u\", sequence.decode(\"ASCII\")))\n self.assertIsInstance(\n mutable_s, Seq.MutableSeq, \"Creating MutableSeq using array\"\n )\n self.assertEqual(mutable_s, self.s)\n self.assertRaises(\n UnicodeEncodeError, Seq.MutableSeq, \"ÄþÇÐ\"\n ) # All are Latin-1 characters\n self.assertRaises(UnicodeEncodeError, Seq.MutableSeq, \"あいうえお\") # These are not\n\n def test_repr(self):\n self.assertEqual(\"MutableSeq('TCAAAAGGATGCATCATG')\", repr(self.mutable_s))\n\n def test_truncated_repr(self):\n seq = \"TCAAAAGGATGCATCATGTCAAAAGGATGCATCATGTCAAAAGGATGCATCATGTCAAAAGGA\"\n expected = (\n \"MutableSeq('TCAAAAGGATGCATCATGTCAAAAGGATGCATCATGTCAAAAGGATGCATCATG...GGA')\"\n )\n self.assertEqual(expected, repr(Seq.MutableSeq(seq)))\n\n def test_equal_comparison(self):\n \"\"\"Test __eq__ comparison method.\"\"\"\n self.assertEqual(self.mutable_s, \"TCAAAAGGATGCATCATG\")\n\n def test_not_equal_comparison(self):\n \"\"\"Test __ne__ comparison method.\"\"\"\n self.assertNotEqual(self.mutable_s, \"other thing\")\n\n def test_less_than_comparison(self):\n \"\"\"Test __lt__ comparison method.\"\"\"\n self.assertLess(self.mutable_s[:-1], self.mutable_s)\n\n def test_less_than_comparison_of_incompatible_types(self):\n with self.assertRaises(TypeError):\n self.mutable_s < 1\n\n def test_less_than_comparison_with_str(self):\n self.assertLessEqual(self.mutable_s[:-1], \"TCAAAAGGATGCATCATG\")\n\n def test_less_than_or_equal_comparison(self):\n \"\"\"Test __le__ comparison method.\"\"\"\n self.assertLessEqual(self.mutable_s[:-1], self.mutable_s)\n\n def test_less_than_or_equal_comparison_of_incompatible_types(self):\n with self.assertRaises(TypeError):\n self.mutable_s <= 1\n\n def test_less_than_or_equal_comparison_with_str(self):\n self.assertLessEqual(self.mutable_s[:-1], \"TCAAAAGGATGCATCATG\")\n\n def test_greater_than_comparison(self):\n \"\"\"Test __gt__ comparison method.\"\"\"\n self.assertGreater(self.mutable_s, self.mutable_s[:-1])\n\n def test_greater_than_comparison_of_incompatible_types(self):\n with self.assertRaises(TypeError):\n self.mutable_s > 1\n\n def 
test_greater_than_comparison_with_str(self):\n self.assertGreater(self.mutable_s, \"TCAAAAGGATGCATCAT\")\n\n def test_greater_than_or_equal_comparison(self):\n \"\"\"Test __ge__ comparison method.\"\"\"\n self.assertGreaterEqual(self.mutable_s, self.mutable_s)\n\n def test_greater_than_or_equal_comparison_of_incompatible_types(self):\n with self.assertRaises(TypeError):\n self.mutable_s >= 1\n\n def test_greater_than_or_equal_comparison_with_str(self):\n self.assertGreaterEqual(self.mutable_s, \"TCAAAAGGATGCATCATG\")\n\n def test_add_method(self):\n \"\"\"Test adding wrong type to MutableSeq.\"\"\"\n with self.assertRaises(TypeError):\n self.mutable_s + 1234\n\n def test_radd_method_wrong_type(self):\n self.assertEqual(self.mutable_s.__radd__(1234), NotImplemented)\n\n def test_contains_method(self):\n self.assertIn(\"AAAA\", self.mutable_s)\n\n def test_startswith(self):\n self.assertTrue(self.mutable_s.startswith(\"TCA\"))\n self.assertTrue(self.mutable_s.startswith((\"CAA\", \"CTA\"), 1))\n\n def test_endswith(self):\n self.assertTrue(self.mutable_s.endswith(\"ATG\"))\n self.assertTrue(self.mutable_s.endswith((\"ATG\", \"CTA\")))\n\n def test_as_string(self):\n self.assertEqual(\"TCAAAAGGATGCATCATG\", self.mutable_s)\n\n def test_length(self):\n self.assertEqual(18, len(self.mutable_s))\n\n def test_converting_to_immutable(self):\n self.assertIsInstance(Seq.Seq(self.mutable_s), Seq.Seq)\n\n def test_first_nucleotide(self):\n self.assertEqual(\"T\", self.mutable_s[0])\n\n def test_setting_slices(self):\n self.assertEqual(\n Seq.MutableSeq(\"CAAA\"), self.mutable_s[1:5], \"Slice mutable seq\"\n )\n\n self.mutable_s[1:3] = \"GAT\"\n self.assertEqual(\n Seq.MutableSeq(\"TGATAAAGGATGCATCATG\"),\n self.mutable_s,\n \"Set slice with string and adding extra nucleotide\",\n )\n\n self.mutable_s[1:3] = self.mutable_s[5:7]\n self.assertEqual(\n Seq.MutableSeq(\"TAATAAAGGATGCATCATG\"),\n self.mutable_s,\n \"Set slice with MutableSeq\",\n )\n if numpy is not None:\n one, three, five, seven = numpy.array([1, 3, 5, 7]) # numpy integers\n self.assertEqual(\n Seq.MutableSeq(\"AATA\"), self.mutable_s[one:five], \"Slice mutable seq\"\n )\n\n self.mutable_s[one:three] = \"GAT\"\n self.assertEqual(\n Seq.MutableSeq(\"TGATTAAAGGATGCATCATG\"),\n self.mutable_s,\n \"Set slice with string and adding extra nucleotide\",\n )\n\n self.mutable_s[one:three] = self.mutable_s[five:seven]\n self.assertEqual(\n Seq.MutableSeq(\"TAATTAAAGGATGCATCATG\"),\n self.mutable_s,\n \"Set slice with MutableSeq\",\n )\n\n def test_setting_item(self):\n self.mutable_s[3] = \"G\"\n self.assertEqual(Seq.MutableSeq(\"TCAGAAGGATGCATCATG\"), self.mutable_s)\n if numpy is not None:\n i = numpy.intc(3)\n self.mutable_s[i] = \"X\"\n self.assertEqual(Seq.MutableSeq(\"TCAXAAGGATGCATCATG\"), self.mutable_s)\n\n def test_deleting_slice(self):\n del self.mutable_s[4:5]\n self.assertEqual(Seq.MutableSeq(\"TCAAAGGATGCATCATG\"), self.mutable_s)\n\n def test_deleting_item(self):\n del self.mutable_s[3]\n self.assertEqual(Seq.MutableSeq(\"TCAAAGGATGCATCATG\"), self.mutable_s)\n\n def test_appending(self):\n self.mutable_s.append(\"C\")\n self.assertEqual(Seq.MutableSeq(\"TCAAAAGGATGCATCATGC\"), self.mutable_s)\n\n def test_inserting(self):\n self.mutable_s.insert(4, \"G\")\n self.assertEqual(Seq.MutableSeq(\"TCAAGAAGGATGCATCATG\"), self.mutable_s)\n\n def test_popping_last_item(self):\n self.assertEqual(\"G\", self.mutable_s.pop())\n\n def test_remove_items(self):\n self.mutable_s.remove(\"G\")\n self.assertEqual(\n 
Seq.MutableSeq(\"TCAAAAGATGCATCATG\"), self.mutable_s, \"Remove first G\"\n )\n\n self.assertRaises(ValueError, self.mutable_s.remove, \"Z\")\n\n def test_count(self):\n self.assertEqual(7, self.mutable_s.count(\"A\"))\n self.assertEqual(2, self.mutable_s.count(\"AA\"))\n\n def test_index(self):\n self.assertEqual(2, self.mutable_s.index(\"A\"))\n self.assertRaises(ValueError, self.mutable_s.index, \"8888\")\n\n def test_reverse(self):\n \"\"\"Test using reverse method.\"\"\"\n self.mutable_s.reverse()\n self.assertEqual(Seq.MutableSeq(\"GTACTACGTAGGAAAACT\"), self.mutable_s)\n\n def test_reverse_with_stride(self):\n \"\"\"Test reverse using -1 stride.\"\"\"\n self.assertEqual(Seq.MutableSeq(\"GTACTACGTAGGAAAACT\"), self.mutable_s[::-1])\n\n def test_complement_old(self):\n # old approach\n with self.assertWarns(BiopythonDeprecationWarning):\n self.mutable_s.complement()\n self.assertEqual(\"AGTTTTCCTACGTAGTAC\", self.mutable_s)\n\n def test_complement(self):\n # new approach\n self.mutable_s.complement(inplace=True)\n self.assertEqual(\"AGTTTTCCTACGTAGTAC\", self.mutable_s)\n\n def test_complement_rna(self):\n m = self.mutable_s.complement_rna()\n self.assertEqual(self.mutable_s, \"TCAAAAGGATGCATCATG\")\n self.assertIsInstance(m, Seq.MutableSeq)\n self.assertEqual(m, \"AGUUUUCCUACGUAGUAC\")\n m = self.mutable_s.complement_rna(inplace=True)\n self.assertEqual(self.mutable_s, \"AGUUUUCCUACGUAGUAC\")\n self.assertIsInstance(m, Seq.MutableSeq)\n self.assertEqual(m, \"AGUUUUCCUACGUAGUAC\")\n\n def test_reverse_complement_rna(self):\n m = self.mutable_s.reverse_complement_rna()\n self.assertEqual(self.mutable_s, \"TCAAAAGGATGCATCATG\")\n self.assertIsInstance(m, Seq.MutableSeq)\n self.assertEqual(m, \"CAUGAUGCAUCCUUUUGA\")\n m = self.mutable_s.reverse_complement_rna(inplace=True)\n self.assertEqual(self.mutable_s, \"CAUGAUGCAUCCUUUUGA\")\n self.assertIsInstance(m, Seq.MutableSeq)\n self.assertEqual(m, \"CAUGAUGCAUCCUUUUGA\")\n\n def test_transcribe(self):\n r = self.mutable_s.transcribe()\n self.assertEqual(self.mutable_s, \"TCAAAAGGATGCATCATG\")\n self.assertIsInstance(r, Seq.MutableSeq)\n self.assertEqual(r, \"UCAAAAGGAUGCAUCAUG\")\n r = self.mutable_s.transcribe(inplace=True)\n self.assertEqual(self.mutable_s, \"UCAAAAGGAUGCAUCAUG\")\n self.assertIsInstance(r, Seq.MutableSeq)\n self.assertEqual(r, \"UCAAAAGGAUGCAUCAUG\")\n d = self.mutable_s.back_transcribe()\n self.assertEqual(self.mutable_s, \"UCAAAAGGAUGCAUCAUG\")\n self.assertIsInstance(d, Seq.MutableSeq)\n self.assertEqual(d, \"TCAAAAGGATGCATCATG\")\n d = self.mutable_s.back_transcribe(inplace=True)\n self.assertEqual(self.mutable_s, \"TCAAAAGGATGCATCATG\")\n self.assertIsInstance(d, Seq.MutableSeq)\n self.assertEqual(d, \"TCAAAAGGATGCATCATG\")\n\n def test_complement_mixed_aphabets(self):\n # new approach\n seq = Seq.MutableSeq(\"AUGaaaCTG\")\n seq.complement_rna(inplace=True)\n self.assertEqual(\"UACuuuGAC\", seq)\n # old approach\n seq = Seq.MutableSeq(\"AUGaaaCTG\")\n with self.assertWarns(BiopythonDeprecationWarning):\n with self.assertRaises(ValueError):\n seq.complement()\n\n def test_complement_rna_string(self):\n # new approach\n seq = Seq.MutableSeq(\"AUGaaaCUG\")\n seq.complement_rna(inplace=True)\n self.assertEqual(\"UACuuuGAC\", seq)\n # old approach\n seq = Seq.MutableSeq(\"AUGaaaCUG\")\n with self.assertWarns(BiopythonDeprecationWarning):\n seq.complement()\n self.assertEqual(\"UACuuuGAC\", seq)\n\n def test_complement_dna_string(self):\n # new approach\n seq = Seq.MutableSeq(\"ATGaaaCTG\")\n 
seq.complement(inplace=True)\n self.assertEqual(\"TACtttGAC\", seq)\n # old approach\n seq = Seq.MutableSeq(\"ATGaaaCTG\")\n with self.assertWarns(BiopythonDeprecationWarning):\n seq.complement()\n self.assertEqual(\"TACtttGAC\", seq)\n\n def test_reverse_complement(self):\n # new approach\n self.mutable_s.reverse_complement(inplace=True)\n self.assertEqual(\"CATGATGCATCCTTTTGA\", self.mutable_s)\n\n def test_reverse_complement_old(self):\n # old approach\n with self.assertWarns(BiopythonDeprecationWarning):\n self.mutable_s.reverse_complement()\n self.assertEqual(\"CATGATGCATCCTTTTGA\", self.mutable_s)\n\n def test_extend_method(self):\n self.mutable_s.extend(\"GAT\")\n self.assertEqual(Seq.MutableSeq(\"TCAAAAGGATGCATCATGGAT\"), self.mutable_s)\n\n def test_extend_with_mutable_seq(self):\n self.mutable_s.extend(Seq.MutableSeq(\"TTT\"))\n self.assertEqual(Seq.MutableSeq(\"TCAAAAGGATGCATCATGTTT\"), self.mutable_s)\n\n def test_delete_stride_slice(self):\n del self.mutable_s[4 : 6 - 1]\n self.assertEqual(Seq.MutableSeq(\"TCAAAGGATGCATCATG\"), self.mutable_s)\n\n def test_extract_third_nucleotide(self):\n \"\"\"Test extracting every third nucleotide (slicing with stride 3).\"\"\"\n self.assertEqual(Seq.MutableSeq(\"TAGTAA\"), self.mutable_s[0::3])\n self.assertEqual(Seq.MutableSeq(\"CAGGTT\"), self.mutable_s[1::3])\n self.assertEqual(Seq.MutableSeq(\"AAACCG\"), self.mutable_s[2::3])\n\n def test_set_wobble_codon_to_n(self):\n \"\"\"Test setting wobble codon to N (set slice with stride 3).\"\"\"\n self.mutable_s[2::3] = \"N\" * len(self.mutable_s[2::3])\n self.assertEqual(Seq.MutableSeq(\"TCNAANGGNTGNATNATN\"), self.mutable_s)\n if numpy is not None:\n start, step = numpy.array([2, 3]) # numpy integers\n self.mutable_s[start::step] = \"X\" * len(self.mutable_s[2::3])\n self.assertEqual(Seq.MutableSeq(\"TCXAAXGGXTGXATXATX\"), self.mutable_s)\n\n\nclass TestUnknownSeq(unittest.TestCase):\n def setUp(self):\n warnings.simplefilter(\"ignore\", BiopythonDeprecationWarning)\n self.s = Seq.UnknownSeq(6)\n self.u = Seq.Seq(None, length=6)\n\n def tearDown(self):\n warnings.simplefilter(\"default\", BiopythonDeprecationWarning)\n\n def test_unknownseq_construction(self):\n self.assertEqual(\"??????\", Seq.UnknownSeq(6))\n self.assertEqual(\"NNNNNN\", Seq.UnknownSeq(6, character=\"N\"))\n self.assertEqual(\"XXXXXX\", Seq.UnknownSeq(6, character=\"X\"))\n self.assertEqual(\"??????\", Seq.UnknownSeq(6, character=\"?\"))\n with self.assertRaises(ValueError):\n \"??????\" == self.u\n with self.assertRaises(ValueError):\n self.u == \"??????\"\n\n with self.assertRaises(ValueError):\n Seq.UnknownSeq(-10)\n\n with self.assertRaises(ValueError):\n Seq.Seq(None, length=-10)\n\n with self.assertRaises(ValueError):\n Seq.UnknownSeq(6, character=\"??\")\n\n def test_length(self):\n self.assertEqual(6, len(self.s))\n self.assertEqual(6, len(self.u))\n\n def test_repr(self):\n self.assertEqual(\"UnknownSeq(6, character='?')\", repr(self.s))\n self.assertEqual(\"Seq(None, length=6)\", repr(self.u))\n\n def test_add_method(self):\n seq1 = Seq.UnknownSeq(3, character=\"N\")\n self.assertEqual(\"??????NNN\", self.s + seq1)\n\n seq2 = Seq.UnknownSeq(3, character=\"N\")\n self.assertEqual(\"NNNNNN\", seq1 + seq2)\n\n def test_getitem_method(self):\n self.assertEqual(\"\", self.s[-1:-1])\n self.assertEqual(\"?\", self.s[1])\n self.assertEqual(\"?\", self.s[5:])\n self.assertEqual(\"?\", self.s[:1])\n self.assertEqual(\"??\", self.s[1:3])\n self.assertEqual(\"???\", self.s[1:6:2])\n self.assertEqual(\"????\", self.s[1:-1])\n 
with self.assertRaises(ValueError):\n self.s[1:6:0]\n with self.assertRaises(ValueError):\n self.u[1:6:0]\n\n def test_count(self):\n self.assertEqual(6, self.s.count(\"?\"))\n self.assertEqual(3, self.s.count(\"??\"))\n self.assertEqual(0, Seq.UnknownSeq(6, character=\"N\").count(\"?\"))\n self.assertEqual(0, Seq.UnknownSeq(6, character=\"N\").count(\"??\"))\n self.assertEqual(4, Seq.UnknownSeq(6, character=\"?\").count(\"?\", start=2))\n self.assertEqual(2, Seq.UnknownSeq(6, character=\"?\").count(\"??\", start=2))\n self.assertRaises(ValueError, self.u.count, \"?\")\n\n def test_complement(self):\n self.s.complement()\n self.assertEqual(\"??????\", self.s)\n t = self.u.complement()\n self.assertEqual(len(t), 6)\n self.assertRaises(ValueError, str, t)\n\n def test_reverse_complement(self):\n self.s.reverse_complement()\n self.assertEqual(\"??????\", self.s)\n t = self.u.reverse_complement()\n self.assertEqual(len(t), 6)\n self.assertRaises(ValueError, str, t)\n\n def test_transcribe(self):\n self.assertEqual(\"??????\", self.s.transcribe())\n t = self.u.transcribe()\n self.assertEqual(len(t), 6)\n self.assertRaises(ValueError, str, t)\n\n def test_back_transcribe(self):\n self.assertEqual(\"??????\", self.s.back_transcribe())\n t = self.u.back_transcribe()\n self.assertEqual(len(t), 6)\n self.assertRaises(ValueError, str, t)\n\n def test_upper(self):\n seq = Seq.UnknownSeq(6, character=\"N\")\n self.assertEqual(\"NNNNNN\", seq.upper())\n self.assertEqual(\"Seq(None, length=6)\", repr(self.u.upper()))\n\n def test_lower(self):\n seq = Seq.UnknownSeq(6, character=\"N\")\n self.assertEqual(\"nnnnnn\", seq.lower())\n self.assertEqual(\"Seq(None, length=6)\", repr(self.u.lower()))\n\n def test_translation(self):\n self.assertEqual(\"XX\", self.s.translate())\n t = self.u.translate()\n self.assertEqual(len(t), 2)\n self.assertRaises(ValueError, str, t)\n\n def test_ungap(self):\n seq = Seq.UnknownSeq(7, character=\"N\")\n self.assertEqual(\"NNNNNNN\", seq.ungap(\"-\"))\n\n seq = Seq.UnknownSeq(20, character=\"-\")\n self.assertEqual(\"\", seq.ungap(\"-\"))\n\n\nclass TestAmbiguousComplements(unittest.TestCase):\n def test_ambiguous_values(self):\n \"\"\"Test that other tests do not introduce characters to our values.\"\"\"\n self.assertNotIn(\"-\", ambiguous_dna_values)\n self.assertNotIn(\"?\", ambiguous_dna_values)\n\n\nclass TestComplement(unittest.TestCase):\n def test_complement_ambiguous_dna_values(self):\n for ambig_char, values in sorted(ambiguous_dna_values.items()):\n compl_values = Seq.Seq(values).complement()\n ambig_values = ambiguous_dna_values[ambiguous_dna_complement[ambig_char]]\n self.assertCountEqual(compl_values, ambig_values)\n\n def test_complement_ambiguous_rna_values(self):\n for ambig_char, values in sorted(ambiguous_rna_values.items()):\n # Will default to DNA if neither T nor U found...\n if \"u\" in values or \"U\" in values:\n compl_values = Seq.Seq(values).complement_rna().transcribe()\n else:\n compl_values = Seq.Seq(values).complement().transcribe()\n ambig_values = ambiguous_rna_values[ambiguous_rna_complement[ambig_char]]\n self.assertCountEqual(compl_values, ambig_values)\n\n def test_complement_incompatible_letters(self):\n seq = Seq.Seq(\"CAGGTU\")\n # new approach\n dna = seq.complement(inplace=False) # TODO: remove inplace=False\n self.assertEqual(\"GTCCAA\", dna)\n rna = seq.complement_rna()\n self.assertEqual(\"GUCCAA\", rna)\n # old approach\n with self.assertWarns(BiopythonDeprecationWarning):\n with self.assertRaises(ValueError):\n 
seq.complement()\n\n def test_complement_of_mixed_dna_rna(self):\n seq = \"AUGAAACTG\" # U and T\n self.assertRaises(ValueError, Seq.complement, seq)\n\n def test_complement_of_rna(self):\n seq = \"AUGAAACUG\"\n # new approach\n rna = Seq.complement_rna(seq)\n self.assertEqual(\"UACUUUGAC\", rna)\n # old approach\n with self.assertWarns(BiopythonDeprecationWarning):\n rna = Seq.complement(seq)\n self.assertEqual(\"UACUUUGAC\", rna)\n\n def test_complement_of_dna(self):\n seq = \"ATGAAACTG\"\n self.assertEqual(\"TACTTTGAC\", Seq.complement(seq))\n\n def test_immutable(self):\n from Bio.SeqRecord import SeqRecord\n\n r = SeqRecord(Seq.Seq(\"ACGT\"))\n with self.assertRaises(TypeError) as cm:\n Seq.complement(r, inplace=True)\n self.assertEqual(str(cm.exception), \"SeqRecords are immutable\")\n with self.assertRaises(TypeError) as cm:\n Seq.complement(\"ACGT\", inplace=True)\n self.assertEqual(str(cm.exception), \"strings are immutable\")\n with self.assertRaises(TypeError) as cm:\n Seq.complement_rna(r, inplace=True)\n self.assertEqual(str(cm.exception), \"SeqRecords are immutable\")\n with self.assertRaises(TypeError) as cm:\n Seq.complement_rna(\"ACGT\", inplace=True)\n self.assertEqual(str(cm.exception), \"strings are immutable\")\n\n\nclass TestReverseComplement(unittest.TestCase):\n def test_reverse_complement(self):\n test_seqs_copy = copy.copy(test_seqs)\n test_seqs_copy.pop(13)\n\n for nucleotide_seq in test_seqs_copy:\n if not isinstance(nucleotide_seq, Seq.Seq):\n continue\n if \"u\" in nucleotide_seq or \"U\" in nucleotide_seq:\n expected = Seq.reverse_complement_rna(nucleotide_seq)\n self.assertEqual(\n repr(expected), repr(nucleotide_seq.reverse_complement_rna())\n )\n self.assertEqual(\n repr(expected[::-1]), repr(nucleotide_seq.complement_rna())\n )\n self.assertEqual(\n nucleotide_seq.complement_rna(),\n Seq.reverse_complement_rna(nucleotide_seq)[::-1],\n )\n self.assertEqual(\n nucleotide_seq.reverse_complement_rna(),\n Seq.reverse_complement_rna(nucleotide_seq),\n )\n else:\n expected = Seq.reverse_complement(nucleotide_seq)\n self.assertEqual(\n repr(expected), repr(nucleotide_seq.reverse_complement())\n )\n self.assertEqual(\n repr(expected[::-1]), repr(nucleotide_seq.complement())\n )\n self.assertEqual(\n nucleotide_seq.complement(),\n Seq.reverse_complement(nucleotide_seq)[::-1],\n )\n self.assertEqual(\n nucleotide_seq.reverse_complement(),\n Seq.reverse_complement(nucleotide_seq),\n )\n\n def test_reverse_complement_of_mixed_dna_rna(self):\n seq = \"AUGAAACTG\" # U and T\n self.assertRaises(ValueError, Seq.reverse_complement, seq)\n\n def test_reverse_complement_of_rna(self):\n # old approach\n seq = \"AUGAAACUG\"\n with self.assertWarns(BiopythonDeprecationWarning):\n rna = Seq.reverse_complement(seq)\n self.assertEqual(\"CAGUUUCAU\", rna)\n # new approach\n dna = Seq.reverse_complement(seq, inplace=False) # TODO: remove inplace=False\n self.assertEqual(\"CAGTTTCAT\", dna)\n\n def test_reverse_complement_of_dna(self):\n seq = \"ATGAAACTG\"\n self.assertEqual(\"CAGTTTCAT\", Seq.reverse_complement(seq))\n\n def test_immutable(self):\n from Bio.SeqRecord import SeqRecord\n\n r = SeqRecord(Seq.Seq(\"ACGT\"))\n with self.assertRaises(TypeError) as cm:\n Seq.reverse_complement(r, inplace=True)\n self.assertEqual(str(cm.exception), \"SeqRecords are immutable\")\n with self.assertRaises(TypeError) as cm:\n Seq.reverse_complement(\"ACGT\", inplace=True)\n self.assertEqual(str(cm.exception), \"strings are immutable\")\n with self.assertRaises(TypeError) as cm:\n 
Seq.reverse_complement_rna(r, inplace=True)\n self.assertEqual(str(cm.exception), \"SeqRecords are immutable\")\n with self.assertRaises(TypeError) as cm:\n Seq.reverse_complement_rna(\"ACGT\", inplace=True)\n self.assertEqual(str(cm.exception), \"strings are immutable\")\n\n\nclass TestDoubleReverseComplement(unittest.TestCase):\n def test_reverse_complements(self):\n \"\"\"Test double reverse complement preserves the sequence.\"\"\"\n sorted_amb_rna = sorted(ambiguous_rna_values)\n sorted_amb_dna = sorted(ambiguous_dna_values)\n for sequence in [\n Seq.Seq(\"\".join(sorted_amb_dna)),\n Seq.Seq(\"\".join(sorted_amb_dna).replace(\"X\", \"\")),\n Seq.Seq(\"AWGAARCKG\"), # Note no U or T\n ]:\n reversed_sequence = sequence.reverse_complement()\n self.assertEqual(sequence, reversed_sequence.reverse_complement())\n for sequence in [\n Seq.Seq(\"\".join(sorted_amb_rna)),\n Seq.Seq(\"\".join(sorted_amb_rna).replace(\"X\", \"\")),\n Seq.Seq(\"AWGAARCKG\"), # Note no U or T\n ]:\n reversed_sequence = sequence.reverse_complement_rna()\n self.assertEqual(sequence, reversed_sequence.reverse_complement_rna())\n\n\nclass TestTranscription(unittest.TestCase):\n def test_transcription_dna_into_rna(self):\n for nucleotide_seq in test_seqs:\n expected = Seq.transcribe(nucleotide_seq)\n self.assertEqual(\n str(nucleotide_seq).replace(\"t\", \"u\").replace(\"T\", \"U\"), expected\n )\n\n def test_transcription_dna_string_into_rna(self):\n seq = \"ATGAAACTG\"\n self.assertEqual(\"AUGAAACUG\", Seq.transcribe(seq))\n\n def test_seq_object_transcription_method(self):\n for nucleotide_seq in test_seqs:\n if isinstance(nucleotide_seq, Seq.Seq):\n self.assertEqual(\n repr(Seq.transcribe(nucleotide_seq)),\n repr(nucleotide_seq.transcribe()),\n )\n\n def test_back_transcribe_rna_into_dna(self):\n for nucleotide_seq in test_seqs:\n expected = Seq.back_transcribe(nucleotide_seq)\n self.assertEqual(\n str(nucleotide_seq).replace(\"u\", \"t\").replace(\"U\", \"T\"), expected\n )\n\n def test_back_transcribe_rna_string_into_dna(self):\n seq = \"AUGAAACUG\"\n self.assertEqual(\"ATGAAACTG\", Seq.back_transcribe(seq))\n\n def test_seq_object_back_transcription_method(self):\n for nucleotide_seq in test_seqs:\n if isinstance(nucleotide_seq, Seq.Seq):\n expected = Seq.back_transcribe(nucleotide_seq)\n self.assertEqual(repr(nucleotide_seq.back_transcribe()), repr(expected))\n\n\nclass TestTranslating(unittest.TestCase):\n def setUp(self):\n self.test_seqs = [\n Seq.Seq(\"TCAAAAGGATGCATCATG\"),\n Seq.Seq(\"ATGAAACTG\"),\n Seq.Seq(\"ATGAARCTG\"),\n Seq.Seq(\"AWGAARCKG\"), # Note no U or T\n Seq.Seq(\"\".join(ambiguous_rna_values)),\n Seq.Seq(\"\".join(ambiguous_dna_values)),\n Seq.Seq(\"AUGAAACUG\"),\n Seq.Seq(\"ATGAAACTGWN\"),\n Seq.Seq(\"AUGAAACUGWN\"),\n Seq.MutableSeq(\"ATGAAACTG\"),\n Seq.MutableSeq(\"AUGaaaCUG\"),\n ]\n\n def test_translation(self):\n for nucleotide_seq in self.test_seqs:\n nucleotide_seq = nucleotide_seq[: 3 * (len(nucleotide_seq) // 3)]\n if \"X\" not in nucleotide_seq:\n expected = Seq.translate(nucleotide_seq)\n self.assertEqual(expected, nucleotide_seq.translate())\n\n def test_gapped_seq_with_gap_char_given(self):\n seq = Seq.Seq(\"ATG---AAACTG\")\n self.assertEqual(\"M-KL\", seq.translate(gap=\"-\"))\n self.assertRaises(TranslationError, seq.translate, gap=\"~\")\n\n seq = Seq.Seq(\"GTG---GCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG\")\n self.assertEqual(\"V-AIVMGR*KGAR*\", seq.translate(gap=\"-\"))\n self.assertRaises(TranslationError, seq.translate, gap=None)\n\n seq = Seq.Seq(\"ATG~~~AAACTG\")\n 
self.assertRaises(TranslationError, seq.translate, gap=\"-\")\n\n seq = Seq.Seq(\"ATG---AAACTGTAG\")\n self.assertEqual(\"M-KL*\", seq.translate(gap=\"-\"))\n self.assertEqual(\"M-KL@\", seq.translate(gap=\"-\", stop_symbol=\"@\"))\n self.assertRaises(TranslationError, seq.translate, gap=\"~\")\n\n seq = Seq.Seq(\"ATG~~~AAACTGTAG\")\n self.assertRaises(TranslationError, seq.translate, gap=\"-\")\n\n def test_gapped_seq_no_gap_char_given(self):\n seq = Seq.Seq(\"ATG---AAACTG\")\n self.assertRaises(TranslationError, seq.translate, gap=None)\n\n def test_translation_wrong_type(self):\n \"\"\"Test translation table cannot be CodonTable.\"\"\"\n seq = Seq.Seq(\"ATCGTA\")\n with self.assertRaises(ValueError):\n seq.translate(table=ambiguous_dna_complement)\n\n def test_translation_of_string(self):\n seq = \"GTGGCCATTGTAATGGGCCGC\"\n self.assertEqual(\"VAIVMGR\", Seq.translate(seq))\n\n def test_translation_of_gapped_string_with_gap_char_given(self):\n seq = \"GTG---GCCATTGTAATGGGCCGC\"\n expected = \"V-AIVMGR\"\n self.assertEqual(expected, Seq.translate(seq, gap=\"-\"))\n self.assertRaises(TypeError, Seq.translate, seq, gap=[])\n self.assertRaises(ValueError, Seq.translate, seq, gap=\"-*\")\n\n def test_translation_of_gapped_string_no_gap_char_given(self):\n seq = \"GTG---GCCATTGTAATGGGCCGC\"\n self.assertRaises(TranslationError, Seq.translate, seq)\n\n def test_translation_to_stop(self):\n for nucleotide_seq in self.test_seqs:\n nucleotide_seq = nucleotide_seq[: 3 * (len(nucleotide_seq) // 3)]\n if \"X\" not in nucleotide_seq:\n short = Seq.translate(nucleotide_seq, to_stop=True)\n self.assertEqual(short, Seq.translate(nucleotide_seq).split(\"*\")[0])\n\n seq = \"GTGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG\"\n self.assertEqual(\"VAIVMGRWKGAR\", Seq.translate(seq, table=2, to_stop=True))\n\n def test_translation_on_proteins(self):\n \"\"\"Check translation fails on a protein.\"\"\"\n for s in protein_seqs:\n if len(s) % 3 != 0:\n with self.assertWarns(BiopythonWarning):\n with self.assertRaises(TranslationError):\n Seq.translate(s)\n\n with self.assertWarns(BiopythonWarning):\n with self.assertRaises(TranslationError):\n s.translate()\n else:\n with self.assertRaises(TranslationError):\n Seq.translate(s)\n\n with self.assertRaises(TranslationError):\n s.translate()\n\n def test_translation_of_invalid_codon(self):\n for codon in [\"TA?\", \"N-N\", \"AC_\", \"Ac_\"]:\n with self.assertRaises(TranslationError):\n Seq.translate(codon)\n\n def test_translation_of_glutamine(self):\n for codon in [\"SAR\", \"SAG\", \"SAA\"]:\n self.assertEqual(\"Z\", Seq.translate(codon))\n\n def test_translation_of_asparagine(self):\n for codon in [\"RAY\", \"RAT\", \"RAC\"]:\n self.assertEqual(\"B\", Seq.translate(codon))\n\n def test_translation_of_leucine(self):\n for codon in [\"WTA\", \"MTY\", \"MTT\", \"MTW\", \"MTM\", \"MTH\", \"MTA\", \"MTC\", \"HTA\"]:\n self.assertEqual(\"J\", Seq.translate(codon))\n\n def test_translation_with_bad_table_argument(self):\n table = {}\n with self.assertRaises(ValueError) as cm:\n Seq.translate(\"GTGGCCATTGTAATGGGCCGC\", table=table)\n self.assertEqual(str(cm.exception), \"Bad table argument\")\n table = b\"0x\"\n with self.assertRaises(TypeError) as cm:\n Seq.translate(\"GTGGCCATTGTAATGGGCCGC\", table=table)\n self.assertEqual(str(cm.exception), \"table argument must be integer or string\")\n\n def test_translation_with_codon_table_as_table_argument(self):\n table = standard_dna_table\n self.assertEqual(\"VAIVMGR\", Seq.translate(\"GTGGCCATTGTAATGGGCCGC\", table=table))\n\n def 
test_translation_incomplete_codon(self):\n with self.assertWarns(BiopythonWarning):\n Seq.translate(\"GTGGCCATTGTAATGGGCCG\")\n\n def test_translation_extra_stop_codon(self):\n seq = \"GTGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAGTAG\"\n with self.assertRaises(TranslationError):\n Seq.translate(seq, table=2, cds=True)\n\n def test_translation_using_cds(self):\n seq = \"GTGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG\"\n self.assertEqual(\"MAIVMGRWKGAR\", Seq.translate(seq, table=2, cds=True))\n\n seq = \"GTGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCG\" # not multiple of three\n with self.assertRaises(TranslationError):\n Seq.translate(seq, table=2, cds=True)\n\n seq = \"GTGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGA\" # no stop codon\n with self.assertRaises(TranslationError):\n Seq.translate(seq, table=2, cds=True)\n\n seq = \"GCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG\" # no start codon\n with self.assertRaises(TranslationError):\n Seq.translate(seq, table=2, cds=True)\n\n def test_translation_using_tables_with_ambiguous_stop_codons(self):\n \"\"\"Check for error and warning messages.\n\n Here, 'ambiguous stop codons' means codons of unambiguous sequence\n but with a context sensitive encoding as STOP or an amino acid.\n Thus, these codons appear within the codon table in the forward\n table as well as in the list of stop codons.\n \"\"\"\n seq = \"ATGGGCTGA\"\n with self.assertRaises(ValueError):\n Seq.translate(seq, table=28, to_stop=True)\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n Seq.translate(seq, table=28)\n message = str(w[-1].message)\n self.assertTrue(message.startswith(\"This table contains\"))\n self.assertTrue(message.endswith(\"be translated as amino acid.\"))\n\n\nclass TestStopCodons(unittest.TestCase):\n def setUp(self):\n self.misc_stops = \"TAATAGTGAAGAAGG\"\n\n def test_stops(self):\n for nucleotide_seq in [self.misc_stops, Seq.Seq(self.misc_stops)]:\n self.assertEqual(\"***RR\", Seq.translate(nucleotide_seq))\n self.assertEqual(\"***RR\", Seq.translate(nucleotide_seq, table=1))\n self.assertEqual(\"***RR\", Seq.translate(nucleotide_seq, table=\"SGC0\"))\n self.assertEqual(\"**W**\", Seq.translate(nucleotide_seq, table=2))\n self.assertEqual(\n \"**WRR\", Seq.translate(nucleotide_seq, table=\"Yeast Mitochondrial\")\n )\n self.assertEqual(\"**WSS\", Seq.translate(nucleotide_seq, table=5))\n self.assertEqual(\"**WSS\", Seq.translate(nucleotide_seq, table=9))\n self.assertEqual(\n \"**CRR\", Seq.translate(nucleotide_seq, table=\"Euplotid Nuclear\")\n )\n self.assertEqual(\"***RR\", Seq.translate(nucleotide_seq, table=11))\n self.assertEqual(\"***RR\", Seq.translate(nucleotide_seq, table=\"Bacterial\"))\n\n def test_translation_of_stops(self):\n self.assertEqual(Seq.translate(\"TAT\"), \"Y\")\n self.assertEqual(Seq.translate(\"TAR\"), \"*\")\n self.assertEqual(Seq.translate(\"TAN\"), \"X\")\n self.assertEqual(Seq.translate(\"NNN\"), \"X\")\n\n self.assertEqual(Seq.translate(\"TAt\"), \"Y\")\n self.assertEqual(Seq.translate(\"TaR\"), \"*\")\n self.assertEqual(Seq.translate(\"TaN\"), \"X\")\n self.assertEqual(Seq.translate(\"nnN\"), \"X\")\n\n self.assertEqual(Seq.translate(\"tat\"), \"Y\")\n self.assertEqual(Seq.translate(\"tar\"), \"*\")\n self.assertEqual(Seq.translate(\"tan\"), \"X\")\n self.assertEqual(Seq.translate(\"nnn\"), \"X\")\n\n\nclass TestAttributes(unittest.TestCase):\n def test_seq(self):\n s = Seq.Seq(\"ACGT\")\n with self.assertRaises(AttributeError):\n s.dog\n s.dog = \"woof\"\n self.assertIn(\"dog\", dir(s))\n self.assertEqual(s.dog, \"woof\")\n del 
s.dog\n with self.assertRaises(AttributeError):\n s.dog\n self.assertNotIn(\"dog\", dir(s))\n with self.assertRaises(AttributeError):\n s.cat\n s.dog = \"woof\"\n s.cat = \"meow\"\n self.assertIn(\"dog\", dir(s))\n self.assertIn(\"cat\", dir(s))\n self.assertEqual(s.dog, \"woof\")\n self.assertEqual(s.cat, \"meow\")\n del s.dog\n with self.assertRaises(AttributeError):\n s.dog\n self.assertNotIn(\"dog\", dir(s))\n self.assertIn(\"cat\", dir(s))\n self.assertEqual(s.cat, \"meow\")\n del s.cat\n with self.assertRaises(AttributeError):\n s.cat\n self.assertNotIn(\"cat\", dir(s))\n s.dog = \"woof\"\n s.dog = \"bark\"\n self.assertIn(\"dog\", dir(s))\n self.assertEqual(s.dog, \"bark\")\n del s.dog\n with self.assertRaises(AttributeError):\n s.dog\n self.assertNotIn(\"dog\", dir(s))\n\n def test_mutable_seq(self):\n s = Seq.MutableSeq(\"ACGT\")\n with self.assertRaises(AttributeError):\n s.dog\n s.dog = \"woof\"\n self.assertIn(\"dog\", dir(s))\n self.assertEqual(s.dog, \"woof\")\n del s.dog\n with self.assertRaises(AttributeError):\n s.dog\n self.assertNotIn(\"dog\", dir(s))\n with self.assertRaises(AttributeError):\n s.cat\n s.dog = \"woof\"\n s.cat = \"meow\"\n self.assertIn(\"dog\", dir(s))\n self.assertIn(\"cat\", dir(s))\n self.assertEqual(s.dog, \"woof\")\n self.assertEqual(s.cat, \"meow\")\n del s.dog\n with self.assertRaises(AttributeError):\n s.dog\n self.assertNotIn(\"dog\", dir(s))\n self.assertIn(\"cat\", dir(s))\n self.assertEqual(s.cat, \"meow\")\n del s.cat\n with self.assertRaises(AttributeError):\n s.cat\n self.assertNotIn(\"cat\", dir(s))\n s.dog = \"woof\"\n s.dog = \"bark\"\n self.assertIn(\"dog\", dir(s))\n self.assertEqual(s.dog, \"bark\")\n del s.dog\n with self.assertRaises(AttributeError):\n s.dog\n self.assertNotIn(\"dog\", dir(s))\n\n\nif __name__ == \"__main__\":\n runner = unittest.TextTestRunner(verbosity=2)\n unittest.main(testRunner=runner)\n" ]
[ [ "numpy.array", "numpy.intc" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
baregawi/haystack
[ "f70d35279b9b35002012890399b5f20d6d35df8e" ]
[ "test/test_document_store.py" ]
[ "import numpy as np\nimport pandas as pd\nimport pytest\nimport json\nimport responses\nfrom responses import matchers\nfrom unittest.mock import Mock\nfrom elasticsearch import Elasticsearch\nfrom elasticsearch.exceptions import RequestError\n\nfrom conftest import (\n deepset_cloud_fixture,\n get_document_store,\n MOCK_DC,\n DC_API_ENDPOINT,\n DC_API_KEY,\n DC_TEST_INDEX,\n SAMPLES_PATH,\n)\nfrom haystack.document_stores import WeaviateDocumentStore, DeepsetCloudDocumentStore\nfrom haystack.document_stores.base import BaseDocumentStore\nfrom haystack.errors import DuplicateDocumentError\nfrom haystack.schema import Document, Label, Answer, Span\nfrom haystack.document_stores.elasticsearch import ElasticsearchDocumentStore\nfrom haystack.document_stores.faiss import FAISSDocumentStore\nfrom haystack.nodes import EmbeddingRetriever\nfrom haystack.pipelines import DocumentSearchPipeline\n\n\nDOCUMENTS = [\n {\n \"meta\": {\"name\": \"name_1\", \"year\": \"2020\", \"month\": \"01\"},\n \"content\": \"text_1\",\n \"embedding\": np.random.rand(768).astype(np.float32),\n },\n {\n \"meta\": {\"name\": \"name_2\", \"year\": \"2020\", \"month\": \"02\"},\n \"content\": \"text_2\",\n \"embedding\": np.random.rand(768).astype(np.float32),\n },\n {\n \"meta\": {\"name\": \"name_3\", \"year\": \"2020\", \"month\": \"03\"},\n \"content\": \"text_3\",\n \"embedding\": np.random.rand(768).astype(np.float64),\n },\n {\n \"meta\": {\"name\": \"name_4\", \"year\": \"2021\", \"month\": \"01\"},\n \"content\": \"text_4\",\n \"embedding\": np.random.rand(768).astype(np.float32),\n },\n {\n \"meta\": {\"name\": \"name_5\", \"year\": \"2021\", \"month\": \"02\"},\n \"content\": \"text_5\",\n \"embedding\": np.random.rand(768).astype(np.float32),\n },\n {\n \"meta\": {\"name\": \"name_6\", \"year\": \"2021\", \"month\": \"03\"},\n \"content\": \"text_6\",\n \"embedding\": np.random.rand(768).astype(np.float64),\n },\n]\n\n\[email protected]\ndef test_init_elastic_client():\n # defaults\n _ = ElasticsearchDocumentStore()\n\n # list of hosts + single port\n _ = ElasticsearchDocumentStore(host=[\"localhost\", \"127.0.0.1\"], port=9200)\n\n # list of hosts + list of ports (wrong)\n with pytest.raises(Exception):\n _ = ElasticsearchDocumentStore(host=[\"localhost\", \"127.0.0.1\"], port=[9200])\n\n # list of hosts + list\n _ = ElasticsearchDocumentStore(host=[\"localhost\", \"127.0.0.1\"], port=[9200, 9200])\n\n # only api_key\n with pytest.raises(Exception):\n _ = ElasticsearchDocumentStore(host=[\"localhost\"], port=[9200], api_key=\"test\")\n\n # api_key + id\n _ = ElasticsearchDocumentStore(host=[\"localhost\"], port=[9200], api_key=\"test\", api_key_id=\"test\")\n\n\[email protected]\ndef test_init_elastic_doc_store_with_index_recreation():\n index_name = \"test_index_recreation\"\n label_index_name = \"test_index_recreation_labels\"\n\n document_store = ElasticsearchDocumentStore(index=index_name, label_index=label_index_name)\n documents = [Document(content=\"Doc1\")]\n labels = [\n Label(\n query=\"query\",\n document=documents[0],\n is_correct_document=True,\n is_correct_answer=False,\n origin=\"user-feedback\",\n answer=None,\n )\n ]\n document_store.write_documents(documents, index=index_name)\n document_store.write_labels(labels, index=label_index_name)\n\n document_store = ElasticsearchDocumentStore(index=index_name, label_index=label_index_name, recreate_index=True)\n docs = document_store.get_all_documents(index=index_name)\n labels = document_store.get_all_labels(index=label_index_name)\n\n assert 
len(docs) == 0\n assert len(labels) == 0\n\n\ndef test_write_with_duplicate_doc_ids(document_store):\n duplicate_documents = [\n Document(content=\"Doc1\", id_hash_keys=[\"content\"]),\n Document(content=\"Doc1\", id_hash_keys=[\"content\"]),\n ]\n document_store.write_documents(duplicate_documents, duplicate_documents=\"skip\")\n assert len(document_store.get_all_documents()) == 1\n with pytest.raises(Exception):\n document_store.write_documents(duplicate_documents, duplicate_documents=\"fail\")\n\n\[email protected](\"document_store\", [\"elasticsearch\", \"faiss\", \"memory\", \"milvus\", \"weaviate\"], indirect=True)\ndef test_write_with_duplicate_doc_ids_custom_index(document_store):\n duplicate_documents = [\n Document(content=\"Doc1\", id_hash_keys=[\"content\"]),\n Document(content=\"Doc1\", id_hash_keys=[\"content\"]),\n ]\n document_store.delete_documents(index=\"haystack_custom_test\")\n document_store.write_documents(duplicate_documents, index=\"haystack_custom_test\", duplicate_documents=\"skip\")\n assert len(document_store.get_all_documents(index=\"haystack_custom_test\")) == 1\n with pytest.raises(DuplicateDocumentError):\n document_store.write_documents(duplicate_documents, index=\"haystack_custom_test\", duplicate_documents=\"fail\")\n\n # Weaviate manipulates document objects in-place when writing them to an index.\n # It generates a uuid based on the provided id and the index name where the document is added to.\n # We need to get rid of these generated uuids for this test and therefore reset the document objects.\n # As a result, the documents will receive a fresh uuid based on their id_hash_keys and a different index name.\n if isinstance(document_store, WeaviateDocumentStore):\n duplicate_documents = [\n Document(content=\"Doc1\", id_hash_keys=[\"content\"]),\n Document(content=\"Doc1\", id_hash_keys=[\"content\"]),\n ]\n # writing to the default, empty index should still work\n document_store.write_documents(duplicate_documents, duplicate_documents=\"fail\")\n\n\ndef test_get_all_documents_without_filters(document_store_with_docs):\n documents = document_store_with_docs.get_all_documents()\n assert all(isinstance(d, Document) for d in documents)\n assert len(documents) == 3\n assert {d.meta[\"name\"] for d in documents} == {\"filename1\", \"filename2\", \"filename3\"}\n assert {d.meta[\"meta_field\"] for d in documents} == {\"test1\", \"test2\", \"test3\"}\n\n\ndef test_get_all_document_filter_duplicate_text_value(document_store):\n documents = [\n Document(content=\"Doc1\", meta={\"f1\": \"0\"}, id_hash_keys=[\"meta\"]),\n Document(content=\"Doc1\", meta={\"f1\": \"1\", \"meta_id\": \"0\"}, id_hash_keys=[\"meta\"]),\n Document(content=\"Doc2\", meta={\"f3\": \"0\"}, id_hash_keys=[\"meta\"]),\n ]\n document_store.write_documents(documents)\n documents = document_store.get_all_documents(filters={\"f1\": [\"1\"]})\n assert documents[0].content == \"Doc1\"\n assert len(documents) == 1\n assert {d.meta[\"meta_id\"] for d in documents} == {\"0\"}\n\n documents = document_store.get_all_documents(filters={\"f1\": [\"0\"]})\n assert documents[0].content == \"Doc1\"\n assert len(documents) == 1\n assert documents[0].meta.get(\"meta_id\") is None\n\n documents = document_store.get_all_documents(filters={\"f3\": [\"0\"]})\n assert documents[0].content == \"Doc2\"\n assert len(documents) == 1\n assert documents[0].meta.get(\"meta_id\") is None\n\n\ndef test_get_all_documents_with_correct_filters(document_store_with_docs):\n documents = 
document_store_with_docs.get_all_documents(filters={\"meta_field\": [\"test2\"]})\n assert len(documents) == 1\n assert documents[0].meta[\"name\"] == \"filename2\"\n\n documents = document_store_with_docs.get_all_documents(filters={\"meta_field\": [\"test1\", \"test3\"]})\n assert len(documents) == 2\n assert {d.meta[\"name\"] for d in documents} == {\"filename1\", \"filename3\"}\n assert {d.meta[\"meta_field\"] for d in documents} == {\"test1\", \"test3\"}\n\n\ndef test_get_all_documents_with_correct_filters_legacy_sqlite(test_docs_xs, tmp_path):\n document_store_with_docs = get_document_store(\"sql\", tmp_path)\n document_store_with_docs.write_documents(test_docs_xs)\n\n document_store_with_docs.use_windowed_query = False\n documents = document_store_with_docs.get_all_documents(filters={\"meta_field\": [\"test2\"]})\n assert len(documents) == 1\n assert documents[0].meta[\"name\"] == \"filename2\"\n\n documents = document_store_with_docs.get_all_documents(filters={\"meta_field\": [\"test1\", \"test3\"]})\n assert len(documents) == 2\n assert {d.meta[\"name\"] for d in documents} == {\"filename1\", \"filename3\"}\n assert {d.meta[\"meta_field\"] for d in documents} == {\"test1\", \"test3\"}\n\n\ndef test_get_all_documents_with_incorrect_filter_name(document_store_with_docs):\n documents = document_store_with_docs.get_all_documents(filters={\"incorrect_meta_field\": [\"test2\"]})\n assert len(documents) == 0\n\n\ndef test_get_all_documents_with_incorrect_filter_value(document_store_with_docs):\n documents = document_store_with_docs.get_all_documents(filters={\"meta_field\": [\"incorrect_value\"]})\n assert len(documents) == 0\n\n\ndef test_get_document_by_id(document_store_with_docs):\n documents = document_store_with_docs.get_all_documents()\n doc = document_store_with_docs.get_document_by_id(documents[0].id)\n assert doc.id == documents[0].id\n assert doc.content == documents[0].content\n\n\ndef test_get_documents_by_id(document_store):\n # generate more documents than the elasticsearch default query size limit of 10\n docs_to_generate = 15\n documents = [{\"content\": \"doc-\" + str(i)} for i in range(docs_to_generate)]\n doc_idx = \"green_fields\"\n document_store.write_documents(documents, index=doc_idx)\n\n all_docs = document_store.get_all_documents(index=doc_idx)\n all_ids = [doc.id for doc in all_docs]\n\n retrieved_by_id = document_store.get_documents_by_id(all_ids, index=doc_idx)\n retrieved_ids = [doc.id for doc in retrieved_by_id]\n\n # all documents in the index should be retrieved when passing all document ids in the index\n assert set(retrieved_ids) == set(all_ids)\n\n\ndef test_get_document_count(document_store):\n documents = [\n {\"content\": \"text1\", \"id\": \"1\", \"meta_field_for_count\": \"a\"},\n {\"content\": \"text2\", \"id\": \"2\", \"meta_field_for_count\": \"b\"},\n {\"content\": \"text3\", \"id\": \"3\", \"meta_field_for_count\": \"b\"},\n {\"content\": \"text4\", \"id\": \"4\", \"meta_field_for_count\": \"b\"},\n ]\n document_store.write_documents(documents)\n assert document_store.get_document_count() == 4\n assert document_store.get_document_count(filters={\"meta_field_for_count\": [\"a\"]}) == 1\n assert document_store.get_document_count(filters={\"meta_field_for_count\": [\"b\"]}) == 3\n\n\ndef test_get_all_documents_generator(document_store):\n documents = [\n {\"content\": \"text1\", \"id\": \"1\", \"meta_field_for_count\": \"a\"},\n {\"content\": \"text2\", \"id\": \"2\", \"meta_field_for_count\": \"b\"},\n {\"content\": \"text3\", \"id\": \"3\", 
\"meta_field_for_count\": \"b\"},\n {\"content\": \"text4\", \"id\": \"4\", \"meta_field_for_count\": \"b\"},\n {\"content\": \"text5\", \"id\": \"5\", \"meta_field_for_count\": \"b\"},\n ]\n\n document_store.write_documents(documents)\n assert len(list(document_store.get_all_documents_generator(batch_size=2))) == 5\n\n\[email protected](\"update_existing_documents\", [True, False])\ndef test_update_existing_documents(document_store, update_existing_documents):\n original_docs = [\n {\"content\": \"text1_orig\", \"id\": \"1\", \"meta_field_for_count\": \"a\"},\n ]\n\n updated_docs = [\n {\"content\": \"text1_new\", \"id\": \"1\", \"meta_field_for_count\": \"a\"},\n ]\n\n document_store.write_documents(original_docs)\n assert document_store.get_document_count() == 1\n\n if update_existing_documents:\n document_store.write_documents(updated_docs, duplicate_documents=\"overwrite\")\n else:\n with pytest.raises(Exception):\n document_store.write_documents(updated_docs, duplicate_documents=\"fail\")\n\n stored_docs = document_store.get_all_documents()\n assert len(stored_docs) == 1\n if update_existing_documents:\n assert stored_docs[0].content == updated_docs[0][\"content\"]\n else:\n assert stored_docs[0].content == original_docs[0][\"content\"]\n\n\ndef test_write_document_meta(document_store):\n documents = [\n {\"content\": \"dict_without_meta\", \"id\": \"1\"},\n {\"content\": \"dict_with_meta\", \"meta_field\": \"test2\", \"name\": \"filename2\", \"id\": \"2\"},\n Document(content=\"document_object_without_meta\", id=\"3\"),\n Document(content=\"document_object_with_meta\", meta={\"meta_field\": \"test4\", \"name\": \"filename3\"}, id=\"4\"),\n ]\n document_store.write_documents(documents)\n documents_in_store = document_store.get_all_documents()\n assert len(documents_in_store) == 4\n\n assert not document_store.get_document_by_id(\"1\").meta\n assert document_store.get_document_by_id(\"2\").meta[\"meta_field\"] == \"test2\"\n assert not document_store.get_document_by_id(\"3\").meta\n assert document_store.get_document_by_id(\"4\").meta[\"meta_field\"] == \"test4\"\n\n\ndef test_write_document_index(document_store):\n documents = [\n {\"content\": \"text1\", \"id\": \"1\"},\n {\"content\": \"text2\", \"id\": \"2\"},\n ]\n document_store.write_documents([documents[0]], index=\"haystack_test_one\")\n assert len(document_store.get_all_documents(index=\"haystack_test_one\")) == 1\n\n document_store.write_documents([documents[1]], index=\"haystack_test_two\")\n assert len(document_store.get_all_documents(index=\"haystack_test_two\")) == 1\n\n assert len(document_store.get_all_documents(index=\"haystack_test_one\")) == 1\n assert len(document_store.get_all_documents()) == 0\n\n\ndef test_document_with_embeddings(document_store):\n documents = [\n {\"content\": \"text1\", \"id\": \"1\", \"embedding\": np.random.rand(768).astype(np.float32)},\n {\"content\": \"text2\", \"id\": \"2\", \"embedding\": np.random.rand(768).astype(np.float64)},\n {\"content\": \"text3\", \"id\": \"3\", \"embedding\": np.random.rand(768).astype(np.float32).tolist()},\n {\"content\": \"text4\", \"id\": \"4\", \"embedding\": np.random.rand(768).astype(np.float32)},\n ]\n document_store.write_documents(documents, index=\"haystack_test_one\")\n assert len(document_store.get_all_documents(index=\"haystack_test_one\")) == 4\n\n if not isinstance(document_store, WeaviateDocumentStore):\n # weaviate is excluded because it would return dummy vectors instead of None\n documents_without_embedding = 
document_store.get_all_documents(\n index=\"haystack_test_one\", return_embedding=False\n )\n assert documents_without_embedding[0].embedding is None\n\n documents_with_embedding = document_store.get_all_documents(index=\"haystack_test_one\", return_embedding=True)\n assert isinstance(documents_with_embedding[0].embedding, (list, np.ndarray))\n\n\[email protected](\"retriever\", [\"embedding\"], indirect=True)\ndef test_update_embeddings(document_store, retriever):\n documents = []\n for i in range(6):\n documents.append({\"content\": f\"text_{i}\", \"id\": str(i), \"meta_field\": f\"value_{i}\"})\n documents.append({\"content\": \"text_0\", \"id\": \"6\", \"meta_field\": \"value_0\"})\n\n document_store.write_documents(documents, index=\"haystack_test_one\")\n document_store.update_embeddings(retriever, index=\"haystack_test_one\", batch_size=3)\n documents = document_store.get_all_documents(index=\"haystack_test_one\", return_embedding=True)\n assert len(documents) == 7\n for doc in documents:\n assert type(doc.embedding) is np.ndarray\n\n documents = document_store.get_all_documents(\n index=\"haystack_test_one\",\n filters={\"meta_field\": [\"value_0\"]},\n return_embedding=True,\n )\n assert len(documents) == 2\n for doc in documents:\n assert doc.meta[\"meta_field\"] == \"value_0\"\n np.testing.assert_array_almost_equal(documents[0].embedding, documents[1].embedding, decimal=4)\n\n documents = document_store.get_all_documents(\n index=\"haystack_test_one\",\n filters={\"meta_field\": [\"value_0\", \"value_5\"]},\n return_embedding=True,\n )\n documents_with_value_0 = [doc for doc in documents if doc.meta[\"meta_field\"] == \"value_0\"]\n documents_with_value_5 = [doc for doc in documents if doc.meta[\"meta_field\"] == \"value_5\"]\n np.testing.assert_raises(\n AssertionError,\n np.testing.assert_array_equal,\n documents_with_value_0[0].embedding,\n documents_with_value_5[0].embedding,\n )\n\n doc = {\n \"content\": \"text_7\",\n \"id\": \"7\",\n \"meta_field\": \"value_7\",\n \"embedding\": retriever.embed_queries(texts=[\"a random string\"])[0],\n }\n document_store.write_documents([doc], index=\"haystack_test_one\")\n\n documents = []\n for i in range(8, 11):\n documents.append({\"content\": f\"text_{i}\", \"id\": str(i), \"meta_field\": f\"value_{i}\"})\n document_store.write_documents(documents, index=\"haystack_test_one\")\n\n doc_before_update = document_store.get_all_documents(\n index=\"haystack_test_one\", filters={\"meta_field\": [\"value_7\"]}\n )[0]\n embedding_before_update = doc_before_update.embedding\n\n # test updating only documents without embeddings\n if not isinstance(document_store, WeaviateDocumentStore):\n # All the documents in Weaviate store have an embedding by default. 
\"update_existing_embeddings=False\" is not allowed\n document_store.update_embeddings(\n retriever, index=\"haystack_test_one\", batch_size=3, update_existing_embeddings=False\n )\n doc_after_update = document_store.get_all_documents(\n index=\"haystack_test_one\", filters={\"meta_field\": [\"value_7\"]}\n )[0]\n embedding_after_update = doc_after_update.embedding\n np.testing.assert_array_equal(embedding_before_update, embedding_after_update)\n\n # test updating with filters\n if isinstance(document_store, FAISSDocumentStore):\n with pytest.raises(Exception):\n document_store.update_embeddings(\n retriever, index=\"haystack_test_one\", update_existing_embeddings=True, filters={\"meta_field\": [\"value\"]}\n )\n else:\n document_store.update_embeddings(\n retriever, index=\"haystack_test_one\", batch_size=3, filters={\"meta_field\": [\"value_0\", \"value_1\"]}\n )\n doc_after_update = document_store.get_all_documents(\n index=\"haystack_test_one\", filters={\"meta_field\": [\"value_7\"]}\n )[0]\n embedding_after_update = doc_after_update.embedding\n np.testing.assert_array_equal(embedding_before_update, embedding_after_update)\n\n # test update all embeddings\n document_store.update_embeddings(\n retriever, index=\"haystack_test_one\", batch_size=3, update_existing_embeddings=True\n )\n assert document_store.get_embedding_count(index=\"haystack_test_one\") == 11\n doc_after_update = document_store.get_all_documents(index=\"haystack_test_one\", filters={\"meta_field\": [\"value_7\"]})[\n 0\n ]\n embedding_after_update = doc_after_update.embedding\n np.testing.assert_raises(\n AssertionError, np.testing.assert_array_equal, embedding_before_update, embedding_after_update\n )\n\n # test update embeddings for newly added docs\n documents = []\n for i in range(12, 15):\n documents.append({\"content\": f\"text_{i}\", \"id\": str(i), \"meta_field\": f\"value_{i}\"})\n document_store.write_documents(documents, index=\"haystack_test_one\")\n\n if not isinstance(document_store, WeaviateDocumentStore):\n # All the documents in Weaviate store have an embedding by default. 
\"update_existing_embeddings=False\" is not allowed\n document_store.update_embeddings(\n retriever, index=\"haystack_test_one\", batch_size=3, update_existing_embeddings=False\n )\n assert document_store.get_embedding_count(index=\"haystack_test_one\") == 14\n\n\[email protected](\"retriever\", [\"table_text_retriever\"], indirect=True)\[email protected]_dim(512)\ndef test_update_embeddings_table_text_retriever(document_store, retriever):\n documents = []\n for i in range(3):\n documents.append(\n {\"content\": f\"text_{i}\", \"id\": f\"pssg_{i}\", \"meta_field\": f\"value_text_{i}\", \"content_type\": \"text\"}\n )\n documents.append(\n {\n \"content\": pd.DataFrame(columns=[f\"col_{i}\", f\"col_{i+1}\"], data=[[f\"cell_{i}\", f\"cell_{i+1}\"]]),\n \"id\": f\"table_{i}\",\n f\"meta_field\": f\"value_table_{i}\",\n \"content_type\": \"table\",\n }\n )\n documents.append({\"content\": \"text_0\", \"id\": \"pssg_4\", \"meta_field\": \"value_text_0\", \"content_type\": \"text\"})\n documents.append(\n {\n \"content\": pd.DataFrame(columns=[\"col_0\", \"col_1\"], data=[[\"cell_0\", \"cell_1\"]]),\n \"id\": \"table_4\",\n \"meta_field\": \"value_table_0\",\n \"content_type\": \"table\",\n }\n )\n\n document_store.write_documents(documents, index=\"haystack_test_one\")\n document_store.update_embeddings(retriever, index=\"haystack_test_one\", batch_size=3)\n documents = document_store.get_all_documents(index=\"haystack_test_one\", return_embedding=True)\n assert len(documents) == 8\n for doc in documents:\n assert type(doc.embedding) is np.ndarray\n\n # Check if Documents with same content (text) get same embedding\n documents = document_store.get_all_documents(\n index=\"haystack_test_one\",\n filters={\"meta_field\": [\"value_text_0\"]},\n return_embedding=True,\n )\n assert len(documents) == 2\n for doc in documents:\n assert doc.meta[\"meta_field\"] == \"value_text_0\"\n np.testing.assert_array_almost_equal(documents[0].embedding, documents[1].embedding, decimal=4)\n\n # Check if Documents with same content (table) get same embedding\n documents = document_store.get_all_documents(\n index=\"haystack_test_one\",\n filters={\"meta_field\": [\"value_table_0\"]},\n return_embedding=True,\n )\n assert len(documents) == 2\n for doc in documents:\n assert doc.meta[\"meta_field\"] == \"value_table_0\"\n np.testing.assert_array_almost_equal(documents[0].embedding, documents[1].embedding, decimal=4)\n\n # Check if Documents wih different content (text) get different embedding\n documents = document_store.get_all_documents(\n index=\"haystack_test_one\",\n filters={\"meta_field\": [\"value_text_1\", \"value_text_2\"]},\n return_embedding=True,\n )\n np.testing.assert_raises(\n AssertionError, np.testing.assert_array_equal, documents[0].embedding, documents[1].embedding\n )\n\n # Check if Documents with different content (table) get different embeddings\n documents = document_store.get_all_documents(\n index=\"haystack_test_one\",\n filters={\"meta_field\": [\"value_table_1\", \"value_table_2\"]},\n return_embedding=True,\n )\n np.testing.assert_raises(\n AssertionError, np.testing.assert_array_equal, documents[0].embedding, documents[1].embedding\n )\n\n # Check if Documents with different content (table + text) get different embeddings\n documents = document_store.get_all_documents(\n index=\"haystack_test_one\",\n filters={\"meta_field\": [\"value_text_1\", \"value_table_1\"]},\n return_embedding=True,\n )\n np.testing.assert_raises(\n AssertionError, np.testing.assert_array_equal, 
documents[0].embedding, documents[1].embedding\n )\n\n\ndef test_delete_all_documents(document_store_with_docs):\n assert len(document_store_with_docs.get_all_documents()) == 3\n\n document_store_with_docs.delete_documents()\n documents = document_store_with_docs.get_all_documents()\n assert len(documents) == 0\n\n\ndef test_delete_documents(document_store_with_docs):\n assert len(document_store_with_docs.get_all_documents()) == 3\n\n document_store_with_docs.delete_documents()\n documents = document_store_with_docs.get_all_documents()\n assert len(documents) == 0\n\n\ndef test_delete_documents_with_filters(document_store_with_docs):\n document_store_with_docs.delete_documents(filters={\"meta_field\": [\"test1\", \"test2\"]})\n documents = document_store_with_docs.get_all_documents()\n assert len(documents) == 1\n assert documents[0].meta[\"meta_field\"] == \"test3\"\n\n\ndef test_delete_documents_by_id(document_store_with_docs):\n docs_to_delete = document_store_with_docs.get_all_documents(filters={\"meta_field\": [\"test1\", \"test2\"]})\n docs_not_to_delete = document_store_with_docs.get_all_documents(filters={\"meta_field\": [\"test3\"]})\n\n document_store_with_docs.delete_documents(ids=[doc.id for doc in docs_to_delete])\n all_docs_left = document_store_with_docs.get_all_documents()\n assert len(all_docs_left) == 1\n assert all_docs_left[0].meta[\"meta_field\"] == \"test3\"\n\n all_ids_left = [doc.id for doc in all_docs_left]\n assert all(doc.id in all_ids_left for doc in docs_not_to_delete)\n\n\ndef test_delete_documents_by_id_with_filters(document_store_with_docs):\n docs_to_delete = document_store_with_docs.get_all_documents(filters={\"meta_field\": [\"test1\", \"test2\"]})\n docs_not_to_delete = document_store_with_docs.get_all_documents(filters={\"meta_field\": [\"test3\"]})\n\n document_store_with_docs.delete_documents(ids=[doc.id for doc in docs_to_delete], filters={\"meta_field\": [\"test1\"]})\n\n all_docs_left = document_store_with_docs.get_all_documents()\n assert len(all_docs_left) == 2\n assert all(doc.meta[\"meta_field\"] != \"test1\" for doc in all_docs_left)\n\n all_ids_left = [doc.id for doc in all_docs_left]\n assert all(doc.id in all_ids_left for doc in docs_not_to_delete)\n\n\n# exclude weaviate because it does not support storing labels\[email protected](\"document_store\", [\"elasticsearch\", \"faiss\", \"memory\", \"milvus\"], indirect=True)\ndef test_labels(document_store):\n label = Label(\n query=\"question1\",\n answer=Answer(\n answer=\"answer\",\n type=\"extractive\",\n score=0.0,\n context=\"something\",\n offsets_in_document=[Span(start=12, end=14)],\n offsets_in_context=[Span(start=12, end=14)],\n ),\n is_correct_answer=True,\n is_correct_document=True,\n document=Document(content=\"something\", id=\"123\"),\n no_answer=False,\n origin=\"gold-label\",\n )\n document_store.write_labels([label], index=\"haystack_test_label\")\n labels = document_store.get_all_labels(index=\"haystack_test_label\")\n assert len(labels) == 1\n assert label == labels[0]\n\n # different index\n labels = document_store.get_all_labels()\n assert len(labels) == 0\n\n # write second label + duplicate\n label2 = Label(\n query=\"question2\",\n answer=Answer(\n answer=\"another answer\",\n type=\"extractive\",\n score=0.0,\n context=\"something\",\n offsets_in_document=[Span(start=12, end=14)],\n offsets_in_context=[Span(start=12, end=14)],\n ),\n is_correct_answer=True,\n is_correct_document=True,\n document=Document(content=\"something\", id=\"324\"),\n no_answer=False,\n 
origin=\"gold-label\",\n )\n document_store.write_labels([label, label2], index=\"haystack_test_label\")\n labels = document_store.get_all_labels(index=\"haystack_test_label\")\n\n # check that second label has been added but not the duplicate\n assert len(labels) == 2\n assert label in labels\n assert label2 in labels\n\n # delete filtered label2 by id\n document_store.delete_labels(index=\"haystack_test_label\", ids=[labels[1].id])\n labels = document_store.get_all_labels(index=\"haystack_test_label\")\n assert label == labels[0]\n assert len(labels) == 1\n\n # re-add label2\n document_store.write_labels([label2], index=\"haystack_test_label\")\n labels = document_store.get_all_labels(index=\"haystack_test_label\")\n assert len(labels) == 2\n\n # delete filtered label2 by query text\n document_store.delete_labels(index=\"haystack_test_label\", filters={\"query\": [labels[1].query]})\n labels = document_store.get_all_labels(index=\"haystack_test_label\")\n assert label == labels[0]\n assert len(labels) == 1\n\n # re-add label2\n document_store.write_labels([label2], index=\"haystack_test_label\")\n labels = document_store.get_all_labels(index=\"haystack_test_label\")\n assert len(labels) == 2\n\n # delete intersection of filters and ids, which is empty\n document_store.delete_labels(index=\"haystack_test_label\", ids=[labels[0].id], filters={\"query\": [labels[1].query]})\n labels = document_store.get_all_labels(index=\"haystack_test_label\")\n assert len(labels) == 2\n assert label in labels\n assert label2 in labels\n\n # delete all labels\n document_store.delete_labels(index=\"haystack_test_label\")\n labels = document_store.get_all_labels(index=\"haystack_test_label\")\n assert len(labels) == 0\n\n\n# exclude weaviate because it does not support storing labels\[email protected](\"document_store\", [\"elasticsearch\", \"faiss\", \"memory\", \"milvus\"], indirect=True)\ndef test_multilabel(document_store):\n labels = [\n Label(\n id=\"standard\",\n query=\"question\",\n answer=Answer(answer=\"answer1\", offsets_in_document=[Span(start=12, end=18)]),\n document=Document(content=\"some\", id=\"123\"),\n is_correct_answer=True,\n is_correct_document=True,\n no_answer=False,\n origin=\"gold-label\",\n ),\n # different answer in same doc\n Label(\n id=\"diff-answer-same-doc\",\n query=\"question\",\n answer=Answer(answer=\"answer2\", offsets_in_document=[Span(start=12, end=18)]),\n document=Document(content=\"some\", id=\"123\"),\n is_correct_answer=True,\n is_correct_document=True,\n no_answer=False,\n origin=\"gold-label\",\n ),\n # answer in different doc\n Label(\n id=\"diff-answer-diff-doc\",\n query=\"question\",\n answer=Answer(answer=\"answer3\", offsets_in_document=[Span(start=12, end=18)]),\n document=Document(content=\"some other\", id=\"333\"),\n is_correct_answer=True,\n is_correct_document=True,\n no_answer=False,\n origin=\"gold-label\",\n ),\n # 'no answer', should be excluded from MultiLabel\n Label(\n id=\"4-no-answer\",\n query=\"question\",\n answer=Answer(answer=\"\", offsets_in_document=[Span(start=0, end=0)]),\n document=Document(content=\"some\", id=\"777\"),\n is_correct_answer=True,\n is_correct_document=True,\n no_answer=True,\n origin=\"gold-label\",\n ),\n # is_correct_answer=False, should be excluded from MultiLabel if \"drop_negatives = True\"\n Label(\n id=\"5-negative\",\n query=\"question\",\n answer=Answer(answer=\"answer5\", offsets_in_document=[Span(start=12, end=18)]),\n document=Document(content=\"some\", id=\"123\"),\n is_correct_answer=False,\n 
is_correct_document=True,\n no_answer=False,\n origin=\"gold-label\",\n ),\n ]\n document_store.write_labels(labels, index=\"haystack_test_multilabel\")\n # regular labels - not aggregated\n list_labels = document_store.get_all_labels(index=\"haystack_test_multilabel\")\n assert list_labels == labels\n assert len(list_labels) == 5\n\n # Currently we don't enforce writing (missing) docs automatically when adding labels and there's no DB relationship between the two.\n # We should introduce this when we refactored the logic of \"index\" to be rather a \"collection\" of labels+documents\n # docs = document_store.get_all_documents(index=\"haystack_test_multilabel\")\n # assert len(docs) == 3\n\n # Multi labels (open domain)\n multi_labels_open = document_store.get_all_labels_aggregated(\n index=\"haystack_test_multilabel\", open_domain=True, drop_negative_labels=True\n )\n\n # for open-domain we group all together as long as they have the same question\n assert len(multi_labels_open) == 1\n # all labels are in there except the negative one and the no_answer\n assert len(multi_labels_open[0].labels) == 4\n assert len(multi_labels_open[0].answers) == 3\n assert \"5-negative\" not in [l.id for l in multi_labels_open[0].labels]\n assert len(multi_labels_open[0].document_ids) == 3\n\n # Don't drop the negative label\n multi_labels_open = document_store.get_all_labels_aggregated(\n index=\"haystack_test_multilabel\", open_domain=True, drop_no_answers=False, drop_negative_labels=False\n )\n assert len(multi_labels_open[0].labels) == 5\n assert len(multi_labels_open[0].answers) == 4\n assert len(multi_labels_open[0].document_ids) == 4\n\n # Drop no answer + negative\n multi_labels_open = document_store.get_all_labels_aggregated(\n index=\"haystack_test_multilabel\", open_domain=True, drop_no_answers=True, drop_negative_labels=True\n )\n assert len(multi_labels_open[0].labels) == 3\n assert len(multi_labels_open[0].answers) == 3\n assert len(multi_labels_open[0].document_ids) == 3\n\n # for closed domain we group by document so we expect 3 multilabels with 2,1,1 labels each (negative dropped again)\n multi_labels = document_store.get_all_labels_aggregated(\n index=\"haystack_test_multilabel\", open_domain=False, drop_negative_labels=True\n )\n assert len(multi_labels) == 3\n label_counts = set([len(ml.labels) for ml in multi_labels])\n assert label_counts == set([2, 1, 1])\n\n assert len(multi_labels[0].answers) == len(multi_labels[0].document_ids)\n\n # make sure there' nothing stored in another index\n multi_labels = document_store.get_all_labels_aggregated()\n assert len(multi_labels) == 0\n docs = document_store.get_all_documents()\n assert len(docs) == 0\n\n\n# exclude weaviate because it does not support storing labels\[email protected](\"document_store\", [\"elasticsearch\", \"faiss\", \"memory\", \"milvus\"], indirect=True)\ndef test_multilabel_no_answer(document_store):\n labels = [\n Label(\n query=\"question\",\n answer=Answer(answer=\"\"),\n is_correct_answer=True,\n is_correct_document=True,\n document=Document(content=\"some\", id=\"777\"),\n no_answer=True,\n origin=\"gold-label\",\n ),\n # no answer in different doc\n Label(\n query=\"question\",\n answer=Answer(answer=\"\"),\n is_correct_answer=True,\n is_correct_document=True,\n document=Document(content=\"some\", id=\"123\"),\n no_answer=True,\n origin=\"gold-label\",\n ),\n # no answer in same doc, should be excluded\n Label(\n query=\"question\",\n answer=Answer(answer=\"\"),\n is_correct_answer=True,\n is_correct_document=True,\n 
document=Document(content=\"some\", id=\"777\"),\n no_answer=True,\n origin=\"gold-label\",\n ),\n # no answer with is_correct_answer=False, should be excluded\n Label(\n query=\"question\",\n answer=Answer(answer=\"\"),\n is_correct_answer=False,\n is_correct_document=True,\n document=Document(content=\"some\", id=\"777\"),\n no_answer=True,\n origin=\"gold-label\",\n ),\n ]\n\n document_store.write_labels(labels, index=\"haystack_test_multilabel_no_answer\")\n\n labels = document_store.get_all_labels(index=\"haystack_test_multilabel_no_answer\")\n assert len(labels) == 4\n\n multi_labels = document_store.get_all_labels_aggregated(\n index=\"haystack_test_multilabel_no_answer\", open_domain=True, drop_no_answers=False, drop_negative_labels=True\n )\n assert len(multi_labels) == 1\n assert multi_labels[0].no_answer == True\n assert len(multi_labels[0].document_ids) == 0\n assert len(multi_labels[0].answers) == 1\n\n multi_labels = document_store.get_all_labels_aggregated(\n index=\"haystack_test_multilabel_no_answer\", open_domain=True, drop_no_answers=False, drop_negative_labels=False\n )\n assert len(multi_labels) == 1\n assert multi_labels[0].no_answer == True\n assert len(multi_labels[0].document_ids) == 0\n assert len(multi_labels[0].labels) == 3\n assert len(multi_labels[0].answers) == 1\n\n\[email protected](\"document_store\", [\"elasticsearch\", \"faiss\", \"milvus\", \"weaviate\"], indirect=True)\n# Currently update_document_meta() is not implemented for Memory doc store\ndef test_update_meta(document_store):\n documents = [\n Document(content=\"Doc1\", meta={\"meta_key_1\": \"1\", \"meta_key_2\": \"1\"}),\n Document(content=\"Doc2\", meta={\"meta_key_1\": \"2\", \"meta_key_2\": \"2\"}),\n Document(content=\"Doc3\", meta={\"meta_key_1\": \"3\", \"meta_key_2\": \"3\"}),\n ]\n document_store.write_documents(documents)\n document_2 = document_store.get_all_documents(filters={\"meta_key_2\": [\"2\"]})[0]\n document_store.update_document_meta(document_2.id, meta={\"meta_key_1\": \"99\", \"meta_key_2\": \"2\"})\n updated_document = document_store.get_document_by_id(document_2.id)\n assert len(updated_document.meta.keys()) == 2\n assert updated_document.meta[\"meta_key_1\"] == \"99\"\n assert updated_document.meta[\"meta_key_2\"] == \"2\"\n\n\[email protected](\"document_store_type\", [\"elasticsearch\", \"memory\"])\ndef test_custom_embedding_field(document_store_type, tmp_path):\n document_store = get_document_store(\n document_store_type=document_store_type, tmp_path=tmp_path, embedding_field=\"custom_embedding_field\"\n )\n doc_to_write = {\"content\": \"test\", \"custom_embedding_field\": np.random.rand(768).astype(np.float32)}\n document_store.write_documents([doc_to_write])\n documents = document_store.get_all_documents(return_embedding=True)\n assert len(documents) == 1\n assert documents[0].content == \"test\"\n np.testing.assert_array_equal(doc_to_write[\"custom_embedding_field\"], documents[0].embedding)\n\n\[email protected](\"document_store\", [\"elasticsearch\"], indirect=True)\ndef test_get_meta_values_by_key(document_store):\n documents = [\n Document(content=\"Doc1\", meta={\"meta_key_1\": \"1\", \"meta_key_2\": \"11\"}),\n Document(content=\"Doc2\", meta={\"meta_key_1\": \"2\", \"meta_key_2\": \"22\"}),\n Document(content=\"Doc3\", meta={\"meta_key_1\": \"3\", \"meta_key_2\": \"33\"}),\n ]\n document_store.write_documents(documents)\n\n # test without filters or query\n result = document_store.get_metadata_values_by_key(key=\"meta_key_1\")\n for bucket in result:\n 
assert bucket[\"value\"] in [\"1\", \"2\", \"3\"]\n assert bucket[\"count\"] == 1\n\n # test with filters but no query\n result = document_store.get_metadata_values_by_key(key=\"meta_key_1\", filters={\"meta_key_2\": [\"11\", \"22\"]})\n for bucket in result:\n assert bucket[\"value\"] in [\"1\", \"2\"]\n assert bucket[\"count\"] == 1\n\n # test with filters & query\n result = document_store.get_metadata_values_by_key(key=\"meta_key_1\", query=\"Doc1\")\n for bucket in result:\n assert bucket[\"value\"] in [\"1\"]\n assert bucket[\"count\"] == 1\n\n\[email protected]\ndef test_elasticsearch_custom_fields():\n client = Elasticsearch()\n client.indices.delete(index=\"haystack_test_custom\", ignore=[404])\n document_store = ElasticsearchDocumentStore(\n index=\"haystack_test_custom\", content_field=\"custom_text_field\", embedding_field=\"custom_embedding_field\"\n )\n\n doc_to_write = {\"custom_text_field\": \"test\", \"custom_embedding_field\": np.random.rand(768).astype(np.float32)}\n document_store.write_documents([doc_to_write])\n documents = document_store.get_all_documents(return_embedding=True)\n assert len(documents) == 1\n assert documents[0].content == \"test\"\n np.testing.assert_array_equal(doc_to_write[\"custom_embedding_field\"], documents[0].embedding)\n\n\[email protected]\ndef test_elasticsearch_delete_index():\n client = Elasticsearch()\n index_name = \"haystack_test_deletion\"\n\n document_store = ElasticsearchDocumentStore(index=index_name)\n\n # the index should exist\n index_exists = client.indices.exists(index=index_name)\n assert index_exists\n\n document_store.delete_index(index_name)\n\n # the index was deleted and should not exist\n index_exists = client.indices.exists(index=index_name)\n assert not index_exists\n\n\[email protected]\ndef test_get_document_count_only_documents_without_embedding_arg():\n documents = [\n {\n \"content\": \"text1\",\n \"id\": \"1\",\n \"embedding\": np.random.rand(768).astype(np.float32),\n \"meta_field_for_count\": \"a\",\n },\n {\n \"content\": \"text2\",\n \"id\": \"2\",\n \"embedding\": np.random.rand(768).astype(np.float64),\n \"meta_field_for_count\": \"b\",\n },\n {\"content\": \"text3\", \"id\": \"3\", \"embedding\": np.random.rand(768).astype(np.float32).tolist()},\n {\"content\": \"text4\", \"id\": \"4\", \"meta_field_for_count\": \"b\"},\n {\"content\": \"text5\", \"id\": \"5\", \"meta_field_for_count\": \"b\"},\n {\"content\": \"text6\", \"id\": \"6\", \"meta_field_for_count\": \"c\"},\n {\n \"content\": \"text7\",\n \"id\": \"7\",\n \"embedding\": np.random.rand(768).astype(np.float64),\n \"meta_field_for_count\": \"c\",\n },\n ]\n\n _index: str = \"haystack_test_count\"\n document_store = ElasticsearchDocumentStore(index=_index)\n document_store.delete_documents(index=_index)\n\n document_store.write_documents(documents)\n\n assert document_store.get_document_count() == 7\n assert document_store.get_document_count(only_documents_without_embedding=True) == 3\n assert (\n document_store.get_document_count(\n only_documents_without_embedding=True, filters={\"meta_field_for_count\": [\"c\"]}\n )\n == 1\n )\n assert (\n document_store.get_document_count(\n only_documents_without_embedding=True, filters={\"meta_field_for_count\": [\"b\"]}\n )\n == 2\n )\n\n\[email protected]\ndef test_skip_missing_embeddings():\n documents = [\n {\"content\": \"text1\", \"id\": \"1\"}, # a document without embeddings\n {\"content\": \"text2\", \"id\": \"2\", \"embedding\": np.random.rand(768).astype(np.float64)},\n {\"content\": \"text3\", 
\"id\": \"3\", \"embedding\": np.random.rand(768).astype(np.float32).tolist()},\n {\"content\": \"text4\", \"id\": \"4\", \"embedding\": np.random.rand(768).astype(np.float32)},\n ]\n document_store = ElasticsearchDocumentStore(index=\"skip_missing_embedding_index\")\n document_store.write_documents(documents)\n\n document_store.skip_missing_embeddings = True\n retrieved_docs = document_store.query_by_embedding(np.random.rand(768).astype(np.float32))\n assert len(retrieved_docs) == 3\n\n document_store.skip_missing_embeddings = False\n with pytest.raises(RequestError):\n document_store.query_by_embedding(np.random.rand(768).astype(np.float32))\n\n # Test scenario with no embeddings for the entire index\n documents = [\n {\"content\": \"text1\", \"id\": \"1\"},\n {\"content\": \"text2\", \"id\": \"2\"},\n {\"content\": \"text3\", \"id\": \"3\"},\n {\"content\": \"text4\", \"id\": \"4\"},\n ]\n\n document_store.delete_documents()\n document_store.write_documents(documents)\n\n document_store.skip_missing_embeddings = True\n with pytest.raises(RequestError):\n document_store.query_by_embedding(np.random.rand(768).astype(np.float32))\n\n\[email protected]\ndef test_elasticsearch_synonyms():\n synonyms = [\"i-pod, i pod, ipod\", \"sea biscuit, sea biscit, seabiscuit\", \"foo, foo bar, baz\"]\n synonym_type = \"synonym_graph\"\n\n client = Elasticsearch()\n client.indices.delete(index=\"haystack_synonym_arg\", ignore=[404])\n document_store = ElasticsearchDocumentStore(\n index=\"haystack_synonym_arg\", synonyms=synonyms, synonym_type=synonym_type\n )\n indexed_settings = client.indices.get_settings(index=\"haystack_synonym_arg\")\n\n assert (\n synonym_type\n == indexed_settings[\"haystack_synonym_arg\"][\"settings\"][\"index\"][\"analysis\"][\"filter\"][\"synonym\"][\"type\"]\n )\n assert (\n synonyms\n == indexed_settings[\"haystack_synonym_arg\"][\"settings\"][\"index\"][\"analysis\"][\"filter\"][\"synonym\"][\"synonyms\"]\n )\n\n\[email protected](\n \"document_store_with_docs\", [\"memory\", \"faiss\", \"milvus\", \"weaviate\", \"elasticsearch\"], indirect=True\n)\[email protected]_dim(384)\ndef test_similarity_score(document_store_with_docs):\n retriever = EmbeddingRetriever(\n document_store=document_store_with_docs, embedding_model=\"sentence-transformers/paraphrase-MiniLM-L3-v2\"\n )\n document_store_with_docs.update_embeddings(retriever)\n pipeline = DocumentSearchPipeline(retriever)\n prediction = pipeline.run(\"Paul lives in New York\")\n scores = [document.score for document in prediction[\"documents\"]]\n assert scores == pytest.approx([0.9102500000000191, 0.6491700000000264, 0.6321699999999737], abs=1e-3)\n\n\[email protected](\n \"document_store_dot_product_with_docs\", [\"memory\", \"faiss\", \"milvus\", \"elasticsearch\"], indirect=True\n)\[email protected]_dim(384)\ndef test_similarity_score_dot_product(document_store_dot_product_with_docs):\n retriever = EmbeddingRetriever(\n document_store=document_store_dot_product_with_docs,\n embedding_model=\"sentence-transformers/paraphrase-MiniLM-L3-v2\",\n )\n document_store_dot_product_with_docs.update_embeddings(retriever)\n pipeline = DocumentSearchPipeline(retriever)\n prediction = pipeline.run(\"Paul lives in New York\")\n scores = [document.score for document in prediction[\"documents\"]]\n assert scores == pytest.approx([0.5526493562767626, 0.5189836204008691, 0.5179697571274173], abs=1e-3)\n\n\ndef test_custom_headers(document_store_with_docs: BaseDocumentStore):\n mock_client = None\n if isinstance(document_store_with_docs, 
ElasticsearchDocumentStore):\n es_document_store: ElasticsearchDocumentStore = document_store_with_docs\n mock_client = Mock(wraps=es_document_store.client)\n es_document_store.client = mock_client\n custom_headers = {\"X-My-Custom-Header\": \"header-value\"}\n if not mock_client:\n with pytest.raises(NotImplementedError):\n documents = document_store_with_docs.get_all_documents(headers=custom_headers)\n else:\n documents = document_store_with_docs.get_all_documents(headers=custom_headers)\n mock_client.search.assert_called_once()\n args, kwargs = mock_client.search.call_args\n assert \"headers\" in kwargs\n assert kwargs[\"headers\"] == custom_headers\n assert len(documents) > 0\n\n\[email protected](deepset_cloud_fixture.__name__)\[email protected]\ndef test_DeepsetCloudDocumentStore_init_with_dot_product():\n document_store = DeepsetCloudDocumentStore(api_endpoint=DC_API_ENDPOINT, api_key=DC_API_KEY, index=DC_TEST_INDEX)\n assert document_store.return_embedding == False\n assert document_store.similarity == \"dot_product\"\n\n\[email protected](deepset_cloud_fixture.__name__)\[email protected]\ndef test_DeepsetCloudDocumentStore_init_with_cosine():\n document_store = DeepsetCloudDocumentStore(\n api_endpoint=DC_API_ENDPOINT,\n api_key=DC_API_KEY,\n index=DC_TEST_INDEX,\n similarity=\"cosine\",\n return_embedding=True,\n )\n assert document_store.return_embedding == True\n assert document_store.similarity == \"cosine\"\n\n\[email protected](deepset_cloud_fixture.__name__)\[email protected]\ndef test_DeepsetCloudDocumentStore_invalid_token():\n if MOCK_DC:\n responses.add(\n method=responses.GET,\n url=f\"{DC_API_ENDPOINT}/workspaces/default/indexes/{DC_TEST_INDEX}\",\n match=[matchers.header_matcher({\"authorization\": \"Bearer invalid_token\"})],\n body=\"Internal Server Error\",\n status=500,\n )\n\n with pytest.raises(\n Exception,\n match=f\"Could not connect to Deepset Cloud:\\nGET {DC_API_ENDPOINT}/workspaces/default/indexes/{DC_TEST_INDEX} failed: HTTP 500 - Internal Server Error\",\n ):\n DeepsetCloudDocumentStore(api_endpoint=DC_API_ENDPOINT, api_key=\"invalid_token\", index=DC_TEST_INDEX)\n\n\[email protected](deepset_cloud_fixture.__name__)\[email protected]\ndef test_DeepsetCloudDocumentStore_invalid_api_endpoint():\n if MOCK_DC:\n responses.add(\n method=responses.GET,\n url=f\"{DC_API_ENDPOINT}00/workspaces/default/indexes/{DC_TEST_INDEX}\",\n body=\"Not Found\",\n status=404,\n )\n\n with pytest.raises(\n Exception,\n match=f\"Could not connect to Deepset Cloud:\\nGET {DC_API_ENDPOINT}00/workspaces/default/indexes/{DC_TEST_INDEX} failed: HTTP 404 - Not Found\",\n ):\n DeepsetCloudDocumentStore(api_endpoint=f\"{DC_API_ENDPOINT}00\", api_key=DC_API_KEY, index=DC_TEST_INDEX)\n\n\[email protected](deepset_cloud_fixture.__name__)\[email protected]\ndef test_DeepsetCloudDocumentStore_invalid_index():\n if MOCK_DC:\n responses.add(\n method=responses.GET,\n url=f\"{DC_API_ENDPOINT}/workspaces/default/indexes/invalid_index\",\n body=\"Not Found\",\n status=404,\n )\n\n with pytest.raises(\n Exception,\n match=f\"Could not connect to Deepset Cloud:\\nGET {DC_API_ENDPOINT}/workspaces/default/indexes/invalid_index failed: HTTP 404 - Not Found\",\n ):\n DeepsetCloudDocumentStore(api_endpoint=DC_API_ENDPOINT, api_key=DC_API_KEY, index=\"invalid_index\")\n\n\[email protected]\ndef test_DeepsetCloudDocumentStore_documents(deepset_cloud_document_store):\n if MOCK_DC:\n with open(SAMPLES_PATH / \"dc\" / \"documents-stream.response\", \"r\") as f:\n documents_stream_response = f.read()\n 
docs = [json.loads(l) for l in documents_stream_response.splitlines()]\n filtered_docs = [doc for doc in docs if doc[\"meta\"][\"file_id\"] == docs[0][\"meta\"][\"file_id\"]]\n documents_stream_filtered_response = \"\\n\".join([json.dumps(d) for d in filtered_docs])\n\n responses.add(\n method=responses.POST,\n url=f\"{DC_API_ENDPOINT}/workspaces/default/indexes/{DC_TEST_INDEX}/documents-stream\",\n body=documents_stream_response,\n status=200,\n )\n\n responses.add(\n method=responses.POST,\n url=f\"{DC_API_ENDPOINT}/workspaces/default/indexes/{DC_TEST_INDEX}/documents-stream\",\n match=[\n matchers.json_params_matcher(\n {\"filters\": {\"file_id\": [docs[0][\"meta\"][\"file_id\"]]}, \"return_embedding\": False}\n )\n ],\n body=documents_stream_filtered_response,\n status=200,\n )\n\n for doc in filtered_docs:\n responses.add(\n method=responses.GET,\n url=f\"{DC_API_ENDPOINT}/workspaces/default/indexes/{DC_TEST_INDEX}/documents/{doc['id']}\",\n json=doc,\n status=200,\n )\n else:\n responses.add_passthru(DC_API_ENDPOINT)\n\n docs = deepset_cloud_document_store.get_all_documents()\n assert len(docs) > 1\n assert isinstance(docs[0], Document)\n\n first_doc = next(deepset_cloud_document_store.get_all_documents_generator())\n assert isinstance(first_doc, Document)\n assert first_doc.meta[\"file_id\"] is not None\n\n filtered_docs = deepset_cloud_document_store.get_all_documents(filters={\"file_id\": [first_doc.meta[\"file_id\"]]})\n assert len(filtered_docs) > 0\n assert len(filtered_docs) < len(docs)\n\n ids = [doc.id for doc in filtered_docs]\n single_doc_by_id = deepset_cloud_document_store.get_document_by_id(ids[0])\n assert single_doc_by_id is not None\n assert single_doc_by_id.meta[\"file_id\"] == first_doc.meta[\"file_id\"]\n\n docs_by_id = deepset_cloud_document_store.get_documents_by_id(ids)\n assert len(docs_by_id) == len(filtered_docs)\n for doc in docs_by_id:\n assert doc.meta[\"file_id\"] == first_doc.meta[\"file_id\"]\n\n\[email protected]\ndef test_DeepsetCloudDocumentStore_query(deepset_cloud_document_store):\n if MOCK_DC:\n with open(SAMPLES_PATH / \"dc\" / \"query_winterfell.response\", \"r\") as f:\n query_winterfell_response = f.read()\n query_winterfell_docs = json.loads(query_winterfell_response)\n query_winterfell_filtered_docs = [\n doc\n for doc in query_winterfell_docs\n if doc[\"meta\"][\"file_id\"] == query_winterfell_docs[0][\"meta\"][\"file_id\"]\n ]\n query_winterfell_filtered_response = json.dumps(query_winterfell_filtered_docs)\n\n responses.add(\n method=responses.POST,\n url=f\"{DC_API_ENDPOINT}/workspaces/default/indexes/{DC_TEST_INDEX}/documents-query\",\n match=[matchers.json_params_matcher({\"query\": \"winterfell\", \"top_k\": 50})],\n status=200,\n body=query_winterfell_response,\n )\n\n responses.add(\n method=responses.POST,\n url=f\"{DC_API_ENDPOINT}/workspaces/default/indexes/{DC_TEST_INDEX}/documents-query\",\n match=[\n matchers.json_params_matcher(\n {\n \"query\": \"winterfell\",\n \"top_k\": 50,\n \"filters\": {\"file_id\": [query_winterfell_docs[0][\"meta\"][\"file_id\"]]},\n }\n )\n ],\n status=200,\n body=query_winterfell_filtered_response,\n )\n else:\n responses.add_passthru(DC_API_ENDPOINT)\n\n docs = deepset_cloud_document_store.query(\"winterfell\", top_k=50)\n assert docs is not None\n assert len(docs) > 0\n\n first_doc = docs[0]\n filtered_docs = deepset_cloud_document_store.query(\n \"winterfell\", top_k=50, filters={\"file_id\": [first_doc.meta[\"file_id\"]]}\n )\n assert len(filtered_docs) > 0\n assert len(filtered_docs) < 
len(docs)\n\n\[email protected]\ndef test_DeepsetCloudDocumentStore_query_by_embedding(deepset_cloud_document_store):\n query_emb = np.random.randn(768)\n if MOCK_DC:\n responses.add(\n method=responses.POST,\n url=f\"{DC_API_ENDPOINT}/workspaces/default/indexes/{DC_TEST_INDEX}/documents-query\",\n match=[\n matchers.json_params_matcher(\n {\n \"query_emb\": query_emb.tolist(),\n \"top_k\": 10,\n \"return_embedding\": False,\n \"similarity\": \"dot_product\",\n }\n )\n ],\n json=[],\n status=200,\n )\n else:\n responses.add_passthru(DC_API_ENDPOINT)\n\n emb_docs = deepset_cloud_document_store.query_by_embedding(query_emb)\n assert len(emb_docs) == 0\n\n\[email protected]\ndef test_elasticsearch_search_field_mapping():\n\n client = Elasticsearch()\n client.indices.delete(index=\"haystack_search_field_mapping\", ignore=[404])\n\n index_data = [\n {\n \"title\": \"Green tea components\",\n \"meta\": {\n \"content\": \"The green tea plant contains a range of healthy compounds that make it into the final drink\",\n \"sub_content\": \"Drink tip\",\n },\n \"id\": \"1\",\n },\n {\n \"title\": \"Green tea catechin\",\n \"meta\": {\n \"content\": \"Green tea contains a catechin called epigallocatechin-3-gallate (EGCG).\",\n \"sub_content\": \"Ingredients tip\",\n },\n \"id\": \"2\",\n },\n {\n \"title\": \"Minerals in Green tea\",\n \"meta\": {\n \"content\": \"Green tea also has small amounts of minerals that can benefit your health.\",\n \"sub_content\": \"Minerals tip\",\n },\n \"id\": \"3\",\n },\n {\n \"title\": \"Green tea Benefits\",\n \"meta\": {\n \"content\": \"Green tea does more than just keep you alert, it may also help boost brain function.\",\n \"sub_content\": \"Health tip\",\n },\n \"id\": \"4\",\n },\n ]\n\n document_store = ElasticsearchDocumentStore(\n index=\"haystack_search_field_mapping\", search_fields=[\"content\", \"sub_content\"], content_field=\"title\"\n )\n document_store.write_documents(index_data)\n\n indexed_settings = client.indices.get_mapping(index=\"haystack_search_field_mapping\")\n\n assert indexed_settings[\"haystack_search_field_mapping\"][\"mappings\"][\"properties\"][\"content\"][\"type\"] == \"text\"\n assert indexed_settings[\"haystack_search_field_mapping\"][\"mappings\"][\"properties\"][\"sub_content\"][\"type\"] == \"text\"\n" ]
[ [ "pandas.DataFrame", "numpy.testing.assert_array_equal", "numpy.random.randn", "numpy.testing.assert_raises", "numpy.random.rand", "numpy.testing.assert_array_almost_equal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
msheller/topologies
[ "c6457a677410fb028167f05b4e313dbffcab3d24" ]
[ "3D_UNet/model.py" ]
[ "#!/usr/bin/python\n\n# ----------------------------------------------------------------------------\n# Copyright 2018 Intel\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ----------------------------------------------------------------------------\n\nimport os.path\nimport numpy as np\nimport tensorflow as tf\nimport keras # Something is wrong with 3D concat layer of tf.keras (but Keras API works)\n\ndef dice_coef(target, prediction, axis=(1, 2, 3), smooth=1e-5):\n\t'''\n\tSorenson Dice\n\t'''\n\tintersection = tf.reduce_sum(prediction * target, axis=axis)\n\tp = tf.reduce_sum(prediction, axis=axis)\n\tt = tf.reduce_sum(target, axis=axis)\n\tdice = (2. * intersection + smooth) / (t + p + smooth)\n\n\treturn tf.reduce_mean(dice)\n\ndef dice_coef_loss(target, prediction, axis=(1,2,3), smooth=1e-5):\n\t'''\n\tSorenson Dice loss\n\tUsing -log(Dice) as the loss since it is better behaved.\n\tAlso, the log allows avoidance of the division which\n\tcan help prevent underflow when the numbers are very small.\n\t'''\n\tintersection = tf.reduce_sum(prediction * target, axis=axis)\n\tp = tf.reduce_sum(prediction, axis=axis)\n\tt = tf.reduce_sum(target, axis=axis)\n\tnumerator = tf.reduce_mean(2. * intersection + smooth)\n\tdenominator = tf.reduce_mean(t + p + smooth)\n\tdice_loss = -tf.log(numerator) + tf.log(denominator)\n\n\treturn dice_loss\n\nCHANNEL_LAST = True\nif CHANNEL_LAST:\n\tconcat_axis = -1\n\tdata_format = \"channels_last\"\n\nelse:\n\tconcat_axis = 1\n\tdata_format = \"channels_first\"\n\ndef is_power_of_2(num):\n\t'''\n\tCheck if number is a power of 2\n\tIf not, then the U-Net model might not compile\n\tbecause the feature maps round down on odd lengths.\n\tFor example, if the image is 10x10x10. The first MaxPooling3D\n\twill reduce it to 5x5x5. The second MaxPooling3D will make it 2x2x2.\n\tHowever, the UpSampling3D will take the 2x2x2 and make it 4x4x4.\n\tThat means you try to concatenate a 5x5x5 on the encoder with a\n\t4x4x4 on the decoder (which gives an error).\n\t'''\n\treturn ((num & (num - 1)) == 0) and num > 0\n\ndef define_model(input_img, use_upsampling=False, learning_rate=0.001, n_cl_out=1, dropout=0.2, print_summary = False):\n\n\t# [b,h,w,d,c] = input_img.shape\n\t# if not is_power_of_2(h) or \\\n\t# \tnot is_power_of_2(w) or \\\n\t# \tnot is_power_of_2(d):\n\t# \tprint(\"ERROR: Image dimension lengths must be a power of 2. e.g. 
16x256x32\")\n\n\t# inputs = keras.layers.Input(shape=(h,w,d,c), name=\"Input_Image\")\n\n\t# Set keras learning phase to train\n\tkeras.backend.set_learning_phase(True)\n\n\t# Don't initialize variables on the fly\n\tkeras.backend.manual_variable_initialization(False)\n\n\tinputs = keras.layers.Input(tensor=input_img, name=\"Input_Image\")\n\n\tparams = dict(kernel_size=(3, 3, 3), activation=\"relu\",\n\t\t\t\t padding=\"same\", data_format=data_format,\n\t\t\t\t kernel_initializer=\"he_uniform\")\n\n\tconv1 = keras.layers.Conv3D(name=\"conv1a\", filters=32, **params)(inputs)\n\tconv1 = keras.layers.Conv3D(name=\"conv1b\", filters=64, **params)(conv1)\n\tpool1 = keras.layers.MaxPooling3D(name=\"pool1\", pool_size=(2, 2, 2))(conv1)\n\n\tconv2 = keras.layers.Conv3D(name=\"conv2a\", filters=64, **params)(pool1)\n\tconv2 = keras.layers.Conv3D(name=\"conv2b\", filters=128, **params)(conv2)\n\tpool2 = keras.layers.MaxPooling3D(name=\"pool2\", pool_size=(2, 2, 2))(conv2)\n\n\tconv3 = keras.layers.Conv3D(name=\"conv3a\", filters=128, **params)(pool2)\n\tconv3 = keras.layers.Dropout(dropout)(conv3) ### Trying dropout layers earlier on, as indicated in the paper\n\tconv3 = keras.layers.Conv3D(name=\"conv3b\", filters=256, **params)(conv3)\n\tpool3 = keras.layers.MaxPooling3D(name=\"pool3\", pool_size=(2, 2, 2))(conv3)\n\n\tconv4 = keras.layers.Conv3D(name=\"conv4a\", filters=256, **params)(pool3)\n\tconv4 = keras.layers.Dropout(dropout)(conv4) ### Trying dropout layers earlier on, as indicated in the paper\n\tconv4 = keras.layers.Conv3D(name=\"conv4b\", filters=512, **params)(conv4)\n\n\tif use_upsampling:\n\t\tup4 = keras.layers.concatenate([keras.layers.UpSampling3D(name=\"up4\", size=(2, 2, 2))(conv4), conv3], axis=concat_axis)\n\telse:\n\t\tup4 = keras.layers.concatenate([keras.layers.Conv3DTranspose(name=\"transConv4\", filters=512, data_format=data_format,\n\t\t\t\t\t\t kernel_size=(2, 2, 2), strides=(2, 2, 2), padding=\"same\")(conv4), conv3], axis=concat_axis)\n\n\n\tconv5 = keras.layers.Conv3D(name=\"conv5a\", filters=256, **params)(up4)\n\tconv5 = keras.layers.Conv3D(name=\"conv5b\", filters=256, **params)(conv5)\n\n\tif use_upsampling:\n\t\tup5 = keras.layers.concatenate([keras.layers.UpSampling3D(name=\"up5\", size=(2, 2, 2))(conv5), conv2], axis=concat_axis)\n\telse:\n\t\tup5 = keras.layers.concatenate([keras.layers.Conv3DTranspose(name=\"transConv5\", filters=256, data_format=data_format,\n\t\t\t\t\t\t kernel_size=(2, 2, 2), strides=(2, 2, 2), padding=\"same\")(conv5), conv2], axis=concat_axis)\n\n\tconv6 = keras.layers.Conv3D(name=\"conv6a\", filters=128, **params)(up5)\n\tconv6 = keras.layers.Conv3D(name=\"conv6b\", filters=128, **params)(conv6)\n\n\tif use_upsampling:\n\t\tup6 = keras.layers.concatenate([keras.layers.UpSampling3D(name=\"up6\", size=(2, 2, 2))(conv6), conv1], axis=concat_axis)\n\telse:\n\t\tup6 = keras.layers.concatenate([keras.layers.Conv3DTranspose(name=\"transConv6\", filters=128, data_format=data_format,\n\t\t\t\t\t\t kernel_size=(2, 2, 2), strides=(2, 2, 2), padding=\"same\")(conv6), conv1], axis=concat_axis)\n\n\tconv7 = keras.layers.Conv3D(name=\"conv7a\", filters=128, **params)(up6)\n\tconv7 = keras.layers.Conv3D(name=\"conv7b\", filters=128, **params)(conv7)\n\tpred = keras.layers.Conv3D(name=\"Prediction_Mask\", filters=n_cl_out, kernel_size=(1, 1, 1),\n\t\t\t\t\tdata_format=data_format, activation=\"sigmoid\")(conv7)\n\n\tmodel = keras.models.Model(inputs=[inputs], outputs=[pred])\n\n\tif print_summary:\n\t\tmodel.summary()\n\n\t# optimizer = 
tf.train.AdamOptimizer(learning_rate)\n\t# model.compile(optimizer=optimizer, loss=dice_coef_loss, metrics=[dice_coef])\n\n\treturn pred #model\n\n\ndef sensitivity(target, prediction, axis=(1,2,3), smooth = 1e-5 ):\n\n\tintersection = tf.reduce_sum(prediction * target, axis=axis)\n\tcoef = (intersection + smooth) / (tf.reduce_sum(prediction, axis=axis) + smooth)\n\treturn tf.reduce_mean(coef)\n\ndef specificity(target, prediction, axis=(1,2,3), smooth = 1e-5 ):\n\n\tintersection = tf.reduce_sum(prediction * target, axis=axis)\n\tcoef = (intersection + smooth) / (tf.reduce_sum(prediction, axis=axis) + smooth)\n\treturn tf.reduce_mean(coef)\n" ]
[ [ "tensorflow.reduce_sum", "tensorflow.log", "tensorflow.reduce_mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
pauljaffe/task-dyva
[ "dbf778635cdb5f1d42149da82ca07ee3919296fa" ]
[ "task_dyva/taskdataset.py" ]
[ "\"\"\"Classes and utility functions for processing game data.\n\nEbbFlowGameData: Container for data from a single game.\nEbbFlowDataset: Subclass of PyTorch Dataset,\n container for data from multiple games.\nEbbFlowStats: Subclass of EbbFlowDataset,\n provides extra functionality for analysis.\n\"\"\"\nimport os\nimport random\nimport copy\nimport pickle\nfrom itertools import product\nfrom collections import defaultdict\nimport pdb\n\nimport torch\nfrom torch.utils.data import Dataset\nimport numpy as np\nimport pandas as pd\nimport dill\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom scipy.stats import gaussian_kde, bernoulli\n\nfrom . import transforms as T\nfrom .utils import z_pca\n\n\nclass EbbFlowDataset(Dataset):\n \"\"\"Container for an Ebb and Flow dataset; also provides \n functionality for processing the data and interfacing with PyTorch.\n Data are stored in two separate formats with a one-to-one correspondence: \n self.xu contains the model inputs: this is the \"continuous\" format.\n self.discrete contains the same data in an alternative format\n that facilitates analysis.\n\n Args\n ----\n experiment_dir (str): Root directory containing info for current\n model training run. \n params (dict): Processing parameters; see e.g. train_transform_kwargs\n in default config (config/model_config.yaml). \n preprocessed (dict): Preprocessed game data.\n split (str): Can be either 'train', 'val', or 'test'; \n split is incorporated into the file name of the processed data \n upon saving.\n processed_dir (str): Directory in which to save the processed data.\n pre_transform (list of callables, optional): Transformations which \n are applied before the processed data is saved. \n transform (list of callables, optional): Transformations which are applied\n 'online' on each iteration of the training loop. \n pre_transform_params (list of dicts, optional): List of parameters to \n pass to the pre_transforms. This should be a list of dictionaries\n with the same length as pre_transform.\n transform_params (list of dicts, optional): List of parameters to \n pass to the transforms. 
This should be a list of dictionaries\n with the same length as transform.\n \"\"\"\n\n needs_data_augmentation = ('kde', 'kde_no_switch_cost', 'adaptive_gaussian')\n\n def __init__(self, experiment_dir, params, preprocessed, split,\n processed_dir, pre_transform=None, transform=None,\n pre_transform_params=None, transform_params=None):\n self.experiment_dir = experiment_dir\n self.processed_dir = processed_dir\n self.params = params\n # rename a couple keys\n preprocessed['urt_ms'] = preprocessed.pop('resp_time')\n preprocessed['urespdir'] = preprocessed.pop('resp_dir')\n self.preprocessed = preprocessed\n self.split = split\n self.resampling_type = self.params.get('data_augmentation_type', None)\n self._build_transforms(transform, transform_params,\n pre_transform, pre_transform_params)\n self.process() # also saves processed data\n self.xu = torch.load(self.processed_paths[0]) # xu = model inputs\n with open(self.processed_paths[1], 'rb') as path:\n other_data = pickle.load(path)\n self.discrete = other_data['discrete']\n self.game_ids = other_data['game_ids']\n self.resampling_info = other_data['resampling']\n\n def _build_transforms(self, transform, transform_params,\n pre_transform, pre_transform_params):\n\n # build pre transform - usually just outlier filtering\n if pre_transform is not None:\n pre = [t(p) for t, p in zip(pre_transform, pre_transform_params)]\n self.pre_transform = T.Compose(pre)\n\n # build transform\n default_transform = [T.SmoothResponses(self.params)]\n if transform is not None:\n supplied_t = [t(p) for t, p in zip(transform, transform_params)]\n tr = default_transform + supplied_t\n else:\n tr = default_transform\n self.transform_list = tr\n\n def update_smoothing(self, epoch):\n # Update the kernel used to smoothing the response template\n self.transform_list[0]._update_sm_kernel(epoch)\n self.transform = T.Compose(self.transform_list)\n\n def get_processed_sample(self, idx):\n \"\"\"Return an EbbFlowGameData instance with data from a single game.\n\n Args\n ----\n idx (int): Index of the game to return.\n\n Returns\n -------\n An EbbFlowGameData instance containing the data for this game.\n\n \"\"\"\n\n discrete = {key: vals[idx] for key, vals in self.discrete.items()}\n self.update_smoothing(9999999)\n cnp = self[idx].numpy()\n continuous = {'urespdir': cnp[:, :4], 'point_dir': cnp[:, 4:8],\n 'mv_dir': cnp[:, 8:12], 'task_cue': cnp[:, 12:]}\n game_id = self.game_ids[idx]\n return EbbFlowGameData.processed_format(discrete, continuous, \n self.params, game_id)\n\n def __getitem__(self, idx):\n # Return a single sample (game) to train the model (continuous format).\n # Called by PyTorch during model training. \n xu_idx = self.xu[:, idx, :]\n xu_idx = xu_idx if self.transform is None else self.transform(xu_idx)\n return xu_idx\n\n def __len__(self):\n # Return the number of samples (games) in the dataset.\n return self.xu.shape[1]\n\n @property\n def processed_paths(self):\n \"\"\"Return the full paths to the processed data.\"\"\"\n return [os.path.join(self.processed_dir, f) \n for f in self.processed_file_names]\n\n @property\n def processed_file_names(self):\n \"\"\"Return the names of the processed data files.\"\"\"\n return [f'{self.split}_model_inputs.pt', \n f'{self.split}_other_data.pkl']\n\n def process(self):\n \"\"\"Prepare an Ebb and Flow dataset for model training. 
\n Apply pretransforms and filtering criteria; \n determine the continuous and discrete formats of the dataset.\n \"\"\"\n\n if _files_exist(self.processed_paths):\n return\n os.makedirs(self.processed_dir, exist_ok=True)\n\n # Do an initial processing run to get info for resampling\n # and/or response smoothing. \n if ((self.resampling_type in self.needs_data_augmentation)\n or (self.params['smoothing_type'] in \n self.needs_data_augmentation)):\n throwaway_data = self._get_preprocessed_games(for_resampling=True)\n [td.standard_prep() for td in throwaway_data]\n\n # Remove outliers if specified in pre-transform \n # before resampler estimation\n out_method = self.params['outlier_params'].get('method', None)\n if out_method is not None:\n out_filter = T.FilterOutliers(self.params)\n rs_pre_transform = T.Compose([T._Trim(), out_filter])\n else:\n rs_pre_transform = T.Compose([T._Trim()])\n\n throwaway_data = [rs_pre_transform(td) for td in throwaway_data]\n throwaway_data = [td for td in throwaway_data if td.is_valid]\n resampling_info, sm_params = self._get_resampling_sm_info(\n throwaway_data)\n self.resampling_info = resampling_info\n else:\n sm_params = copy.deepcopy(self.params)\n\n # Process each game\n data_list = self._get_preprocessed_games()\n [d.standard_prep() for d in data_list]\n data_list = [self.pre_transform(d) for d in data_list]\n self.excluded_list = [d for d in data_list if not d.is_valid]\n data_list = [d for d in data_list if d.is_valid]\n self._collate(data_list)\n self._save_processed_data()\n\n def _get_preprocessed_games(self, for_resampling=False):\n data_list = []\n resampling_info = getattr(self, 'resampling_info', None)\n start_times = self.params['start_times']\n if for_resampling:\n upscale_mult = 1\n else:\n upscale_mult = self.params['upscale_mult']\n\n for start_time, game_ind, _ in product(\n start_times, range(len(self.preprocessed['urt_ms'])),\n range(upscale_mult)):\n\n preprocessed_game = {key: self.preprocessed[key][game_ind] \n for key in self.preprocessed.keys()}\n\n data_list.append(\n EbbFlowGameData.preprocessed_format(\n preprocessed_game, self.params, start_time, \n resampling_info=resampling_info))\n return data_list\n\n def _remove_switch_cost(self, rts_typed):\n # Translate RTs to eliminate switch cost\n con_rt_diff = np.mean(rts_typed[2]) - np.mean(rts_typed[0])\n incon_rt_diff = np.mean(rts_typed[3]) - np.mean(rts_typed[1])\n orig_mean_typed_rt = np.mean([np.mean(rts_typed[i]) for i in range(4)])\n rts_typed[2] = np.array(rts_typed[2]) - con_rt_diff\n rts_typed[3] = np.array(rts_typed[3]) - incon_rt_diff\n # Translate RTs again so that mean RT is the same\n new_mean_typed_rt = np.mean([np.mean(rts_typed[i]) for i in range(4)])\n mean_rt_diff = orig_mean_typed_rt - new_mean_typed_rt\n for ttype in range(4):\n rts_typed[ttype] += mean_rt_diff\n return rts_typed\n\n def _get_resampling_sm_info(self, data):\n # Trial types: \n # 0 = congruent + stay\n # 1 = incongruent + stay\n # 2 = congruent + switch\n # 3 = incongruent + switch\n resampling_dists = {}\n acc = {}\n rts_typed = {}\n sm_params = {'step_size': self.params['step_size'],\n 'smoothing_type': self.params.get('smoothing_type', \n 'gaussian'),\n 'kernel_sd': self.params.get('kernel_sd', 50),\n 'params': {}}\n\n for ttype in range(4):\n this_rts = []\n this_correct = []\n for d in data:\n d.get_extra_stats()\n this_rts.extend(d._get_field_by_trial_type(ttype, 'urt_ms'))\n this_correct.extend(d._get_field_by_trial_type(\n ttype, 'ucorrect'))\n rts_typed[ttype] = this_rts\n\n # 
Resampling info\n bw = self.params.get('data_aug_kernel_bandwidth', 0.25)\n if self.resampling_type == 'kde_no_switch_cost':\n rts_typed = self._remove_switch_cost(rts_typed)\n\n for ttype in range(4):\n this_rts = rts_typed[ttype]\n if self.resampling_type in ['kde', 'kde_no_switch_cost']:\n this_resampling = gaussian_kde(this_rts, bw_method=bw)\n else:\n this_resampling = None\n\n # Smoothing info\n if self.params['smoothing_type'] == 'adaptive_gaussian':\n this_sm = np.std(this_rts)\n elif self.params['smoothing_type'] == 'kde':\n bw = self.params.get('data_aug_kernel_bandwidth', 0.25)\n this_sm = gaussian_kde(this_rts, bw_method=bw)\n else:\n this_sm = None\n\n resampling_dists[ttype] = this_resampling\n sm_params['params'][ttype] = this_sm\n acc[ttype] = np.mean(this_correct)\n if resampling_dists[0] is not None:\n resampling_info = {'rts': resampling_dists, 'acc': acc}\n else:\n resampling_info = None\n\n return resampling_info, sm_params\n\n def _save_processed_data(self):\n other_data = {'discrete': self.discrete,\n 'excluded': self.excluded_list,\n 'resampling': getattr(self, 'resampling_info', None),\n 'game_ids': self.game_ids}\n torch.save(self.xu, self.processed_paths[0])\n with open(self.processed_paths[1], 'xb') as path:\n dill.dump(other_data, path, protocol=4)\n\n def _collate(self, data_list):\n # Continuous format (model inputs)\n con_keys = ['urespdir', 'point_dir', 'mv_dir', 'task_cue']\n xu_split = [torch.cat([torch.tensor(d.continuous[key]).unsqueeze(1) \n for d in data_list], 1)\n for key in con_keys]\n self.xu = torch.cat([d for d in xu_split], 2).to(dtype=torch.float32)\n\n # Discrete format\n disc_keys = data_list[0].discrete_fields\n self.discrete = {key: [d.discrete[key] for d in data_list]\n for key in disc_keys}\n self.game_ids = [d.game_id for d in data_list]\n\n\nclass EbbFlowStats(EbbFlowDataset):\n \"\"\"Extends EbbFlowDataset with extra functionality for \n analyzing user and model behavior.\n\n Args\n ----\n output_rates (PyTorch tensor): The models outputs (responses for each of \n the four directions). 
Has dimensions n_timesteps x n_samples x 4.\n dataset (EbbFlowDataset instance): The dataset to be analyzed.\n latents (PyTorch tensor, optional): The model latent state variables.\n Has dimensions n_timesteps x n_samples x latent_dim.\n n_pcs (int, optional): The number of principal components to keep\n in the PCA transformed latent state variables (self.pca_latents).\n **kwargs (optional): Extra options to be supplied for calls to\n EbbFlowGameData.get_extra_stats().\n \"\"\"\n\n def __init__(self, output_rates, dataset, latents=None,\n n_pcs=3, **kwargs):\n self.rates = output_rates.cpu().detach().numpy()\n self.xu = dataset.xu\n self.discrete = dataset.discrete\n self.transform = dataset.transform\n self.params = dataset.params\n self.step = self.params['step_size']\n self.game_ids = dataset.game_ids\n td_kwargs = {'t_pre': 100, 't_post': 1600}\n td_kwargs.update(kwargs)\n self.trial_data_kwargs = td_kwargs\n self.n_pre = np.round(td_kwargs['t_pre'] / self.step).astype('int')\n self.n_post = np.round(td_kwargs['t_post'] / self.step).astype('int')\n self.t_axis = self.step * np.arange(-self.n_pre, self.n_post, 1) \n if latents is not None:\n self.latents = latents.cpu().detach().numpy()\n pca_latents, explained_var, pca_obj = z_pca(self.latents, n_pcs)\n self.pca_latents = pca_latents\n self.pca_explained_var = explained_var\n self.pca_obj = pca_obj\n else:\n self.latents = None\n self.pca_latents = None\n self.pca_explained_var = None\n self.pca_obj = None\n if kwargs['alphas'] is not None:\n self.alphas = kwargs['alphas'].cpu().detach().numpy()\n self.As = kwargs['As'].cpu().detach().numpy()\n self.Bs = kwargs['Bs'].cpu().detach().numpy()\n self.Cs = kwargs['Cs'].cpu().detach().numpy()\n self.windowed = None\n self._get_trial_data()\n\n def _get_trial_data(self):\n # Transform the discrete dataset to a pandas data frame;\n # get extra stats in the process. Also window and concatenate\n # the output rates and optionally the model latents from each trial.\n dfs = []\n for d in range(self.rates.shape[1]):\n this_game = self.get_processed_sample(d)\n this_rates = np.squeeze(self.rates[:, d, :])\n if self.latents is not None:\n this_latents = np.squeeze(self.latents[:, d, :])\n this_pca_latents = np.squeeze(self.pca_latents[:, d, :])\n win_vars = this_game.get_extra_stats(\n output_rates=this_rates, latents=this_latents, \n pca_latents=this_pca_latents, **self.trial_data_kwargs)\n else:\n win_vars = this_game.get_extra_stats(\n output_rates=this_rates, **self.trial_data_kwargs)\n self._concat_windowed(win_vars)\n dfs.append(this_game._to_pandas())\n self.df = pd.concat(dfs, ignore_index=True)\n\n def _concat_windowed(self, win_vars):\n if self.windowed is None:\n self.windowed = win_vars\n else:\n for key, val in win_vars.items():\n if val is None:\n continue\n self.windowed[key] = np.concatenate(\n (self.windowed[key], val), 1)\n\n def select(self, df=None, **kwargs):\n \"\"\"Select a subset of trials using the criteria specified in **kwargs.\n\n Args\n ----\n df (pandas DataFrame, optional): Data to be selected from. \n If None (default), self.df is used. \n **kwargs (optional): Selection criteria specified as a dictionary.\n Each key should correspond to one of the fields in the discrete\n data format. \n\n Returns\n -------\n trial_inds (NumPy array): The indices of the selected trials. 
\n\n Example\n -------\n Select all congruent switch trials:\n >>> trial_inds = self.select(**{'is_switch': 1, 'is_congruent': 1})\n \"\"\"\n\n select_df = self.df if df is None else df\n query_str = ''\n for key, val in kwargs.items():\n query_str += f'({key} == {val}) & '\n query_str = query_str[:-3]\n trial_selection = select_df.query(query_str)\n trial_inds = trial_selection.index.to_numpy()\n return trial_inds\n\n def switch_cost(self):\n \"\"\"Calculate switch cost summary statistics for user and model.\n\n Returns\n -------\n stats (dict): Switch cost statistics; has the following keys:\n u_switch_cost: The user's mean response time on switch trials\n minus the user's mean response time on stay trials (ms).\n m_switch_cost: As above, but for the model's responses.\n u_acc_switch_cost: The user's mean accuracy on stay trials\n minus the user's mean accuracy on switch trials.\n m_acc_switch_cost: As above, but for the model's responses.\n \"\"\"\n\n stats = {}\n stay_inds = self.select(**{'is_switch': 0})\n switch_inds = self.select(**{'is_switch': 1})\n # response times\n u_stay_rts = self.df['urt_ms'][stay_inds]\n m_stay_rts = self.df['mrt_ms'][stay_inds]\n u_switch_rts = self.df['urt_ms'][switch_inds]\n m_switch_rts = self.df['mrt_ms'][switch_inds]\n stats['u_switch_cost'] = u_switch_rts.mean() - u_stay_rts.mean()\n stats['m_switch_cost'] = m_switch_rts.mean() - m_stay_rts.mean()\n # accuracy\n u_stay_c = self.df['ucorrect'][stay_inds]\n m_stay_c = self.df['mcorrect'][stay_inds]\n u_switch_c = self.df['ucorrect'][switch_inds]\n m_switch_c = self.df['mcorrect'][switch_inds]\n stats['u_acc_switch_cost'] = u_stay_c.mean() - u_switch_c.mean()\n stats['m_acc_switch_cost'] = m_stay_c.mean() - m_switch_c.mean()\n return stats\n\n def congruency_effect(self):\n \"\"\"Calculate congruency effect summary statistics for user and model.\n\n Returns\n -------\n stats (dict): Congruency effect statistics; has the following keys:\n u_con_effect: The user's mean response time on incongruent trials\n minus the user's mean response time on congruent trials.\n m_con_effect: As above, but for the model's responses. \n u_acc_con_effect: The user's mean accuracy on congruent trials\n minus the user's mean accuracy on incongruent trials.\n m_acc_con_effect: As above, but for the model's responses. \n \"\"\"\n\n stats = {}\n con_inds = self.select(**{'is_congruent': 1})\n incon_inds = self.select(**{'is_congruent': 0})\n # response times\n u_con_rts = self.df['urt_ms'][con_inds]\n m_con_rts = self.df['mrt_ms'][con_inds]\n u_incon_rts = self.df['urt_ms'][incon_inds]\n m_incon_rts = self.df['mrt_ms'][incon_inds]\n stats['u_con_effect'] = u_incon_rts.mean() - u_con_rts.mean()\n stats['m_con_effect'] = m_incon_rts.mean() - m_con_rts.mean()\n # accuracy\n u_con_c = self.df['ucorrect'][con_inds]\n m_con_c = self.df['mcorrect'][con_inds]\n u_incon_c = self.df['ucorrect'][incon_inds]\n m_incon_c = self.df['mcorrect'][incon_inds]\n stats['u_acc_con_effect'] = u_con_c.mean() - u_incon_c.mean()\n stats['m_acc_con_effect'] = m_con_c.mean() - m_incon_c.mean()\n return stats\n\n def get_stats(self):\n \"\"\"Calculate summary statistics for user and model behavior.\n\n Returns\n -------\n stats (dict): Summary statistics on the accuracy and\n response times for both user and model (e.g. 
the switch cost \n and congruency effect).\n \"\"\"\n\n stats = {}\n stats.update(self.switch_cost())\n stats.update(self.congruency_effect())\n stats['u_accuracy'] = self.df['ucorrect'].mean()\n stats['m_accuracy'] = self.df['mcorrect'].mean()\n urts = self.df['urt_ms']\n mrts = self.df['mrt_ms']\n stats['u_mean_rt'] = urts.mean()\n stats['m_mean_rt'] = mrts.mean()\n stats['u_rt_sd'] = urts.std()\n stats['m_rt_sd'] = mrts.std()\n self.summary_stats = stats\n return stats\n\n\nclass EbbFlowGameData():\n \"\"\"Container for data from a single Ebb and Flow game. Also has support \n for processing game data. This can be instantiated using one of two class \n constructors: \n preprocessed_format: For data that needs to be processed\n (transformed, filtered, etc.). \n processed_format: For data that has already been processed. \n\n Data are maintained in two formats which have a one-to-one \n correspondence: a discrete format, which is easier to analyze, and a \n continuous format, which is the format supplied to the model. \n If instantiated with the preprocessed constructor, discrete and continuous \n are initialized as empty arrays and populated sequentially during \n processing. \n\n Args\n ----\n preprocessed (dict): Preprocessed game data. Set to None\n if instantiated with the processed_format constructor.\n discrete (dict): The discrete format of the data. See discrete_fields. \n continuous (dict): The continuous format of the data. \n See continuous_fields.\n params (dict): Processing parameters; see e.g. train_transform_kwargs\n in default config (config/model_config.yaml). \n start_time (int): The time within the game to start collecting trials.\n Set to None if instantiated with the processed_format constructor.\n resampling_info (dict): Information used to generate resampled responses.\n Set to None if instantiated with the processed_format constructor.\n game_id (int): The ID of this gameplay as it is stored \n in the Lumosity database. \n \"\"\"\n\n continuous_fields = ('urespdir', 'point_dir', 'mv_dir', 'task_cue')\n discrete_fields = ('onset', 'offset', 'urespdir', 'point_dir',\n 'mv_dir', 'task_cue', 'urt_samples', 'urt_ms', \n 'trial_type')\n stats_fields = ('prev_point_dir', 'prev_mv_dir', 'prev_task_cue',\n 'm_prev_correct', 'u_prev_correct',\n 'is_switch', 'is_congruent', 'correct_dir',\n 'mrespdir', 'mcorrect', 'ucorrect', 'mrt_ms', 'mrt_abs')\n dims = (4, 4, 4, 2)\n direction_labels = ('L', 'R', 'U', 'D')\n task_labels = ('M', 'P')\n extra_time_for_smooth = 2500 # ms\n supported_resampling = ('kde')\n\n def __init__(self, preprocessed, discrete, continuous, \n params, start_time, resampling_info, \n game_id):\n self.preprocessed = preprocessed\n self.discrete = discrete\n self.continuous = continuous\n self.start_time = start_time\n self.step = params['step_size']\n self.num_steps_short_win = int(\n np.rint(params['duration'] / self.step))\n self.num_steps_long_win = int(\n np.rint((params['duration'] \n + self.extra_time_for_smooth) / self.step))\n self.max_t = params['duration'] + self.extra_time_for_smooth\n self.params = params\n self.resampling_type = params.get('data_augmentation_type', None)\n self.resampling_info = resampling_info\n # If trials are being resampled during processing, rs_frac is \n # the proportion of games which are resampled. The rest are \n # not resampled (i.e., the original sequence of trials is preserved). 
\n rs_frac = params.get('aug_resample_frac', 0.75)\n self.do_resampling = bernoulli(rs_frac).rvs(1)[0]\n self.is_valid = True\n self.game_id = game_id\n # Optionally match the accuracy of the resampled data \n # to the user's accuracy.\n self.match_accuracy = params.get('match_accuracy', False)\n # rt_tol is the min time after stim onset that a response can occur\n # (see _get_model_rt).\n self.rt_tol = 100 / self.step # samples\n\n @property\n def _n_trials(self):\n if self.resampling_info is None or not self.do_resampling:\n return len(self.preprocessed['urespdir'])\n else:\n return float('inf')\n\n @classmethod\n def processed_format(cls, discrete, continuous, params, game_id):\n \"\"\"Return a class instance for game data that has already \n been processed. See class docstring.\n \"\"\"\n\n preprocessed = None\n start_time = None\n resampler = None\n return cls(preprocessed, discrete, continuous, params, start_time,\n resampler, game_id)\n\n @classmethod\n def preprocessed_format(cls, preprocessed, params, start_time, \n resampling_info=None):\n \"\"\"Return a class instance for game data that needs to be processed.\n See class docstring.\n \"\"\"\n\n discrete, continuous = cls._initialize_arrays(params)\n game_id = preprocessed['game_result_id']\n return cls(preprocessed, discrete, continuous, params, start_time, \n resampling_info, game_id)\n\n @classmethod\n def _initialize_arrays(cls, params):\n discrete = defaultdict(list)\n num_samples = int(np.rint((params['duration'] \n + cls.extra_time_for_smooth)\n / params['step_size']))\n continuous = {key: np.zeros((num_samples, d))\n for key, d in zip(cls.continuous_fields,\n cls.dims)}\n return discrete, continuous\n\n def standard_prep(self):\n \"\"\"Process the game data; optionally resample trials. \n This is only called if instantiated with the preprocessed_format \n constructor. The discrete and continuous arrays are populated \n sequentially one trial at a time. 
\n \"\"\"\n\n trial_ind = self._check_first_trial()\n if np.isnan(trial_ind):\n self.is_valid = False\n return\n\n abs_offset_ms = 0\n if self.resampling_info is None:\n if trial_ind > 0:\n prev_cue_str = self.preprocessed['task_cue'][trial_ind - 1]\n prev_cue = self._map_str_to_num(prev_cue_str, 'task_cue')\n else: \n self.is_valid = False\n return\n else:\n prev_cue = self._map_str_to_num(\n random.sample(self.task_labels, 1)[0],\n 'task_cue')\n while trial_ind < self._n_trials:\n trial_info, abs_offset_ms = self._get_trial_info_preprocessed(\n trial_ind, abs_offset_ms, prev_cue)\n prev_cue = trial_info['task_cue']\n trial_ind += 1\n\n if trial_info['offset'] < self.num_steps_short_win: \n self._update_discrete(trial_info)\n if trial_info['offset'] < self.num_steps_long_win: \n self._update_continuous(trial_info)\n if abs_offset_ms > self.max_t:\n break\n\n if len(self.discrete['mv_dir']) < self.params['min_trials']:\n self.is_valid = False\n\n def _map_str_to_num(self, str_val, key):\n # Map stimulus/response direction string to numeric value\n if key in ['urespdir', 'mv_dir', 'point_dir']:\n labels = self.direction_labels\n else:\n labels = self.task_labels\n num_val = [ind for ind, val in enumerate(labels) if val == str_val]\n return num_val[0]\n\n def _get_trial_info_preprocessed(self, trial_ind, abs_offset_ms, \n prev_cue):\n if self.resampling_info is not None and self.do_resampling:\n trial_info = {key: None for key in self.continuous_fields}\n trial_info, prev_cue = self._resample_trial(trial_info, prev_cue)\n trial_info = self._get_resampler_trial_type(trial_info, prev_cue)\n if self.match_accuracy:\n trial_info = self._adjust_trial_response(trial_info)\n else:\n trial_info = {key: self._map_str_to_num(\n self.preprocessed[key][trial_ind], key)\n for key in self.continuous_fields}\n trial_info['urt_ms'] = self.preprocessed['urt_ms'][trial_ind]\n trial_info = self._get_resampler_trial_type(trial_info, prev_cue)\n\n # Floor is used to ensure there is a gap between consecutive stimuli\n trial_info['onset'] = int(np.floor(abs_offset_ms / self.step))\n trial_info['offset'] = int(\n np.floor((abs_offset_ms\n + trial_info['urt_ms']\n + self.params['post_resp_buffer']) \n / self.step)) - 1\n trial_info['urt_samples'] = int(\n np.rint(trial_info['urt_ms'] / self.step))\n abs_offset_ms += trial_info['urt_ms'] + self.params['post_resp_buffer']\n return trial_info, abs_offset_ms\n\n def _resample_trial(self, trial_info, prev_cue):\n # Trial types: \n # 0 = congruent + stay\n # 1 = incongruent + stay\n # 2 = congruent + switch\n # 3 = incongruent + switch\n\n if np.isnan(prev_cue):\n prev_cue = random.sample([0, 1], 1)[0]\n # Randomly sample condition\n con_ind = np.random.choice(4)\n this_dist = self.resampling_info['rts'][con_ind]\n if self.resampling_type in ['kde', 'kde_no_switch_cost']:\n new_rt_ms = this_dist.resample(size=1)\n\n # Stay vs. switch\n if con_ind in [2, 3]:\n # Switch trial\n if prev_cue == 0:\n new_cue = 1\n else:\n new_cue = 0\n else:\n # Stay trial\n new_cue = prev_cue\n\n # Congruent vs. 
incongruent\n new_mv_dir = self._map_str_to_num(\n random.sample(self.direction_labels, 1)[0],\n 'mv_dir')\n if con_ind in [0, 2]:\n # Congruent trial\n new_pt_dir = new_mv_dir\n else:\n # Incongruent trial\n other_dirs = [i for i in range(4) if i != new_mv_dir]\n new_pt_dir = random.sample(other_dirs, 1)[0]\n\n # Response: set to correct dir; optionally adjusted later\n new_uresp = self._get_correct_dir(new_cue, new_mv_dir, new_pt_dir)\n\n new_data = {'urt_ms': new_rt_ms[0][0], 'task_cue': new_cue, \n 'mv_dir': new_mv_dir, 'point_dir': new_pt_dir,\n 'urespdir': new_uresp}\n trial_info.update(new_data)\n return trial_info, prev_cue\n\n def _adjust_trial_response(self, trial_info):\n # Randomly change the resampled response to an incorrect direction\n # at a rate determined by the user's accuracy for this trial type.\n con_acc = self.resampling_info['acc'][trial_info['trial_type']]\n is_correct = bernoulli.rvs(con_acc)\n new_correct_dir = self._get_correct_dir(\n trial_info['task_cue'], trial_info['mv_dir'], \n trial_info['point_dir'])\n if is_correct:\n trial_info['urespdir'] = new_correct_dir\n else:\n incorrect_dirs = [i for i in range(4) if i != new_correct_dir]\n trial_info['urespdir'] = random.sample(incorrect_dirs, 1)\n return trial_info\n\n def _get_resampler_trial_type(self, trial_info, prev_cue):\n is_congruent = self._is_congruent(trial_info['mv_dir'], \n trial_info['point_dir'])\n if np.isnan(prev_cue):\n # No previous trial to calculate switch;\n # treat as a switch for calculating resampling dists.\n is_switch = np.nan\n else:\n is_switch = self._is_switch(trial_info['task_cue'], prev_cue)\n\n if np.isnan(is_switch):\n trial_info['trial_type'] = np.nan\n self.is_valid = False\n elif is_congruent and not is_switch:\n trial_info['trial_type'] = 0\n elif is_congruent and is_switch:\n trial_info['trial_type'] = 2\n elif not is_congruent and not is_switch:\n trial_info['trial_type'] = 1\n elif not is_congruent and is_switch:\n trial_info['trial_type'] = 3\n return trial_info\n\n def _check_first_trial(self):\n game_t_offs = np.array(self.preprocessed['time_offset'])\n try:\n trial_ind = np.nonzero(game_t_offs >= self.start_time)[0][0]\n except IndexError:\n trial_ind = np.nan\n return trial_ind\n\n def _update_discrete(self, trial_info):\n for key in self.discrete_fields:\n self.discrete[key].append(trial_info[key])\n\n def _update_continuous(self, trial_info):\n for key in ['mv_dir', 'point_dir', 'task_cue']:\n self.continuous[key][trial_info['onset']:trial_info['offset'],\n trial_info[key]] = 1\n abs_rt = trial_info['onset'] + trial_info['urt_samples']\n self.continuous['urespdir'][abs_rt, trial_info['urespdir']] = 1\n\n def get_extra_stats(self, output_rates=None, latents=None, \n pca_latents=None, **kwargs):\n \"\"\"Add extra information to the discrete format \n (see self.stats_fields): congruency, stay/switch, model output RTs, \n switch cost,congruency effect, and previous trial info. \n Also window the output rates, model latents, and PCA-tranformed\n model latents for each trial. \n\n Args\n ----\n output_rates (NumPy array, optional): The rates for each response\n direction generated in a forward pass of the model. \n latents (NumPy array, optional): The latent state variables generated\n in a forward pass of the model. \n pca_latents (NumPy array, optional): PCA-transformed latent\n state variables. 
\n kwargs (dict, optional): Optional parameters which determine the \n length of each trial in the windowed variables:\n t_pre (int): Time in ms prior to stimulus onset (default 100).\n t_post (int): Time in ms after stimulus onset (default 1600). \n\n Returns\n -------\n win_vars (dict): Windowed rates, model latents, and PCA-transformed\n model latents for each trial. \n \"\"\"\n\n win_rates, win_latents, win_pca_latents = None, None, None\n if output_rates is not None or latents is not None:\n t_pre = kwargs.get('t_pre', 100) # ms\n t_post = kwargs.get('t_post', 1600) # ms\n n_pre = np.round(t_pre / self.step).astype('int')\n n_post = np.round(t_post / self.step).astype('int')\n win_length = n_pre + n_post\n win_rates = np.zeros((win_length, 0, 4))\n if latents is not None:\n win_latents = np.zeros((win_length, 0, latents.shape[1]))\n win_pca_latents = np.zeros((win_length, 0, pca_latents.shape[1]))\n\n n_trials = len(self.discrete['mv_dir'])\n self.discrete.update({key: [] for key in self.stats_fields})\n for n in range(n_trials):\n tr = {key: np.nan for key in self.stats_fields}\n if n != 0:\n tr.update(prev_trial_info)\n\n tr.update({key: self.discrete[key][n] \n for key in self.discrete_fields})\n\n # Stimulus info\n tr['correct_dir'] = self._get_correct_dir(tr['task_cue'], \n tr['mv_dir'], \n tr['point_dir'])\n tr['is_congruent'] = self._is_congruent(tr['mv_dir'], \n tr['point_dir'])\n if n != 0:\n tr['is_switch'] = self._is_switch(tr['task_cue'], \n tr['prev_task_cue'])\n\n # User response info\n tr['ucorrect'] = self._is_correct(tr['urespdir'], \n tr['correct_dir'])\n\n # Model response info\n if output_rates is not None:\n tr['mrt_ms'], tr['mrt_abs'], tr['mrespdir'] = \\\n self._get_model_rt(tr['onset'], tr['offset'], \n output_rates)\n tr['mcorrect'] = self._is_correct(tr['mrespdir'], \n tr['correct_dir'])\n\n # Window rates and model latents for current trial\n if output_rates is not None:\n this_win_rates = self._get_windowed(\n tr['onset'], output_rates, win_length, n_pre, n_post)\n win_rates = np.concatenate((win_rates, this_win_rates), 1)\n if latents is not None:\n this_win_latents = self._get_windowed(\n tr['onset'], latents, win_length, n_pre, n_post)\n win_latents = np.concatenate(\n (win_latents, this_win_latents), 1)\n this_win_pca_latents = self._get_windowed(\n tr['onset'], pca_latents, win_length, n_pre, n_post)\n win_pca_latents = np.concatenate(\n (win_pca_latents, this_win_pca_latents), 1)\n\n # Update discrete format and previous trial info\n self._add_stats_to_discrete(tr)\n prev_trial_info = {'prev_task_cue': tr['task_cue'], \n 'prev_point_dir': tr['point_dir'],\n 'prev_mv_dir': tr['mv_dir'],\n 'm_prev_correct': tr['mcorrect'],\n 'u_prev_correct': tr['ucorrect']}\n\n win_vars = {'rates': win_rates,\n 'latents': win_latents,\n 'pca_latents': win_pca_latents}\n\n return win_vars\n\n def _get_windowed(self, onset, data, win_length, n_pre, n_post):\n dim = data.shape[1]\n windowed = data[onset - n_pre:onset + n_post, :]\n if windowed.shape[0] < win_length: # zero pad\n windowed = np.append(windowed, np.zeros(\n (win_length - windowed.shape[0], dim)), 0)\n return np.expand_dims(windowed, 1)\n\n def _to_pandas(self):\n dfs = []\n n_trials = len(self.discrete['mv_dir'])\n for n in range(n_trials):\n trial = {key: [val[n]] for key, val in self.discrete.items()}\n dfs.append(pd.DataFrame(trial))\n return pd.concat(dfs, ignore_index=True)\n\n def _add_stats_to_discrete(self, data):\n for key in self.stats_fields:\n self.discrete[key].append(data[key])\n\n def 
plot(self, rates=None, do_plot=False, stim_ylims=None, \n resp_ylims=None, **kwargs):\n \"\"\"Plot the continuous representation of the stimuli and responses\n for this game. For the responses, the continuous format \n of the user's responses is plotted (used to train the model). \n The responses generated by the model can optionally also be plotted. \n\n Args\n ----\n rates (NumPy array, optional): The responses generated by the model.\n do_plot (Boolean, optional): If True, the figure is plotted. \n\n Returns\n -------\n fig (matplotlib Figure): The generated figure. \n axes (matplotlib AxesSubplot): The figure axes, can be used for \n further tweaking. \n \"\"\"\n\n alphas = kwargs.get('alphas', None)\n textsize = 14\n if alphas is None:\n figsize = (10, 18)\n n_plots = 14\n fig, axes = plt.subplots(n_plots, 1, figsize=figsize)\n else:\n figsize = (10, 22)\n n_alphas = alphas.shape[1]\n n_plots = 14 + n_alphas\n fig, axes = plt.subplots(n_plots, 1, figsize=figsize)\n colors = ['royalblue', 'crimson', 'forestgreen', 'orange']\n n_time = self.continuous['point_dir'].shape[0]\n x_plot = np.arange(n_time) * self.step\n stimulus_ylims = [-0.5, 1.5] if stim_ylims is None else stim_ylims\n response_ylims = [-0.2, 1.2] if resp_ylims is None else resp_ylims\n\n # Pointing stimuli\n for d in range(4):\n sns.lineplot(x=x_plot, y=self.continuous['point_dir'][:, d], \n ax=axes[d], color=colors[0])\n axes[d].set_ylabel(self.direction_labels[d], fontsize=textsize)\n if d == 0:\n axes[d].set_title('pointing stimuli', fontsize=textsize)\n\n # Moving stimuli\n for d in range(4):\n sns.lineplot(x=x_plot, y=self.continuous['mv_dir'][:, d], \n ax=axes[d + 4], color=colors[1])\n axes[d + 4].set_ylabel(self.direction_labels[d], fontsize=textsize)\n if d == 0:\n axes[d + 4].set_title('moving stimuli', fontsize=textsize)\n\n # Task cues\n for d in range(2):\n sns.lineplot(x=x_plot, y=self.continuous['task_cue'][:, d], \n ax=axes[d + 8], color=colors[2])\n axes[d + 8].set_ylabel(self.task_labels[d], fontsize=textsize)\n if d == 0:\n axes[d + 8].set_title('task cues', fontsize=textsize)\n\n # Responses\n for d in range(4):\n # User\n user_resp = self.continuous['urespdir'][:, d]\n sns.lineplot(x=x_plot, y=user_resp, ax=axes[d + 10], color='k', \n zorder=1, label='user')\n\n if rates is not None:\n # Model\n sns.lineplot(x=x_plot, y=rates[:, d], ax=axes[d + 10], \n color=colors[3], zorder=2, label='model')\n this_mrts = [rt for rt, rdir in zip(self.discrete['mrt_abs'],\n self.discrete['mrespdir'])\n if rdir == d]\n\n sns.scatterplot(x=x_plot[this_mrts], \n y=rates[this_mrts, d], s=15, \n ax=axes[d + 10], zorder=3, \n label='response times')\n axes[d + 10].set_ylabel(self.direction_labels[d], \n fontsize=textsize)\n\n if d == 0:\n axes[d + 10].set_title('responses', fontsize=textsize)\n axes[d + 10].legend(loc=1, fontsize=textsize - 5, \n frameon=False,\n columnspacing=1.25, ncol=3)\n else:\n axes[d + 10].get_legend().remove()\n\n # Alphas\n if alphas is not None:\n for d in range(n_alphas):\n sns.lineplot(x=x_plot, y=alphas[:, d], \n ax=axes[d + 14], color=colors[2])\n axes[d + 14].set_ylabel(f'alpha {d}', fontsize=textsize)\n if d == 0:\n axes[d + 14].set_title('alphas', fontsize=textsize)\n\n # Adjust\n [axes[d].set_ylim(stimulus_ylims) for d in range(10)]\n [axes[d].set_ylim(response_ylims) for d in range(10, 14)]\n [axes[d].set_yticks([]) for d in range(n_plots)]\n [axes[d].set_xticklabels([]) for d in range(n_plots - 1)]\n t_max = n_time * self.step\n axes[n_plots - 1].set_xticks(np.arange(0, t_max + 1000, 
1000))\n axes[n_plots - 1].tick_params(axis=\"x\", labelsize=textsize)\n axes[n_plots - 1].set_xlabel('time (ms)', fontsize=textsize)\n [axes[d].set_xlim([0, x_plot[-1]]) for d in range(n_plots)]\n\n plt.tight_layout()\n if do_plot:\n plt.show()\n return fig, axes\n\n def _get_correct_dir(self, cue, mv, pt):\n return mv if cue == 0 else pt\n\n def _is_congruent(self, mv, pt):\n return 1 if mv == pt else 0\n\n def _is_switch(self, cue, prev_cue):\n return 0 if cue == prev_cue else 1\n\n def _is_correct(self, respdir, correct_dir):\n return 1 if respdir == correct_dir else 0\n\n def _get_model_rt(self, onset, offset, rates):\n # Get the response time for the current trial from the generated rates.\n win_on = np.round(onset + self.rt_tol).astype('int')\n win_rates = rates[win_on:(offset + 1), :]\n max_ind = np.unravel_index(np.argmax(\n win_rates, axis=None), win_rates.shape)\n mrespdir = max_ind[1]\n if self.params['rt_method'] == 'max':\n # Response time is calculated as the time at which the maximum \n # activation occurs across all four response directions, \n # within the stimulus window. \n mrt_abs = max_ind[0] + win_on # samples, relative to 0\n elif self.params['rt_method'] == 'center_of_mass':\n # Response time is calculated as the center of mass\n # of the rate with the highest activation. \n max_rate = win_rates[:, mrespdir]\n csum = np.cumsum(max_rate)\n com = np.nonzero(csum >= (csum[-1] / 2))[0][0]\n mrt_abs = win_on + com # samples, relative to zero\n mrt = self.step * (mrt_abs - onset) # ms\n return mrt, mrt_abs, mrespdir\n\n def _get_field_by_trial_type(self, trial_type, key):\n inds = [i for i, val in enumerate(self.discrete['trial_type'])\n if val == trial_type]\n return [self.discrete[key][i] for i in inds]\n\n\ndef _files_exist(files):\n # Return False if len(files) is zero or if any of the files do not exist.\n return len(files) != 0 and all([os.path.exists(f) for f in files])\n" ]
[ [ "numpy.expand_dims", "torch.load", "torch.cat", "numpy.squeeze", "numpy.cumsum", "pandas.DataFrame", "numpy.round", "numpy.concatenate", "scipy.stats.gaussian_kde", "scipy.stats.bernoulli.rvs", "numpy.mean", "torch.save", "matplotlib.pyplot.tight_layout", "numpy.arange", "torch.tensor", "numpy.std", "numpy.argmax", "numpy.zeros", "pandas.concat", "numpy.nonzero", "numpy.random.choice", "numpy.isnan", "numpy.rint", "numpy.floor", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.subplots", "scipy.stats.bernoulli" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
jihwanlee-alphago/aqt
[ "dfc0761f8db13b10174550979b0a3c8b32fd3d01", "dfc0761f8db13b10174550979b0a3c8b32fd3d01" ]
[ "aqt/jax_legacy/jax/compute_cost_utils_test.py", "aqt/jax_legacy/jax/wmt_mlperf/models_test.py" ]
[ "# Copyright 2022 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for aqt.jax.compute_cost_utils.\"\"\"\n\nimport logging\n\nfrom absl import flags\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nfrom aqt.jax_legacy.jax import compute_cost_utils\nfrom aqt.jax_legacy.jax import flax_layers as aqt_flax_layers\nfrom aqt.jax_legacy.jax import get_bounds\nfrom aqt.jax_legacy.jax import hlo_utils\nfrom aqt.jax_legacy.jax import quant_config\nfrom aqt.jax_legacy.jax import quantization\nfrom aqt.jax_legacy.jax.quantization import QuantOps\nfrom aqt.jax_legacy.jax.quantization import QuantType\nfrom flax import linen as nn\nfrom jax import random\nfrom jax._src.lax import convolution as lax_convolution\nfrom jax._src.lax import lax\nfrom jax.nn import initializers\nimport jax.numpy as jnp\nimport numpy as onp\n\nFLAGS = flags.FLAGS\n\n\nclass ComputeCostUtilsTest(parameterized.TestCase):\n\n def setUp(self):\n super(ComputeCostUtilsTest, self).setUp()\n self.rng_key = random.PRNGKey(0)\n\n def compare_hlo_instructions(self, hlo_no_annotation, hlo_w_annotation):\n \"\"\"Compares two HLO models to check if they only differ in metadata info.\"\"\"\n instrs_n = []\n instrs_w = []\n # gather instructions from both HLO models\n for computation in hlo_no_annotation.computations:\n for instr in computation.instructions:\n instrs_n.append(instr)\n for computation in hlo_w_annotation.computations:\n for instr in computation.instructions:\n instrs_w.append(instr)\n\n self.assertEqual(len(instrs_n), len(instrs_w))\n for i, _ in enumerate(instrs_n):\n # check instructions with the opcode 'convolution'\n # the metadata field for instrs_w and instrs_n should be different.\n if (instrs_n[i].opcode == 'convolution' and\n instrs_w[i].opcode == 'convolution'):\n self.assertNotEqual(instrs_n[i].metadata, instrs_w[i].metadata)\n\n # remove metadata op_type and op_name\n instrs_n[i].metadata.op_type = ''\n instrs_w[i].metadata.op_type = ''\n instrs_n[i].metadata.op_name = ''\n instrs_w[i].metadata.op_name = ''\n # compare the rest of the instructions.\n self.assertEqual(instrs_n[i], instrs_w[i])\n\n class TestModelWith1Dense(nn.Module):\n \"\"\"Test model with a single DenseAqt layer.\"\"\"\n\n @nn.compact\n def __call__(self, inputs, hparams, num_classes, dtype=jnp.float32):\n output = aqt_flax_layers.DenseAqt(\n features=num_classes,\n dtype=dtype,\n train=False,\n quant_context=quant_config.QuantContext(\n update_bounds=False, collect_acts_stats=False),\n paxis_name='batch',\n hparams=hparams,\n )(inputs, padding_mask=None)\n return output\n\n class TestModelWith1Conv(nn.Module):\n \"\"\"Test model with a single ConvAqt layer.\"\"\"\n\n @nn.compact\n def __call__(self,\n inputs,\n hparams,\n kernel_size,\n num_filters,\n strides,\n dtype=jnp.float32):\n output = aqt_flax_layers.ConvAqt(\n features=num_filters,\n kernel_size=kernel_size,\n strides=strides,\n use_bias=False,\n dtype=dtype,\n train=False,\n 
quant_context=quant_config.QuantContext(update_bounds=False),\n paxis_name='batch',\n hparams=hparams)(\n inputs)\n return output\n\n class TestModelWith1DynamicMatmul(nn.Module):\n \"\"\"Test model with a single dynamic matmul.\"\"\"\n\n @nn.compact\n def __call__(self, lhs_act, rhs_act, lhs_prec, rhs_prec):\n get_bounds_hyper = get_bounds.GetBounds.Hyper(\n initial_bound=10.0,\n stddev_coeff=0,\n absdev_coeff=0,\n mix_coeff=0,\n granularity=quant_config.QuantGranularity.PER_TENSOR)\n lhs_act_hparams = QuantOps.ActHParams(\n input_distribution='symmetric',\n bounds=get_bounds_hyper,\n prec=lhs_prec,\n half_shift=False)\n rhs_act_hparams = QuantOps.ActHParams(\n input_distribution='symmetric',\n bounds=get_bounds_hyper,\n prec=rhs_prec,\n half_shift=False)\n lhs_get_bounds_params = get_bounds.GetBounds.Params(\n update_stats=False, update_bounds=False, module_name='lhs')\n rhs_get_bounds_params = get_bounds.GetBounds.Params(\n update_stats=False, update_bounds=False, module_name='rhs')\n output = quantization.quantized_dynamic_dot_general(\n lhs_act=lhs_act,\n rhs_act=rhs_act,\n lhs_act_hparams=lhs_act_hparams,\n rhs_act_hparams=rhs_act_hparams,\n dot_dimension_numbers=(((1,), (0,)), ((), ())),\n quant_type=QuantType.AQT,\n lhs_get_bounds_params=lhs_get_bounds_params,\n rhs_get_bounds_params=rhs_get_bounds_params)\n return output\n\n @parameterized.named_parameters(\n # TestModelWith1Dense\n dict(\n testcase_name='single_dense_layer_bfloat16',\n modelclass=TestModelWith1Dense,\n input_shapes=[(1, 8)],\n model_kwargs={\n 'num_classes': 2,\n 'hparams': aqt_flax_layers.DenseAqt.HParams(\n weight_prec=None,\n quant_type=QuantType.FAKE_QUANT,\n quant_act=None,\n weight_quant_granularity=quant_config.QuantGranularity.PER_CHANNEL,\n weight_half_shift=False\n ),\n },\n expected_compute_cost=8 * 2 * (16 * 16),\n expected_compute_cost_ratio=1.0,\n expected_compute_cost_linear=8 * 2 * (16),\n expected_compute_cost_ratio_linear=1.0,\n expected_memory_cost=8 * 2 * (16),\n expected_memory_cost_ratio=1.0,\n ),\n dict(\n testcase_name='single_dense_layer_w8_a8',\n modelclass=TestModelWith1Dense,\n input_shapes=[(1, 8)],\n model_kwargs={\n 'num_classes': 2,\n 'hparams': aqt_flax_layers.DenseAqt.HParams(\n weight_prec=8,\n quant_type=QuantType.FAKE_QUANT,\n quant_act=QuantOps.ActHParams(\n input_distribution=QuantOps.ActHParams.InputDistribution.POSITIVE,\n prec=8,\n bounds=1.0,\n half_shift=False,\n ),\n weight_quant_granularity=quant_config.QuantGranularity.PER_CHANNEL,\n weight_half_shift=False\n ),\n },\n expected_compute_cost=8 * 2 * (8 * 8),\n expected_compute_cost_ratio=0.25,\n expected_compute_cost_linear=8 * 2 * (8),\n expected_compute_cost_ratio_linear=0.5,\n expected_memory_cost=8 * 2 * (8),\n expected_memory_cost_ratio=0.5,\n ),\n\n # TestModelWith1Conv\n dict(\n testcase_name='single_conv_layer_bfloat16',\n modelclass=TestModelWith1Conv,\n input_shapes=[(1, 8, 8, 3)],\n model_kwargs={\n 'kernel_size': (3, 3),\n 'num_filters': 16,\n 'strides': (1, 1),\n 'hparams': aqt_flax_layers.ConvAqt.HParams(\n weight_prec=None,\n quant_type=QuantType.FAKE_QUANT,\n quant_act=None,\n weight_half_shift=False,\n ),\n },\n expected_compute_cost=(3 * 3) * (8 * 8) * 3 * 16 * (16 * 16),\n expected_compute_cost_ratio=1.0,\n expected_compute_cost_linear=(3 * 3) * (8 * 8) * 3 * 16 * (16),\n expected_compute_cost_ratio_linear=1.0,\n expected_memory_cost=(3 * 3) * 3 * 16 * (16),\n expected_memory_cost_ratio=1.0,\n ),\n dict(\n testcase_name='single_conv_layer_bfloat16_strided',\n modelclass=TestModelWith1Conv,\n 
input_shapes=[(1, 8, 8, 3)],\n model_kwargs={\n 'kernel_size': (3, 3),\n 'num_filters': 16,\n 'strides': (4, 2),\n 'hparams': aqt_flax_layers.ConvAqt.HParams(\n weight_prec=None,\n quant_type=QuantType.FAKE_QUANT,\n quant_act=None,\n weight_half_shift=False,\n ),\n },\n expected_compute_cost=(3 * 3) * ((8 / 4) * (8 / 2)) * 3 * 16 * (16 * 16),\n expected_compute_cost_ratio=1.0,\n expected_compute_cost_linear=(3 * 3) * ((8 / 4) * (8 / 2)) * 3 * 16 * (16),\n expected_compute_cost_ratio_linear=1.0,\n expected_memory_cost=(3 * 3) * 3 * 16 * (16),\n expected_memory_cost_ratio=1.0,\n ),\n dict(\n testcase_name='single_conv_layer_bfloat16_3d',\n modelclass=TestModelWith1Conv,\n input_shapes=[(1, 8, 8, 8, 3)],\n model_kwargs={\n 'kernel_size': (3, 3, 3),\n 'num_filters': 16,\n 'strides': (1, 1, 1),\n 'hparams': aqt_flax_layers.ConvAqt.HParams(\n weight_prec=None,\n quant_type=QuantType.FAKE_QUANT,\n quant_act=None,\n weight_half_shift=False,\n ),\n },\n expected_compute_cost=(3 * 3 * 3) * (8 * 8 * 8) * 3 * 16 * (16 * 16),\n expected_compute_cost_ratio=1.0,\n expected_compute_cost_linear=(3 * 3 * 3) * (8 * 8 * 8) * 3 * 16 * (16),\n expected_compute_cost_ratio_linear=1.0,\n expected_memory_cost=(3 * 3 * 3) * 3 * 16 * (16),\n expected_memory_cost_ratio=1.0,\n ),\n dict(\n testcase_name='single_conv_layer_w4_a2',\n modelclass=TestModelWith1Conv,\n input_shapes=[(1, 8, 8, 3)],\n model_kwargs={\n 'kernel_size': (3, 3),\n 'num_filters': 16,\n 'strides': (1, 1),\n 'hparams': aqt_flax_layers.ConvAqt.HParams(\n weight_prec=4,\n quant_type=QuantType.FAKE_QUANT,\n quant_act=QuantOps.ActHParams(\n input_distribution=QuantOps.ActHParams.InputDistribution.POSITIVE,\n prec=2,\n bounds=1.0,\n half_shift=False,\n ),\n weight_half_shift=False,\n ),\n },\n expected_compute_cost=(3 * 3) * (8 * 8) * 3 * 16 * (4 * 2),\n expected_compute_cost_ratio=0.03125,\n expected_compute_cost_linear=(3 * 3) * (8 * 8) * 3 * 16 * (4),\n expected_compute_cost_ratio_linear=0.25,\n expected_memory_cost=(3 * 3) * 3 * 16 * (4),\n expected_memory_cost_ratio=0.25,\n ),\n # TestModelWith1DynamicMatmul\n dict(\n testcase_name='single_dynamic_matmul_layer_bfloat16',\n modelclass=TestModelWith1DynamicMatmul,\n input_shapes=[(1, 8), (8, 1)],\n model_kwargs={'lhs_prec': None,\n 'rhs_prec': None},\n expected_compute_cost=8 * (16 * 16),\n expected_compute_cost_ratio=1.0,\n expected_compute_cost_linear=8 * (16),\n expected_compute_cost_ratio_linear=1.0,\n expected_memory_cost=0,\n expected_memory_cost_ratio=1.0,\n ),\n dict(\n testcase_name='single_dynamic_matmul_layer_l8_r8',\n modelclass=TestModelWith1DynamicMatmul,\n input_shapes=[(1, 8), (8, 1)],\n model_kwargs={'lhs_prec': 8,\n 'rhs_prec': 8},\n expected_compute_cost=8 * (8 * 8),\n expected_compute_cost_ratio=0.25,\n expected_compute_cost_linear=8 * 8,\n expected_compute_cost_ratio_linear=0.5,\n expected_memory_cost=0,\n expected_memory_cost_ratio=1.0,\n ),\n dict(\n testcase_name='single_dynamic_matmul_layer_l8_r4',\n modelclass=TestModelWith1DynamicMatmul,\n input_shapes=[(1, 8), (8, 1)],\n model_kwargs={'lhs_prec': 8,\n 'rhs_prec': 4},\n expected_compute_cost=8 * (8 * 4),\n expected_compute_cost_ratio=0.125,\n expected_compute_cost_linear=8 * (8),\n expected_compute_cost_ratio_linear=0.5,\n expected_memory_cost=0,\n expected_memory_cost_ratio=1.0,\n ),\n ) # pylint: disable=line-too-long\n def test_estimate_simple_model_cost(\n self, modelclass, input_shapes, model_kwargs, expected_compute_cost,\n expected_compute_cost_ratio, expected_compute_cost_linear,\n expected_compute_cost_ratio_linear, 
expected_memory_cost,\n expected_memory_cost_ratio):\n module = modelclass()\n input_shapes_with_type = [(sh, jnp.float32) for sh in input_shapes]\n dummy_inputs = [\n jnp.ones(input_shape, dtype=dtype)\n for (input_shape, dtype) in input_shapes_with_type\n ]\n init_state = module.init(random.PRNGKey(0), *dummy_inputs, **model_kwargs)\n\n hlo_proto = hlo_utils.load_hlo_proto_from_model(module, init_state,\n input_shapes,\n **model_kwargs)\n compute_result = compute_cost_utils.estimate_compute_cost(hlo_proto)\n memory_result = compute_cost_utils.estimate_memory_cost(hlo_proto)\n logging.info('compute cost result is %s', compute_result)\n logging.info('memory cost result is %s', memory_result)\n self.assertEqual(compute_result['compute_cost'], expected_compute_cost)\n self.assertEqual(memory_result['memory_cost'], expected_memory_cost)\n self.assertEqual(compute_result['compute_cost_ratio_to_bfloat16'],\n expected_compute_cost_ratio)\n self.assertEqual(memory_result['memory_cost_ratio_to_bfloat16'],\n expected_memory_cost_ratio)\n self.assertEqual(compute_result['compute_cost_linear'],\n expected_compute_cost_linear)\n self.assertEqual(compute_result['compute_cost_ratio_to_bfloat16_linear'],\n expected_compute_cost_ratio_linear)\n\n @parameterized.named_parameters(\n # TestModelWith1Dense\n dict(\n testcase_name='single_dense_layer_bfloat16_batch_size',\n modelclass=TestModelWith1Dense,\n input_shape_per_sample=(16,),\n model_kwargs={\n 'num_classes':\n 20,\n 'hparams':\n aqt_flax_layers.DenseAqt.HParams(\n weight_prec=None,\n quant_act=None,\n quant_type=QuantType.FAKE_QUANT,\n weight_quant_granularity=quant_config.QuantGranularity\n .PER_CHANNEL,\n weight_half_shift=False)\n },\n ),\n # TestModelWith1Conv\n dict(\n testcase_name='single_conv_layer_bfloat16_batch_size',\n modelclass=TestModelWith1Conv,\n input_shape_per_sample=(16, 16, 3),\n model_kwargs={\n 'kernel_size': (3, 3),\n 'num_filters':\n 16,\n 'strides': (2, 2),\n 'hparams':\n aqt_flax_layers.ConvAqt.HParams(\n weight_prec=None,\n quant_act=None,\n quant_type=QuantType.FAKE_QUANT,\n weight_half_shift=False,\n )\n },\n ),\n )\n def test_batch_size_has_no_effect_on_cost(self, modelclass,\n input_shape_per_sample,\n model_kwargs):\n expected_compute_cost = None\n expected_memory_cost = None\n batch_size_list = [32, 64, 128, 256, 512, 1024]\n\n module = modelclass()\n\n # Sweep over the batch size list\n for batch_size in batch_size_list:\n input_shape = (batch_size,) + input_shape_per_sample\n init_state = module.init(\n random.PRNGKey(0), jnp.ones(input_shape, jnp.float32), **model_kwargs)\n hlo_proto = hlo_utils.load_hlo_proto_from_model(module, init_state,\n [input_shape],\n **model_kwargs)\n del init_state\n compute_result = compute_cost_utils.estimate_compute_cost(hlo_proto)\n memory_result = compute_cost_utils.estimate_memory_cost(hlo_proto)\n # Save the first cost and compare it with the rest\n if expected_compute_cost is None:\n expected_compute_cost = compute_result['compute_cost']\n else:\n self.assertEqual(compute_result['compute_cost'], expected_compute_cost)\n if expected_memory_cost is None:\n expected_memory_cost = memory_result['memory_cost']\n else:\n self.assertEqual(memory_result['memory_cost'], expected_memory_cost)\n\n @parameterized.named_parameters(\n dict(testcase_name='quant_8bit', weight_prec=8),\n dict(testcase_name='quant_4bit', weight_prec=4),\n )\n def test_check_value_inside_and_outside_of_context_conv_general(\n self, weight_prec):\n original_op_name = 'conv_general_dilated'\n # The 'name' in 
primitive should change in the context in 'flax_layers'\n # if the context is enabled\n self.assertEqual(original_op_name,\n lax_convolution.conv_general_dilated_p.name)\n\n with compute_cost_utils.ConvMetadataMonkeyPatch(\n weight_prec=weight_prec, act_prec=None):\n self.assertNotEqual(original_op_name,\n lax_convolution.conv_general_dilated_p.name)\n self.assertEqual(original_op_name,\n lax_convolution.conv_general_dilated_p.name)\n\n @parameterized.named_parameters(\n dict(testcase_name='quant_8bit', weight_prec=8, acts_prec=8),\n dict(testcase_name='quant_4bit', weight_prec=4, acts_prec=4),\n )\n def test_annotation_only_changes_hlo_metadata_conv(self, weight_prec,\n acts_prec):\n FLAGS.metadata_enabled = False\n quant_act = quantization.QuantOps.ActHParams(\n input_distribution=QuantOps.ActHParams.InputDistribution.SYMMETRIC,\n prec=acts_prec,\n bounds=1.0,\n half_shift=False)\n input_shape = (1, 8, 8, 3)\n module_no_annotation = aqt_flax_layers.ConvAqt(\n features=4,\n kernel_size=(3, 3),\n padding='VALID',\n paxis_name='batch',\n quant_context=quant_config.QuantContext(update_bounds=False),\n train=False,\n hparams=aqt_flax_layers.ConvAqt.HParams(\n weight_prec=weight_prec,\n quant_act=quant_act,\n quant_type=QuantType.FAKE_QUANT,\n weight_half_shift=False),\n kernel_init=initializers.ones,\n bias_init=initializers.ones,\n dtype=jnp.float32)\n\n init_state = module_no_annotation.init(self.rng_key,\n jnp.ones(input_shape, jnp.float32))\n output_no_annotation = module_no_annotation.apply(init_state,\n jnp.ones(input_shape))\n\n hlo_no_annotation = hlo_utils.load_hlo_proto_from_model(\n module_no_annotation, init_state, [input_shape])\n del init_state\n\n FLAGS.metadata_enabled = True\n module_w_annotation = aqt_flax_layers.ConvAqt(\n features=4,\n kernel_size=(3, 3),\n padding='VALID',\n paxis_name='batch',\n quant_context=quant_config.QuantContext(update_bounds=False),\n train=False,\n hparams=aqt_flax_layers.ConvAqt.HParams(\n weight_prec=weight_prec,\n quant_act=quant_act,\n quant_type=QuantType.FAKE_QUANT,\n weight_half_shift=False),\n kernel_init=initializers.ones,\n bias_init=initializers.ones,\n dtype=jnp.float32)\n\n init_state = module_w_annotation.init(self.rng_key,\n jnp.ones(input_shape, jnp.float32))\n output_w_annotation = module_w_annotation.apply(init_state,\n jnp.ones(input_shape))\n\n hlo_w_annotation = hlo_utils.load_hlo_proto_from_model(\n module_w_annotation, init_state, [input_shape])\n del init_state\n\n onp.testing.assert_array_equal(output_no_annotation, output_w_annotation)\n self.compare_hlo_instructions(hlo_no_annotation, hlo_w_annotation)\n\n @parameterized.named_parameters(\n dict(testcase_name='quant_8bit', weight_prec=8),\n dict(testcase_name='quant_4bit', weight_prec=4),\n )\n def test_check_value_inside_and_outside_of_context_dot_general(\n self, weight_prec):\n original_op_name = 'dot_general'\n # The 'name' in primitive should change in the context in 'flax_layers'\n # if the context is enabled.\n self.assertEqual(original_op_name, lax.dot_general_p.name)\n\n with compute_cost_utils.DotMetadataMonkeyPatch(\n lhs_prec=None, rhs_prec=weight_prec, rhs_is_weight=True):\n self.assertNotEqual(original_op_name, lax.dot_general_p.name)\n self.assertEqual(original_op_name, lax.dot_general_p.name)\n\n @parameterized.named_parameters(\n dict(\n testcase_name='quant_8bit',\n weight_prec=8,\n acts_prec=8,\n ),)\n def test_annotation_only_changes_hlo_metadata_dense(self, weight_prec,\n acts_prec):\n FLAGS.metadata_enabled = False\n quant_act = 
quantization.QuantOps.ActHParams(\n input_distribution=QuantOps.ActHParams.InputDistribution.SYMMETRIC,\n prec=acts_prec,\n bounds=1.0,\n half_shift=False)\n input_shape = (1, 16)\n module_no_annotation = aqt_flax_layers.DenseAqt(\n features=4,\n use_bias=False,\n quant_context=quant_config.QuantContext(\n update_bounds=False, collect_acts_stats=False),\n paxis_name='batch',\n train=False,\n hparams=aqt_flax_layers.DenseAqt.HParams(\n weight_prec=weight_prec,\n quant_act=quant_act,\n quant_type=QuantType.FAKE_QUANT,\n weight_quant_granularity=quant_config.QuantGranularity.PER_CHANNEL,\n weight_half_shift=False),\n dtype=jnp.float32)\n\n init_state = module_no_annotation.init(\n self.rng_key, jnp.ones(input_shape, jnp.float32), padding_mask=None)\n output_no_annotation = module_no_annotation.apply(\n init_state, jnp.ones(input_shape), padding_mask=None)\n\n hlo_no_annotation = hlo_utils.load_hlo_proto_from_model(\n module_no_annotation, init_state, [input_shape], padding_mask=None)\n del init_state\n\n FLAGS.metadata_enabled = True\n module_w_annotation = aqt_flax_layers.DenseAqt(\n features=4,\n use_bias=False,\n paxis_name='batch',\n train=False,\n quant_context=quant_config.QuantContext(\n update_bounds=False, collect_acts_stats=False),\n dtype=jnp.float32,\n hparams=aqt_flax_layers.DenseAqt.HParams(\n weight_prec=weight_prec,\n quant_act=quant_act,\n quant_type=QuantType.FAKE_QUANT,\n weight_quant_granularity=quant_config.QuantGranularity.PER_CHANNEL,\n weight_half_shift=False),\n )\n\n init_state = module_w_annotation.init(\n self.rng_key, jnp.ones(input_shape, jnp.float32), padding_mask=None)\n output_w_annotation = module_w_annotation.apply(\n init_state, jnp.ones(input_shape), padding_mask=None)\n\n hlo_w_annotation = hlo_utils.load_hlo_proto_from_model(\n module_w_annotation, init_state, [input_shape], padding_mask=None)\n del init_state\n\n onp.testing.assert_array_equal(output_no_annotation, output_w_annotation)\n self.compare_hlo_instructions(hlo_no_annotation, hlo_w_annotation)\n\n\nif __name__ == '__main__':\n FLAGS.metadata_enabled = True # Passes quantization information to HLO\n absltest.main()\n", "# Copyright 2022 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for wmt_mlperf.models.\"\"\"\n\nfrom unittest import mock\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nfrom aqt.jax_legacy.jax import get_bounds\nfrom aqt.jax_legacy.jax import hlo_utils\nfrom aqt.jax_legacy.jax import primitives\nfrom aqt.jax_legacy.jax import quant_config\nfrom aqt.jax_legacy.jax import test_utils\nfrom aqt.jax_legacy.jax.quantization import QuantType\nfrom aqt.jax_legacy.jax.wmt_mlperf import models\nfrom aqt.jax_legacy.jax.wmt_mlperf import training_hparams_generator_lib\nimport flax\nimport jax\nimport jax.numpy as jnp\nimport numpy as onp\n\n\nclass ModelsTest(parameterized.TestCase):\n\n def setUp(self):\n super(ModelsTest, self).setUp()\n self.input_shape = (1, 1)\n self.target_shape = (1, 1)\n self.inputs = 
jnp.ones(self.input_shape, dtype=jnp.float32)\n self.target = jnp.ones(self.target_shape, dtype=jnp.float32)\n self.key = jax.random.PRNGKey(0)\n self.transformer_small_kwargs = {\n 'vocab_size': 1,\n 'output_vocab_size': 1,\n 'max_len': 1,\n 'train': False,\n }\n self.transformer_full_kwargs = {\n 'vocab_size': 4,\n 'output_vocab_size': 4,\n 'max_len': 2,\n 'train': False,\n }\n\n def init_model(self, transformer_kwargs):\n model = models.Transformer(\n use_bfloat16=False,\n quant_context=quant_config.QuantContext(\n collect_acts_stats=False, update_bounds=False),\n dropout_rate=.1,\n attention_dropout_rate=.1,\n should_decode=False,\n **transformer_kwargs)\n state = model.init(self.key, jnp.zeros(self.input_shape, jnp.float32),\n jnp.zeros(self.target_shape, jnp.float32))\n return model, state\n\n @parameterized.named_parameters(\n dict(testcase_name='test_mlp_weight_quant_8bit', mlp_weight_prec=8),\n dict(testcase_name='test_mlp_weight_quant_4bit', mlp_weight_prec=4),\n dict(testcase_name='test_mlp_weight_quant_1bit', mlp_weight_prec=1),\n )\n @mock.patch.object(primitives, 'round_with_gradient')\n @mock.patch.object(primitives, 'floor_with_gradient')\n def test_mlp_weight_quant(self, floor_with_gradient, round_with_gradient,\n mlp_weight_prec):\n hparams = training_hparams_generator_lib.create_base_transformer_hparams(\n mlp_weight_prec=mlp_weight_prec,\n embedding_weight_prec=None,\n attention_weight_prec=None,\n mlp_pos_inputs_prec=None,\n mlp_pos_inputs_hyper=None,\n mlp_signed_inputs_prec=None,\n mlp_signed_inputs_hyper=None,\n attention_kqv_inputs_prec=None,\n attention_kqv_inputs_hyper=None,\n attention_out_inputs_prec=None,\n attention_out_inputs_hyper=None,\n logits_inputs_prec=None,\n logits_inputs_hyper=None,\n logits_via_embeddings=True,\n attention_act_q_inputs_prec=None,\n attention_act_q_inputs_hyper=None,\n attention_act_k_inputs_prec=None,\n attention_act_k_inputs_hyper=None,\n attention_act_probs_inputs_prec=None,\n attention_act_v_inputs_prec=None,\n attention_act_v_inputs_hyper=None,\n num_layers=1,\n emb_dim=1,\n num_heads=1,\n qkv_dim=1,\n mlp_dim=1,\n quant_type=QuantType.FAKE_QUANT)\n transformer_kwargs = self.transformer_small_kwargs\n transformer_kwargs['hparams'] = hparams\n round_with_gradient.side_effect = lambda x: x\n floor_with_gradient.side_effect = lambda x: x\n\n model, init_state = self.init_model(transformer_kwargs)\n # there are 2 MLP blocks in this model, with 2 quant ops each, so both clip\n # and round should be called 4 times each.\n round_with_gradient.assert_called_with(mock.ANY)\n self.assertEqual(round_with_gradient.call_count, 4)\n floor_with_gradient.assert_not_called()\n\n round_with_gradient.reset_mock()\n floor_with_gradient.reset_mock()\n\n output = model.apply(init_state, self.inputs, self.target)\n self.assertEqual(output.shape, (1, 1))\n round_with_gradient.assert_called_with(mock.ANY)\n self.assertEqual(round_with_gradient.call_count, 4)\n floor_with_gradient.assert_not_called()\n\n @mock.patch.object(primitives, 'round_with_gradient')\n @mock.patch.object(primitives, 'floor_with_gradient')\n def test_without_mlp_weight_quant(self, floor_with_gradient,\n round_with_gradient):\n hparams = training_hparams_generator_lib.create_base_transformer_hparams(\n mlp_weight_prec=None,\n embedding_weight_prec=None,\n attention_weight_prec=None,\n mlp_pos_inputs_prec=None,\n mlp_pos_inputs_hyper=None,\n mlp_signed_inputs_prec=None,\n mlp_signed_inputs_hyper=None,\n attention_kqv_inputs_prec=None,\n attention_kqv_inputs_hyper=None,\n 
attention_out_inputs_prec=None,\n attention_out_inputs_hyper=None,\n logits_inputs_prec=None,\n logits_inputs_hyper=None,\n logits_via_embeddings=True,\n attention_act_q_inputs_prec=None,\n attention_act_q_inputs_hyper=None,\n attention_act_k_inputs_prec=None,\n attention_act_k_inputs_hyper=None,\n attention_act_probs_inputs_prec=None,\n attention_act_v_inputs_prec=None,\n attention_act_v_inputs_hyper=None,\n num_layers=1,\n emb_dim=1,\n num_heads=1,\n qkv_dim=1,\n mlp_dim=1,\n quant_type=QuantType.FAKE_QUANT)\n transformer_kwargs = self.transformer_small_kwargs\n transformer_kwargs['hparams'] = hparams\n round_with_gradient.side_effect = lambda x: x\n floor_with_gradient.side_effect = lambda x: x\n\n model, init_state = self.init_model(transformer_kwargs)\n model.apply(init_state, self.inputs, self.target)\n round_with_gradient.assert_not_called()\n floor_with_gradient.assert_not_called()\n\n def _num_mlp_floors(self, weight_quant, pos_input_quant, neg_input_quant):\n # There are 2 MLP blocks per layer (1 encoder, 1 decoder) and 2 weight quant\n # ops per MLP block, so 4 in total per layer.\n mlp_floors_per_layer = 4 if weight_quant else 0\n\n # There are 2 MLP blocks per layer (1 encoder, 1 decoder) and 1 input quant\n # op per unsigned MLP block, so 2 in total per layer.\n if pos_input_quant:\n mlp_floors_per_layer = mlp_floors_per_layer + 2\n\n # There are 2 MLP blocks per layer (1 encoder, 1 decoder) and 1 input quant\n # op per signed MLP block, so 2 in total per layer.\n if neg_input_quant:\n mlp_floors_per_layer = mlp_floors_per_layer + 2\n\n return mlp_floors_per_layer\n\n def _num_embedding_floors(self, weight_quant, act_quant):\n # 3 embedding layers in the whole model\n embedding_floors = 3 if weight_quant else 0\n\n # logits activation quantization\n if act_quant:\n embedding_floors = embedding_floors + 1\n\n return embedding_floors\n\n def _num_attention_floors(self, weight_quant, kqv_input_quant,\n out_input_quant, act_q_input_quant,\n act_k_input_quant, act_qk_input_quant,\n act_v_input_quant):\n # 3 attention blocks per layer (1 on encoder, 2 on decoder), each\n # attention block has 4 weight quant ops, so 12 in total per layer.\n attention_floors_per_layer = 12 if weight_quant else 0\n\n # 3 attention blocks per layer (1 on encoder, 2 on decoder), each\n # attention block has 3 kqv activation quant ops, so 9 in total per layer.\n if kqv_input_quant:\n attention_floors_per_layer = attention_floors_per_layer + 9\n\n # 3 attention blocks per layer (1 on encoder, 2 on decoder), each attention\n # block has 1 dense out activation quant op, so 3 in total per layer.\n if out_input_quant:\n attention_floors_per_layer = attention_floors_per_layer + 3\n\n # 3 attention blocks per layer (1 on encoder, 2 on decoder), each attention\n # block has 1 act*act activation quant op for each act per layer.\n for act_quant in [\n act_q_input_quant, act_k_input_quant, act_qk_input_quant,\n act_v_input_quant\n ]:\n if act_quant:\n attention_floors_per_layer = attention_floors_per_layer + 3\n\n return attention_floors_per_layer\n\n @parameterized.named_parameters(\n dict(\n testcase_name='test_2layers_no_quant',\n num_layers=2,\n mlp_weight_prec=None,\n mlp_pos_inputs_prec=None,\n mlp_signed_inputs_prec=None,\n attention_kqv_inputs_prec=None,\n attention_out_inputs_prec=None,\n embedding_weight_prec=None,\n attention_weight_prec=None,\n logits_inputs_prec=None,\n attention_act_q_inputs_prec=None,\n attention_act_k_inputs_prec=None,\n attention_act_probs_inputs_prec=None,\n 
attention_act_v_inputs_prec=None,\n ),\n dict(\n testcase_name='test_3layers_full_quant',\n num_layers=3,\n mlp_weight_prec=4,\n mlp_pos_inputs_prec=8,\n mlp_signed_inputs_prec=8,\n attention_kqv_inputs_prec=2,\n attention_out_inputs_prec=4,\n embedding_weight_prec=4,\n attention_weight_prec=4,\n logits_inputs_prec=8,\n attention_act_q_inputs_prec=4,\n attention_act_k_inputs_prec=8,\n attention_act_probs_inputs_prec=8,\n attention_act_v_inputs_prec=4,\n ),\n )\n def test_number_of_floor_ops(\n self, num_layers, mlp_weight_prec, mlp_pos_inputs_prec,\n mlp_signed_inputs_prec, attention_kqv_inputs_prec,\n attention_out_inputs_prec, embedding_weight_prec, attention_weight_prec,\n logits_inputs_prec, attention_act_q_inputs_prec,\n attention_act_k_inputs_prec, attention_act_probs_inputs_prec,\n attention_act_v_inputs_prec):\n # Counts number of floor ops as a proxy for quantization ops.\n act_fixed_clip_bound = 3.0\n hparams = training_hparams_generator_lib.create_base_transformer_hparams(\n mlp_weight_prec=mlp_weight_prec,\n embedding_weight_prec=embedding_weight_prec,\n attention_weight_prec=attention_weight_prec,\n mlp_pos_inputs_prec=mlp_pos_inputs_prec,\n mlp_pos_inputs_hyper=act_fixed_clip_bound,\n mlp_signed_inputs_prec=mlp_signed_inputs_prec,\n mlp_signed_inputs_hyper=act_fixed_clip_bound,\n attention_kqv_inputs_prec=attention_kqv_inputs_prec,\n attention_kqv_inputs_hyper=act_fixed_clip_bound,\n attention_out_inputs_prec=attention_out_inputs_prec,\n attention_out_inputs_hyper=act_fixed_clip_bound,\n logits_inputs_prec=logits_inputs_prec,\n logits_inputs_hyper=act_fixed_clip_bound,\n logits_via_embeddings=True,\n attention_act_q_inputs_prec=attention_act_q_inputs_prec,\n attention_act_q_inputs_hyper=act_fixed_clip_bound,\n attention_act_k_inputs_prec=attention_act_k_inputs_prec,\n attention_act_k_inputs_hyper=act_fixed_clip_bound,\n attention_act_probs_inputs_prec=attention_act_probs_inputs_prec,\n attention_act_v_inputs_prec=attention_act_v_inputs_prec,\n attention_act_v_inputs_hyper=act_fixed_clip_bound,\n num_layers=num_layers,\n emb_dim=5,\n num_heads=8,\n qkv_dim=8,\n mlp_dim=7,\n quant_type=QuantType.FAKE_QUANT)\n\n transformer_kwargs = self.transformer_full_kwargs\n transformer_kwargs['hparams'] = hparams\n input_shape = (2, 4)\n target_shape = input_shape\n model, init_state = self.init_model(transformer_kwargs)\n hlo_proto = hlo_utils.load_hlo_proto_from_model(model, init_state,\n [input_shape, target_shape])\n floor_count = hlo_utils.count_ops_in_hlo_proto(hlo_proto, r'floor')\n\n mlp_floors_per_layer = self._num_mlp_floors(\n (mlp_weight_prec is not None), (mlp_pos_inputs_prec is not None),\n (mlp_signed_inputs_prec is not None))\n\n attention_floors_per_layer = self._num_attention_floors(\n (attention_weight_prec is not None),\n (attention_kqv_inputs_prec is not None),\n (attention_out_inputs_prec is not None),\n (attention_act_q_inputs_prec is not None),\n (attention_act_k_inputs_prec is not None),\n (attention_act_probs_inputs_prec is not None),\n (attention_act_v_inputs_prec is not None))\n\n embedding_floors = self._num_embedding_floors(\n (embedding_weight_prec is not None), (logits_inputs_prec is not None))\n\n expected_floor_count = num_layers * (\n mlp_floors_per_layer + attention_floors_per_layer) + embedding_floors\n self.assertEqual(floor_count, expected_floor_count)\n\n @parameterized.named_parameters(\n dict(\n testcase_name='test_2layers_att8bit_weight_quant',\n num_layers=2,\n attention_kqv_inputs_prec=None,\n attention_out_inputs_prec=None,\n 
attention_weight_prec=8,\n attention_act_q_inputs_prec=None,\n attention_act_k_inputs_prec=None,\n attention_act_probs_inputs_prec=None,\n attention_act_v_inputs_prec=None,\n inputs_hyper_is_float=True,\n ),\n dict(\n testcase_name='test_3layers_att8bit_kqv_inputs_quant',\n num_layers=3,\n attention_kqv_inputs_prec=8,\n attention_out_inputs_prec=None,\n attention_weight_prec=None,\n attention_act_q_inputs_prec=None,\n attention_act_k_inputs_prec=None,\n attention_act_probs_inputs_prec=None,\n attention_act_v_inputs_prec=None,\n inputs_hyper_is_float=True,\n ),\n dict(\n testcase_name='test_3layers_att8bit_act_q_inputs_quant',\n num_layers=3,\n attention_kqv_inputs_prec=None,\n attention_out_inputs_prec=None,\n attention_weight_prec=None,\n attention_act_q_inputs_prec=8,\n attention_act_k_inputs_prec=None,\n attention_act_probs_inputs_prec=None,\n attention_act_v_inputs_prec=None,\n inputs_hyper_is_float=True,\n ),\n dict(\n testcase_name='test_2layers_att8bit_act_k_inputs_quant',\n num_layers=2,\n attention_kqv_inputs_prec=None,\n attention_out_inputs_prec=None,\n attention_weight_prec=None,\n attention_act_q_inputs_prec=None,\n attention_act_k_inputs_prec=8,\n attention_act_probs_inputs_prec=None,\n attention_act_v_inputs_prec=None,\n inputs_hyper_is_float=True,\n ),\n dict(\n testcase_name='test_3layers_att8bit_act_qk_inputs_quant',\n num_layers=3,\n attention_kqv_inputs_prec=None,\n attention_out_inputs_prec=None,\n attention_weight_prec=None,\n attention_act_q_inputs_prec=None,\n attention_act_k_inputs_prec=None,\n attention_act_probs_inputs_prec=4,\n attention_act_v_inputs_prec=None,\n inputs_hyper_is_float=True,\n ),\n dict(\n testcase_name='test_2layers_att8bit_act_v_inputs_quant',\n num_layers=2,\n attention_kqv_inputs_prec=None,\n attention_out_inputs_prec=None,\n attention_weight_prec=None,\n attention_act_q_inputs_prec=None,\n attention_act_k_inputs_prec=None,\n attention_act_probs_inputs_prec=None,\n attention_act_v_inputs_prec=8,\n inputs_hyper_is_float=True,\n ),\n dict(\n testcase_name='test_3layers_att8bit_kqv_inputs_auto_quant',\n num_layers=3,\n attention_kqv_inputs_prec=8,\n attention_out_inputs_prec=None,\n attention_weight_prec=None,\n attention_act_q_inputs_prec=None,\n attention_act_k_inputs_prec=None,\n attention_act_probs_inputs_prec=None,\n attention_act_v_inputs_prec=None,\n inputs_hyper_is_float=False,\n ),\n dict(\n testcase_name='test_2layers_att8bit_out_inputs_quant',\n num_layers=2,\n attention_kqv_inputs_prec=None,\n attention_out_inputs_prec=8,\n attention_weight_prec=None,\n attention_act_q_inputs_prec=None,\n attention_act_k_inputs_prec=None,\n attention_act_probs_inputs_prec=None,\n attention_act_v_inputs_prec=None,\n inputs_hyper_is_float=True,\n ),\n dict(\n testcase_name='test_2layers_att8bit_out_inputs_auto_quant',\n num_layers=2,\n attention_kqv_inputs_prec=None,\n attention_out_inputs_prec=8,\n attention_weight_prec=None,\n attention_act_q_inputs_prec=None,\n attention_act_k_inputs_prec=None,\n attention_act_probs_inputs_prec=None,\n attention_act_v_inputs_prec=None,\n inputs_hyper_is_float=False,\n ),\n dict(\n testcase_name='test_2layers_att_weight_kqv_out_quant',\n num_layers=2,\n attention_kqv_inputs_prec=8,\n attention_out_inputs_prec=4,\n attention_weight_prec=2,\n attention_act_q_inputs_prec=None,\n attention_act_k_inputs_prec=None,\n attention_act_probs_inputs_prec=None,\n attention_act_v_inputs_prec=None,\n inputs_hyper_is_float=True,\n ),\n dict(\n testcase_name='test_2layers_att_weight_kqv_out_act_quant',\n num_layers=2,\n 
attention_kqv_inputs_prec=8,\n attention_out_inputs_prec=4,\n attention_weight_prec=2,\n attention_act_q_inputs_prec=4,\n attention_act_k_inputs_prec=8,\n attention_act_probs_inputs_prec=4,\n attention_act_v_inputs_prec=2,\n inputs_hyper_is_float=True,\n ),\n dict(\n testcase_name='test_2layers_att_weight_kqv_out_auto_quant',\n num_layers=2,\n attention_kqv_inputs_prec=8,\n attention_out_inputs_prec=4,\n attention_weight_prec=2,\n attention_act_q_inputs_prec=None,\n attention_act_k_inputs_prec=None,\n attention_act_probs_inputs_prec=None,\n attention_act_v_inputs_prec=None,\n inputs_hyper_is_float=False,\n ),\n dict(\n testcase_name='test_3layers_att_weight_kqv_out_act_auto_quant',\n num_layers=3,\n attention_kqv_inputs_prec=8,\n attention_out_inputs_prec=4,\n attention_weight_prec=2,\n attention_act_q_inputs_prec=8,\n attention_act_k_inputs_prec=4,\n attention_act_probs_inputs_prec=4,\n attention_act_v_inputs_prec=2,\n inputs_hyper_is_float=False,\n ),\n )\n def test_number_of_floor_ops_attention(\n self,\n num_layers,\n attention_kqv_inputs_prec,\n attention_out_inputs_prec,\n attention_weight_prec,\n attention_act_q_inputs_prec,\n attention_act_k_inputs_prec,\n attention_act_probs_inputs_prec,\n attention_act_v_inputs_prec,\n inputs_hyper_is_float,\n ):\n # Counts number of floor ops as a proxy for quantization ops.\n if inputs_hyper_is_float:\n inputs_hyper = 6.0\n else:\n inputs_hyper = get_bounds.GetBounds.Hyper(\n initial_bound=6.0,\n stddev_coeff=3.0,\n absdev_coeff=2.0,\n mix_coeff=0.5,\n granularity=quant_config.QuantGranularity.PER_TENSOR)\n hparams = training_hparams_generator_lib.create_base_transformer_hparams(\n mlp_weight_prec=None,\n embedding_weight_prec=None,\n attention_weight_prec=attention_weight_prec,\n mlp_pos_inputs_prec=None,\n mlp_pos_inputs_hyper=None,\n mlp_signed_inputs_prec=None,\n mlp_signed_inputs_hyper=None,\n attention_kqv_inputs_prec=attention_kqv_inputs_prec,\n attention_kqv_inputs_hyper=inputs_hyper,\n attention_out_inputs_prec=attention_out_inputs_prec,\n attention_out_inputs_hyper=inputs_hyper,\n logits_inputs_prec=None,\n logits_inputs_hyper=None,\n logits_via_embeddings=True,\n attention_act_q_inputs_prec=attention_act_q_inputs_prec,\n attention_act_q_inputs_hyper=inputs_hyper,\n attention_act_k_inputs_prec=attention_act_k_inputs_prec,\n attention_act_k_inputs_hyper=inputs_hyper,\n attention_act_probs_inputs_prec=attention_act_probs_inputs_prec,\n attention_act_v_inputs_prec=attention_act_v_inputs_prec,\n attention_act_v_inputs_hyper=inputs_hyper,\n num_layers=num_layers,\n emb_dim=5,\n num_heads=8,\n qkv_dim=8,\n mlp_dim=7,\n quant_type=QuantType.FAKE_QUANT)\n\n transformer_kwargs = self.transformer_full_kwargs\n transformer_kwargs['hparams'] = hparams\n input_shape = (2, 4)\n target_shape = input_shape\n model, init_state = self.init_model(transformer_kwargs)\n hlo_proto = hlo_utils.load_hlo_proto_from_model(model, init_state,\n [input_shape, target_shape])\n floor_count = hlo_utils.count_ops_in_hlo_proto(hlo_proto, r'floor')\n\n attention_floors_per_layer = self._num_attention_floors(\n (attention_weight_prec is not None),\n (attention_kqv_inputs_prec is not None),\n (attention_out_inputs_prec is not None),\n (attention_act_q_inputs_prec is not None),\n (attention_act_k_inputs_prec is not None),\n (attention_act_probs_inputs_prec is not None),\n (attention_act_v_inputs_prec is not None))\n\n expected_floor_count = num_layers * attention_floors_per_layer\n self.assertEqual(floor_count, expected_floor_count)\n\n @parameterized.named_parameters(\n 
dict(\n testcase_name='test_3layers_mlp8bit_weight_quant',\n num_layers=3,\n mlp_weight_prec=8,\n mlp_pos_inputs_prec=None,\n mlp_pos_inputs_hyper_is_float=True,\n mlp_signed_inputs_prec=None,\n mlp_signed_inputs_hyper_is_float=True,\n ),\n dict(\n testcase_name='test_2layers_mlp8bit_pos_inputs_quant',\n num_layers=2,\n mlp_weight_prec=None,\n mlp_pos_inputs_prec=8,\n mlp_pos_inputs_hyper_is_float=True,\n mlp_signed_inputs_prec=None,\n mlp_signed_inputs_hyper_is_float=True,\n ),\n dict(\n testcase_name='test_2layers_mlp8bit_pos_inputs_auto_quant',\n num_layers=2,\n mlp_weight_prec=None,\n mlp_pos_inputs_prec=8,\n mlp_pos_inputs_hyper_is_float=False,\n mlp_signed_inputs_prec=None,\n mlp_signed_inputs_hyper_is_float=True,\n ),\n dict(\n testcase_name='test_2layers_mlp8bit_pos_inputs_weights_quant',\n num_layers=2,\n mlp_weight_prec=8,\n mlp_pos_inputs_prec=8,\n mlp_pos_inputs_hyper_is_float=True,\n mlp_signed_inputs_prec=None,\n mlp_signed_inputs_hyper_is_float=True,\n ),\n dict(\n testcase_name='test_2layers_mlp8bit_neg_inputs_quant',\n num_layers=2,\n mlp_weight_prec=None,\n mlp_pos_inputs_prec=None,\n mlp_pos_inputs_hyper_is_float=True,\n mlp_signed_inputs_prec=8,\n mlp_signed_inputs_hyper_is_float=True,\n ),\n dict(\n testcase_name='test_2layers_mlp8bit_neg_inputs_auto_quant',\n num_layers=2,\n mlp_weight_prec=None,\n mlp_pos_inputs_prec=None,\n mlp_pos_inputs_hyper_is_float=True,\n mlp_signed_inputs_prec=8,\n mlp_signed_inputs_hyper_is_float=False,\n ),\n dict(\n testcase_name='test_2layers_mlp8bit_all_inputs_weights_quant',\n num_layers=2,\n mlp_weight_prec=8,\n mlp_pos_inputs_prec=8,\n mlp_pos_inputs_hyper_is_float=True,\n mlp_signed_inputs_prec=8,\n mlp_signed_inputs_hyper_is_float=True,\n ),\n dict(\n testcase_name='test_2layers_mlp8bit_all_inputs_weights_auto_quant',\n num_layers=2,\n mlp_weight_prec=8,\n mlp_pos_inputs_prec=8,\n mlp_pos_inputs_hyper_is_float=False,\n mlp_signed_inputs_prec=8,\n mlp_signed_inputs_hyper_is_float=False,\n ),\n )\n def test_number_of_floor_ops_mlp(self, num_layers, mlp_weight_prec,\n mlp_pos_inputs_prec,\n mlp_pos_inputs_hyper_is_float,\n mlp_signed_inputs_prec,\n mlp_signed_inputs_hyper_is_float):\n # Counts number of floor ops as a proxy for quantization ops.\n if mlp_pos_inputs_hyper_is_float:\n mlp_pos_inputs_hyper = 6.0\n else:\n mlp_pos_inputs_hyper = get_bounds.GetBounds.Hyper(\n initial_bound=6.0,\n stddev_coeff=3.0,\n absdev_coeff=2.0,\n mix_coeff=0.5,\n granularity=quant_config.QuantGranularity.PER_TENSOR)\n if mlp_signed_inputs_hyper_is_float:\n mlp_pos_inputs_hyper = 6.0\n else:\n mlp_pos_inputs_hyper = get_bounds.GetBounds.Hyper(\n initial_bound=6.0,\n stddev_coeff=3.0,\n absdev_coeff=2.0,\n mix_coeff=0.5,\n granularity=quant_config.QuantGranularity.PER_TENSOR)\n hparams = training_hparams_generator_lib.create_base_transformer_hparams(\n mlp_weight_prec=mlp_weight_prec,\n embedding_weight_prec=None,\n attention_weight_prec=None,\n mlp_pos_inputs_prec=mlp_pos_inputs_prec,\n mlp_pos_inputs_hyper=mlp_pos_inputs_hyper,\n mlp_signed_inputs_prec=mlp_signed_inputs_prec,\n mlp_signed_inputs_hyper=mlp_pos_inputs_hyper,\n attention_kqv_inputs_prec=None,\n attention_kqv_inputs_hyper=None,\n attention_out_inputs_prec=None,\n attention_out_inputs_hyper=None,\n logits_inputs_prec=None,\n logits_inputs_hyper=None,\n logits_via_embeddings=True,\n attention_act_q_inputs_prec=None,\n attention_act_q_inputs_hyper=None,\n attention_act_k_inputs_prec=None,\n attention_act_k_inputs_hyper=None,\n attention_act_probs_inputs_prec=None,\n 
attention_act_v_inputs_prec=None,\n attention_act_v_inputs_hyper=None,\n num_layers=num_layers,\n emb_dim=5,\n num_heads=8,\n qkv_dim=8,\n mlp_dim=7,\n quant_type=QuantType.FAKE_QUANT)\n\n transformer_kwargs = self.transformer_full_kwargs\n transformer_kwargs['hparams'] = hparams\n input_shape = (2, 4)\n target_shape = input_shape\n model, init_state = self.init_model(transformer_kwargs)\n hlo_proto = hlo_utils.load_hlo_proto_from_model(model, init_state,\n [input_shape, target_shape])\n floor_count = hlo_utils.count_ops_in_hlo_proto(hlo_proto, r'floor')\n\n mlp_floors_per_layer = self._num_mlp_floors(\n (mlp_weight_prec is not None), (mlp_pos_inputs_prec is not None),\n (mlp_signed_inputs_prec is not None))\n\n expected_floor_count = num_layers * mlp_floors_per_layer\n self.assertEqual(floor_count, expected_floor_count)\n\n @parameterized.named_parameters(\n dict(\n testcase_name='test_3layers_embedding8bit_weight_quant',\n num_layers=3,\n embedding_weight_prec=8,\n logits_inputs_prec=None,\n logits_inputs_hyper_is_float=True,\n logits_via_embeddings=True,\n ),\n dict(\n testcase_name='test_2layers_embedding8bit_inputs_auto_quant',\n num_layers=2,\n embedding_weight_prec=None,\n logits_inputs_prec=8,\n logits_inputs_hyper_is_float=False,\n logits_via_embeddings=True,\n ),\n dict(\n testcase_name='test_2layers_embedding8bit_inputs_weights_quant_fixed',\n num_layers=2,\n embedding_weight_prec=8,\n logits_inputs_prec=8,\n logits_inputs_hyper_is_float=True,\n logits_via_embeddings=True,\n ),\n dict(\n testcase_name='test_2layers_embedding8bit_inputs_weights_auto_quant',\n num_layers=2,\n embedding_weight_prec=8,\n logits_inputs_prec=8,\n logits_inputs_hyper_is_float=False,\n logits_via_embeddings=True,\n ),\n dict(\n testcase_name='test_2layers_embedding8bit_without_logit_sharing',\n num_layers=2,\n embedding_weight_prec=8,\n logits_inputs_prec=8,\n logits_inputs_hyper_is_float=False,\n logits_via_embeddings=False,\n ),\n )\n def test_number_of_floor_ops_embedding(self, num_layers,\n embedding_weight_prec,\n logits_inputs_prec,\n logits_inputs_hyper_is_float,\n logits_via_embeddings):\n # Counts number of floor ops as a proxy for quantization ops.\n if logits_inputs_hyper_is_float:\n logits_inputs_hyper = 6.0\n else:\n logits_inputs_hyper = get_bounds.GetBounds.Hyper(\n initial_bound=6.0,\n stddev_coeff=3.0,\n absdev_coeff=2.0,\n mix_coeff=0.5,\n granularity=quant_config.QuantGranularity.PER_TENSOR)\n\n hparams = training_hparams_generator_lib.create_base_transformer_hparams(\n mlp_weight_prec=None,\n embedding_weight_prec=embedding_weight_prec,\n attention_weight_prec=None,\n mlp_pos_inputs_prec=None,\n mlp_pos_inputs_hyper=None,\n mlp_signed_inputs_prec=None,\n mlp_signed_inputs_hyper=None,\n attention_kqv_inputs_prec=None,\n attention_kqv_inputs_hyper=None,\n attention_out_inputs_prec=None,\n attention_out_inputs_hyper=None,\n logits_inputs_prec=logits_inputs_prec,\n logits_inputs_hyper=logits_inputs_hyper,\n logits_via_embeddings=logits_via_embeddings,\n attention_act_q_inputs_prec=None,\n attention_act_q_inputs_hyper=None,\n attention_act_k_inputs_prec=None,\n attention_act_k_inputs_hyper=None,\n attention_act_probs_inputs_prec=None,\n attention_act_v_inputs_prec=None,\n attention_act_v_inputs_hyper=None,\n num_layers=num_layers,\n emb_dim=5,\n num_heads=8,\n qkv_dim=8,\n mlp_dim=7,\n quant_type=QuantType.FAKE_QUANT)\n\n transformer_kwargs = self.transformer_full_kwargs\n transformer_kwargs['hparams'] = hparams\n input_shape = (2, 4)\n target_shape = input_shape\n model, init_state = 
self.init_model(transformer_kwargs)\n hlo_proto = hlo_utils.load_hlo_proto_from_model(model, init_state,\n [input_shape, target_shape])\n floor_count = hlo_utils.count_ops_in_hlo_proto(hlo_proto, r'floor')\n\n embedding_floor_ops = self._num_embedding_floors(\n (embedding_weight_prec is not None), (logits_inputs_prec is not None))\n\n self.assertEqual(floor_count, embedding_floor_ops)\n\n def test_padding_mask(self):\n # Fuzzing test to make sure activation statistics aren't affected by padding\n # tokens.\n #\n # This tests works by changing the embedding of the padding token (token\n # with id '0') and making sure all the stats stay the same.\n #\n # It also tests that the stats *do* change when the embedding of a\n # non-padding token changes.\n inputs_hyper = get_bounds.GetBounds.Hyper(\n initial_bound=6.0,\n stddev_coeff=3.0,\n absdev_coeff=2.0,\n mix_coeff=0.5,\n granularity=quant_config.QuantGranularity.PER_CHANNEL)\n # Set logits_via_embedding to false so that the embedding of the padding\n # token doesn't affect the logits calculation at the end of the decoder.\n hparams = training_hparams_generator_lib.create_base_transformer_hparams(\n mlp_weight_prec=8,\n embedding_weight_prec=None,\n attention_weight_prec=8,\n mlp_pos_inputs_prec=8,\n mlp_pos_inputs_hyper=inputs_hyper,\n mlp_signed_inputs_prec=8,\n mlp_signed_inputs_hyper=inputs_hyper,\n attention_kqv_inputs_prec=8,\n attention_kqv_inputs_hyper=inputs_hyper,\n attention_out_inputs_prec=8,\n attention_out_inputs_hyper=inputs_hyper,\n logits_inputs_prec=8,\n logits_inputs_hyper=inputs_hyper,\n logits_via_embeddings=False,\n attention_act_q_inputs_prec=8,\n attention_act_q_inputs_hyper=inputs_hyper,\n attention_act_k_inputs_prec=8,\n attention_act_k_inputs_hyper=inputs_hyper,\n attention_act_probs_inputs_prec=8,\n attention_act_v_inputs_prec=8,\n attention_act_v_inputs_hyper=inputs_hyper,\n num_layers=2,\n emb_dim=5,\n num_heads=2,\n qkv_dim=4,\n mlp_dim=4,\n quant_type=QuantType.FAKE_QUANT)\n module = models.Transformer(\n hparams=hparams,\n quant_context=quant_config.QuantContext(\n update_bounds=True, collect_acts_stats=True),\n vocab_size=3,\n output_vocab_size=3,\n max_len=10,\n train=False,\n use_bfloat16=False,\n dropout_rate=.1,\n attention_dropout_rate=.1,\n should_decode=False)\n key = jax.random.PRNGKey(0)\n # Mark the first token of the target and last token of the inputs as padding\n # tokens.\n targets = onp.array([[0, 2]])\n inputs = onp.array([[1, 0]])\n initial_state = module.init(key, inputs=inputs, targets=targets)\n # Change the embedding of the padding token.\n initial_state = initial_state.unfreeze()\n initial_state['params']['shared_embedding']['embedding'] = initial_state[\n 'params']['shared_embedding']['embedding'].at[0, :].set(10.0)\n module.train = True\n _, state1 = module.apply(\n flax.core.freeze(initial_state),\n inputs=inputs,\n targets=targets,\n mutable=True,\n rngs={'dropout': key})\n initial_state['params']['shared_embedding']['embedding'] = initial_state[\n 'params']['shared_embedding']['embedding'].at[0, :].set(20.0)\n _, state2 = module.apply(\n flax.core.freeze(initial_state),\n inputs=inputs,\n targets=targets,\n mutable=True,\n rngs={'dropout': key})\n # This tests the statistics in both the GetBounds and StatsTag modules.\n test_utils.assert_stats_are_equal(state1, state2)\n\n # Now we repeat the test, but changing the embedding of a non-padding token\n # (token with ID 1 here). 
We expect to see the stats change.\n # print(initial_state)\n initial_state['params']['shared_embedding']['embedding'] = initial_state[\n 'params']['shared_embedding']['embedding'].at[1, :].set(10.0)\n _, state1 = module.apply(\n flax.core.freeze(initial_state),\n inputs=inputs,\n targets=targets,\n mutable=True,\n rngs={'dropout': key})\n initial_state['params']['shared_embedding']['embedding'] = initial_state[\n 'params']['shared_embedding']['embedding'].at[1, :].set(200.0)\n _, state2 = module.apply(\n flax.core.freeze(initial_state),\n inputs=inputs,\n targets=targets,\n mutable=True,\n rngs={'dropout': key})\n print(initial_state['get_bounds']['encoder']['encoderblock_0']\n ['enc_self_att']['K']['bounds'])\n print(state1['get_bounds']['encoder']['encoderblock_0']['enc_self_att']['K']\n ['bounds'])\n print(state2['get_bounds']['encoder']['encoderblock_0']['enc_self_att']['K']\n ['bounds'])\n print('')\n test_utils.assert_stats_are_unequal(state1, state2)\n\n def test_hparams_without_logits_when_logits_not_shared_raises_error(self):\n # Create hparams without logits hparams by passing in\n # logits_via_embeddings=True.\n inputs_hyper = get_bounds.GetBounds.Hyper(\n initial_bound=6.0,\n stddev_coeff=3.0,\n absdev_coeff=2.0,\n mix_coeff=0.5,\n granularity=quant_config.QuantGranularity.PER_CHANNEL)\n hparams = training_hparams_generator_lib.create_base_transformer_hparams(\n mlp_weight_prec=8,\n embedding_weight_prec=None,\n attention_weight_prec=8,\n mlp_pos_inputs_prec=8,\n mlp_pos_inputs_hyper=inputs_hyper,\n mlp_signed_inputs_prec=8,\n mlp_signed_inputs_hyper=inputs_hyper,\n attention_kqv_inputs_prec=8,\n attention_kqv_inputs_hyper=inputs_hyper,\n attention_out_inputs_prec=8,\n attention_out_inputs_hyper=inputs_hyper,\n logits_inputs_prec=8,\n logits_inputs_hyper=inputs_hyper,\n logits_via_embeddings=True,\n attention_act_q_inputs_prec=8,\n attention_act_q_inputs_hyper=inputs_hyper,\n attention_act_k_inputs_prec=8,\n attention_act_k_inputs_hyper=inputs_hyper,\n attention_act_probs_inputs_prec=8,\n attention_act_v_inputs_prec=8,\n attention_act_v_inputs_hyper=inputs_hyper,\n num_layers=2,\n emb_dim=5,\n num_heads=2,\n qkv_dim=4,\n mlp_dim=4,\n quant_type=QuantType.FAKE_QUANT)\n\n self.assertIsNone(hparams.decoder.logits)\n\n # Now set logits_via_embedding in the model hparams to False.\n hparams.logits_via_embedding = False\n module = models.Transformer(\n hparams=hparams,\n quant_context=quant_config.QuantContext(\n update_bounds=True, collect_acts_stats=True),\n vocab_size=3,\n output_vocab_size=3,\n max_len=10,\n use_bfloat16=False,\n train=False,\n dropout_rate=.1,\n attention_dropout_rate=.1,\n should_decode=False)\n key = jax.random.PRNGKey(0)\n # Mark the first token of the target and last token of the inputs as padding\n # tokens.\n targets = onp.array([[0, 2]])\n inputs = onp.array([[1, 0]])\n # Because the model is not sharing logits with embeddings, but the logits\n # hparams are missing, it should raise an error.\n with self.assertRaises(ValueError):\n module.init(key, inputs=inputs, targets=targets)\n\n\nif __name__ == '__main__':\n absltest.main()\n" ]
[ [ "numpy.testing.assert_array_equal" ], [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
amathislab/Primer-MotionCapture
[ "ee595bb14ad89d5a6e2b412ce28962f5607cad77" ]
[ "Illustrating-Augmentations-FigurePipelineMouse.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nThis script uses Imgaug to display various augmentation methods\nfor a few labeled images of a mouse (Data in folder mouse_m7s3\nfrom Mathis, A., et al. DeepLabCut: markerless pose estimation of user-defined body parts with deep learning.\nNat Neurosci 21, 1281–1289 (2018). https://doi.org/10.1038/s41593-018-0209-y)\n\nFor \"A Primer on Motion Capture with Deep Learning: Principles, Pitfalls and Perspectives\"\nby Alexander Mathis, Steffen Schneider, Jessy Lauer, and Mackenzie Weygandt Mathis\n\n\nUses Imgaug:\nCode: https://github.com/aleju/imgaug\nDocs: https://imgaug.readthedocs.io/en/latest/index.html\n\"\"\"\n\n\nimport pandas as pd\nimport os\nimport numpy as np\n\nimport imgaug as ia\nimport imgaug.augmenters as iaa\nfrom imgaug.augmentables import Keypoint, KeypointsOnImage\n\nimport imageio\nfrom deeplabcut.utils.auxfun_videos import imread, imresize\n\nscale=.4\n##########################\n## Loading data\n##########################\n\nimfolder='mouse_m7s3'\nDataframe = pd.read_hdf(os.path.join(imfolder,\"CollectedData_Pranav.h5\"))\n\nscorer=Dataframe.columns.get_level_values(0)[0]\nbodyparts=Dataframe.columns.get_level_values(1)\n\nia.seed(1)\n\n#parameters for plotting:\ncolor=(200,0,0)\nsize=17\nalpha=.4\n\n#setting up augmentations\nAugmentations=[]\n\naugtype='invert'\nseq = iaa.Sequential([iaa.Invert(1, per_channel=0.5)])\nAugmentations.append([augtype,seq])\n\naugtype='coarsedropout'\nseq = iaa.Sequential([iaa.CoarseDropout(0.02, size_percent=0.15, per_channel=0.5)])\nAugmentations.append([augtype,seq])\n\naugtype='jpegcompression'\nseq = iaa.Sequential([iaa.JpegCompression(compression=(70, 99))])\nAugmentations.append([augtype,seq])\n\naugtype='motionblur'\nseq = iaa.Sequential([iaa.MotionBlur(k=30)])\nAugmentations.append([augtype,seq])\n\naugtype='edgedetect'\nseq = iaa.Sequential([iaa.EdgeDetect(alpha=(0.8, 1.0))])\nAugmentations.append([augtype,seq])\n\naugtype='flipud'\nseq = iaa.Sequential([iaa.Flipud(1)])\nAugmentations.append([augtype,seq])\n\naugtype='fliplr'\nseq = iaa.Sequential([iaa.Fliplr(1)])\nAugmentations.append([augtype,seq])\n\n\nfor ind, imname in enumerate(Dataframe.index):\n image=imresize(imread(os.path.join(imfolder,imname)),size=scale)\n ny,nx,nc=np.shape(image)\n\n kpts=[]\n for b in bodyparts:\n x, y=Dataframe.iloc[ind][scorer][b]['x'], Dataframe.iloc[ind][scorer][b]['y']\n if np.isfinite(x) and np.isfinite(y):\n kpts.append(Keypoint(x=x*scale,y=y*scale))\n\n kps=KeypointsOnImage(kpts, shape=image.shape)\n\n cells=[]\n\n # image with keypoints before augmentation\n image_before = kps.draw_on_image(image, color=color,size=size,alpha=alpha)\n cells.append(image_before)\n\n for name, seq in Augmentations:\n image_aug, kps_aug = seq(image=image, keypoints=kps)\n image_after = kps_aug.draw_on_image(image_aug, color=color,size=size,alpha=alpha)\n\n cells.append(image_after[:ny,:nx,:nc])\n\n grid_image = np.hstack(cells) # Horizontally stack the images\n imageio.imwrite('augmentationexamples/'+str(imfolder)+'_'+imname.split('.png')[0]+'_joint.jpg', grid_image)\n" ]
[ [ "numpy.hstack", "numpy.shape", "numpy.isfinite" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Mishiba-Toshihiro/Self-Register
[ "ac9ce28c714db1d1086b3dba23a464d8f686cfd4" ]
[ "self-checkout.py" ]
[ "import argparse\nimport ntpath\nimport picamera\nimport pygame.mixer\nfrom datetime import datetime\nfrom yolo import YOLO\nfrom PIL import Image, ImageOps, ImageTk\nimport tkinter as tk\nfrom time import sleep\nfrom timeit import default_timer as timer\nimport pandas as pd\nimport os\nimport sys\nfrom contextlib import redirect_stdout\nimport getpass\nimport hashlib\n# warning処理\nimport warnings\nwarnings.filterwarnings('ignore')\n# 自作系\nfrom registerutil import y_n_input\nimport analyze\n\n# 撮影した写真データの保存場所\nphoto_filename = '/tmp/data.jpg'\n\ndef is_registered(x:int, class_dictionary:dict) -> bool:\n \"\"\"\n 登録済みかチェックするやつ\n \"\"\"\n #return x == 39\n #return x in range(len(class_dictionary) - 1)\n return x in range(len(class_dictionary))\n\ndef shutter():\n \"\"\"\n 音出して写真をとる。\n ピッと鳴る。\n \"\"\"\n # 音声再生\n read_sound.play()\n sleep(1)\n # 再生の終了\n read_sound.stop()\n # pi camera 用のライブラリーを使用して、画像を取得\n with picamera.PiCamera() as camera:\n camera.resolution = (300,400)\n camera.start_preview()\n sleep(2)\n camera.capture(photo_filename)\n\ndef scan():\n \"\"\"\n shutterで写真を取って商品を検出する。\n 予測クラス(商品ID)とscoreを返す\n \"\"\"\n shutter()\n\n try:\n image = Image.open(photo_filename)\n image = ImageOps.flip(image)\n image = ImageOps.mirror(image)\n except:\n print('読込みエラー、再度入力お願いします。')\n else:\n output_dir = 'output/'\n\n time = datetime.now().strftime('%Y%m%d%H%M%S')\n\n start = timer()\n pred, score, r_image = yolo.detect_image(image)\n end = timer()\n print('検出にかかった時間:{:.3f}秒'.format(end - start))\n\n image_path = output_dir + 'result_{}.jpg'.format(time)\n r_image.save(image_path)\n show_image(image_path)\n\n return pred, score\n\ndef show_image(image_path:str, window_title='prediction', duration=5000, scale=2):\n \"\"\"\n 一定時間画像を表示\n 推定結果を表示する際に使用\n window_title : 画像表示窓のタイトル\n duration : 表示時間(ms)\n scale : 画像の拡大倍率\n \"\"\"\n\n # imageを開く\n with Image.open(image_path) as img:\n # 画像サイズ取得\n width = img.width * scale\n height = img.height * scale\n\n # tkwindow作成\n root = tk.Tk()\n root.title(window_title)\n root.geometry(str(width) + 'x' + str(height))\n\n img = img.resize((width, height), Image.ANTIALIAS)\n img = ImageTk.PhotoImage(img)\n # canvas作成\n canvas = tk.Canvas(bg = \"black\", width=width, height=height)\n canvas.place(x=0, y=0)\n item = canvas.create_image(0, 0, image=img, anchor=tk.NW)\n root.after(duration, root.destroy)\n\n # 表示\n root.mainloop()\n\ndef initialize_model():\n \"\"\"\n システム起動時処理\n yoloの準備運動\n \"\"\"\n image = Image.open('./samples/output.jpg')\n _, _, _ = yolo.detect_image(image)\n\ndef check_book(datestr:str):\n \"\"\"\n 当日分の帳簿の存在確認など\n datastrのformat: %Y%m%d (当日日付)\n ./books 以下にsales_%Y%m%d.csvの形式で書き込み\n \n *return*\n last_index:int\n 帳簿の売上商品IDの最後の数\n last_cus_id:\n 帳簿のお客様IDの最後の数\n book_path:str\n 帳簿の相対パス\n 書き込み時に使用\n \"\"\"\n book_path = './books/sales_' + datestr + '.csv'\n # 本日分の帳簿の存在確認\n if os.path.isfile(book_path):\n tmp_df = pd.read_csv(book_path, index_col=0)\n # 記帳済み確認\n if len(tmp_df.index) > 0:\n # 帳簿最終行のindex\n last_index = len(tmp_df.index) - 1\n # 帳簿最終行の顧客ID\n last_cus_id = int(tmp_df.tail(1)['customerID'].values)\n else:\n # ファイルだけ作成されて記帳されていない場合\n last_index = -1\n last_cus_id = -1\n else:\n # 帳簿CSV新規作成\n tmp_df = pd.DataFrame(index=[], columns=['saletime', 'customerID', 'prodname', 'prodprice'])\n tmp_df.to_csv(book_path)\n last_index = -1\n last_cus_id = -1\n\n return last_index, last_cus_id, book_path\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)\n \"\"\"\n コマンドライン引数\n \"\"\"\n 
parser.add_argument(\n '-c', '--camera', default=False, action=\"store_true\",\n help='カメラ検出モード'\n )\n\n parser.add_argument(\n '-f', '--file', default=False, action=\"store_true\",\n help='ファイル検出モード'\n )\n\n # parser.add_argument(\n # '-s', '--sales', default=False, action=\"store_true\",\n # help='売上帳簿出力'\n # )\n\n FLAGS = parser.parse_args()\n\n # -cと-fどちらも指定されている/どちらも指定されていない場合異常終了\n if (not FLAGS.camera and not FLAGS.file) or (FLAGS.camera and FLAGS.file):\n print(\"\\\nSpecify one of the optional arguments: -c, -f\\n\\\n -c: camera mode\\n\\\n -f: file mode\")\n sys.exit(1)\n\n # モデルを読み込む\n yolo = YOLO()\n\n # 音声ファイル初期化\n\n pygame.mixer.init()\n read_sound = pygame.mixer.Sound(\"Cash_Register-Beep01-1+6.wav\")\n warn_sound = pygame.mixer.Sound(\"error2.wav\")\n guide_voice1 = pygame.mixer.Sound(\"./guide_sounds/Please_press_ENTER.wav\")\n guide_voice2 = pygame.mixer.Sound(\"./guide_sounds/Please_place_the_products_under_the_camera_and_press_enter.wav\")\n guide_voice3 = pygame.mixer.Sound(\"./guide_sounds/Please_enter_the_item_number_you_wish_to_check_out.wav\")\n guide_voice4 = pygame.mixer.Sound(\"./guide_sounds/Would_you_like_to_check_for_other_items.wav\")\n guide_voice5 = pygame.mixer.Sound(\"./guide_sounds/Thank_you_Have_a_nice_day.wav\")\n \n # 商品名・価格を読み込む\n cls_dic = pd.read_csv('products.csv').set_index('id').T.to_dict(orient='list')\n # {0:['GEORGIA ブラックコーヒー', 150],\n # 1:['コカ・コーラ', 120],\n # 2:['午後の紅茶レモンティー', 150],\n # 3:['ポカリスエット', 150],\n # 4:['綾鷹', 130]}\n # → 一番最後のカテゴリが「未登録」になるかも?\n\n # 起動時処理\n with redirect_stdout(open(os.devnull, 'w')):\n initialize_model()\n\n # セルフレジシステム起動\n while True:\n \n # terminalのクリア\n os.system('clear')\n \n # ロゴが出る\n print('\\\n# # # # # # # # # # # # # # # # # # # # # # # # # # # #\\n\\\n /MME JMMMMMMMF /MMMME /MME /MM /ME\\n\\\n /M/ME /MF /#/ /M/ME /M/M /VME\\n\\\n /M/ ME MMMMMM /MF /MMME /M/ ME /M/ M/V ME\\n\\\n /MM##ME /MF /#/ /MM##ME /M/ MV ME\\n\\\n/M/ ME /ME /MMMME /M/ ME /M/ ME\\n\\\n# # # # # # # # # # # # # # # # # # # # # # # # # # # #\\n')\n\n # 音声案内「エンターを押してください」\n sleep(1)\n guide_voice1.play()\n sleep(2)\n guide_voice1.stop()\n \n tmp = input('Welcome! 
(press enter)')\n # 'q'が入力されたら終了する\n if tmp == 'q':\n break\n # 'b'が入力されたら音声再生\n elif tmp == 'b':\n # 音声再生\n warn_sound.play()\n sleep(1)\n # 再生の終了\n pygame.mixer.music.stop()\n continue\n # 's'が入力されたら売上分析を開始\n elif tmp == 's':\n \"\"\"\n 売上分析モード\n \"\"\"\n auth_flag = True\n wrong_cnt = 0\n\n # パスワード入力 連続3回まで入力可能(2回まで間違えられる)\n while auth_flag:\n # 3回間違えると最初から\n if 3 <= wrong_cnt:\n print('Please try again.')\n sleep(2)\n break\n \n password = getpass.getpass(prompt='input password:')\n \n if hashlib.sha256(password.encode()).hexdigest() == 'f6f8057c7a9964f94fdd4a62ba70ff351ecb7411952760f549d8897b9c4fb201':\n auth_flag = False\n # 分析モード突入\n analyze.initiate('./books/') \n else:\n print('WRONG!!')\n wrong_cnt += 1\n \n continue\n\n # 会計開始\n checkout_list = []\n while True:\n\n if FLAGS.camera:\n \"\"\"\n カメラ検出\n \"\"\"\n # 音声案内「商品を置いてください」\n guide_voice2.play()\n sleep(4)\n guide_voice2.stop()\n \n key = input('商品をスキャンします。「Enter」を押して下さい。')\n pred, score = scan()\n \n elif FLAGS.file:\n \"\"\"\n データファイル検出\n \"\"\"\n img = input('ファイルパスを入力してください。:')\n try:\n image = Image.open(img)\n except:\n print('読込みエラー。再度入力してください。')\n continue\n else:\n output_dir = 'output/'\n _, file_name = ntpath.split(img)\n\n start = timer()\n pred, score, r_image = yolo.detect_image(image)\n end = timer()\n print('検出にかかった時間:{:.3f}秒'.format(end - start))\n\n image_path = output_dir + 'result_{}.jpg'.format(file_name.replace('.jpg', ''))\n r_image.save(image_path)\n show_image(image_path)\n\n # 未登録商品検出(消すかも)\n if not all([is_registered(x, cls_dic) for x in pred]):\n print('未登録商品を検出しました。再度読み込みますか?')\n key = y_n_input()\n if key:\n continue\n else:\n print('未登録商品はお会計されません。')\n pred = [x for x in pred if is_registered(x, cls_dic)] # 未登録商品を削る\n\n # 登録済み商品検出*なし*\n if len(pred) == 0:\n print('商品を検出しませんでした。再度読み込みますか?')\n key = y_n_input()\n if key:\n continue\n else:\n pass\n\n # 登録済み商品検出*あり*\n else:\n for i, item in enumerate(pred):\n print('商品番号{} {}の金額は¥{}'.format(i, cls_dic[item][0], cls_dic[item][1]))\n\n # 会計対象商品選択\n while True:\n \n # 音声案内「会計する商品を選んでください」\n guide_voice3.play()\n sleep(3)\n guide_voice3.stop()\n \n key = input('お会計を行いたい商品番号を半角スペース区切りで入力してください。(例:0 3 5)\\nすべての商品を会計する場合は何も入力せず「Enter」を押してください。:')\n \n # 入力を分割\n splited_key = key.split()\n \n # 何も入力されていない場合は検出した全商品を買い物カゴに入れる\n if len(splited_key) == 0:\n # 全商品の商品IDをそのまま渡す\n items = pred\n break\n\n try:\n # 複数回同じ数字が入力された場合も一つのみカゴに入れる\n prod_ids = set(map(int, splited_key))\n # 指定indexの商品の商品IDを取得・リスト化する\n items = [pred[x] for x in prod_ids]\n break\n except:\n # value check\n print('商品番号の誤りを検知しました。0-{}の間の番号を入力してください。:'.format(len(pred)-1))\n continue\n \n # カゴに追加\n checkout_list.extend(items)\n\n # 買い物カゴの状態に応じたメッセージを表示\n if len(checkout_list) > 0:\n print('買い物カゴに次の商品が入っています。')\n for item in checkout_list:\n print('{} ¥{}'.format(cls_dic[item][0], cls_dic[item][1]))\n else:\n print('買い物カゴは空です。')\n\n # 音声案内「他の商品も会計しますか」\n guide_voice4.play()\n sleep(2.5)\n guide_voice4.stop()\n\n # 会計終了プロセス\n print('他の商品もお会計しますか?')\n key = y_n_input()\n if key:\n continue\n else:\n break\n\n print('合計金額は¥{}です。'.format(sum([cls_dic[x][1] for x in checkout_list])))\n print('ありがとうございました。')\n # 音声案内「ありがとうございました」\n guide_voice5.play()\n sleep(2)\n guide_voice5.stop()\n\n # 記帳\n sale_date = datetime.now()\n sale_date_str = sale_date.strftime('%Y%m%d')\n # 帳簿チェック\n last_index, last_cus_id, book_path = check_book(sale_date_str)\n\n # DataFrame作成\n tmp_df = pd.DataFrame(index=range(last_index+1, last_index+1+len(checkout_list)),\n data={\n 'saletime': [sale_date.strftime('%Y/%m/%d 
%H:%M:%S')] * len(checkout_list),\n 'customerID': [last_cus_id+1] * len(checkout_list),\n 'prodname': [cls_dic[item][0] for item in checkout_list],\n 'prodprice': [cls_dic[item][1] for item in checkout_list],\n },\n columns=['saletime', 'customerID', 'prodname', 'prodprice'])\n\n # ファイル書き込み\n tmp_df.to_csv(book_path, mode='a', header=False)\n\n # last_*書き換え\n last_index = last_index+1+len(checkout_list)\n last_cus_id = last_cus_id+1\n\n print('Bye!')\n yolo.close_session()\n sys.exit(0)\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
jesserobertson/uncover-ml
[ "22ca6361b25a119dd8fab1f3d50475df70b35170" ]
[ "preprocessing/raster_average.py" ]
[ "import warnings\nfrom subprocess import check_call\nimport shutil\nimport glob\nimport logging\nfrom os.path import abspath, join, basename, isdir, isfile\nimport click\nimport numpy as np\nfrom numpy.lib.stride_tricks import as_strided\nfrom scipy import ndimage\nfrom osgeo import gdal\nfrom uncoverml import mpiops\n\nwarnings.filterwarnings('ignore')\nlog = logging.getLogger(__name__)\nCOMMON = ['--config', 'GDAL_CACHEMAX', '200']\nTILES = ['-co', 'TILED=YES']\nTRANSLATE = 'gdal_translate'\nfunc_map = {'nanmean': np.nanmean,\n 'nanmax': np.nanmax,\n 'nanmin': np.nanmin,\n 'nanmedian': np.nanmedian}\n\n\[email protected]()\ndef cli():\n logging.basicConfig(level=logging.INFO)\n\n\[email protected]()\[email protected]('input_dir')\[email protected]('out_dir')\[email protected]('-s', '--size', type=int, default=3,\n help='size of the uniform filter to '\n 'perform uniform 2d average according to '\n 'scipy.ndimage.uniform_filter')\ndef average(input_dir, out_dir, size):\n input_dir = abspath(input_dir)\n log.info('Reading tifs from {}'.format(input_dir))\n tifs = glob.glob(join(input_dir, '*.tif'))\n\n for tif in tifs:\n data_source = gdal.Open(tif, gdal.GA_ReadOnly)\n band = data_source.GetRasterBand(1)\n # data_type = gdal.GetDataTypeName(band.DataType)\n data = band.ReadAsArray()\n no_data_val = band.GetNoDataValue()\n averaged_data = filter_data(data, size, no_data_val)\n log.info('Calculated average for {}'.format(basename(tif)))\n\n output_file = join(out_dir, 'average_' + basename(tif))\n out_ds = gdal.GetDriverByName('GTiff').Create(\n output_file, data_source.RasterXSize, data_source.RasterYSize,\n 1, band.DataType)\n out_band = out_ds.GetRasterBand(1)\n out_band.WriteArray(averaged_data)\n out_ds.SetGeoTransform(data_source.GetGeoTransform())\n out_ds.SetProjection(data_source.GetProjection())\n out_band.FlushCache() # Write data to disc\n out_ds = None # close out_ds\n data_source = None # close dataset\n\n log.info('Finished converting {}'.format(basename(tif)))\n\n\[email protected]()\[email protected]('input_dir')\[email protected]('out_dir')\[email protected]('-s', '--size', type=int, default=5,\n help='size of the uniform filter to '\n 'perform uniform 2d average according to '\n 'scipy.ndimage.uniform_filter')\ndef gdalaverage(input_dir, out_dir, size):\n \"\"\"\n average data using gdal's averaging method.\n Parameters\n ----------\n input_dir: str\n input dir name of the tifs that needs to be averaged\n out_dir: str\n output dir name\n size: int, optional\n size of kernel\n Returns\n -------\n\n \"\"\"\n input_dir = abspath(input_dir)\n log.info('Reading tifs from {}'.format(input_dir))\n tifs = glob.glob(join(input_dir, '*.tif'))\n\n process_tifs = np.array_split(tifs, mpiops.chunks)[mpiops.chunk_index]\n\n for tif in process_tifs:\n data_set = gdal.Open(tif, gdal.GA_ReadOnly)\n # band = data_set.GetRasterBand(1)\n # data_type = gdal.GetDataTypeName(band.DataType)\n # data = band.ReadAsArray()\n # no_data_val = band.GetNoDataValue()\n # averaged_data = filter_data(data, size, no_data_val)\n log.info('Calculated average for {}'.format(basename(tif)))\n\n output_file = join(out_dir, 'average_' + basename(tif))\n src_gt = data_set.GetGeoTransform()\n tmp_file = '/tmp/tmp_{}.tif'.format(mpiops.chunk_index)\n resample_cmd = [TRANSLATE] + [tif, tmp_file] + \\\n ['-tr', str(src_gt[1]*size), str(src_gt[1]*size)] + \\\n ['-r', 'bilinear']\n check_call(resample_cmd)\n rollback_cmd = [TRANSLATE] + [tmp_file, output_file] + \\\n ['-tr', str(src_gt[1]), str(src_gt[1])]\n 
check_call(rollback_cmd)\n log.info('Finished converting {}'.format(basename(tif)))\n\n\ndef filter_data(data, size, no_data_val=None):\n \"\"\"\n This does not work with masked array.\n ndimage.uniform_filter does not respect masked array\n Parameters\n ----------\n data\n size\n no_data_val\n\n Returns\n -------\n\n \"\"\"\n if no_data_val:\n mask = data == no_data_val\n data[mask] = np.nan\n averaged_data = np.zeros_like(data)\n ndimage.uniform_filter(data,\n output=averaged_data,\n size=size,\n mode='nearest')\n return averaged_data\n\n\ndef filter_center(data, size=3, no_data_val=None, func=np.nanmean,\n mask_no_data=False):\n \"\"\"\n Parameters\n ----------\n data: input data\n size: odd number uniform filtering kernel size\n no_data_val: value in matrix that is treated as no data value\n func: function to use, choose from np.nanmean/median/max/min etc.\n mask_no_data: bool, if True will keep the original no data pixel intact\n\n Returns: nanmean of the matrix A filtered by a uniform kernel of size=size\n -------\n Adapted from: http://stackoverflow.com/questions/23829097/python-numpy-fastest-method-for-2d-kernel-rank-filtering-on-masked-arrays-and-o?rq=1\n\n Notes\n -----\n This function `centers` the kernel at the target pixel.\n This is slightly different from scipy.ndimage.uniform_filter application.\n In scipy.ndimage.uniform_filter, a convolution approach is implemented.\n An equivalent is scipy.ndimage.uniform_filter like convolution approach\n with no_data_val/nan handling can be found in\n filter_broadcast_uniform_filter in this module.\n\n Change function to nanmedian, nanmax, nanmin as required.\n \"\"\"\n\n assert size % 2 == 1, 'Please supply an odd size'\n rows, cols = data.shape\n\n padded_data = np.empty(shape=(rows + size-1,\n cols + size-1),\n dtype=data.dtype)\n padded_data[:] = np.nan\n rows_pad, cols_pad = padded_data.shape\n\n if no_data_val is not None:\n mask = data == no_data_val\n data[mask] = np.nan\n\n padded_data[size//2:rows_pad - size//2,\n size//2: cols_pad - size//2] = data.copy()\n\n row, col = data.shape\n\n stride_data = as_strided(padded_data, (row, col, size, size),\n padded_data.strides+padded_data.strides)\n stride_data = stride_data.copy().reshape((row, col, size**2))\n\n avg = func(stride_data, axis=2)\n avg[np.isnan(avg)] = no_data_val\n\n if mask_no_data:\n avg[mask] = no_data_val\n\n return avg\n\n\ndef filter_uniform_filter(data, size=3, no_data_val=None,\n func=np.nanmean):\n \"\"\"\n Parameters\n ----------\n A = input data\n size = odd number uniform filtering kernel size\n no_data_val = value in matrix that is treated as no data value\n\n Returns: nanmean of the matrix A filtered by a uniform kernel of size=size\n -------\n Adapted from: http://stackoverflow.com/questions/23829097/python-numpy-fastest-method-for-2d-kernel-rank-filtering-on-masked-arrays-and-o?rq=1\n\n Notes:\n This is equivalent to scipy.ndimage.uniform_filter, but can handle nan's,\n and can use numpy nanmean/median/max/min functions.\n\n no_data_val/nan handling can be found in filter_broadcast_uniform_filter in\n this module.\n\n Change function to nanmeadian, nanmax, nanmin as required.\n \"\"\"\n\n assert size % 2 == 1, 'Please supply an odd size'\n rows, cols = data.shape\n\n padded_A = np.empty(shape=(rows + size-1,\n cols + size-1),\n dtype=data.dtype)\n padded_A[:] = np.nan\n rows_pad, cols_pad = padded_A.shape\n\n if no_data_val:\n mask = data == no_data_val\n data[mask] = np.nan\n\n padded_A[size-1: rows_pad, size - 1: cols_pad] = data.copy()\n\n n, m = 
data.shape\n strided_data = as_strided(padded_A, (n, m, size, size),\n padded_A.strides+padded_A.strides)\n strided_data = strided_data.copy().reshape((n, m, size**2))\n\n return func(strided_data, axis=2)\n\n\[email protected]()\[email protected]('input_dir', type=click.Path(exists=True))\[email protected]('out_dir', type=click.Path(exists=True))\[email protected]('-f', '--func',\n type=click.Choice(['nanmean', 'nanmedian',\n 'nanmax', 'nanmin']),\n default='nanmean', help='Level of logging')\[email protected]('-p', '--partitions', type=int, default=1,\n help='Number of partitions for calculating 2d statistics')\[email protected]('-s', '--size', type=int, default=3,\n help='size of the uniform filter to '\n 'calculate 2d stats with the uniform kernel '\n 'centered around the target pixel for continuous data. '\n 'Categorical data are copied unchanged.')\[email protected]('-m', '--mask', type=bool, default=False,\n help='whether to keep the original no data pixels intact')\ndef mean(input_dir, out_dir, size, func, partitions, mask):\n input_dir = abspath(input_dir)\n if isdir(input_dir):\n log.info('Reading tifs from {}'.format(input_dir))\n tifs = glob.glob(join(input_dir, '*.tif'))\n else:\n assert isfile(input_dir)\n tifs = [input_dir]\n\n process_tifs = np.array_split(tifs, mpiops.chunks)[mpiops.chunk_index]\n\n for tif in process_tifs:\n log.info('Starting to average {}'.format(basename(tif)))\n treat_file(tif, out_dir, size, func, partitions, mask)\n log.info('Finished averaging {}'.format(basename(tif)))\n\n\ndef treat_file(tif, out_dir, size, func, partitions, mask_no_data=False):\n \"\"\"\n Parameters\n ----------\n tif: input geotif\n out_dir: output dir\n size: odd int (2n+1)\n size of kernel, has to be odd\n func: str\n one of nanmean, nanmedian, nanmax, nanmin\n partitions: int\n number of partitions for calculating 2d statistics\n mask_no_data: bool\n whether to keep the original nodatavalues intact\n\n Returns\n -------\n None\n \"\"\"\n data_source = gdal.Open(tif, gdal.GA_ReadOnly)\n band = data_source.GetRasterBand(1)\n no_data_val = band.GetNoDataValue()\n output_file = join(out_dir, basename(tif))\n if no_data_val is None:\n log.error('NoDataValue was not found in input image {} \\n'\n 'and this file was skipped'.format(basename(tif)))\n return\n if band.DataType <= 4:\n shutil.copy(tif, output_file)\n data_source = None\n return\n out_ds = gdal.GetDriverByName('GTiff').Create(\n output_file, data_source.RasterXSize, data_source.RasterYSize,\n 1, band.DataType)\n out_band = out_ds.GetRasterBand(1)\n\n tif_rows = data_source.RasterYSize\n partition_rows = np.array_split(range(tif_rows), partitions)\n\n xoff = 0\n win_xsize = data_source.RasterXSize\n pad_width = size // 2\n\n for p in range(partitions):\n rows = partition_rows[p]\n\n # grab data with pad_width added appropriately\n _ysize, win_ysize, yoff = _edge_adjust(p, pad_width, partitions, rows)\n\n data = band.ReadAsArray(xoff=xoff, yoff=yoff,\n win_xsize=win_xsize, win_ysize=win_ysize)\n\n averaged_data = filter_center(data, size, no_data_val, func_map[func],\n mask_no_data)\n\n # discard pad_width\n averaged_data = averaged_data[_ysize: len(rows) + _ysize]\n\n out_band.WriteArray(averaged_data,\n xoff=0, yoff=int(rows[0]))\n out_band.FlushCache() # Write data to disc\n log.info('Calculated average for {} partition {}'.format(\n basename(tif), p))\n\n out_band.SetNoDataValue(no_data_val)\n out_ds.SetGeoTransform(data_source.GetGeoTransform())\n out_ds.SetProjection(data_source.GetProjection())\n\n out_ds = None # 
close out_ds\n band = None\n data_source = None # close dataset\n\n\ndef _edge_adjust(partition, pad_width, partitions, rows):\n # The following if else is to make sure we are not having\n # partition and mpi splitting effects\n # when p=0 don't look back\n if partition == 0:\n yoff = int(rows[0])\n win_ysize = len(rows) + pad_width\n _ysize = 0\n elif partition == partitions - 1:\n yoff = int(rows[0]) - pad_width\n win_ysize = len(rows) + pad_width\n _ysize = pad_width\n else:\n yoff = int(rows[0]) - pad_width\n win_ysize = len(rows) + pad_width * 2\n _ysize = pad_width\n if partitions == 1:\n yoff = int(rows[0])\n win_ysize = len(rows)\n _ysize = 0\n\n return _ysize, win_ysize, yoff\n" ]
[ [ "numpy.isnan", "scipy.ndimage.uniform_filter", "numpy.lib.stride_tricks.as_strided", "numpy.zeros_like", "numpy.array_split", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
Quansight-Labs/cudf
[ "d05de978f2d1f34b7629bd54ab9485df1f9949ef" ]
[ "python/cudf/cudf/tests/test_binops.py" ]
[ "# Copyright (c) 2018-2021, NVIDIA CORPORATION.\n\nfrom __future__ import division\n\nimport decimal\nimport operator\nimport random\nfrom itertools import product\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nimport cudf\nfrom cudf.core import Series\nfrom cudf.core.index import as_index\nfrom cudf.testing import _utils as utils\nfrom cudf.utils.dtypes import (\n BOOL_TYPES,\n DATETIME_TYPES,\n FLOAT_TYPES,\n INTEGER_TYPES,\n NUMERIC_TYPES,\n TIMEDELTA_TYPES,\n)\n\nSTRING_TYPES = {\"str\"}\n\n_binops = [\n operator.add,\n operator.sub,\n operator.mul,\n operator.floordiv,\n operator.truediv,\n operator.mod,\n operator.pow,\n]\n\n\[email protected](\"obj_class\", [\"Series\", \"Index\"])\[email protected](\"binop\", _binops)\ndef test_series_binop(binop, obj_class):\n nelem = 1000\n arr1 = utils.gen_rand(\"float64\", nelem) * 10000\n # Keeping a low value because CUDA 'pow' has 2 full range error\n arr2 = utils.gen_rand(\"float64\", nelem) * 10\n\n sr1 = Series(arr1)\n sr2 = Series(arr2)\n\n if obj_class == \"Index\":\n sr1 = as_index(sr1)\n sr2 = as_index(sr2)\n\n result = binop(sr1, sr2)\n expect = binop(pd.Series(arr1), pd.Series(arr2))\n\n if obj_class == \"Index\":\n result = Series(result)\n\n utils.assert_eq(result, expect)\n\n\[email protected](\"binop\", _binops)\ndef test_series_binop_concurrent(binop):\n def func(index):\n arr = np.random.random(100) * 10\n sr = Series(arr)\n\n result = binop(sr.astype(\"int32\"), sr)\n expect = binop(arr.astype(\"int32\"), arr)\n\n np.testing.assert_almost_equal(result.to_array(), expect, decimal=5)\n\n from concurrent.futures import ThreadPoolExecutor\n\n indices = range(10)\n with ThreadPoolExecutor(4) as e: # four processes\n list(e.map(func, indices))\n\n\[email protected](\"use_cudf_scalar\", [False, True])\[email protected](\"obj_class\", [\"Series\", \"Index\"])\[email protected](\"nelem,binop\", list(product([1, 2, 100], _binops)))\ndef test_series_binop_scalar(nelem, binop, obj_class, use_cudf_scalar):\n arr = np.random.random(nelem)\n rhs = random.choice(arr).item()\n\n sr = Series(arr)\n if obj_class == \"Index\":\n sr = as_index(sr)\n\n if use_cudf_scalar:\n result = binop(sr, rhs)\n else:\n result = binop(sr, cudf.Scalar(rhs))\n\n if obj_class == \"Index\":\n result = Series(result)\n\n np.testing.assert_almost_equal(result.to_array(), binop(arr, rhs))\n\n\n_bitwise_binops = [operator.and_, operator.or_, operator.xor]\n\n\n_int_types = [\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"uint8\",\n \"uint16\",\n \"uint32\",\n]\n\n\[email protected](\"obj_class\", [\"Series\", \"Index\"])\[email protected](\"binop\", _bitwise_binops)\[email protected](\n \"lhs_dtype,rhs_dtype\", list(product(_int_types, _int_types))\n)\ndef test_series_bitwise_binop(binop, obj_class, lhs_dtype, rhs_dtype):\n arr1 = (np.random.random(100) * 100).astype(lhs_dtype)\n sr1 = Series(arr1)\n\n arr2 = (np.random.random(100) * 100).astype(rhs_dtype)\n sr2 = Series(arr2)\n\n if obj_class == \"Index\":\n sr1 = as_index(sr1)\n sr2 = as_index(sr2)\n\n result = binop(sr1, sr2)\n\n if obj_class == \"Index\":\n result = Series(result)\n\n np.testing.assert_almost_equal(result.to_array(), binop(arr1, arr2))\n\n\n_logical_binops = [\n (operator.and_, operator.and_),\n (operator.or_, operator.or_),\n (np.logical_and, cudf.logical_and),\n (np.logical_or, cudf.logical_or),\n]\n\n\[email protected](\"lhstype\", _int_types + [np.bool_])\[email protected](\"rhstype\", _int_types + [np.bool_])\[email protected](\"binop,cubinop\", _logical_binops)\ndef 
test_series_logical_binop(lhstype, rhstype, binop, cubinop):\n arr1 = pd.Series(np.random.choice([True, False], 10))\n if lhstype is not np.bool_:\n arr1 = arr1 * (np.random.random(10) * 100).astype(lhstype)\n sr1 = Series(arr1)\n\n arr2 = pd.Series(np.random.choice([True, False], 10))\n if rhstype is not np.bool_:\n arr2 = arr2 * (np.random.random(10) * 100).astype(rhstype)\n sr2 = Series(arr2)\n\n result = cubinop(sr1, sr2)\n expect = binop(arr1, arr2)\n\n utils.assert_eq(result, expect)\n\n\n_cmpops = [\n operator.lt,\n operator.gt,\n operator.le,\n operator.ge,\n operator.eq,\n operator.ne,\n]\n\n\[email protected](\"obj_class\", [\"Series\", \"Index\"])\[email protected](\"cmpop\", _cmpops)\[email protected](\n \"dtype\", [\"int8\", \"int32\", \"int64\", \"float32\", \"float64\", \"datetime64[ms]\"]\n)\ndef test_series_compare(cmpop, obj_class, dtype):\n arr1 = np.random.randint(0, 100, 100).astype(dtype)\n arr2 = np.random.randint(0, 100, 100).astype(dtype)\n sr1 = Series(arr1)\n sr2 = Series(arr2)\n\n if obj_class == \"Index\":\n sr1 = as_index(sr1)\n sr2 = as_index(sr2)\n\n result1 = cmpop(sr1, sr1)\n result2 = cmpop(sr2, sr2)\n result3 = cmpop(sr1, sr2)\n\n if obj_class == \"Index\":\n result1 = Series(result1)\n result2 = Series(result2)\n result3 = Series(result3)\n\n np.testing.assert_equal(result1.to_array(), cmpop(arr1, arr1))\n np.testing.assert_equal(result2.to_array(), cmpop(arr2, arr2))\n np.testing.assert_equal(result3.to_array(), cmpop(arr1, arr2))\n\n\ndef _series_compare_nulls_typegen():\n tests = []\n tests += list(product(DATETIME_TYPES, DATETIME_TYPES))\n tests += list(product(TIMEDELTA_TYPES, TIMEDELTA_TYPES))\n tests += list(product(NUMERIC_TYPES, NUMERIC_TYPES))\n tests += list(product(STRING_TYPES, STRING_TYPES))\n\n return tests\n\n\[email protected](\"cmpop\", _cmpops)\[email protected](\"dtypes\", _series_compare_nulls_typegen())\ndef test_series_compare_nulls(cmpop, dtypes):\n ltype, rtype = dtypes\n\n ldata = [1, 2, None, None, 5]\n rdata = [2, 1, None, 4, None]\n\n lser = Series(ldata, dtype=ltype)\n rser = Series(rdata, dtype=rtype)\n\n lmask = ~lser.isnull()\n rmask = ~rser.isnull()\n\n expect_mask = np.logical_and(lmask, rmask)\n expect = cudf.Series([None] * 5, dtype=\"bool\")\n expect[expect_mask] = cmpop(lser[expect_mask], rser[expect_mask])\n\n got = cmpop(lser, rser)\n utils.assert_eq(expect, got)\n\n\[email protected](\n \"obj\", [pd.Series([\"a\", \"b\", None, \"d\", \"e\", None], dtype=\"string\"), \"a\"]\n)\[email protected](\"cmpop\", _cmpops)\[email protected](\n \"cmp_obj\",\n [pd.Series([\"b\", \"a\", None, \"d\", \"f\", None], dtype=\"string\"), \"a\"],\n)\ndef test_string_series_compare(obj, cmpop, cmp_obj):\n\n g_obj = obj\n if isinstance(g_obj, pd.Series):\n g_obj = Series.from_pandas(g_obj)\n g_cmp_obj = cmp_obj\n if isinstance(g_cmp_obj, pd.Series):\n g_cmp_obj = Series.from_pandas(g_cmp_obj)\n got = cmpop(g_obj, g_cmp_obj)\n expected = cmpop(obj, cmp_obj)\n\n if isinstance(expected, pd.Series):\n expected = cudf.from_pandas(expected)\n\n utils.assert_eq(expected, got)\n\n\[email protected](\"obj_class\", [\"Series\", \"Index\"])\[email protected](\"nelem\", [1, 2, 100])\[email protected](\"cmpop\", _cmpops)\[email protected](\"dtype\", utils.NUMERIC_TYPES + [\"datetime64[ms]\"])\[email protected](\"use_cudf_scalar\", [True, False])\ndef test_series_compare_scalar(\n nelem, cmpop, obj_class, dtype, use_cudf_scalar\n):\n arr1 = np.random.randint(0, 100, 100).astype(dtype)\n sr1 = Series(arr1)\n rhs = random.choice(arr1).item()\n\n if 
use_cudf_scalar:\n rhs = cudf.Scalar(rhs)\n\n if obj_class == \"Index\":\n sr1 = as_index(sr1)\n\n result1 = cmpop(sr1, rhs)\n result2 = cmpop(rhs, sr1)\n\n if obj_class == \"Index\":\n result1 = Series(result1)\n result2 = Series(result2)\n\n np.testing.assert_equal(result1.to_array(), cmpop(arr1, rhs))\n np.testing.assert_equal(result2.to_array(), cmpop(rhs, arr1))\n\n\n_nulls = [\"none\", \"some\"]\n\n\[email protected](\"nelem\", [1, 7, 8, 9, 32, 64, 128])\[email protected](\"lhs_nulls,rhs_nulls\", list(product(_nulls, _nulls)))\ndef test_validity_add(nelem, lhs_nulls, rhs_nulls):\n np.random.seed(0)\n # LHS\n lhs_data = np.random.random(nelem)\n if lhs_nulls == \"some\":\n lhs_mask = utils.random_bitmask(nelem)\n lhs_bitmask = utils.expand_bits_to_bytes(lhs_mask)[:nelem]\n lhs_null_count = utils.count_zero(lhs_bitmask)\n assert lhs_null_count >= 0\n lhs = Series.from_masked_array(lhs_data, lhs_mask)\n assert lhs.null_count == lhs_null_count\n else:\n lhs = Series(lhs_data)\n # RHS\n rhs_data = np.random.random(nelem)\n if rhs_nulls == \"some\":\n rhs_mask = utils.random_bitmask(nelem)\n rhs_bitmask = utils.expand_bits_to_bytes(rhs_mask)[:nelem]\n rhs_null_count = utils.count_zero(rhs_bitmask)\n assert rhs_null_count >= 0\n rhs = Series.from_masked_array(rhs_data, rhs_mask)\n assert rhs.null_count == rhs_null_count\n else:\n rhs = Series(rhs_data)\n # Result\n res = lhs + rhs\n if lhs_nulls == \"some\" and rhs_nulls == \"some\":\n res_mask = np.asarray(\n utils.expand_bits_to_bytes(lhs_mask & rhs_mask), dtype=np.bool_\n )[:nelem]\n if lhs_nulls == \"some\" and rhs_nulls == \"none\":\n res_mask = np.asarray(\n utils.expand_bits_to_bytes(lhs_mask), dtype=np.bool_\n )[:nelem]\n if lhs_nulls == \"none\" and rhs_nulls == \"some\":\n res_mask = np.asarray(\n utils.expand_bits_to_bytes(rhs_mask), dtype=np.bool_\n )[:nelem]\n # Fill NA values\n na_value = -10000\n got = res.fillna(na_value).to_array()\n expect = lhs_data + rhs_data\n if lhs_nulls == \"some\" or rhs_nulls == \"some\":\n expect[~res_mask] = na_value\n\n np.testing.assert_array_equal(expect, got)\n\n\[email protected](\"obj_class\", [\"Series\", \"Index\"])\[email protected](\n \"binop,lhs_dtype,rhs_dtype\",\n list(\n product(\n [operator.add, operator.mul],\n utils.NUMERIC_TYPES,\n utils.NUMERIC_TYPES,\n )\n ),\n)\ndef test_series_binop_mixed_dtype(binop, lhs_dtype, rhs_dtype, obj_class):\n nelem = 10\n lhs = (np.random.random(nelem) * nelem).astype(lhs_dtype)\n rhs = (np.random.random(nelem) * nelem).astype(rhs_dtype)\n\n sr1 = Series(lhs)\n sr2 = Series(rhs)\n\n if obj_class == \"Index\":\n sr1 = as_index(sr1)\n sr2 = as_index(sr2)\n\n result = binop(Series(sr1), Series(sr2))\n\n if obj_class == \"Index\":\n result = Series(result)\n\n np.testing.assert_almost_equal(result.to_array(), binop(lhs, rhs))\n\n\[email protected](\"obj_class\", [\"Series\", \"Index\"])\[email protected](\n \"cmpop,lhs_dtype,rhs_dtype\",\n list(product(_cmpops, utils.NUMERIC_TYPES, utils.NUMERIC_TYPES)),\n)\ndef test_series_cmpop_mixed_dtype(cmpop, lhs_dtype, rhs_dtype, obj_class):\n nelem = 5\n lhs = (np.random.random(nelem) * nelem).astype(lhs_dtype)\n rhs = (np.random.random(nelem) * nelem).astype(rhs_dtype)\n\n sr1 = Series(lhs)\n sr2 = Series(rhs)\n\n if obj_class == \"Index\":\n sr1 = as_index(sr1)\n sr2 = as_index(sr2)\n\n result = cmpop(Series(sr1), Series(sr2))\n\n if obj_class == \"Index\":\n result = Series(result)\n\n np.testing.assert_array_equal(result.to_array(), cmpop(lhs, rhs))\n\n\n_reflected_ops = [\n lambda x: 1 + x,\n lambda x: 2 
* x,\n lambda x: 2 - x,\n lambda x: 2 // x,\n lambda x: 2 / x,\n lambda x: 3 + x,\n lambda x: 3 * x,\n lambda x: 3 - x,\n lambda x: 3 // x,\n lambda x: 3 / x,\n lambda x: 3 % x,\n lambda x: -1 + x,\n lambda x: -2 * x,\n lambda x: -2 - x,\n lambda x: -2 // x,\n lambda x: -2 / x,\n lambda x: -3 + x,\n lambda x: -3 * x,\n lambda x: -3 - x,\n lambda x: -3 // x,\n lambda x: -3 / x,\n lambda x: -3 % x,\n lambda x: 0 + x,\n lambda x: 0 * x,\n lambda x: 0 - x,\n lambda x: 0 // x,\n lambda x: 0 / x,\n]\n\n\[email protected](\"obj_class\", [\"Series\", \"Index\"])\[email protected](\n \"func, dtype\", list(product(_reflected_ops, utils.NUMERIC_TYPES))\n)\ndef test_reflected_ops_scalar(func, dtype, obj_class):\n # create random series\n np.random.seed(12)\n random_series = utils.gen_rand(dtype, 100, low=10)\n\n # gpu series\n gs = Series(random_series)\n\n # class typing\n if obj_class == \"Index\":\n gs = as_index(gs)\n\n gs_result = func(gs)\n\n # class typing\n if obj_class == \"Index\":\n gs = Series(gs)\n\n # pandas\n ps_result = func(random_series)\n\n # verify\n np.testing.assert_allclose(ps_result, gs_result.to_array())\n\n\n_cudf_scalar_reflected_ops = [\n lambda x: cudf.Scalar(1) + x,\n lambda x: cudf.Scalar(2) * x,\n lambda x: cudf.Scalar(2) - x,\n lambda x: cudf.Scalar(2) // x,\n lambda x: cudf.Scalar(2) / x,\n lambda x: cudf.Scalar(3) + x,\n lambda x: cudf.Scalar(3) * x,\n lambda x: cudf.Scalar(3) - x,\n lambda x: cudf.Scalar(3) // x,\n lambda x: cudf.Scalar(3) / x,\n lambda x: cudf.Scalar(3) % x,\n lambda x: cudf.Scalar(-1) + x,\n lambda x: cudf.Scalar(-2) * x,\n lambda x: cudf.Scalar(-2) - x,\n lambda x: cudf.Scalar(-2) // x,\n lambda x: cudf.Scalar(-2) / x,\n lambda x: cudf.Scalar(-3) + x,\n lambda x: cudf.Scalar(-3) * x,\n lambda x: cudf.Scalar(-3) - x,\n lambda x: cudf.Scalar(-3) // x,\n lambda x: cudf.Scalar(-3) / x,\n lambda x: cudf.Scalar(-3) % x,\n lambda x: cudf.Scalar(0) + x,\n lambda x: cudf.Scalar(0) * x,\n lambda x: cudf.Scalar(0) - x,\n lambda x: cudf.Scalar(0) // x,\n lambda x: cudf.Scalar(0) / x,\n]\n\n\[email protected](\"obj_class\", [\"Series\", \"Index\"])\[email protected](\n \"funcs, dtype\",\n list(\n product(\n list(zip(_reflected_ops, _cudf_scalar_reflected_ops)),\n utils.NUMERIC_TYPES,\n )\n ),\n)\ndef test_reflected_ops_cudf_scalar(funcs, dtype, obj_class):\n cpu_func, gpu_func = funcs\n\n # create random series\n np.random.seed(12)\n random_series = utils.gen_rand(dtype, 100, low=10)\n\n # gpu series\n gs = Series(random_series)\n\n # class typing\n if obj_class == \"Index\":\n gs = as_index(gs)\n\n gs_result = gpu_func(gs)\n\n # class typing\n if obj_class == \"Index\":\n gs = Series(gs)\n\n # pandas\n ps_result = cpu_func(random_series)\n\n # verify\n np.testing.assert_allclose(ps_result, gs_result.to_array())\n\n\[email protected](\"binop\", _binops)\ndef test_different_shapes_and_columns(binop):\n\n # TODO: support `pow()` on NaN values. 
Particularly, the cases:\n # `pow(1, NaN) == 1` and `pow(NaN, 0) == 1`\n if binop is operator.pow:\n return\n\n # Empty frame on the right side\n pd_frame = binop(pd.DataFrame({\"x\": [1, 2]}), pd.DataFrame({}))\n cd_frame = binop(cudf.DataFrame({\"x\": [1, 2]}), cudf.DataFrame({}))\n utils.assert_eq(cd_frame, pd_frame)\n\n # Empty frame on the left side\n pd_frame = pd.DataFrame({}) + pd.DataFrame({\"x\": [1, 2]})\n cd_frame = cudf.DataFrame({}) + cudf.DataFrame({\"x\": [1, 2]})\n utils.assert_eq(cd_frame, pd_frame)\n\n # Note: the below rely on a discrepancy between cudf and pandas\n # While pandas inserts columns in alphabetical order, cudf inserts in the\n # order of whichever column comes first. So the following code will not\n # work if the names of columns are reversed i.e. ('y', 'x') != ('x', 'y')\n\n # More rows on the left side\n pd_frame = pd.DataFrame({\"x\": [1, 2, 3]}) + pd.DataFrame({\"y\": [1, 2]})\n cd_frame = cudf.DataFrame({\"x\": [1, 2, 3]}) + cudf.DataFrame({\"y\": [1, 2]})\n utils.assert_eq(cd_frame, pd_frame)\n\n # More rows on the right side\n pd_frame = pd.DataFrame({\"x\": [1, 2]}) + pd.DataFrame({\"y\": [1, 2, 3]})\n cd_frame = cudf.DataFrame({\"x\": [1, 2]}) + cudf.DataFrame({\"y\": [1, 2, 3]})\n utils.assert_eq(cd_frame, pd_frame)\n\n\[email protected](\"binop\", _binops)\ndef test_different_shapes_and_same_columns(binop):\n\n # TODO: support `pow()` on NaN values. Particularly, the cases:\n # `pow(1, NaN) == 1` and `pow(NaN, 0) == 1`\n if binop is operator.pow:\n return\n\n pd_frame = binop(\n pd.DataFrame({\"x\": [1, 2]}), pd.DataFrame({\"x\": [1, 2, 3]})\n )\n cd_frame = binop(\n cudf.DataFrame({\"x\": [1, 2]}), cudf.DataFrame({\"x\": [1, 2, 3]})\n )\n # cast x as float64 so it matches pandas dtype\n cd_frame[\"x\"] = cd_frame[\"x\"].astype(np.float64)\n utils.assert_eq(cd_frame, pd_frame)\n\n\[email protected](\"binop\", _binops)\ndef test_different_shapes_and_columns_with_unaligned_indices(binop):\n\n # TODO: support `pow()` on NaN values. 
Particularly, the cases:\n # `pow(1, NaN) == 1` and `pow(NaN, 0) == 1`\n if binop is operator.pow:\n return\n\n # Test with a RangeIndex\n pdf1 = pd.DataFrame({\"x\": [4, 3, 2, 1], \"y\": [7, 3, 8, 6]})\n # Test with a GenericIndex\n pdf2 = pd.DataFrame(\n {\"x\": [1, 2, 3, 7], \"y\": [4, 5, 6, 7]}, index=[0, 1, 3, 4]\n )\n # Test with a GenericIndex in a different order\n pdf3 = pd.DataFrame(\n {\"x\": [4, 5, 6, 7], \"y\": [1, 2, 3, 7], \"z\": [0, 5, 3, 7]},\n index=[0, 3, 5, 3],\n )\n gdf1 = cudf.DataFrame.from_pandas(pdf1)\n gdf2 = cudf.DataFrame.from_pandas(pdf2)\n gdf3 = cudf.DataFrame.from_pandas(pdf3)\n\n pd_frame = binop(binop(pdf1, pdf2), pdf3)\n cd_frame = binop(binop(gdf1, gdf2), gdf3)\n # cast x and y as float64 so it matches pandas dtype\n cd_frame[\"x\"] = cd_frame[\"x\"].astype(np.float64)\n cd_frame[\"y\"] = cd_frame[\"y\"].astype(np.float64)\n utils.assert_eq(cd_frame, pd_frame)\n\n\[email protected](\n \"df2\",\n [\n cudf.DataFrame({\"a\": [3, 2, 1]}, index=[3, 2, 1]),\n cudf.DataFrame([3, 2]),\n ],\n)\[email protected](\"binop\", [operator.eq, operator.ne])\ndef test_df_different_index_shape(df2, binop):\n df1 = cudf.DataFrame([1, 2, 3], index=[1, 2, 3])\n\n pdf1 = df1.to_pandas()\n pdf2 = df2.to_pandas()\n\n utils.assert_exceptions_equal(\n lfunc=binop,\n rfunc=binop,\n lfunc_args_and_kwargs=([pdf1, pdf2],),\n rfunc_args_and_kwargs=([df1, df2],),\n )\n\n\[email protected](\"op\", [operator.eq, operator.ne])\ndef test_boolean_scalar_binop(op):\n psr = pd.Series(np.random.choice([True, False], 10))\n gsr = cudf.from_pandas(psr)\n utils.assert_eq(op(psr, True), op(gsr, True))\n utils.assert_eq(op(psr, False), op(gsr, False))\n\n # cuDF scalar\n utils.assert_eq(op(psr, True), op(gsr, cudf.Scalar(True)))\n utils.assert_eq(op(psr, False), op(gsr, cudf.Scalar(False)))\n\n\n_operators_arithmetic = [\n \"add\",\n \"radd\",\n \"sub\",\n \"rsub\",\n \"mul\",\n \"rmul\",\n \"mod\",\n \"rmod\",\n \"pow\",\n \"rpow\",\n \"floordiv\",\n \"rfloordiv\",\n \"truediv\",\n \"rtruediv\",\n]\n\n_operators_comparison = [\"eq\", \"ne\", \"lt\", \"le\", \"gt\", \"ge\"]\n\n\[email protected](\"func\", _operators_arithmetic)\[email protected](\"has_nulls\", [True, False])\[email protected](\"fill_value\", [None, 27])\[email protected](\"dtype\", [\"float32\", \"float64\"])\ndef test_operator_func_between_series(dtype, func, has_nulls, fill_value):\n count = 1000\n gdf_series_a = utils.gen_rand_series(\n dtype, count, has_nulls=has_nulls, stride=10000\n )\n gdf_series_b = utils.gen_rand_series(\n dtype, count, has_nulls=has_nulls, stride=100\n )\n pdf_series_a = gdf_series_a.to_pandas()\n pdf_series_b = gdf_series_b.to_pandas()\n\n gdf_result = getattr(gdf_series_a, func)(\n gdf_series_b, fill_value=fill_value\n )\n pdf_result = getattr(pdf_series_a, func)(\n pdf_series_b, fill_value=fill_value\n )\n\n utils.assert_eq(pdf_result, gdf_result)\n\n\[email protected](\"func\", _operators_arithmetic)\[email protected](\"has_nulls\", [True, False])\[email protected](\"fill_value\", [None, 27])\[email protected](\"dtype\", [\"float32\", \"float64\"])\[email protected](\"use_cudf_scalar\", [False, True])\ndef test_operator_func_series_and_scalar(\n dtype, func, has_nulls, fill_value, use_cudf_scalar\n):\n count = 1000\n scalar = 59\n gdf_series = utils.gen_rand_series(\n dtype, count, has_nulls=has_nulls, stride=10000\n )\n pdf_series = gdf_series.to_pandas()\n\n gdf_series_result = getattr(gdf_series, func)(\n cudf.Scalar(scalar) if use_cudf_scalar else scalar,\n fill_value=fill_value,\n )\n 
pdf_series_result = getattr(pdf_series, func)(\n scalar, fill_value=fill_value\n )\n\n utils.assert_eq(pdf_series_result, gdf_series_result)\n\n\n_permu_values = [0, 1, None, np.nan]\n\n\[email protected](\"fill_value\", _permu_values)\[email protected](\"scalar_a\", _permu_values)\[email protected](\"scalar_b\", _permu_values)\[email protected](\"func\", _operators_comparison)\[email protected](\"dtype\", [\"float32\", \"float64\"])\ndef test_operator_func_between_series_logical(\n dtype, func, scalar_a, scalar_b, fill_value\n):\n\n gdf_series_a = Series([scalar_a], nan_as_null=False).astype(dtype)\n gdf_series_b = Series([scalar_b], nan_as_null=False).astype(dtype)\n\n pdf_series_a = gdf_series_a.to_pandas(nullable=True)\n pdf_series_b = gdf_series_b.to_pandas(nullable=True)\n\n gdf_series_result = getattr(gdf_series_a, func)(\n gdf_series_b, fill_value=fill_value\n )\n pdf_series_result = getattr(pdf_series_a, func)(\n pdf_series_b, fill_value=fill_value\n )\n expect = pdf_series_result\n got = gdf_series_result.to_pandas(nullable=True)\n\n # If fill_value is np.nan, things break down a bit,\n # because setting a NaN into a pandas nullable float\n # array still gets transformed to <NA>. As such,\n # pd_series_with_nulls.fillna(np.nan) has no effect.\n if (\n (pdf_series_a.isnull().sum() != pdf_series_b.isnull().sum())\n and np.isscalar(fill_value)\n and np.isnan(fill_value)\n ):\n with pytest.raises(AssertionError):\n utils.assert_eq(expect, got)\n return\n utils.assert_eq(expect, got)\n\n\[email protected](\"dtype\", [\"float32\", \"float64\"])\[email protected](\"func\", _operators_comparison)\[email protected](\"has_nulls\", [True, False])\[email protected](\"scalar\", [-59.0, np.nan, 0, 59.0])\[email protected](\"fill_value\", [None, True, False, 1.0])\[email protected](\"use_cudf_scalar\", [False, True])\ndef test_operator_func_series_and_scalar_logical(\n dtype, func, has_nulls, scalar, fill_value, use_cudf_scalar\n):\n gdf_series = utils.gen_rand_series(\n dtype, 1000, has_nulls=has_nulls, stride=10000\n )\n pdf_series = gdf_series.to_pandas(nullable=True)\n gdf_series_result = getattr(gdf_series, func)(\n cudf.Scalar(scalar) if use_cudf_scalar else scalar,\n fill_value=fill_value,\n )\n pdf_series_result = getattr(pdf_series, func)(\n scalar, fill_value=fill_value\n )\n\n expect = pdf_series_result\n got = gdf_series_result.to_pandas(nullable=True)\n\n utils.assert_eq(expect, got)\n\n\[email protected](\"func\", _operators_arithmetic)\[email protected](\"nulls\", _nulls)\[email protected](\"fill_value\", [None, 27])\[email protected](\"other\", [\"df\", \"scalar\"])\ndef test_operator_func_dataframe(func, nulls, fill_value, other):\n num_rows = 100\n num_cols = 3\n\n def gen_df():\n pdf = pd.DataFrame()\n from string import ascii_lowercase\n\n cols = np.random.choice(num_cols + 5, num_cols, replace=False)\n\n for i in range(num_cols):\n colname = ascii_lowercase[cols[i]]\n data = utils.gen_rand(\"float64\", num_rows) * 10000\n if nulls == \"some\":\n idx = np.random.choice(\n num_rows, size=int(num_rows / 2), replace=False\n )\n data[idx] = np.nan\n pdf[colname] = data\n return pdf\n\n pdf1 = gen_df()\n pdf2 = gen_df() if other == \"df\" else 59.0\n gdf1 = cudf.DataFrame.from_pandas(pdf1)\n gdf2 = cudf.DataFrame.from_pandas(pdf2) if other == \"df\" else 59.0\n\n got = getattr(gdf1, func)(gdf2, fill_value=fill_value)\n expect = getattr(pdf1, func)(pdf2, fill_value=fill_value)[list(got._data)]\n\n utils.assert_eq(expect, got)\n\n\[email protected](\"func\", _operators_arithmetic + 
_operators_comparison)\[email protected](\"rhs\", [0, 1, 2, 128])\ndef test_binop_bool_uint(func, rhs):\n # TODO: remove this once issue #2172 is resolved\n if func == \"rmod\" or func == \"rfloordiv\":\n return\n psr = pd.Series([True, False, False])\n gsr = cudf.from_pandas(psr)\n utils.assert_eq(\n getattr(psr, func)(rhs), getattr(gsr, func)(rhs), check_dtype=False\n )\n\n\ndef test_series_misc_binop():\n pds = pd.Series([1, 2, 4], name=\"abc xyz\")\n gds = cudf.Series([1, 2, 4], name=\"abc xyz\")\n\n utils.assert_eq(pds + 1, gds + 1)\n utils.assert_eq(1 + pds, 1 + gds)\n\n utils.assert_eq(pds + pds, gds + gds)\n\n pds1 = pd.Series([1, 2, 4], name=\"hello world\")\n gds1 = cudf.Series([1, 2, 4], name=\"hello world\")\n\n utils.assert_eq(pds + pds1, gds + gds1)\n utils.assert_eq(pds1 + pds, gds1 + gds)\n\n utils.assert_eq(pds1 + pds + 5, gds1 + gds + 5)\n\n\ndef test_int8_float16_binop():\n a = cudf.Series([1], dtype=\"int8\")\n b = np.float16(2)\n expect = cudf.Series([0.5])\n got = a / b\n utils.assert_eq(expect, got, check_dtype=False)\n\n\[email protected](\"dtype\", [\"int64\", \"float64\", \"str\"])\ndef test_vector_to_none_binops(dtype):\n data = Series([1, 2, 3, None], dtype=dtype)\n\n expect = Series([None] * 4).astype(dtype)\n got = data + None\n\n utils.assert_eq(expect, got)\n\n\[email protected](\n \"lhs\",\n [\n 1,\n 3,\n 4,\n pd.Series([5, 6, 2]),\n pd.Series([0, 10, 20, 30, 3, 4, 5, 6, 2]),\n 6,\n ],\n)\[email protected](\"rhs\", [1, 3, 4, pd.Series([5, 6, 2])])\[email protected](\n \"ops\",\n [\n (np.remainder, cudf.remainder),\n (np.floor_divide, cudf.floor_divide),\n (np.subtract, cudf.subtract),\n (np.add, cudf.add),\n (np.true_divide, cudf.true_divide),\n (np.multiply, cudf.multiply),\n ],\n)\ndef test_ufunc_ops(lhs, rhs, ops):\n np_op, cu_op = ops\n\n if isinstance(lhs, pd.Series):\n culhs = cudf.from_pandas(lhs)\n else:\n culhs = lhs\n\n if isinstance(rhs, pd.Series):\n curhs = cudf.from_pandas(rhs)\n else:\n curhs = rhs\n\n expect = np_op(lhs, rhs)\n got = cu_op(culhs, curhs)\n if np.isscalar(expect):\n assert got == expect\n else:\n utils.assert_eq(\n expect, got,\n )\n\n\ndef dtype_scalar(val, dtype):\n if dtype == \"str\":\n return str(val)\n dtype = np.dtype(dtype)\n if dtype.type in {np.datetime64, np.timedelta64}:\n res, _ = np.datetime_data(dtype)\n return dtype.type(val, res)\n else:\n return dtype.type(val)\n\n\ndef make_valid_scalar_add_data():\n valid = set()\n\n # to any int, we may add any kind of\n # other int, float, datetime timedelta, or bool\n valid |= set(\n product(\n INTEGER_TYPES,\n FLOAT_TYPES | DATETIME_TYPES | TIMEDELTA_TYPES | BOOL_TYPES,\n )\n )\n\n # to any float, we may add any int, float, or bool\n valid |= set(\n product(FLOAT_TYPES, INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES)\n )\n\n # to any datetime, we may add any int, timedelta, or bool\n valid |= set(\n product(DATETIME_TYPES, INTEGER_TYPES | TIMEDELTA_TYPES | BOOL_TYPES)\n )\n\n # to any timedelta, we may add any int, datetime, other timedelta, or bool\n valid |= set(\n product(TIMEDELTA_TYPES, INTEGER_TYPES | DATETIME_TYPES | BOOL_TYPES)\n )\n\n # to any bool, we may add any int, float, datetime, timedelta, or bool\n valid |= set(\n product(\n BOOL_TYPES,\n INTEGER_TYPES\n | FLOAT_TYPES\n | DATETIME_TYPES\n | TIMEDELTA_TYPES\n | BOOL_TYPES,\n )\n )\n\n # to any string, we may add any other string\n valid |= {(\"str\", \"str\")}\n\n return sorted(list(valid))\n\n\ndef make_invalid_scalar_add_data():\n invalid = set()\n\n # we can not add a datetime to a float\n invalid |= 
set(product(FLOAT_TYPES, DATETIME_TYPES))\n\n # We can not add a timedelta to a float\n invalid |= set(product(FLOAT_TYPES, TIMEDELTA_TYPES))\n\n # we can not add a float to any datetime\n invalid |= set(product(DATETIME_TYPES, FLOAT_TYPES))\n\n # we can not add a datetime to a datetime\n invalid |= set(product(DATETIME_TYPES, DATETIME_TYPES))\n\n # can not add a timedelta to a float\n invalid |= set(product(FLOAT_TYPES, TIMEDELTA_TYPES))\n\n return sorted(list(invalid))\n\n\n@pytest.mark.parametrize(\"dtype_l,dtype_r\", make_valid_scalar_add_data())\ndef test_scalar_add(dtype_l, dtype_r):\n test_value = 1\n\n lval_host = dtype_scalar(test_value, dtype=dtype_l)\n rval_host = dtype_scalar(test_value, dtype=dtype_r)\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n # expect = np.add(lval_host, rval_host)\n expect = lval_host + rval_host\n got = lval_gpu + rval_gpu\n\n assert expect == got.value\n if not dtype_l == dtype_r == \"str\":\n assert expect.dtype == got.dtype\n\n\n@pytest.mark.parametrize(\"dtype_l,dtype_r\", make_invalid_scalar_add_data())\ndef test_scalar_add_invalid(dtype_l, dtype_r):\n test_value = 1\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n with pytest.raises(TypeError):\n lval_gpu + rval_gpu\n\n\ndef make_scalar_difference_data():\n valid = set()\n\n # from an int, we may subtract any int, float, timedelta,\n # or boolean value\n valid |= set(\n product(\n INTEGER_TYPES,\n INTEGER_TYPES | FLOAT_TYPES | TIMEDELTA_TYPES | BOOL_TYPES,\n )\n )\n\n # from any float, we may subtract any int, float, or bool\n valid |= set(\n product(FLOAT_TYPES, INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES)\n )\n\n # from any datetime we may subtract any int, datetime, timedelta, or bool\n valid |= set(\n product(\n DATETIME_TYPES,\n INTEGER_TYPES | DATETIME_TYPES | TIMEDELTA_TYPES | BOOL_TYPES,\n )\n )\n\n # from any timedelta we may subtract any int, timedelta, or bool\n valid |= set(\n product(TIMEDELTA_TYPES, INTEGER_TYPES | TIMEDELTA_TYPES | BOOL_TYPES)\n )\n\n # from any bool we may subtract any int, float or timedelta\n valid |= set(\n product(BOOL_TYPES, INTEGER_TYPES | FLOAT_TYPES | TIMEDELTA_TYPES)\n )\n\n return sorted(list(valid))\n\n\ndef make_scalar_difference_data_invalid():\n invalid = set()\n\n # we can't subtract a datetime from an int\n invalid |= set(product(INTEGER_TYPES, DATETIME_TYPES))\n\n # we can't subtract a datetime or timedelta from a float\n invalid |= set(product(FLOAT_TYPES, DATETIME_TYPES | TIMEDELTA_TYPES))\n\n # we can't subtract a float from a datetime or timedelta\n invalid |= set(product(DATETIME_TYPES | TIMEDELTA_TYPES, FLOAT_TYPES))\n\n # We can't subtract a datetime from a timedelta\n invalid |= set(product(TIMEDELTA_TYPES, DATETIME_TYPES))\n\n # we can't subtract a datetime or bool from a bool\n invalid |= set(product(BOOL_TYPES, BOOL_TYPES | DATETIME_TYPES))\n\n return sorted(list(invalid))\n\n\n@pytest.mark.parametrize(\"dtype_l,dtype_r\", make_scalar_difference_data())\ndef test_scalar_difference(dtype_l, dtype_r):\n test_value = 1\n\n lval_host = dtype_scalar(test_value, dtype=dtype_l)\n rval_host = dtype_scalar(test_value, dtype=dtype_r)\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n expect = lval_host - rval_host\n got = lval_gpu - rval_gpu\n\n assert expect == got.value\n assert expect.dtype == got.dtype\n\n\n@pytest.mark.parametrize(\n \"dtype_l,dtype_r\", 
make_scalar_difference_data_invalid()\n)\ndef test_scalar_difference_invalid(dtype_l, dtype_r):\n test_value = 1\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n with pytest.raises(TypeError):\n lval_gpu - rval_gpu\n\n\ndef make_scalar_product_data():\n valid = set()\n\n # we can multiply an int, or bool by any int, float, timedelta, or bool\n valid |= set(\n product(\n INTEGER_TYPES | BOOL_TYPES,\n INTEGER_TYPES | FLOAT_TYPES | TIMEDELTA_TYPES | BOOL_TYPES,\n )\n )\n\n # we can multiply any timedelta by any int, or bool\n valid |= set(product(TIMEDELTA_TYPES, INTEGER_TYPES | BOOL_TYPES))\n\n # we can multiply a float by any int, float, or bool\n valid |= set(\n product(FLOAT_TYPES, INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES)\n )\n\n return sorted(list(valid))\n\n\ndef make_scalar_product_data_invalid():\n invalid = set()\n\n # can't multiply ints, floats, datetimes, timedeltas,\n # or bools by datetimes\n invalid |= set(\n product(\n INTEGER_TYPES\n | FLOAT_TYPES\n | DATETIME_TYPES\n | TIMEDELTA_TYPES\n | BOOL_TYPES,\n DATETIME_TYPES,\n )\n )\n\n # can't multiply datetimes with anything really\n invalid |= set(\n product(\n DATETIME_TYPES,\n INTEGER_TYPES\n | FLOAT_TYPES\n | DATETIME_TYPES\n | TIMEDELTA_TYPES\n | BOOL_TYPES,\n )\n )\n\n # can't multiply timedeltas by timedeltas\n invalid |= set(product(TIMEDELTA_TYPES, TIMEDELTA_TYPES))\n\n return sorted(list(invalid))\n\n\n@pytest.mark.parametrize(\"dtype_l,dtype_r\", make_scalar_product_data())\ndef test_scalar_product(dtype_l, dtype_r):\n test_value = 1\n\n lval_host = dtype_scalar(test_value, dtype=dtype_l)\n rval_host = dtype_scalar(test_value, dtype=dtype_r)\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n expect = lval_host * rval_host\n got = lval_gpu * rval_gpu\n\n assert expect == got.value\n assert expect.dtype == got.dtype\n\n\n@pytest.mark.parametrize(\"dtype_l,dtype_r\", make_scalar_product_data_invalid())\ndef test_scalar_product_invalid(dtype_l, dtype_r):\n test_value = 1\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n with pytest.raises(TypeError):\n lval_gpu * rval_gpu\n\n\ndef make_scalar_floordiv_data():\n valid = set()\n\n # we can divide ints and floats by other ints, floats, or bools\n valid |= set(\n product(\n INTEGER_TYPES | FLOAT_TYPES,\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n )\n )\n\n # we can divide timedeltas by ints, floats or other timedeltas\n valid |= set(\n product(TIMEDELTA_TYPES, INTEGER_TYPES | FLOAT_TYPES | TIMEDELTA_TYPES)\n )\n\n # we can divide bools by ints, floats or bools\n valid |= set(product(BOOL_TYPES, INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES))\n\n return sorted(list(valid))\n\n\ndef make_scalar_floordiv_data_invalid():\n invalid = set()\n\n # we can't divide numeric types into datelike types\n invalid |= set(\n product(\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n DATETIME_TYPES | TIMEDELTA_TYPES,\n )\n )\n\n # we can't divide datetime types into anything\n invalid |= set(\n product(\n DATETIME_TYPES,\n INTEGER_TYPES\n | FLOAT_TYPES\n | DATETIME_TYPES\n | TIMEDELTA_TYPES\n | BOOL_TYPES,\n )\n )\n\n # we can't divide timedeltas into bools, or datetimes\n invalid |= set(product(TIMEDELTA_TYPES, BOOL_TYPES | DATETIME_TYPES))\n\n return sorted(list(invalid))\n\n\n@pytest.mark.parametrize(\"dtype_l,dtype_r\", make_scalar_floordiv_data())\ndef test_scalar_floordiv(dtype_l, dtype_r):\n test_value = 1\n\n lval_host = dtype_scalar(test_value, 
dtype=dtype_l)\n rval_host = dtype_scalar(test_value, dtype=dtype_r)\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n expect = lval_host // rval_host\n got = lval_gpu // rval_gpu\n\n assert expect == got.value\n assert expect.dtype == got.dtype\n\n\n@pytest.mark.parametrize(\n \"dtype_l,dtype_r\", make_scalar_floordiv_data_invalid()\n)\ndef test_scalar_floordiv_invalid(dtype_l, dtype_r):\n test_value = 1\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n with pytest.raises(TypeError):\n lval_gpu // rval_gpu\n\n\ndef make_scalar_truediv_data():\n valid = set()\n\n # we can true divide ints, floats, or bools by other\n # ints, floats or bools\n valid |= set(\n product(\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n )\n )\n\n # we can true divide timedeltas by ints, floats or timedeltas\n valid |= set(product(TIMEDELTA_TYPES, INTEGER_TYPES | TIMEDELTA_TYPES))\n\n return sorted(list(valid))\n\n\ndef make_scalar_truediv_data_invalid():\n invalid = set()\n\n # we can't divide ints, floats or bools by datetimes\n # or timedeltas\n invalid |= set(\n product(\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n DATETIME_TYPES | TIMEDELTA_TYPES,\n )\n )\n\n # we can't true divide datetime types by anything\n invalid |= set(\n product(\n DATETIME_TYPES,\n INTEGER_TYPES\n | FLOAT_TYPES\n | DATETIME_TYPES\n | TIMEDELTA_TYPES\n | BOOL_TYPES,\n )\n )\n\n # we can't true divide timedeltas by datetimes or bools or floats\n invalid |= set(\n product(TIMEDELTA_TYPES, DATETIME_TYPES | BOOL_TYPES | FLOAT_TYPES)\n )\n\n return sorted(list(invalid))\n\n\n@pytest.mark.parametrize(\"dtype_l,dtype_r\", make_scalar_truediv_data())\ndef test_scalar_truediv(dtype_l, dtype_r):\n test_value = 1\n\n lval_host = dtype_scalar(test_value, dtype=dtype_l)\n rval_host = dtype_scalar(test_value, dtype=dtype_r)\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n expect = np.true_divide(lval_host, rval_host)\n got = lval_gpu / rval_gpu\n\n assert expect == got.value\n\n # numpy bug\n\n if np.dtype(dtype_l).itemsize <= 2 and np.dtype(dtype_r).itemsize <= 2:\n assert expect.dtype == \"float64\" and got.dtype == \"float32\"\n else:\n assert expect.dtype == got.dtype\n # assert expect.dtype == got.dtype\n\n\n@pytest.mark.parametrize(\"dtype_l,dtype_r\", make_scalar_truediv_data_invalid())\ndef test_scalar_truediv_invalid(dtype_l, dtype_r):\n test_value = 1\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n with pytest.raises(TypeError):\n lval_gpu / rval_gpu\n\n\ndef make_scalar_remainder_data():\n valid = set()\n\n # can mod numeric types with each other\n valid |= set(\n product(\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n )\n )\n\n # can mod timedeltas by other timedeltas\n valid |= set(product(TIMEDELTA_TYPES, TIMEDELTA_TYPES))\n\n return sorted(list(valid))\n\n\ndef make_scalar_remainder_data_invalid():\n invalid = set()\n\n # numeric types can't be modded against timedeltas\n # or datetimes. 
Also, datetimes can't be modded\n # against datetimes or timedeltas\n invalid |= set(\n product(\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES | DATETIME_TYPES,\n DATETIME_TYPES | TIMEDELTA_TYPES,\n )\n )\n\n # datetime and timedelta types can't be modded against\n # any numeric types\n invalid |= set(\n product(\n DATETIME_TYPES | TIMEDELTA_TYPES,\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n )\n )\n\n # timedeltas can't mod with datetimes\n invalid |= set(product(TIMEDELTA_TYPES, DATETIME_TYPES))\n\n return sorted(list(invalid))\n\n\n@pytest.mark.parametrize(\"dtype_l,dtype_r\", make_scalar_remainder_data())\ndef test_scalar_remainder(dtype_l, dtype_r):\n test_value = 1\n\n lval_host = dtype_scalar(test_value, dtype=dtype_l)\n rval_host = dtype_scalar(test_value, dtype=dtype_r)\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n expect = lval_host % rval_host\n got = lval_gpu % rval_gpu\n\n assert expect == got.value\n assert expect.dtype == got.dtype\n\n\n@pytest.mark.parametrize(\n \"dtype_l,dtype_r\", make_scalar_remainder_data_invalid()\n)\ndef test_scalar_remainder_invalid(dtype_l, dtype_r):\n test_value = 1\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n with pytest.raises(TypeError):\n lval_gpu % rval_gpu\n\n\ndef make_scalar_power_data():\n # only numeric values form valid operands for power\n return sorted(\n product(\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n )\n )\n\n\ndef make_scalar_power_data_invalid():\n invalid = set()\n\n # datetimes and timedeltas can't go in exponents\n invalid |= set(\n product(\n INTEGER_TYPES\n | FLOAT_TYPES\n | TIMEDELTA_TYPES\n | DATETIME_TYPES\n | BOOL_TYPES,\n DATETIME_TYPES | TIMEDELTA_TYPES,\n )\n )\n\n # datetimes and timedeltas may not be raised to\n # any exponent of any dtype\n invalid |= set(\n product(\n DATETIME_TYPES | TIMEDELTA_TYPES,\n DATETIME_TYPES\n | TIMEDELTA_TYPES\n | INTEGER_TYPES\n | FLOAT_TYPES\n | BOOL_TYPES,\n )\n )\n\n return sorted(list(invalid))\n\n\n@pytest.mark.parametrize(\"dtype_l,dtype_r\", make_scalar_power_data())\ndef test_scalar_power(dtype_l, dtype_r):\n test_value = 1\n\n lval_host = dtype_scalar(test_value, dtype=dtype_l)\n rval_host = dtype_scalar(test_value, dtype=dtype_r)\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n expect = lval_host ** rval_host\n got = lval_gpu ** rval_gpu\n\n assert expect == got.value\n assert expect.dtype == got.dtype\n\n\n@pytest.mark.parametrize(\"dtype_l,dtype_r\", make_scalar_power_data_invalid())\ndef test_scalar_power_invalid(dtype_l, dtype_r):\n test_value = 1\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n with pytest.raises(TypeError):\n lval_gpu ** rval_gpu\n\n\n@pytest.mark.parametrize(\n \"date_col\",\n [\n [\n \"2000-01-01 00:00:00.012345678\",\n \"2000-01-31 00:00:00.012345678\",\n \"2000-02-29 00:00:00.012345678\",\n ]\n ],\n)\n@pytest.mark.parametrize(\"n_periods\", [0, 1, -1, 12, -12])\n@pytest.mark.parametrize(\n \"frequency\",\n [\n \"months\",\n \"years\",\n \"days\",\n \"hours\",\n \"minutes\",\n \"seconds\",\n \"microseconds\",\n pytest.param(\n \"nanoseconds\",\n marks=pytest.mark.xfail(\n reason=\"https://github.com/pandas-dev/pandas/issues/36589\"\n ),\n ),\n ],\n)\n@pytest.mark.parametrize(\n \"dtype\",\n [\"datetime64[ns]\", \"datetime64[us]\", \"datetime64[ms]\", \"datetime64[s]\"],\n)\n@pytest.mark.parametrize(\"op\", [operator.add, 
operator.sub])\ndef test_datetime_dateoffset_binaryop(\n date_col, n_periods, frequency, dtype, op\n):\n gsr = cudf.Series(date_col, dtype=dtype)\n psr = gsr.to_pandas() # converts to nanos\n\n kwargs = {frequency: n_periods}\n\n goffset = cudf.DateOffset(**kwargs)\n poffset = pd.DateOffset(**kwargs)\n\n expect = op(psr, poffset)\n got = op(gsr, goffset)\n\n utils.assert_eq(expect, got)\n\n expect = op(psr, -poffset)\n got = op(gsr, -goffset)\n\n utils.assert_eq(expect, got)\n\n\[email protected](\n \"date_col\",\n [\n [\n \"2000-01-01 00:00:00.012345678\",\n \"2000-01-31 00:00:00.012345678\",\n \"2000-02-29 00:00:00.012345678\",\n ]\n ],\n)\[email protected](\n \"kwargs\",\n [\n {\"months\": 2, \"years\": 5},\n {\"microseconds\": 1, \"seconds\": 1},\n {\"months\": 2, \"years\": 5, \"seconds\": 923, \"microseconds\": 481},\n pytest.param(\n {\"milliseconds\": 4},\n marks=pytest.mark.xfail(\n reason=\"Pandas gets the wrong answer for milliseconds\"\n ),\n ),\n pytest.param(\n {\"milliseconds\": 4, \"years\": 2},\n marks=pytest.mark.xfail(\n reason=\"Pandas construction fails with these keywords\"\n ),\n ),\n pytest.param(\n {\"nanoseconds\": 12},\n marks=pytest.mark.xfail(\n reason=\"Pandas gets the wrong answer for nanoseconds\"\n ),\n ),\n ],\n)\[email protected](\"op\", [operator.add, operator.sub])\ndef test_datetime_dateoffset_binaryop_multiple(date_col, kwargs, op):\n\n gsr = cudf.Series(date_col, dtype=\"datetime64[ns]\")\n psr = gsr.to_pandas()\n\n poffset = pd.DateOffset(**kwargs)\n goffset = cudf.DateOffset(**kwargs)\n\n expect = op(psr, poffset)\n got = op(gsr, goffset)\n\n utils.assert_eq(expect, got)\n\n\[email protected](\n \"date_col\",\n [\n [\n \"2000-01-01 00:00:00.012345678\",\n \"2000-01-31 00:00:00.012345678\",\n \"2000-02-29 00:00:00.012345678\",\n ]\n ],\n)\[email protected](\"n_periods\", [0, 1, -1, 12, -12])\[email protected](\n \"frequency\",\n [\n \"months\",\n \"years\",\n \"days\",\n \"hours\",\n \"minutes\",\n \"seconds\",\n \"microseconds\",\n pytest.param(\n \"nanoseconds\",\n marks=pytest.mark.xfail(\n reason=\"https://github.com/pandas-dev/pandas/issues/36589\"\n ),\n ),\n ],\n)\[email protected](\n \"dtype\",\n [\"datetime64[ns]\", \"datetime64[us]\", \"datetime64[ms]\", \"datetime64[s]\"],\n)\ndef test_datetime_dateoffset_binaryop_reflected(\n date_col, n_periods, frequency, dtype\n):\n gsr = cudf.Series(date_col, dtype=dtype)\n psr = gsr.to_pandas() # converts to nanos\n\n kwargs = {frequency: n_periods}\n\n goffset = cudf.DateOffset(**kwargs)\n poffset = pd.DateOffset(**kwargs)\n\n expect = poffset + psr\n got = goffset + gsr\n\n utils.assert_eq(expect, got)\n\n with pytest.raises(TypeError):\n poffset - psr\n\n with pytest.raises(TypeError):\n goffset - gsr\n\n\[email protected](\"frame\", [cudf.Series, cudf.Index, cudf.DataFrame])\[email protected](\n \"dtype\", [\"int\", \"str\", \"datetime64[s]\", \"timedelta64[s]\", \"category\"]\n)\ndef test_binops_with_lhs_numpy_scalar(frame, dtype):\n data = [1, 2, 3, 4, 5]\n\n data = (\n frame({\"a\": data}, dtype=dtype)\n if isinstance(frame, cudf.DataFrame)\n else frame(data, dtype=dtype)\n )\n\n if dtype == \"datetime64[s]\":\n val = np.dtype(dtype).type(4, \"s\")\n elif dtype == \"timedelta64[s]\":\n val = np.dtype(dtype).type(4, \"s\")\n elif dtype == \"category\":\n val = np.int64(4)\n else:\n val = np.dtype(dtype).type(4)\n\n expected = val == data.to_pandas()\n got = val == data\n\n # In case of index, expected would be a numpy array\n if isinstance(data, cudf.BaseIndex):\n expected = 
pd.Index(expected)\n\n utils.assert_eq(expected, got)\n\n\[email protected](\n \"dtype\",\n [\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"uint8\",\n \"uint16\",\n \"uint32\",\n \"uint64\",\n \"float32\",\n \"float64\",\n \"datetime64[ns]\",\n \"datetime64[us]\",\n \"datetime64[ms]\",\n \"datetime64[s]\",\n \"timedelta64[ns]\",\n \"timedelta64[us]\",\n \"timedelta64[ms]\",\n \"timedelta64[s]\",\n ],\n)\[email protected](\"op\", _operators_comparison)\ndef test_binops_with_NA_consistent(dtype, op):\n data = [1, 2, 3]\n sr = cudf.Series(data, dtype=dtype)\n\n result = getattr(sr, op)(cudf.NA)\n if dtype in NUMERIC_TYPES:\n if op == \"ne\":\n expect_all = True\n else:\n expect_all = False\n assert (result == expect_all).all()\n elif dtype in DATETIME_TYPES & TIMEDELTA_TYPES:\n assert result._column.null_count == len(data)\n\n\[email protected](\n \"args\",\n [\n (\n operator.add,\n [\"1.5\", \"2.0\"],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"1.5\", \"2.0\"],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"3.0\", \"4.0\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n ),\n (\n operator.add,\n [\"1.5\", \"2.0\"],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"2.25\", \"1.005\"],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"3.75\", \"3.005\"],\n cudf.Decimal64Dtype(scale=3, precision=5),\n ),\n (\n operator.add,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=17),\n [\"0.1\", \"0.2\"],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"100.1\", \"200.2\"],\n cudf.Decimal64Dtype(scale=3, precision=18),\n ),\n (\n operator.sub,\n [\"1.5\", \"2.0\"],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"2.25\", \"1.005\"],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"-0.75\", \"0.995\"],\n cudf.Decimal64Dtype(scale=3, precision=5),\n ),\n (\n operator.sub,\n [\"1.5\", \"2.0\"],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"2.25\", \"1.005\"],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"-0.75\", \"0.995\"],\n cudf.Decimal64Dtype(scale=3, precision=5),\n ),\n (\n operator.sub,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=10),\n [\"0.1\", \"0.2\"],\n cudf.Decimal64Dtype(scale=6, precision=10),\n [\"99.9\", \"199.8\"],\n cudf.Decimal64Dtype(scale=6, precision=18),\n ),\n (\n operator.mul,\n [\"1.5\", \"2.0\"],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"1.5\", \"3.0\"],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"2.25\", \"6.0\"],\n cudf.Decimal64Dtype(scale=5, precision=7),\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n [\"0.1\", \"0.2\"],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"10.0\", \"40.0\"],\n cudf.Decimal64Dtype(scale=1, precision=8),\n ),\n (\n operator.mul,\n [\"1000\", \"2000\"],\n cudf.Decimal64Dtype(scale=-3, precision=4),\n [\"0.343\", \"0.500\"],\n cudf.Decimal64Dtype(scale=3, precision=3),\n [\"343.0\", \"1000.0\"],\n cudf.Decimal64Dtype(scale=0, precision=8),\n ),\n (\n operator.truediv,\n [\"1.5\", \"2.0\"],\n cudf.Decimal64Dtype(scale=2, precision=4),\n [\"1.5\", \"3.0\"],\n cudf.Decimal64Dtype(scale=1, precision=4),\n [\"1.0\", \"0.6\"],\n cudf.Decimal64Dtype(scale=1, precision=9),\n ),\n (\n operator.truediv,\n [\"110\", \"200\"],\n cudf.Decimal64Dtype(scale=-1, precision=3),\n [\"0.1\", \"0.2\"],\n cudf.Decimal64Dtype(scale=2, precision=4),\n [\"1000.0\", \"1000.0\"],\n cudf.Decimal64Dtype(scale=-3, precision=8),\n ),\n (\n operator.truediv,\n [\"132.86\", \"15.25\"],\n cudf.Decimal64Dtype(scale=4, precision=14),\n [\"2.34\", \"8.50\"],\n 
cudf.Decimal64Dtype(scale=2, precision=8),\n [\"56.77\", \"1.79\"],\n cudf.Decimal64Dtype(scale=2, precision=18),\n ),\n (\n operator.add,\n [\"1.5\", None, \"2.0\"],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"1.5\", None, \"2.0\"],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"3.0\", None, \"4.0\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n ),\n (\n operator.add,\n [\"1.5\", None],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"2.25\", \"1.005\"],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"3.75\", None],\n cudf.Decimal64Dtype(scale=3, precision=5),\n ),\n (\n operator.sub,\n [\"1.5\", None],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"2.25\", None],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"-0.75\", None],\n cudf.Decimal64Dtype(scale=3, precision=5),\n ),\n (\n operator.sub,\n [\"1.5\", \"2.0\"],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"2.25\", None],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"-0.75\", None],\n cudf.Decimal64Dtype(scale=3, precision=5),\n ),\n (\n operator.mul,\n [\"1.5\", None],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"1.5\", None],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"2.25\", None],\n cudf.Decimal64Dtype(scale=5, precision=7),\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=10),\n [\"0.1\", None],\n cudf.Decimal64Dtype(scale=3, precision=12),\n [\"10.0\", None],\n cudf.Decimal64Dtype(scale=1, precision=18),\n ),\n (\n operator.eq,\n [\"0.18\", \"0.42\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.18\", \"0.21\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [True, False],\n bool,\n ),\n (\n operator.eq,\n [\"0.18\", \"0.42\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.1800\", \"0.2100\"],\n cudf.Decimal64Dtype(scale=4, precision=5),\n [True, False],\n bool,\n ),\n (\n operator.eq,\n [\"100\", None],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-1, precision=4),\n [True, None],\n bool,\n ),\n (\n operator.ne,\n [\"0.06\", \"0.42\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.18\", \"0.42\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [True, False],\n bool,\n ),\n (\n operator.ne,\n [\"1.33\", \"1.21\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.1899\", \"1.21\"],\n cudf.Decimal64Dtype(scale=4, precision=5),\n [True, False],\n bool,\n ),\n (\n operator.ne,\n [\"300\", None],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n [\"110\", \"5500\"],\n cudf.Decimal64Dtype(scale=-1, precision=4),\n [True, None],\n bool,\n ),\n (\n operator.lt,\n [\"0.18\", \"0.42\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.10\", \"0.87\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [False, True, False],\n bool,\n ),\n (\n operator.lt,\n [\"0.18\", \"0.42\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.1000\", \"0.8700\", \"1.0000\"],\n cudf.Decimal64Dtype(scale=4, precision=5),\n [False, True, False],\n bool,\n ),\n (\n operator.lt,\n [\"200\", None, \"100\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n [\"100\", \"200\", \"100\"],\n cudf.Decimal64Dtype(scale=-1, precision=4),\n [False, None, False],\n bool,\n ),\n (\n operator.gt,\n [\"0.18\", \"0.42\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.10\", \"0.87\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [True, False, False],\n bool,\n ),\n (\n operator.gt,\n [\"0.18\", \"0.42\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n 
[\"0.1000\", \"0.8700\", \"1.0000\"],\n cudf.Decimal64Dtype(scale=4, precision=5),\n [True, False, False],\n bool,\n ),\n (\n operator.gt,\n [\"300\", None, \"100\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n [\"100\", \"200\", \"100\"],\n cudf.Decimal64Dtype(scale=-1, precision=4),\n [True, None, False],\n bool,\n ),\n (\n operator.le,\n [\"0.18\", \"0.42\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.10\", \"0.87\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [False, True, True],\n bool,\n ),\n (\n operator.le,\n [\"0.18\", \"0.42\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.1000\", \"0.8700\", \"1.0000\"],\n cudf.Decimal64Dtype(scale=4, precision=5),\n [False, True, True],\n bool,\n ),\n (\n operator.le,\n [\"300\", None, \"100\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n [\"100\", \"200\", \"100\"],\n cudf.Decimal64Dtype(scale=-1, precision=4),\n [False, None, True],\n bool,\n ),\n (\n operator.ge,\n [\"0.18\", \"0.42\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.10\", \"0.87\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [True, False, True],\n bool,\n ),\n (\n operator.ge,\n [\"0.18\", \"0.42\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.1000\", \"0.8700\", \"1.0000\"],\n cudf.Decimal64Dtype(scale=4, precision=5),\n [True, False, True],\n bool,\n ),\n (\n operator.ge,\n [\"300\", None, \"100\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n [\"100\", \"200\", \"100\"],\n cudf.Decimal64Dtype(scale=-1, precision=4),\n [True, None, True],\n bool,\n ),\n ],\n)\ndef test_binops_decimal(args):\n op, lhs, l_dtype, rhs, r_dtype, expect, expect_dtype = args\n\n a = utils._decimal_series(lhs, l_dtype)\n b = utils._decimal_series(rhs, r_dtype)\n expect = (\n utils._decimal_series(expect, expect_dtype)\n if isinstance(expect_dtype, cudf.Decimal64Dtype)\n else cudf.Series(expect, dtype=expect_dtype)\n )\n\n got = op(a, b)\n assert expect.dtype == got.dtype\n utils.assert_eq(expect, got)\n\n\[email protected](\n \"args\",\n [\n (\n operator.eq,\n [\"100\", \"41\", None],\n cudf.Decimal64Dtype(scale=0, precision=5),\n [100, 42, 12],\n cudf.Series([True, False, None], dtype=bool),\n cudf.Series([True, False, None], dtype=bool),\n ),\n (\n operator.eq,\n [\"100.000\", \"42.001\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n [100, 42, 12],\n cudf.Series([True, False, None], dtype=bool),\n cudf.Series([True, False, None], dtype=bool),\n ),\n (\n operator.eq,\n [\"100\", \"40\", None],\n cudf.Decimal64Dtype(scale=-1, precision=3),\n [100, 42, 12],\n cudf.Series([True, False, None], dtype=bool),\n cudf.Series([True, False, None], dtype=bool),\n ),\n (\n operator.ne,\n [\"100\", \"42\", \"24\", None],\n cudf.Decimal64Dtype(scale=0, precision=3),\n [100, 40, 24, 12],\n cudf.Series([False, True, False, None], dtype=bool),\n cudf.Series([False, True, False, None], dtype=bool),\n ),\n (\n operator.ne,\n [\"10.1\", \"88\", \"11\", None],\n cudf.Decimal64Dtype(scale=1, precision=3),\n [10, 42, 11, 12],\n cudf.Series([True, True, False, None], dtype=bool),\n cudf.Series([True, True, False, None], dtype=bool),\n ),\n (\n operator.ne,\n [\"100.000\", \"42\", \"23.999\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n [100, 42, 24, 12],\n cudf.Series([False, False, True, None], dtype=bool),\n cudf.Series([False, False, True, None], dtype=bool),\n ),\n (\n operator.lt,\n [\"100\", \"40\", \"28\", None],\n cudf.Decimal64Dtype(scale=0, precision=3),\n [100, 42, 24, 12],\n 
cudf.Series([False, True, False, None], dtype=bool),\n cudf.Series([False, False, True, None], dtype=bool),\n ),\n (\n operator.lt,\n [\"100.000\", \"42.002\", \"23.999\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n [100, 42, 24, 12],\n cudf.Series([False, False, True, None], dtype=bool),\n cudf.Series([False, True, False, None], dtype=bool),\n ),\n (\n operator.lt,\n [\"100\", \"40\", \"10\", None],\n cudf.Decimal64Dtype(scale=-1, precision=3),\n [100, 42, 8, 12],\n cudf.Series([False, True, False, None], dtype=bool),\n cudf.Series([False, False, True, None], dtype=bool),\n ),\n (\n operator.gt,\n [\"100\", \"42\", \"20\", None],\n cudf.Decimal64Dtype(scale=0, precision=3),\n [100, 40, 24, 12],\n cudf.Series([False, True, False, None], dtype=bool),\n cudf.Series([False, False, True, None], dtype=bool),\n ),\n (\n operator.gt,\n [\"100.000\", \"42.002\", \"23.999\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n [100, 42, 24, 12],\n cudf.Series([False, True, False, None], dtype=bool),\n cudf.Series([False, False, True, None], dtype=bool),\n ),\n (\n operator.gt,\n [\"100\", \"40\", \"10\", None],\n cudf.Decimal64Dtype(scale=-1, precision=3),\n [100, 42, 8, 12],\n cudf.Series([False, False, True, None], dtype=bool),\n cudf.Series([False, True, False, None], dtype=bool),\n ),\n (\n operator.le,\n [\"100\", \"40\", \"28\", None],\n cudf.Decimal64Dtype(scale=0, precision=3),\n [100, 42, 24, 12],\n cudf.Series([True, True, False, None], dtype=bool),\n cudf.Series([True, False, True, None], dtype=bool),\n ),\n (\n operator.le,\n [\"100.000\", \"42.002\", \"23.999\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n [100, 42, 24, 12],\n cudf.Series([True, False, True, None], dtype=bool),\n cudf.Series([True, True, False, None], dtype=bool),\n ),\n (\n operator.le,\n [\"100\", \"40\", \"10\", None],\n cudf.Decimal64Dtype(scale=-1, precision=3),\n [100, 42, 8, 12],\n cudf.Series([True, True, False, None], dtype=bool),\n cudf.Series([True, False, True, None], dtype=bool),\n ),\n (\n operator.ge,\n [\"100\", \"42\", \"20\", None],\n cudf.Decimal64Dtype(scale=0, precision=3),\n [100, 40, 24, 12],\n cudf.Series([True, True, False, None], dtype=bool),\n cudf.Series([True, False, True, None], dtype=bool),\n ),\n (\n operator.ge,\n [\"100.000\", \"42.002\", \"23.999\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n [100, 42, 24, 12],\n cudf.Series([True, True, False, None], dtype=bool),\n cudf.Series([True, False, True, None], dtype=bool),\n ),\n (\n operator.ge,\n [\"100\", \"40\", \"10\", None],\n cudf.Decimal64Dtype(scale=-1, precision=3),\n [100, 42, 8, 12],\n cudf.Series([True, False, True, None], dtype=bool),\n cudf.Series([True, True, False, None], dtype=bool),\n ),\n ],\n)\[email protected](\"integer_dtype\", utils.INTEGER_TYPES)\[email protected](\"reflected\", [True, False])\ndef test_binops_decimal_comp_mixed_integer(args, integer_dtype, reflected):\n \"\"\"\n Tested compare operations:\n eq, lt, gt, le, ge\n Each operation has 3 decimal data setups, with scale from {==0, >0, <0}.\n Decimal precisions are sufficient to hold the digits.\n For each decimal data setup, there is at least one row that lead to one\n of the following compare results: {True, False, None}.\n \"\"\"\n if not reflected:\n op, ldata, ldtype, rdata, expected, _ = args\n else:\n op, ldata, ldtype, rdata, _, expected = args\n\n lhs = utils._decimal_series(ldata, ldtype)\n rhs = cudf.Series(rdata, dtype=integer_dtype)\n\n if reflected:\n rhs, lhs = lhs, rhs\n\n actual = op(lhs, rhs)\n\n 
utils.assert_eq(expected, actual)\n\n\[email protected](\n \"args\",\n [\n (\n operator.add,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(1),\n [\"101\", \"201\"],\n cudf.Decimal64Dtype(scale=0, precision=6),\n False,\n ),\n (\n operator.add,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n 1,\n [\"101\", \"201\"],\n cudf.Decimal64Dtype(scale=0, precision=6),\n False,\n ),\n (\n operator.add,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(\"1.5\"),\n [\"101.5\", \"201.5\"],\n cudf.Decimal64Dtype(scale=1, precision=7),\n False,\n ),\n (\n operator.add,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n cudf.Scalar(decimal.Decimal(\"1.5\")),\n [\"101.5\", \"201.5\"],\n cudf.Decimal64Dtype(scale=1, precision=7),\n False,\n ),\n (\n operator.add,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(1),\n [\"101\", \"201\"],\n cudf.Decimal64Dtype(scale=0, precision=6),\n True,\n ),\n (\n operator.add,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n 1,\n [\"101\", \"201\"],\n cudf.Decimal64Dtype(scale=0, precision=6),\n True,\n ),\n (\n operator.add,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(\"1.5\"),\n [\"101.5\", \"201.5\"],\n cudf.Decimal64Dtype(scale=1, precision=7),\n True,\n ),\n (\n operator.add,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n cudf.Scalar(decimal.Decimal(\"1.5\")),\n [\"101.5\", \"201.5\"],\n cudf.Decimal64Dtype(scale=1, precision=7),\n True,\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n 1,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=5),\n False,\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(2),\n [\"200\", \"400\"],\n cudf.Decimal64Dtype(scale=-2, precision=5),\n False,\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(\"1.5\"),\n [\"150\", \"300\"],\n cudf.Decimal64Dtype(scale=-1, precision=6),\n False,\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n cudf.Scalar(decimal.Decimal(\"1.5\")),\n [\"150\", \"300\"],\n cudf.Decimal64Dtype(scale=-1, precision=6),\n False,\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n 1,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=5),\n True,\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(2),\n [\"200\", \"400\"],\n cudf.Decimal64Dtype(scale=-2, precision=5),\n True,\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(\"1.5\"),\n [\"150\", \"300\"],\n cudf.Decimal64Dtype(scale=-1, precision=6),\n True,\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n cudf.Scalar(decimal.Decimal(\"1.5\")),\n [\"150\", \"300\"],\n cudf.Decimal64Dtype(scale=-1, precision=6),\n True,\n ),\n (\n operator.truediv,\n [\"1000\", \"2000\"],\n cudf.Decimal64Dtype(scale=-2, precision=4),\n 1,\n [\"1000\", \"2000\"],\n cudf.Decimal64Dtype(scale=-2, precision=6),\n False,\n ),\n (\n operator.truediv,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=2, precision=4),\n decimal.Decimal(2),\n [\"50\", \"100\"],\n cudf.Decimal64Dtype(scale=2, precision=6),\n False,\n ),\n 
(\n operator.truediv,\n [\"35.23\", \"54.91\"],\n cudf.Decimal64Dtype(scale=2, precision=4),\n decimal.Decimal(\"1.5\"),\n [\"23.4\", \"36.6\"],\n cudf.Decimal64Dtype(scale=1, precision=7),\n False,\n ),\n (\n operator.truediv,\n [\"22.2\", \"93.6\"],\n cudf.Decimal64Dtype(scale=1, precision=3),\n cudf.Scalar(decimal.Decimal(\"1.5\")),\n [\"14\", \"62\"],\n cudf.Decimal64Dtype(scale=0, precision=6),\n False,\n ),\n (\n operator.truediv,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n 1,\n [\"0\", \"0\"],\n cudf.Decimal64Dtype(scale=-2, precision=5),\n True,\n ),\n (\n operator.truediv,\n [\"1.2\", \"0.5\"],\n cudf.Decimal64Dtype(scale=1, precision=6),\n decimal.Decimal(20),\n [\"10\", \"40\"],\n cudf.Decimal64Dtype(scale=-1, precision=9),\n True,\n ),\n (\n operator.truediv,\n [\"1.22\", \"5.24\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n decimal.Decimal(\"8.55\"),\n [\"7\", \"1\"],\n cudf.Decimal64Dtype(scale=0, precision=7),\n True,\n ),\n (\n operator.truediv,\n [\"1.1\", \"42.8\"],\n cudf.Decimal64Dtype(scale=1, precision=3),\n cudf.Scalar(decimal.Decimal(\"90.84\")),\n [\"82.5\", \"2.1\"],\n cudf.Decimal64Dtype(scale=1, precision=8),\n True,\n ),\n (\n operator.sub,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(2),\n [\"98\", \"198\"],\n cudf.Decimal64Dtype(scale=0, precision=6),\n False,\n ),\n (\n operator.sub,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(\"2.5\"),\n [\"97.5\", \"197.5\"],\n cudf.Decimal64Dtype(scale=1, precision=7),\n False,\n ),\n (\n operator.sub,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n 4,\n [\"96\", \"196\"],\n cudf.Decimal64Dtype(scale=0, precision=6),\n False,\n ),\n (\n operator.sub,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n cudf.Scalar(decimal.Decimal(\"2.5\")),\n [\"97.5\", \"197.5\"],\n cudf.Decimal64Dtype(scale=1, precision=7),\n False,\n ),\n (\n operator.sub,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(2),\n [\"-98\", \"-198\"],\n cudf.Decimal64Dtype(scale=0, precision=6),\n True,\n ),\n (\n operator.sub,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n 4,\n [\"-96\", \"-196\"],\n cudf.Decimal64Dtype(scale=0, precision=6),\n True,\n ),\n (\n operator.sub,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(\"2.5\"),\n [\"-97.5\", \"-197.5\"],\n cudf.Decimal64Dtype(scale=1, precision=7),\n True,\n ),\n (\n operator.sub,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n cudf.Scalar(decimal.Decimal(\"2.5\")),\n [\"-97.5\", \"-197.5\"],\n cudf.Decimal64Dtype(scale=1, precision=7),\n True,\n ),\n ],\n)\ndef test_binops_decimal_scalar(args):\n op, lhs, l_dtype, rhs, expect, expect_dtype, reflect = args\n\n def decimal_series(input, dtype):\n return cudf.Series(\n [x if x is None else decimal.Decimal(x) for x in input],\n dtype=dtype,\n )\n\n lhs = decimal_series(lhs, l_dtype)\n expect = decimal_series(expect, expect_dtype)\n\n if reflect:\n lhs, rhs = rhs, lhs\n\n got = op(lhs, rhs)\n assert expect.dtype == got.dtype\n utils.assert_eq(expect, got)\n\n\[email protected](\n \"args\",\n [\n (\n operator.eq,\n [\"100.00\", \"41\", None],\n cudf.Decimal64Dtype(scale=0, precision=5),\n 100,\n cudf.Series([True, False, None], dtype=bool),\n cudf.Series([True, False, None], dtype=bool),\n ),\n (\n operator.eq,\n [\"100.123\", \"41\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n 
decimal.Decimal(\"100.123\"),\n cudf.Series([True, False, None], dtype=bool),\n cudf.Series([True, False, None], dtype=bool),\n ),\n (\n operator.eq,\n [\"100.123\", \"41\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n cudf.Scalar(decimal.Decimal(\"100.123\")),\n cudf.Series([True, False, None], dtype=bool),\n cudf.Series([True, False, None], dtype=bool),\n ),\n (\n operator.ne,\n [\"100.00\", \"41\", None],\n cudf.Decimal64Dtype(scale=2, precision=5),\n 100,\n cudf.Series([False, True, None], dtype=bool),\n cudf.Series([False, True, None], dtype=bool),\n ),\n (\n operator.ne,\n [\"100.123\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n decimal.Decimal(\"100.123\"),\n cudf.Series([False, True, None], dtype=bool),\n cudf.Series([False, True, None], dtype=bool),\n ),\n (\n operator.ne,\n [\"100.123\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n cudf.Scalar(decimal.Decimal(\"100.123\")),\n cudf.Series([False, True, True, None], dtype=bool),\n cudf.Series([False, True, True, None], dtype=bool),\n ),\n (\n operator.gt,\n [\"100.00\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=2, precision=5),\n 100,\n cudf.Series([False, False, True, None], dtype=bool),\n cudf.Series([False, True, False, None], dtype=bool),\n ),\n (\n operator.gt,\n [\"100.123\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n decimal.Decimal(\"100.123\"),\n cudf.Series([False, False, True, None], dtype=bool),\n cudf.Series([False, True, False, None], dtype=bool),\n ),\n (\n operator.gt,\n [\"100.123\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n cudf.Scalar(decimal.Decimal(\"100.123\")),\n cudf.Series([False, False, True, None], dtype=bool),\n cudf.Series([False, True, False, None], dtype=bool),\n ),\n (\n operator.ge,\n [\"100.00\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=2, precision=5),\n 100,\n cudf.Series([True, False, True, None], dtype=bool),\n cudf.Series([True, True, False, None], dtype=bool),\n ),\n (\n operator.ge,\n [\"100.123\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n decimal.Decimal(\"100.123\"),\n cudf.Series([True, False, True, None], dtype=bool),\n cudf.Series([True, True, False, None], dtype=bool),\n ),\n (\n operator.ge,\n [\"100.123\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n cudf.Scalar(decimal.Decimal(\"100.123\")),\n cudf.Series([True, False, True, None], dtype=bool),\n cudf.Series([True, True, False, None], dtype=bool),\n ),\n (\n operator.lt,\n [\"100.00\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=2, precision=5),\n 100,\n cudf.Series([False, True, False, None], dtype=bool),\n cudf.Series([False, False, True, None], dtype=bool),\n ),\n (\n operator.lt,\n [\"100.123\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n decimal.Decimal(\"100.123\"),\n cudf.Series([False, True, False, None], dtype=bool),\n cudf.Series([False, False, True, None], dtype=bool),\n ),\n (\n operator.lt,\n [\"100.123\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n cudf.Scalar(decimal.Decimal(\"100.123\")),\n cudf.Series([False, True, False, None], dtype=bool),\n cudf.Series([False, False, True, None], dtype=bool),\n ),\n (\n operator.le,\n [\"100.00\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=2, precision=5),\n 100,\n cudf.Series([True, True, False, None], dtype=bool),\n cudf.Series([True, False, True, None], dtype=bool),\n ),\n (\n 
operator.le,\n [\"100.123\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n decimal.Decimal(\"100.123\"),\n cudf.Series([True, True, False, None], dtype=bool),\n cudf.Series([True, False, True, None], dtype=bool),\n ),\n (\n operator.le,\n [\"100.123\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n cudf.Scalar(decimal.Decimal(\"100.123\")),\n cudf.Series([True, True, False, None], dtype=bool),\n cudf.Series([True, False, True, None], dtype=bool),\n ),\n ],\n)\n@pytest.mark.parametrize(\"reflected\", [True, False])\ndef test_binops_decimal_scalar_compare(args, reflected):\n \"\"\"\n Tested compare operations:\n eq, ne, lt, gt, le, ge\n Each operation has 3 data setups: pyints, Decimal, and\n decimal cudf.Scalar\n For each data setup, there is at least one row that leads to one of the\n following compare results: {True, False, None}.\n \"\"\"\n if not reflected:\n op, ldata, ldtype, rdata, expected, _ = args\n else:\n op, ldata, ldtype, rdata, _, expected = args\n\n lhs = utils._decimal_series(ldata, ldtype)\n rhs = rdata\n\n if reflected:\n rhs, lhs = lhs, rhs\n\n actual = op(lhs, rhs)\n\n utils.assert_eq(expected, actual)\n\n\n@pytest.mark.parametrize(\n \"dtype\",\n [\n \"uint8\",\n \"uint16\",\n \"uint32\",\n \"uint64\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n \"str\",\n \"datetime64[ns]\",\n \"datetime64[us]\",\n \"datetime64[ms]\",\n \"datetime64[s]\",\n \"timedelta64[ns]\",\n \"timedelta64[us]\",\n \"timedelta64[ms]\",\n \"timedelta64[s]\",\n ],\n)\n@pytest.mark.parametrize(\"null_scalar\", [None, cudf.NA, np.datetime64(\"NaT\")])\n@pytest.mark.parametrize(\"cmpop\", _cmpops)\ndef test_column_null_scalar_comparison(dtype, null_scalar, cmpop):\n # This test is meant to validate that comparing\n # a series of any dtype with a null scalar produces\n # a new series where all the elements are <NA>.\n\n if isinstance(null_scalar, np.datetime64):\n if np.dtype(dtype).kind not in \"mM\":\n pytest.skip()\n null_scalar = null_scalar.astype(dtype)\n\n dtype = np.dtype(dtype)\n\n data = [1, 2, 3, 4, 5]\n sr = cudf.Series(data, dtype=dtype)\n result = cmpop(sr, null_scalar)\n\n assert result.isnull().all()\n\n\n@pytest.mark.parametrize(\"fn\", [\"eq\", \"ne\", \"lt\", \"gt\", \"le\", \"ge\"])\ndef test_equality_ops_index_mismatch(fn):\n a = cudf.Series(\n [1, 2, 3, None, None, 4], index=[\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"]\n )\n b = cudf.Series(\n [-5, 4, 3, 2, 1, 0, 19, 11],\n index=[\"aa\", \"b\", \"c\", \"d\", \"e\", \"f\", \"y\", \"z\"],\n )\n\n pa = a.to_pandas(nullable=True)\n pb = b.to_pandas(nullable=True)\n expected = getattr(pa, fn)(pb)\n actual = getattr(a, fn)(b).to_pandas(nullable=True)\n\n utils.assert_eq(expected, actual)\n\n\ndef generate_test_null_equals_columnops_data():\n # Generate tuples of:\n # (left_data, right_data, compare_bool)\n # where compare_bool is the correct answer to\n # if the columns should compare as null equals\n\n def set_null_cases(column_l, column_r, case):\n if case == \"neither\":\n return column_l, column_r\n elif case == \"left\":\n column_l[1] = None\n elif case == \"right\":\n column_r[1] = None\n elif case == \"both\":\n column_l[1] = None\n column_r[1] = None\n else:\n raise ValueError(\"Unknown null case\")\n return column_l, column_r\n\n null_cases = [\"neither\", \"left\", \"right\", \"both\"]\n data = [1, 2, 3]\n\n results = []\n # TODO: Numeric types can be cross compared as null equal\n for dtype in (\n list(NUMERIC_TYPES)\n + list(DATETIME_TYPES)\n + list(TIMEDELTA_TYPES)\n + 
list(STRING_TYPES)\n + [\"category\"]\n ):\n for case in null_cases:\n left = cudf.Series(data, dtype=dtype)\n right = cudf.Series(data, dtype=dtype)\n if case in {\"left\", \"right\"}:\n answer = False\n else:\n answer = True\n left, right = set_null_cases(left, right, case)\n results.append((left._column, right._column, answer, case))\n\n return results\n\n\n@pytest.mark.parametrize(\n \"lcol,rcol,ans,case\", generate_test_null_equals_columnops_data()\n)\ndef test_null_equals_columnops(lcol, rcol, ans, case):\n assert lcol._null_equals(rcol).all() == ans\n" ]
[ [ "numpy.true_divide", "numpy.datetime_data", "numpy.random.random", "pandas.Series", "numpy.random.seed", "pandas.DateOffset", "numpy.random.choice", "numpy.isnan", "numpy.float16", "pandas.Index", "pandas.DataFrame", "numpy.dtype", "numpy.testing.assert_array_equal", "numpy.datetime64", "numpy.int64", "numpy.isscalar", "numpy.logical_and", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "0.19", "0.24", "0.20", "1.0", "0.25" ], "scipy": [], "tensorflow": [] } ]
Ina299/prompt2slip
[ "b35489ff4fc4f5d724cfb74c75e5e128da553c70" ]
[ "prompt2slip/loss_transformers.py" ]
[ "from typing import Optional, Dict, Any, Union\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch import Tensor\nfrom torchtyping import TensorType\nimport transformers\n\nfrom .utils import make_forbit_indicates\n\nKeyValBatchType = Union[transformers.BatchEncoding, Dict[str, Tensor]]\n\n\nclass CosSimLoss(nn.Module):\n def __init__(\n self, original_output: Optional[transformers.file_utils.ModelOutput]\n ) -> None:\n super(CosSimLoss, self).__init__()\n self.original_hidden: Optional[TensorType[\"batch\", \"length\", \"dim_hidden\"]] = None\n if original_output is not None:\n self.original_hidden = original_output.hidden_states[0]\n\n def set_original(self, original_output: transformers.file_utils.ModelOutput):\n self.original_hidden = original_output.hidden_states[0]\n\n def forward(self, hf_output: transformers.file_utils.ModelOutput):\n if isinstance(self.original_hidden, torch.Tensor):\n expanded_bsize = hf_output.hidden_states[0].size(0)\n n_repeat = expanded_bsize // self.original_hidden.size(0)\n return (\n 1\n - F.cosine_similarity(\n self.original_hidden.repeat(n_repeat, 1, 1),\n hf_output.hidden_states[0],\n dim=2,\n )\n ).max()\n else:\n raise AttributeError(\"No `original_hidden`\")\n\n\ndef log_perplexity(logits, coeffs):\n shift_logits = logits[:, :-1, :].contiguous()\n shift_coeffs = coeffs[:, 1:, :].contiguous()\n shift_logits = shift_logits[:, :, : shift_coeffs.size(2)]\n return -(shift_coeffs * F.log_softmax(shift_logits, dim=-1)).sum(-1).mean()\n\n\nclass CrossEntropyLoss(nn.Module):\n def __init__(self, labels: Tensor) -> None:\n super(CrossEntropyLoss, self).__init__()\n self.loss_fn = nn.CrossEntropyLoss()\n self.labels = labels.long()\n\n def forward(self, hf_output: transformers.file_utils.ModelOutput):\n # hf_output.logits must be torch Tensor with [batch, n_labels]\n return self.loss_fn.forward(hf_output.logits, self.labels)\n\n\nclass MaxProbLoss(nn.Module):\n def __init__(\n self,\n margin: float = 1.5,\n forbid_mask: Optional[Tensor] = None,\n target_mask: Optional[Tensor] = None,\n ) -> None:\n super(MaxProbLoss, self).__init__()\n self.margin: float = margin\n\n if target_mask is not None:\n self.target_mask = target_mask.bool()\n self.escape_mask = torch.logical_not(target_mask)\n else:\n self.target_mask = None\n self.escape_mask = None\n\n def forward(\n self,\n hf_output: transformers.file_utils.ModelOutput,\n target_ids: Tensor,\n ):\n assert isinstance(target_ids, Tensor)\n # hf_output.logits must be torch Tensor with [batch, length, n_vocabs]\n logits: TensorType[\"batch\", \"length\", \"n_vocabs\"] = hf_output.logits\n\n target_ids_set = set(target_ids.tolist())\n\n self.n_vocab = logits.shape[2]\n non_target_ids = (\n torch.Tensor(\n [i for i in range(self.n_vocab) if i not in target_ids_set],\n )\n .long()\n .to(target_ids.device)\n )\n logits_log_softmax = F.log_softmax(logits, dim=2)\n target_logits: TensorType[\"batch\", \"length\", \"n_target\"] = logits_log_softmax[\n :, :, target_ids\n ]\n nontarget_logits: TensorType[\n \"batch\", \"length\", \"n_non_target\"\n ] = logits_log_softmax[:, :, non_target_ids]\n max_tokens_target = torch.mean(target_logits, dim=2) # (bsize, length)\n max_tokens_nontarget = torch.max(nontarget_logits, dim=2)[0]\n max_tokens_diff = torch.clamp(\n max_tokens_target - max_tokens_nontarget, max=self.margin\n )\n\n if isinstance(self.escape_mask, torch.Tensor):\n max_tokens_target = max_tokens_target.masked_fill_(self.escape_mask, -20)\n max_tokens_nontarget = 
max_tokens_nontarget.masked_fill_(\n self.escape_mask, -20\n )\n max_tokens_diff = max_tokens_diff.masked_fill_(self.escape_mask, -20)\n return (\n -1\n * torch.max(\n max_tokens_diff,\n dim=1,\n )[0].mean()\n )\n\n\ndef preprocess_embedding(\n embedding_vector: TensorType[\"batch\", \"length\", \"dim_embed\"], **kargs\n) -> Dict[str, Any]:\n kargs[\"inputs_embeds\"] = embedding_vector\n kargs[\"output_hidden_states\"] = True\n kargs[\"return_dict\"] = True\n return kargs\n" ]
[ [ "torch.mean", "torch.nn.CrossEntropyLoss", "torch.max", "torch.nn.functional.log_softmax", "torch.clamp", "torch.logical_not" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
XC-Li/Deep_Learning_GWU
[ "2dfe0d39ce8f9d981cee545f489f9dde1ffdfa7c" ]
[ "Homework6/2_Tensor_Pytorch_Edited.py" ]
[ "import torch\n#----------------------------------------------------------------------------\ndtype = torch.float\nif torch.cuda.is_available():\n device = torch.device(\"cuda:0\")\nelse:\n device = torch.device(\"cpu\")\n#----------------------------------------------------------------------------\nBatch_size = 64 # Batch size\nQ = 1000 # Input size\nS = 100 # Number of neurons\na = 10 # Network output size\n#----------------------------------------------------------------------------\np = torch.randn(Batch_size, Q, device=device, dtype=dtype)\nt = torch.randn(Batch_size, a, device=device, dtype=dtype)\n#----------------------------------------------------------------------------\nw1 = torch.randn(Q, S, device=device, dtype=dtype)\nw2 = torch.randn(S, a, device=device, dtype=dtype)\nlearning_rate = 1e-6\n#----------------------------------------------------------------------------\nfor index in range(500):\n\n # h = p.mm(w1)\n # h_relu = h.clamp(min=0)\n # a_net = h_relu.mm(w2)\n a_0 = p\n n_1 = p.mm(w1)\n a_1 = n_1.clamp(min=0)\n a_2 = a_1.mm(w2)\n\n # loss = (a_net - t).pow(2).sum()\n # print(index, loss.item())\n loss = (a_2 - t).pow(2).sum()\n print(index, loss.item())\n\n # grad_y_pred = 2.0 * (a_net - t)\n # grad_w2 = h_relu.t().mm(grad_y_pred)\n # grad_h_relu = grad_y_pred.mm(w2.t())\n # grad_h = grad_h_relu.clone()\n # grad_h[h < 0] = 0\n # grad_w1 = p.t().mm(grad_h)\n s_2 = 2.0 * (a_2 - t)\n s_1 = s_2.mm(w2.t())\n s_1[n_1 < 0] = 0\n\n # w1 -= learning_rate * grad_w1\n # w2 -= learning_rate * grad_w2\n w1 = w1 - learning_rate * p.t().mm(s_1)\n w2 = w2 - learning_rate * a_1.t().mm(s_2)" ]
[ [ "torch.device", "torch.randn", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
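The homework script in the XC-Li/Deep_Learning_GWU record above derives the backward pass of a two-layer ReLU network by hand (s_2 is the gradient of the squared-error loss at the output, s_1 is that gradient pushed back through the ReLU). A minimal sketch for spot-checking that algebra against autograd (illustrative code with arbitrary small shapes, not part of the original file):

    import torch

    # Tiny two-layer ReLU network with random data.
    p = torch.randn(4, 8)
    t = torch.randn(4, 3)
    w1 = torch.randn(8, 5, requires_grad=True)
    w2 = torch.randn(5, 3, requires_grad=True)

    n_1 = p.mm(w1)
    a_1 = n_1.clamp(min=0)
    a_2 = a_1.mm(w2)
    loss = (a_2 - t).pow(2).sum()
    loss.backward()  # autograd gradients land in w1.grad / w2.grad

    # Hand-derived gradients, same algebra as the homework script above.
    with torch.no_grad():
        s_2 = 2.0 * (a_2 - t)
        s_1 = s_2.mm(w2.t())
        s_1[n_1 < 0] = 0
        print(torch.allclose(w2.grad, a_1.t().mm(s_2)))  # expected: True
        print(torch.allclose(w1.grad, p.t().mm(s_1)))    # expected: True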
mynameisvinn/scikit-network
[ "255e99b2f7d5ad8914a8fad3a89d7817764666e0" ]
[ "sknetwork/linalg/sparse_lowrank.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Apr 19 2019\n@author: Nathan de Lara <[email protected]>\n\"\"\"\n\nfrom typing import Union, Tuple\n\nimport numpy as np\nfrom scipy import sparse\nfrom scipy.sparse.linalg import LinearOperator\n\n\nclass SparseLR(LinearOperator):\n \"\"\"Class for matrices with \"sparse + low rank\" structure.\n Example:\n\n :math:`A + xy^T`\n\n Parameters\n ----------\n sparse_mat: scipy.spmatrix\n Sparse component. Is converted to csr format automatically.\n low_rank_tuples: list\n Single tuple of arrays of list of tuples, representing the low rank components [(x1, y1), (x2, y2),...].\n Each low rank component is of the form :math:`xy^T`.\n\n Examples\n --------\n >>> from scipy import sparse\n >>> from sknetwork.linalg import SparseLR\n >>> adjacency = sparse.eye(2, format='csr')\n >>> slr = SparseLR(adjacency, (np.ones(2), np.ones(2)))\n >>> x = np.ones(2)\n >>> slr.dot(x)\n array([3., 3.])\n >>> slr.sum(axis=0)\n array([3., 3.])\n >>> slr.sum(axis=1)\n array([3., 3.])\n >>> slr.sum()\n 6.0\n\n References\n ----------\n De Lara (2019). `The Sparse + Low Rank trick for Matrix Factorization-Based Graph Algorithms.\n <http://www.mlgworkshop.org/2019/papers/MLG2019_paper_1.pdf>`_\n Proceedings of the 15th International Workshop on Mining and Learning with Graphs (MLG).\n \"\"\"\n def __init__(self, sparse_mat: Union[sparse.csr_matrix, sparse.csc_matrix], low_rank_tuples: Union[list, Tuple],\n dtype=float):\n n_row, n_col = sparse_mat.shape\n self.sparse_mat = sparse_mat.tocsr().astype(dtype)\n super(SparseLR, self).__init__(dtype=dtype, shape=(n_row, n_col))\n\n if isinstance(low_rank_tuples, Tuple):\n low_rank_tuples = [low_rank_tuples]\n self.low_rank_tuples = []\n for x, y in low_rank_tuples:\n if x.shape == (n_row,) and y.shape == (n_col,):\n self.low_rank_tuples.append((x.astype(self.dtype), y.astype(self.dtype)))\n else:\n raise ValueError('For each low rank tuple, x (resp. y) should be a vector of length {} (resp. 
{})'\n .format(n_row, n_col))\n\n def __neg__(self):\n return SparseLR(-self.sparse_mat, [(-x, y) for (x, y) in self.low_rank_tuples])\n\n def __add__(self, other: 'SparseLR'):\n if type(other) == sparse.csr_matrix:\n return SparseLR(self.sparse_mat + other, self.low_rank_tuples)\n else:\n return SparseLR(self.sparse_mat + other.sparse_mat, self.low_rank_tuples + other.low_rank_tuples)\n\n def __sub__(self, other):\n return self.__add__(-other)\n\n def __mul__(self, other):\n return SparseLR(other * self.sparse_mat, [(other * x, y) for (x, y) in self.low_rank_tuples])\n\n def _matvec(self, matrix: np.ndarray):\n \"\"\"Right dot product with a dense matrix.\n\n Parameters\n ----------\n matrix:\n Matrix.\n\n Returns\n -------\n Dot product as a dense array\n \"\"\"\n prod = self.sparse_mat.dot(matrix)\n if len(matrix.shape) == 1:\n for (x, y) in self.low_rank_tuples:\n prod += x * matrix.dot(y)\n else:\n transposed = matrix.T\n for (x, y) in self.low_rank_tuples:\n prod += x[:, np.newaxis].dot(transposed.dot(y)[:, np.newaxis].T)\n return prod\n\n def _transpose(self):\n \"\"\"Transposed operator.\"\"\"\n transposed_sparse = sparse.csr_matrix(self.sparse_mat.T)\n transposed_tuples = [(y, x) for (x, y) in self.low_rank_tuples]\n return SparseLR(transposed_sparse, transposed_tuples)\n\n def _adjoint(self):\n return self.transpose()\n\n def left_sparse_dot(self, matrix: sparse.csr_matrix):\n \"\"\"Left dot product with a sparse matrix.\"\"\"\n return SparseLR(matrix.dot(self.sparse_mat), [(matrix.dot(x), y) for (x, y) in self.low_rank_tuples])\n\n def right_sparse_dot(self, matrix: sparse.csr_matrix):\n \"\"\"Right dot product with a sparse matrix.\"\"\"\n return SparseLR(self.sparse_mat.dot(matrix), [(x, matrix.T.dot(y)) for (x, y) in self.low_rank_tuples])\n\n def sum(self, axis=None):\n \"\"\"Row-wise, column-wise or total sum of operator's coefficients.\n\n Parameters\n ----------\n axis :\n If 0, return column-wise sum. If 1, return row-wise sum. Otherwise, return total sum.\n \"\"\"\n if axis == 0:\n s = self.T.dot(np.ones(self.shape[0]))\n elif axis == 1:\n s = self.dot(np.ones(self.shape[1]))\n else:\n s = self.dot(np.ones(self.shape[1])).sum()\n return s\n\n def astype(self, dtype: Union[str, np.dtype]):\n \"\"\"Change dtype of the object.\"\"\"\n self.sparse_mat = self.sparse_mat.astype(dtype)\n self.low_rank_tuples = [(x.astype(dtype), y.astype(dtype)) for (x, y) in self.low_rank_tuples]\n self.dtype = np.dtype(dtype)\n\n return self\n" ]
[ [ "numpy.dtype", "scipy.sparse.csr_matrix", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
DaikiOnodera/kaggle-hpa
[ "3e7888cadaa18403231136800bfd1ac324d6db66" ]
[ "swa.py" ]
[ "import os\nimport argparse\nimport pprint\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\nfrom datasets import get_dataloader\nfrom transforms import get_transform\nfrom models import get_model\nimport utils.config\nimport utils.swa as swa\nimport utils.checkpoint\n\n\ndef get_checkpoints(config, num_checkpoint=10, epoch_end=None):\n checkpoint_dir = os.path.join(config.train.dir, 'checkpoint')\n if epoch_end is not None:\n epoch_begin = epoch_end - num_checkpoint + 1\n checkpoints = [os.path.join(checkpoint_dir, 'epoch.{:04d}.pth'.format(e))\n for e in range(epoch_begin, epoch_end+1)]\n checkpoints = [f for f in checkpoints if os.path.exists(f)]\n else:\n checkpoints = os.listdir(checkpoint_dir)\n checkpoints = [name for name in checkpoints\n if name.startswith('epoch') and name.endswith('pth')]\n checkpoints = list(sorted([os.path.join(checkpoint_dir, f) for f in checkpoints]))\n checkpoints = checkpoints[-num_checkpoint:]\n return checkpoints\n\n\ndef run(config, num_checkpoint, epoch_end, output_filename):\n dataloader = get_dataloader(config, 'train', get_transform(config, 'val'))\n\n model = get_model(config).cuda()\n checkpoints = get_checkpoints(config, num_checkpoint, epoch_end)\n\n utils.checkpoint.load_checkpoint(model, None, checkpoints[0])\n for i, checkpoint in enumerate(checkpoints[1:]):\n model2 = get_model(config).cuda()\n last_epoch, _ = utils.checkpoint.load_checkpoint(model2, None, checkpoint)\n swa.moving_average(model, model2, 1. / (i + 2))\n\n with torch.no_grad():\n swa.bn_update(dataloader, model)\n\n output_name = '{}.{}.{:03d}'.format(output_filename, num_checkpoint, last_epoch)\n print('save {}'.format(output_name))\n utils.checkpoint.save_checkpoint(config, model, None, 0, 0,\n name=output_name,\n weights_dict={'state_dict': model.state_dict()})\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='hpa')\n parser.add_argument('--config', dest='config_file',\n help='configuration filename',\n default=None, type=str)\n parser.add_argument('--output', dest='output_filename',\n help='output filename',\n default='swa', type=str)\n parser.add_argument('--num_checkpoint', dest='num_checkpoint',\n help='number of checkpoints for averaging',\n default=10, type=int)\n parser.add_argument('--epoch_end', dest='epoch_end',\n help='epoch end',\n default=None, type=int)\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n if args.config_file is None:\n raise Exception('no configuration file')\n \n config = utils.config.load(args.config_file)\n pprint.PrettyPrinter(indent=2).pprint(config)\n run(config, args.num_checkpoint, args.epoch_end, args.output_filename)\n \n print('success!')\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.no_grad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
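In the swa.py record above, each additional checkpoint is folded in with swa.moving_average(model, model2, 1. / (i + 2)). Assuming that helper performs the usual SWA parameter blend a = (1 - alpha) * a + alpha * b (the helper itself is not shown in this record), the 1/(i+2) factor turns the loop into a running uniform mean over all loaded checkpoints. A plain-number sketch of that identity:

    # Incremental mean: after folding in the k-th value with weight 1/k,
    # `avg` equals the uniform mean of the first k values.
    values = [2.0, 4.0, 6.0, 8.0]
    avg = values[0]
    for i, v in enumerate(values[1:]):
        alpha = 1.0 / (i + 2)
        avg = (1 - alpha) * avg + alpha * v
    print(avg)  # 5.0 == sum(values) / len(values)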
1751200/Xlab-k8s-gpu
[ "b258f9610d2416a047f8f9545b1d6f66a7e88df3" ]
[ "MatMul/PyTorch&CuPy/pythonTest.py" ]
[ "import torch\r\nimport time\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport cupy as cp\r\n# torch.cuda.device_count()\r\n# torch.cuda.get_device_name(0)\r\n\r\n# Compute cupy matrix multiplication and its average run time\r\n# shape: matrix dimension, shape*shape\r\n# times: number of runs; the reported time is the average\r\ndef cupy_test(shape,times):\r\n sumT = 0\r\n for i in range(0,times):\r\n arr_gpu=cp.random.rand(shape,shape,dtype=cp.float)\r\n start = time.clock()\r\n cp.einsum('ij, jk',arr_gpu,arr_gpu)\r\n end = time.clock()\r\n if i == 0:\r\n continue\r\n sumT += (end-start)\r\n #print((end-start)*1000)\r\n times-=1\r\n avgT = ((sumT/times)*1000)\r\n print(\"cupy avg time %f\"%avgT)\r\n return avgT\r\n\r\n\r\n# Compute torch GPU matrix multiplication and its average run time\r\n# size: size*size\r\n# times: number of runs; the reported time is the average\r\ndef torch_gpu_test(size,times):\r\n sumT = 0\r\n for i in range(0,times):\r\n # Randomly generate matrices, then copy them to the GPU via cuda\r\n a = torch.rand(size,size).cuda()\r\n b = torch.rand(size,size).cuda()\r\n start = time.clock()\r\n c = torch.mm(a, b)\r\n end = time.clock()\r\n if i == 0:\r\n continue\r\n #print((end-start)*1000)\r\n sumT += end-start\r\n times-=1\r\n avgT = ((sumT/times)*1000)\r\n print(\"torch gpu avg time %f\"%avgT)\r\n return avgT\r\n\r\n\r\n# Compute numpy matrix multiplication and its average run time\r\n# shape: shape*shape\r\n# times: number of runs; the reported time is the average\r\ndef numpy_test(shape,times):\r\n sumT = 0\r\n for i in range(0,times):\r\n arr_cpu=np.random.rand(shape,shape)\r\n start = time.clock()\r\n np.einsum('ij, jk',arr_cpu,arr_cpu)\r\n end = time.clock()\r\n if i == 0:\r\n continue\r\n sumT += (end-start)\r\n #print((end-start)*1000)\r\n times-=1\r\n avgT = ((sumT/times)*1000)\r\n print(\"numpy avg time %f\"%avgT)\r\n return avgT\r\n\r\n\r\n# Plot the run times computed above\r\ndef draw_time(timeDict):\r\n sizeList = timeDict['size']\r\n plt.plot(sizeList,timeDict['cupy'],marker='o',label='cupy')\r\n plt.plot(sizeList,timeDict['pytorch-gpu'],marker='o',label='pytorch-gpu')\r\n #plt.plot(sizeList,timeDict['numpy'],marker='o',label='numpy')\r\n plt.xlabel('matrix size') \r\n plt.ylabel('cal time (ms)')\r\n plt.legend()\r\n plt.show()\r\n\r\n\r\n# Measure the run time of each method for matrix sizes from 2^0 to 2^12\r\ndef statistics_time():\r\n times = 2\r\n timeDict = {}\r\n timeDict['cupy'] = []\r\n timeDict['pytorch-gpu'] = []\r\n timeDict['numpy'] = [] \r\n timeDict['size'] = []\r\n for n in range(0,13):\r\n size = 2**n\r\n print(size)\r\n cT = cupy_test(size,times)\r\n tT = torch_gpu_test(size,times)\r\n nT = numpy_test(size,times)\r\n timeDict['cupy'] .append(cT)\r\n timeDict['pytorch-gpu'].append(tT)\r\n timeDict['numpy'].append(nT)\r\n timeDict['size'].append(size)\r\n return timeDict\r\n\r\nif __name__ == \"__main__\":\r\n # set the gpu\r\n torch.cuda.set_device(0) \r\n timeDict = statistics_time()\r\n draw_time(timeDict) " ]
[ [ "matplotlib.pyplot.legend", "torch.mm", "torch.cuda.set_device", "numpy.einsum", "matplotlib.pyplot.plot", "numpy.random.rand", "torch.rand", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ykawazura/calliope
[ "343b72a0930d70332172a5d87a579b0f8dcced66" ]
[ "diagnostics/MHD_COMP_ADIAB/energy_dot_avg.py" ]
[ "# -*- coding: utf-8 -*-\n\n#####################################################\n## main program for making plots from AstroGK data ##\n#####################################################\nimport numpy as np\nfrom scipy.integrate import simps, trapz\nfrom scipy import interpolate\n\nfrom numba import jit\n\n@jit\ndef time_average(x, y, axis=0): # x: 1D array, y: any-D array \n return trapz(y, x, axis=axis)/(x[-1] - x[0])\n # return trapz(y, x, axis=axis)/(x[-1] - x[0])\n\naverage_start = 77\naverage_end = -1\n\n##########################################################\n# average energy time evolution #\n##########################################################\nprint('\\nplotting energy\\n')\noutdir = './fig_energy/'\n\n# load data\ntime = np.transpose(np.loadtxt(outdir + 'energies.txt' ))[0]\nupe2dot_sum = np.transpose(np.loadtxt(outdir + 'energies.txt' ))[5]\nbpe2dot_sum = np.transpose(np.loadtxt(outdir + 'energies.txt' ))[6]\n\nenergy_dot = upe2dot_sum + bpe2dot_sum\n\nenergy_dot_avg = time_average(time[average_start:average_end], energy_dot[average_start:average_end], axis=0)\nprint ('energy_dot = %.3E' % energy_dot_avg + ' ! This must be zero when stationary')\n" ]
[ [ "scipy.integrate.trapz", "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.10", "1.9", "1.7", "1.8" ], "tensorflow": [] } ]
swdev1202/PointRCNN
[ "2557d670e80e813a8af4edf9f7ff7d2e9d94cfb0" ]
[ "lib/utils/object3d.py" ]
[ "import numpy as np\n\n\ndef cls_type_to_id(cls_type):\n type_to_id = {'Car': 1, 'Pedestrian': 2, 'Cyclist': 3, 'Van': 4}\n if cls_type not in type_to_id.keys():\n return -1\n return type_to_id[cls_type]\n\n\nclass Object3d(object):\n def __init__(self, line):\n label = line.strip().split(' ')\n self.src = line\n self.cls_type = label[0]\n self.cls_id = cls_type_to_id(self.cls_type)\n self.trucation = float(label[1])\n self.occlusion = float(label[2]) # 0:fully visible 1:partly occluded 2:largely occluded 3:unknown\n self.alpha = float(label[3])\n self.box2d = np.array((float(label[4]), float(label[5]), float(label[6]), float(label[7])), dtype=np.float32)\n self.h = float(label[8])\n self.w = float(label[9])\n self.l = float(label[10])\n self.pos = np.array((float(label[11]), float(label[12]), float(label[13])), dtype=np.float32)\n self.dis_to_cam = np.linalg.norm(self.pos)\n self.ry = float(label[14])\n self.score = float(label[15]) if label.__len__() == 16 else -1.0\n self.level_str = None\n self.level = self.get_obj_level()\n\n def get_obj_level(self):\n height = float(self.box2d[3]) - float(self.box2d[1]) + 1\n\n if height >= 40 and self.trucation <= 0.15 and self.occlusion <= 0:\n self.level_str = 'Easy'\n return 1 # Easy\n elif height >= 25 and self.trucation <= 0.3 and self.occlusion <= 1:\n self.level_str = 'Moderate'\n return 2 # Moderate\n elif height >= 25 and self.trucation <= 0.5 and self.occlusion <= 2:\n self.level_str = 'Hard'\n return 3 # Hard\n else:\n self.level_str = 'UnKnown'\n return 4\n\n def generate_corners3d(self):\n \"\"\"\n generate corners3d representation for this object\n :return corners_3d: (8, 3) corners of box3d in camera coord\n \"\"\"\n l, h, w = self.l, self.h, self.w\n x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]\n # y_corners = [0, 0, 0, 0, -h, -h, -h, -h]\n y_corners = [h / 2, h / 2, h / 2, h / 2, -h / 2, -h / 2, -h / 2, -h / 2]\n z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]\n\n R = np.array([[np.cos(self.ry), 0, np.sin(self.ry)],\n [0, 1, 0],\n [-np.sin(self.ry), 0, np.cos(self.ry)]])\n corners3d = np.vstack([x_corners, y_corners, z_corners]) # (3, 8)\n corners3d = np.dot(R, corners3d).T\n corners3d = corners3d + self.pos\n return corners3d\n\n def to_bev_box2d(self, oblique=True, voxel_size=0.1):\n \"\"\"\n :param bev_shape: (2) for bev shape (h, w), => (y_max, x_max) in image\n :param voxel_size: float, 0.1m\n :param oblique:\n :return: box2d (4, 2)/ (4) in image coordinate\n \"\"\"\n if oblique:\n corners3d = self.generate_corners3d()\n xz_corners = corners3d[0:4, [0, 2]]\n box2d = np.zeros((4, 2), dtype=np.int32)\n box2d[:, 0] = ((xz_corners[:, 0] - Object3d.MIN_XZ[0]) / voxel_size).astype(np.int32)\n box2d[:, 1] = Object3d.BEV_SHAPE[0] - 1 - ((xz_corners[:, 1] - Object3d.MIN_XZ[1]) / voxel_size).astype(np.int32)\n box2d[:, 0] = np.clip(box2d[:, 0], 0, Object3d.BEV_SHAPE[1])\n box2d[:, 1] = np.clip(box2d[:, 1], 0, Object3d.BEV_SHAPE[0])\n else:\n box2d = np.zeros(4, dtype=np.int32)\n # discrete_center = np.floor((self.pos / voxel_size)).astype(np.int32)\n cu = np.floor((self.pos[0] - Object3d.MIN_XZ[0]) / voxel_size).astype(np.int32)\n cv = Object3d.BEV_SHAPE[0] - 1 - ((self.pos[2] - Object3d.MIN_XZ[1]) / voxel_size).astype(np.int32)\n half_l, half_w = int(self.l / voxel_size / 2), int(self.w / voxel_size / 2)\n box2d[0], box2d[1] = cu - half_l, cv - half_w\n box2d[2], box2d[3] = cu + half_l, cv + half_w\n\n return box2d\n\n def to_str(self):\n print_str = '%s %.3f %.3f %.3f box2d: %s 
hwl: [%.3f %.3f %.3f] pos: %s ry: %.3f' \\\n % (self.cls_type, self.trucation, self.occlusion, self.alpha, self.box2d, self.h, self.w, self.l,\n self.pos, self.ry)\n return print_str\n\n def to_kitti_format(self):\n kitti_str = '%s %.2f %d %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f' \\\n % (self.cls_type, self.trucation, int(self.occlusion), self.alpha, self.box2d[0], self.box2d[1],\n self.box2d[2], self.box2d[3], self.h, self.w, self.l, self.pos[0], self.pos[1], self.pos[2],\n self.ry)\n return kitti_str\n\n" ]
[ [ "numpy.dot", "numpy.clip", "numpy.linalg.norm", "numpy.cos", "numpy.sin", "numpy.floor", "numpy.zeros", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
asanoviskhak/Outtalent
[ "8a10b23335d8e9f080e5c39715b38bcc2916ff00" ]
[ "Leetcode/1420. Build Array Where You Can Find The Maximum Exactly K Comparisons/solution1.py" ]
[ "import numpy as np\n\n\nclass Solution:\n def numOfArrays(self, n: int, m: int, k: int) -> int:\n mod = 1e9 + 7\n dp = np.zeros((n, k + 1, m + 1))\n ps = np.zeros((n, k + 1, m + 1))\n for i in range(1, m + 1):\n dp[0, 1, i] = 1\n ps[0, 1, i] = ps[0, 1, i - 1] + 1\n for i in range(1, n):\n for j in range(1, k + 1):\n for h in range(1, m + 1):\n dp[i][j][h] = (dp[i][j][h] + dp[i - 1][j][h] * h) % mod\n dp[i][j][h] = (dp[i][j][h] + ps[i - 1][j - 1][h - 1]) % mod\n ps[i][j][h] = (ps[i][j][h - 1] + dp[i][j][h]) % mod\n result = 0\n for i in range(1, m + 1):\n result = (result + dp[n - 1][k][i]) % mod\n return int(result)\n" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Matheus-IT/lang-python-related
[ "dd2e5d9b9f16d3838ba1670fdfcba1fa3fe305e9" ]
[ "data_analysis/NumPy/03NumPy_estatistica.py" ]
[ "import numpy as np\r\n\r\n# Creating an array\r\nA = np.array([15, 23, 63, 94, 75])\r\n\r\n# In statistics, the mean is the value that indicates where the data of a distribution are most concentrated.\r\nprint(np.mean(A))\r\n\r\n# The standard deviation shows how much variation or \"dispersion\" exists \r\n# relative to the mean (or expected value). \r\n# A low standard deviation indicates that the data tend to be close to the mean.\r\n# A high standard deviation indicates that the data are spread over a wide range of values.\r\nprint(np.std(A))\r\n\r\n# The variance of a random variable is a measure of its statistical \r\n# dispersion, indicating \"how far\" its values typically \r\n# are from the expected value\r\nprint(np.var(A))\r\n" ]
[ [ "numpy.var", "numpy.std", "numpy.array", "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
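The comments in the Matheus-IT/lang-python-related snippet above describe the mean, standard deviation, and variance. A quick numerical check of how they relate for the same array (illustrative only, not part of the original file): numpy reports the population statistics by default, and the variance is the square of that standard deviation.

    import numpy as np

    A = np.array([15, 23, 63, 94, 75])
    print(np.mean(A))                             # 54.0
    print(np.std(A))                              # ~30.34 (population std, ddof=0)
    print(np.var(A))                              # 920.8
    print(np.isclose(np.std(A) ** 2, np.var(A)))  # True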
mani-shailesh/lime
[ "3aa9ea9c30bffe73f1bffbe09fe70f6b8bd2c29c" ]
[ "lime/explanation.py" ]
[ "\"\"\"\nExplanation class, with visualization functions.\n\"\"\"\nfrom __future__ import unicode_literals\nfrom io import open\nimport os\nimport os.path\nimport json\nimport string\nimport numpy as np\n\nfrom .exceptions import LimeError\n\nfrom sklearn.utils import check_random_state\n\n\ndef id_generator(size=15, random_state=None):\n \"\"\"Helper function to generate random div ids. This is useful for embedding\n HTML into ipython notebooks.\"\"\"\n chars = list(string.ascii_uppercase + string.digits)\n return ''.join(random_state.choice(chars, size, replace=True))\n\n\nclass DomainMapper(object):\n \"\"\"Class for mapping features to the specific domain.\n\n The idea is that there would be a subclass for each domain (text, tables,\n images, etc), so that we can have a general Explanation class, and separate\n out the specifics of visualizing features in here.\n \"\"\"\n\n def __init__(self):\n pass\n\n def map_exp_ids(self, exp, **kwargs):\n \"\"\"Maps the feature ids to concrete names.\n\n Default behaviour is the identity function. Subclasses can implement\n this as they see fit.\n\n Args:\n exp: list of tuples [(id, weight), (id,weight)]\n kwargs: optional keyword arguments\n\n Returns:\n exp: list of tuples [(name, weight), (name, weight)...]\n \"\"\"\n return exp\n\n def visualize_instance_html(self,\n exp,\n label,\n div_name,\n exp_object_name,\n **kwargs):\n \"\"\"Produces html for visualizing the instance.\n\n Default behaviour does nothing. Subclasses can implement this as they\n see fit.\n\n Args:\n exp: list of tuples [(id, weight), (id,weight)]\n label: label id (integer)\n div_name: name of div object to be used for rendering(in js)\n exp_object_name: name of js explanation object\n kwargs: optional keyword arguments\n\n Returns:\n js code for visualizing the instance\n \"\"\"\n return ''\n\n\nclass Explanation(object):\n \"\"\"Object returned by explainers.\"\"\"\n\n def __init__(self,\n domain_mapper,\n mode='classification',\n class_names=None,\n random_state=None):\n \"\"\"\n\n Initializer.\n\n Args:\n domain_mapper: must inherit from DomainMapper class\n type: \"classification\" or \"regression\"\n class_names: list of class names (only used for classification)\n random_state: an integer or numpy.RandomState that will be used to\n generate random numbers. If None, the random state will be\n initialized using the internal numpy seed.\n \"\"\"\n self.random_state = random_state\n self.mode = mode\n self.domain_mapper = domain_mapper\n self.local_exp = {}\n self.intercept = {}\n self.score = None\n self.local_pred = None\n self.scaled_data = None\n if mode == 'classification':\n self.class_names = class_names\n self.top_labels = None\n self.predict_proba = None\n elif mode == 'regression':\n self.class_names = ['negative', 'positive']\n self.predicted_value = None\n self.min_value = 0.0\n self.max_value = 1.0\n self.dummy_label = 1\n else:\n raise LimeError('Invalid explanation mode \"{}\". '\n 'Should be either \"classification\" '\n 'or \"regression\".'.format(mode))\n\n def available_labels(self):\n \"\"\"\n Returns the list of classification labels for which we have any explanations.\n \"\"\"\n try:\n assert self.mode == \"classification\"\n except AssertionError:\n raise NotImplementedError('Not supported for regression explanations.')\n else:\n ans = self.top_labels if self.top_labels else self.local_exp.keys()\n return list(ans)\n\n def as_list(self, label=1, **kwargs):\n \"\"\"Returns the explanation as a list.\n\n Args:\n label: desired label. 
If you ask for a label for which an\n explanation wasn't computed, will throw an exception.\n Will be ignored for regression explanations.\n kwargs: keyword arguments, passed to domain_mapper\n\n Returns:\n list of tuples (representation, weight), where representation is\n given by domain_mapper. Weight is a float.\n \"\"\"\n label_to_use = label if self.mode == \"classification\" else self.dummy_label\n ans = self.domain_mapper.map_exp_ids(self.local_exp[label_to_use], **kwargs)\n ans = [(x[0], float(x[1])) for x in ans]\n return ans\n\n def as_map(self):\n \"\"\"Returns the map of explanations.\n\n Returns:\n Map from label to list of tuples (feature_id, weight).\n \"\"\"\n return self.local_exp\n\n def as_pyplot_figure(self, label=1, **kwargs):\n \"\"\"Returns the explanation as a pyplot figure.\n\n Will throw an error if you don't have matplotlib installed\n Args:\n label: desired label. If you ask for a label for which an\n explanation wasn't computed, will throw an exception.\n Will be ignored for regression explanations.\n kwargs: keyword arguments, passed to domain_mapper\n\n Returns:\n pyplot figure (barchart).\n \"\"\"\n import matplotlib.pyplot as plt\n exp = self.as_list(label=label, **kwargs)\n fig = plt.figure()\n vals = [x[1] for x in exp]\n names = [x[0] for x in exp]\n vals.reverse()\n names.reverse()\n colors = ['green' if x > 0 else 'red' for x in vals]\n pos = np.arange(len(exp)) + .5\n plt.barh(pos, vals, align='center', color=colors)\n plt.yticks(pos, names)\n if self.mode == \"classification\":\n title = 'Local explanation for class %s' % self.class_names[label]\n else:\n title = 'Local explanation'\n plt.title(title)\n return fig\n\n def show_in_notebook(self,\n labels=None,\n predict_proba=True,\n show_predicted_value=True,\n **kwargs):\n \"\"\"Shows html explanation in ipython notebook.\n\n See as_html() for parameters.\n This will throw an error if you don't have IPython installed\"\"\"\n\n from IPython.core.display import display, HTML\n display(HTML(self.as_html(labels=labels,\n predict_proba=predict_proba,\n show_predicted_value=show_predicted_value,\n **kwargs)))\n\n def save_to_file(self,\n file_path,\n labels=None,\n predict_proba=True,\n show_predicted_value=True,\n **kwargs):\n \"\"\"Saves html explanation to file. .\n\n Params:\n file_path: file to save explanations to\n\n See as_html() for additional parameters.\n\n \"\"\"\n file_ = open(file_path, 'w', encoding='utf8')\n file_.write(self.as_html(labels=labels,\n predict_proba=predict_proba,\n show_predicted_value=show_predicted_value,\n **kwargs))\n file_.close()\n\n def as_html(self,\n labels=None,\n predict_proba=True,\n show_predicted_value=True,\n **kwargs):\n \"\"\"Returns the explanation as an html page.\n\n Args:\n labels: desired labels to show explanations for (as barcharts).\n If you ask for a label for which an explanation wasn't\n computed, will throw an exception. If None, will show\n explanations for all available labels. (only used for classification)\n predict_proba: if true, add barchart with prediction probabilities\n for the top classes. 
(only used for classification)\n show_predicted_value: if true, add barchart with expected value\n (only used for regression)\n kwargs: keyword arguments, passed to domain_mapper\n\n Returns:\n code for an html page, including javascript includes.\n \"\"\"\n\n def jsonize(x):\n return json.dumps(x, ensure_ascii=False)\n\n if labels is None and self.mode == \"classification\":\n labels = self.available_labels()\n\n this_dir, _ = os.path.split(__file__)\n bundle = open(os.path.join(this_dir, 'bundle.js'),\n encoding=\"utf8\").read()\n\n out = u'''<html>\n <meta http-equiv=\"content-type\" content=\"text/html; charset=UTF8\">\n <head><script>%s </script></head><body>''' % bundle\n random_id = id_generator(size=15, random_state=check_random_state(self.random_state))\n out += u'''\n <div class=\"lime top_div\" id=\"top_div%s\"></div>\n ''' % random_id\n\n predict_proba_js = ''\n if self.mode == \"classification\" and predict_proba:\n predict_proba_js = u'''\n var pp_div = top_div.append('div')\n .classed('lime predict_proba', true);\n var pp_svg = pp_div.append('svg').style('width', '100%%');\n var pp = new lime.PredictProba(pp_svg, %s, %s, %s);\n ''' % (jsonize([str(x) for x in self.class_names]),\n jsonize(self.domain_mapper.true_label),\n jsonize(list(self.predict_proba.astype(float))))\n\n predict_value_js = ''\n if self.mode == \"regression\" and show_predicted_value:\n # reference self.predicted_value\n # (svg, predicted_value, min_value, max_value)\n predict_value_js = u'''\n var pp_div = top_div.append('div')\n .classed('lime predicted_value', true);\n var pp_svg = pp_div.append('svg').style('width', '100%%');\n var pp = new lime.PredictedValue(pp_svg, %s, %s, %s);\n ''' % (jsonize(float(self.predicted_value)),\n jsonize(float(self.min_value)),\n jsonize(float(self.max_value)))\n\n exp_js = '''var exp_div;\n var exp = new lime.Explanation(%s);\n ''' % (jsonize([str(x) for x in self.class_names]))\n\n if self.mode == \"classification\":\n for label in labels:\n exp = jsonize(self.as_list(label))\n exp_js += u'''\n exp_div = top_div.append('div').classed('lime explanation', true);\n exp.show(%s, %d, exp_div);\n ''' % (exp, label)\n else:\n exp = jsonize(self.as_list())\n exp_js += u'''\n exp_div = top_div.append('div').classed('lime explanation', true);\n exp.show(%s, %s, exp_div);\n ''' % (exp, self.dummy_label)\n\n raw_js = '''var raw_div = top_div.append('div');'''\n\n if self.mode == \"classification\":\n html_data = self.local_exp[labels[0]]\n else:\n html_data = self.local_exp[self.dummy_label]\n\n raw_js += self.domain_mapper.visualize_instance_html(\n html_data,\n labels[0] if self.mode == \"classification\" else self.dummy_label,\n 'raw_div',\n 'exp',\n **kwargs)\n out += u'''\n <script>\n var top_div = d3.select('#top_div%s').classed('lime top_div', true);\n %s\n %s\n %s\n %s\n </script>\n ''' % (random_id, predict_proba_js, predict_value_js, exp_js, raw_js)\n out += u'</body></html>'\n\n return out\n" ]
[ [ "matplotlib.pyplot.title", "matplotlib.pyplot.barh", "matplotlib.pyplot.yticks", "sklearn.utils.check_random_state", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jagadeeshmeesala/data-processing-pipeline
[ "9a3ef1b650c1e5dd7f12a007ff598ba5f70bdb82" ]
[ "producer.py" ]
[ "#####################################################\n## PROGRAM TO IMPLEMENT KINESIS PRODUCER THAT FETCHES WEATHER INFORMATION\n## AND STREAMS THE DATA INTO KINESIS STREAM\n####################################################\n\n# necessary imports\nimport boto3\nimport datetime as dt\nimport pandas as pd\nimport time\nfrom pandas.core.tools import numeric\nimport requests\nimport json\nimport numpy as np\nimport math\n\n\n# function to create a client with aws for a specific service and region\ndef create_client(service, region):\n return boto3.client(\n service, \n region_name=region,\n aws_access_key_id='XXXX', # replace with actual key\n aws_secret_access_key='XXXX' # replace with actual key\n # aws_session_token=SESSION_TOKEN\n )\n\n# function for generating new runtime to be used for timefield in ES\ndef get_date():\n\n today = str(dt.datetime.today()) # get today as a string\n year = today[:4]\n month = today[5:7]\n day = today[8:10]\n\n hour = today[11:13]\n minutes = today[14:16]\n seconds = today[17:19]\n\n # return a date string in the correct format for ES\n return \"%s/%s/%s %s:%s:%s\" % (year, month, day, hour, minutes, seconds)\n\n# function for generating new runtime to be used for timefield in ES\ndef format_date(date):\n\n date = str(date) # get today as a string\n year = date[:4]\n month = date[5:7]\n day = date[8:10]\n\n # return a date string in the correct format for ES\n return \"%s/%s/%s\" % (year, month, day)\n\n\ndef find_station_name(record, stations_info):\n for _,station in stations_info.iterrows():\n if station['id'] == record['station']:\n return station['name']\n\n# function to transform the data\ndef transform_data(data, stations_info):\n \n # dates = data['cdatetime'] # get the datetime field\n data = data.replace(np.nan, 0)\n transformed = []\n\n # loop over all records\n for _, record in data.iterrows():\n item = {}\n # find station Id\n station_name= find_station_name(record, stations_info)\n \n # station = stations_info.loc[stations_info['name'] == record[\"station\"]]\n # print(station)\n # print('*** station id', station['name'])\n item['stationId'] = record['station']\n item['stationName'] = station_name\n if record['datatype'] == \"PRCP\":\n item['precipitation'] = record['value']\n item['snow'] = 0.0\n item['minTemp'] = 0.0\n item['maxTemp'] = 0.0\n\n if record[\"datatype\"] == \"SNOW\":\n item['precipitation'] = 0.0\n item['snow'] = record['value']\n item['minTemp'] = 0.0\n item['maxTemp'] = 0.0\n if record[\"datatype\"] == \"TMIN\":\n item['precipitation'] = 0.0\n item['snow'] = 0.0\n item['minTemp'] = record['value']\n item['maxTemp'] = 0.0\n if record[\"datatype\"] == \"TMAX\":\n item['precipitation'] = 0.0\n item['snow'] = 0.0\n item['minTemp'] = 0.0\n item['maxTemp'] = record['value']\n\n item['observationDate'] = format_date(record['date']) # format as YYYY-MM-DD\n item['insertedTimeStamp'] = get_date() # current timestamp\n print('*** item**', item)\n transformed.append(item)\n \n # return the dataframe\n return pd.DataFrame(transformed)\n\n\n# fetches weather information for MD stations for month of October for GHCND data\ndef fetch_weather_data():\n\n datasetid = 'GHCND'\n startdate = '2021-10-01'\n enddate = '2021-10-31'\n locationid= 'FIPS:24' # maryland\n datatypeid = 'PRCP,SNOW,TEMP,TMAX,TMIN'\n limit = 1000 # api restricts the data to 1000 rows for every call\n\n offset = 0\n\n baseUrl = 
\"https://www.ncdc.noaa.gov/cdo-web/api/v2/data?datasetid={datasetid}&startdate={startdate}&enddate={enddate}&locationid={locationid}&includemetadata=true&units=metric&datatypeid={datatypeid}&limit={limit}&offset={offset}\"\n\n headers = {\n 'token': 'YmWIsqbWVOByimkultmIWeLGAztzSjCa'\n }\n\n url = baseUrl.format(datasetid=datasetid, startdate = startdate, enddate = enddate, locationid = locationid, datatypeid = datatypeid, limit = limit, offset = offset)\n response = requests.request(\"GET\", url, headers=headers)\n\n results = json.loads(response.text)\n\n totalCount = results[\"metadata\"][\"resultset\"][\"count\"]\n\n dataFrame = pd.DataFrame(results[\"results\"])\n\n pagination = math.floor(totalCount/limit + 1)\n\n # api limits the result set to 1000\n # pagination to fetch the total count\n for loop in range(1, pagination):\n offset = 0\n offset = offset+limit*loop + 1\n url = baseUrl.format(datasetid=datasetid, startdate = startdate, enddate = enddate, locationid = locationid, datatypeid = datatypeid, limit = limit, offset = offset)\n temp = json.loads((requests.request(\"GET\", url, headers=headers)).text)\n tempResults = pd.DataFrame(temp[\"results\"])\n dataFrame = dataFrame.append(tempResults)\n return dataFrame\n\n# fetch station metadata\ndef fetch_station_meta_info():\n datasetid = 'GHCND'\n locationid= 'FIPS:24' # maryland\n limit = 1000\n baseUrl = \"https://www.ncdc.noaa.gov/cdo-web/api/v2/stations?datasetid={datasetid}&locationid={locationid}&limit={limit}\"\n\n headers = {\n 'token': 'YmWIsqbWVOByimkultmIWeLGAztzSjCa'\n }\n\n url = baseUrl.format(datasetid=datasetid, locationid = locationid, limit = limit)\n response = requests.request(\"GET\", url, headers=headers)\n\n results = json.loads(response.text)\n\n totalCount = results[\"metadata\"][\"resultset\"][\"count\"]\n print('**** number of stations *** :', totalCount)\n dataFrame = pd.DataFrame(results[\"results\"])\n return dataFrame\n\n# function for sending data to Kinesis at the absolute maximum throughput\ndef send_kinesis(kinesis_client, kinesis_stream_name, kinesis_shard_count, data):\n\n\n kinesisRecords = [] # empty list to store data\n\n (rows, columns) = data.shape # get rows and columns off provided data\n\n currentBytes = 0 # counter for bytes\n\n rowCount = 0 # as we start with the first\n\n totalRowCount = rows # using our rows variable we got earlier\n\n sendKinesis = False # flag to update when it's time to send data\n \n shardCount = 1 # shard counter\n\n # loop over each of the data rows received \n for _, row in data.iterrows(): \n\n values = '|'.join(str(value) for value in row) # join the values together by a '|'\n\n encodedValues = bytes(values, 'utf-8') # encode the string to bytes\n\n # create a dict object of the row\n kinesisRecord = {\n \"Data\": encodedValues, # data byte-encoded\n \"PartitionKey\": str('aa-bb') # some key used to tell Kinesis which shard to use\n }\n\n\n kinesisRecords.append(kinesisRecord) # add the object to the list\n stringBytes = len(values.encode('utf-8')) # get the number of bytes from the string\n currentBytes = currentBytes + stringBytes # keep a running total\n\n # check conditional whether ready to send\n if len(kinesisRecords) == 500: # if we have 500 records packed up, then proceed\n sendKinesis = True # set the flag\n\n if currentBytes > 50000: # if the byte size is over 50000, proceed\n sendKinesis = True # set the flag\n\n if rowCount == totalRowCount - 1: # if we've reached the last record in the results\n sendKinesis = True # set the flag\n\n # if the 
flag is set\n if sendKinesis == True:\n \n # put the records to kinesis\n response = kinesis_client.put_records(\n Records=kinesisRecords,\n # Data= encodedValues, # data byte-encoded\n # PartitionKey=str(shardCount), # some key used to tell Kinesis which shard to use\n\n StreamName = kinesis_stream_name\n )\n \n # resetting values ready for next loop\n kinesisRecords = [] # empty array\n sendKinesis = False # reset flag\n currentBytes = 0 # reset bytecount\n \n # increment shard count after each put\n shardCount = shardCount + 1\n \n # if it's hit the max, reset\n if shardCount > kinesis_shard_count:\n shardCount = 1\n \n # regardless, make sure to incrememnt the counter for rows.\n rowCount = rowCount + 1\n \n \n # log out how many records were pushed\n print('Total Records sent to Kinesis: {0}'.format(totalRowCount))\n\n# main function\ndef main():\n \n # start timer\n start = time. time()\n \n # create a client with kinesis\n # kinesis-labs-project\n kinesis = create_client('kinesis','us-east-1')\n\n # fetch stattion meta info for FIPS:27 (Maryland)\n stations_info = fetch_station_meta_info()\n\n # fetch weather data\n dataFrame = fetch_weather_data()\n print('**** total number of records to be processed',len(dataFrame.values.tolist()))\n\n # transform data\n data = transform_data(dataFrame, stations_info)\n\n # send it to kinesis data stream\n stream_name = \"kinesis-labs-project-3\"\n stream_shard_count = 1\n \n send_kinesis(kinesis, stream_name, stream_shard_count, data) # send it!\n \n # end timer\n end = time. time()\n \n # log time\n print(\"Runtime: \" + str(end - start))\n \nif __name__ == \"__main__\":\n \n # run main\n main()\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
wondar-chan/akshare
[ "16eeed1c42bfd66533a0430c3f086890269fad90" ]
[ "akshare/economic/macro_usa.py" ]
[ "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\"\nDate: 2021/12/24 12:08\nDesc: 金十数据中心-经济指标-美国\nhttps://datacenter.jin10.com/economic\n\"\"\"\nimport json\nimport time\n\nimport pandas as pd\nfrom akshare.utils import demjson\nimport requests\n\nfrom akshare.economic.cons import (\n JS_USA_NON_FARM_URL,\n JS_USA_UNEMPLOYMENT_RATE_URL,\n JS_USA_EIA_CRUDE_URL,\n JS_USA_INITIAL_JOBLESS_URL,\n JS_USA_CORE_PCE_PRICE_URL,\n JS_USA_CPI_MONTHLY_URL,\n JS_USA_LMCI_URL,\n JS_USA_ADP_NONFARM_URL,\n JS_USA_GDP_MONTHLY_URL,\n)\n\n\n# 东方财富-美国-未决房屋销售月率\ndef macro_usa_phs() -> pd.DataFrame:\n \"\"\"\n 东方财富-经济数据一览-美国-未决房屋销售月率\n http://data.eastmoney.com/cjsj/foreign_0_5.html\n :return: 未决房屋销售月率\n :rtype: pandas.DataFrame\n \"\"\"\n url = \"http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx\"\n params = {\n 'type': 'GJZB',\n 'sty': 'HKZB',\n 'js': '({data:[(x)],pages:(pc)})',\n 'p': '1',\n 'ps': '2000',\n 'mkt': '0',\n 'stat': '5',\n 'pageNo': '1',\n 'pageNum': '1',\n '_': '1625474966006'\n }\n r = requests.get(url, params=params)\n data_text = r.text\n data_json = demjson.decode(data_text[1:-1])\n temp_df = pd.DataFrame([item.split(',') for item in data_json['data']])\n temp_df.columns = [\n '时间',\n '前值',\n '现值',\n '发布日期',\n ]\n temp_df['时间'] = pd.to_datetime(temp_df['时间']).dt.date\n temp_df['前值'] = pd.to_numeric(temp_df['前值'])\n temp_df['现值'] = pd.to_numeric(temp_df['现值'])\n temp_df['发布日期'] = pd.to_datetime(temp_df['发布日期']).dt.date\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-经济状况-美国GDP\ndef macro_usa_gdp_monthly() -> pd.DataFrame:\n \"\"\"\n 金十数据-美国国内生产总值(GDP)报告, 数据区间从 20080228-至今\n https://datacenter.jin10.com/reportType/dc_usa_gdp\n :return: pandas.Series\n \"\"\"\n t = time.time()\n res = requests.get(\n JS_USA_GDP_MONTHLY_URL.format(\n str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)\n )\n )\n json_data = json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国国内生产总值(GDP)\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值(%)\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"max_date\": \"\",\n \"category\": \"ec\",\n \"attr_id\": \"53\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"x-app-id\": \"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = 
temp_df.squeeze()\n temp_df.index.name = None\n temp_df.name = \"gdp\"\n temp_df = temp_df.astype(\"float\")\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-物价水平-美国CPI月率报告\ndef macro_usa_cpi_monthly() -> pd.DataFrame:\n \"\"\"\n 美国CPI月率报告, 数据区间从19700101-至今\n https://datacenter.jin10.com/reportType/dc_usa_cpi\n https://cdn.jin10.com/dc/reports/dc_usa_cpi_all.js?v=1578741110\n :return: 美国CPI月率报告-今值(%)\n :rtype: pandas.Series\n \"\"\"\n t = time.time()\n res = requests.get(\n JS_USA_CPI_MONTHLY_URL.format(\n str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)\n )\n )\n json_data = json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国居民消费价格指数(CPI)(月环比)\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值(%)\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"max_date\": \"\",\n \"category\": \"ec\",\n \"attr_id\": \"9\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"x-app-id\": \"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df.name = \"cpi_monthly\"\n temp_df = temp_df.astype(\"float\")\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-物价水平-美国核心CPI月率报告\ndef macro_usa_core_cpi_monthly() -> pd.DataFrame:\n \"\"\"\n 美国核心CPI月率报告, 数据区间从19700101-至今\n https://datacenter.jin10.com/reportType/dc_usa_core_cpi\n https://cdn.jin10.com/dc/reports/dc_usa_core_cpi_all.js?v=1578740570\n :return: 美国核心CPI月率报告-今值(%)\n :rtype: pandas.Series\n \"\"\"\n t = time.time()\n res = requests.get(\n f\"https://cdn.jin10.com/dc/reports/dc_usa_core_cpi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}\"\n )\n json_data = json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国核心CPI月率报告\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值(%)\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"max_date\": \"\",\n \"category\": \"ec\",\n \"attr_id\": \"6\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, 
br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"x-app-id\": \"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df.name = \"usa_core_cpi\"\n temp_df = temp_df.astype(\"float\")\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-物价水平-美国个人支出月率报告\ndef macro_usa_personal_spending() -> pd.DataFrame:\n \"\"\"\n 美国个人支出月率报告, 数据区间从19700101-至今\n https://datacenter.jin10.com/reportType/dc_usa_personal_spending\n https://cdn.jin10.com/dc/reports/dc_usa_personal_spending_all.js?v=1578741327\n :return: 美国个人支出月率报告-今值(%)\n :rtype: pandas.Series\n \"\"\"\n t = time.time()\n res = requests.get(\n f\"https://cdn.jin10.com/dc/reports/dc_usa_personal_spending_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}\"\n )\n json_data = json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国个人支出月率报告\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值(%)\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"max_date\": \"\",\n \"category\": \"ec\",\n \"attr_id\": \"35\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"x-app-id\": \"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df.name = \"usa_personal_spending\"\n temp_df = temp_df.astype(\"float\")\n return 
temp_df\n\n\n# 金十数据中心-经济指标-美国-物价水平-美国零售销售月率报告\ndef macro_usa_retail_sales() -> pd.DataFrame:\n \"\"\"\n 美国零售销售月率报告, 数据区间从19920301-至今\n https://datacenter.jin10.com/reportType/dc_usa_retail_sales\n https://cdn.jin10.com/dc/reports/dc_usa_retail_sales_all.js?v=1578741528\n :return: 美国零售销售月率报告-今值(%)\n :rtype: pandas.Series\n \"\"\"\n t = time.time()\n res = requests.get(\n f\"https://cdn.jin10.com/dc/reports/dc_usa_retail_sales_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}\"\n )\n json_data = json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国零售销售月率报告\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值(%)\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"max_date\": \"\",\n \"category\": \"ec\",\n \"attr_id\": \"39\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"x-app-id\": \"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df.name = \"usa_retail_sales\"\n temp_df = temp_df.astype(\"float\")\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-物价水平-美国进口物价指数报告\ndef macro_usa_import_price() -> pd.DataFrame:\n \"\"\"\n 美国进口物价指数报告, 数据区间从19890201-至今\n https://datacenter.jin10.com/reportType/dc_usa_import_price\n https://cdn.jin10.com/dc/reports/dc_usa_import_price_all.js?v=1578741716\n :return: 美国进口物价指数报告-今值(%)\n :rtype: pandas.Series\n \"\"\"\n t = time.time()\n res = requests.get(\n f\"https://cdn.jin10.com/dc/reports/dc_usa_import_price_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}\"\n )\n json_data = json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国进口物价指数\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值(%)\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"max_date\": \"\",\n \"category\": \"ec\",\n \"attr_id\": \"18\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n 
\"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"x-app-id\": \"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df.name = \"usa_import_price\"\n temp_df = temp_df.astype(\"float\")\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-物价水平-美国出口价格指数报告\ndef macro_usa_export_price() -> pd.DataFrame:\n \"\"\"\n 美国出口价格指数报告, 数据区间从19890201-至今\n https://datacenter.jin10.com/reportType/dc_usa_export_price\n https://cdn.jin10.com/dc/reports/dc_usa_export_price_all.js?v=1578741832\n :return: 美国出口价格指数报告-今值(%)\n :rtype: pandas.Series\n \"\"\"\n t = time.time()\n res = requests.get(\n f\"https://cdn.jin10.com/dc/reports/dc_usa_export_price_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}\"\n )\n json_data = json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国出口价格指数\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值(%)\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"max_date\": \"\",\n \"category\": \"ec\",\n \"attr_id\": \"79\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"x-app-id\": \"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df.name = \"usa_export_price\"\n temp_df = temp_df.astype(\"float\")\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-劳动力市场-LMCI\ndef macro_usa_lmci() -> pd.DataFrame:\n 
\"\"\"\n 美联储劳动力市场状况指数报告, 数据区间从20141006-至今\n https://datacenter.jin10.com/reportType/dc_usa_lmci\n https://cdn.jin10.com/dc/reports/dc_usa_lmci_all.js?v=1578742043\n :return: 美联储劳动力市场状况指数报告-今值(%)\n :rtype: pandas.Series\n \"\"\"\n t = time.time()\n res = requests.get(\n JS_USA_LMCI_URL.format(\n str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)\n )\n )\n json_data = json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美联储劳动力市场状况指数\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值(%)\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"max_date\": \"\",\n \"category\": \"ec\",\n \"attr_id\": \"93\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"x-app-id\": \"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df.name = \"lmci\"\n temp_df = temp_df.astype(\"float\")\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-劳动力市场-失业率-美国失业率报告\ndef macro_usa_unemployment_rate() -> pd.DataFrame:\n \"\"\"\n 美国失业率报告, 数据区间从19700101-至今\n https://datacenter.jin10.com/reportType/dc_usa_unemployment_rate\n https://cdn.jin10.com/dc/reports/dc_usa_unemployment_rate_all.js?v=1578821511\n :return: 获取美国失业率报告-今值(%)\n :rtype: pandas.Series\n \"\"\"\n t = time.time()\n res = requests.get(\n JS_USA_UNEMPLOYMENT_RATE_URL.format(\n str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)\n )\n )\n json_data = json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国失业率\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值(%)\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"category\": \"ec\",\n \"attr_id\": \"47\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": 
\"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"x-app-id\": \"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df = temp_df.astype(\"float\")\n temp_df.name = \"unemployment_rate\"\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-劳动力市场-失业率-美国挑战者企业裁员人数报告\ndef macro_usa_job_cuts() -> pd.DataFrame:\n \"\"\"\n 美国挑战者企业裁员人数报告, 数据区间从19940201-至今\n https://datacenter.jin10.com/reportType/dc_usa_job_cuts\n https://cdn.jin10.com/dc/reports/dc_usa_job_cuts_all.js?v=1578742262\n :return: 美国挑战者企业裁员人数报告-今值(万人)\n :rtype: pandas.Series\n \"\"\"\n t = time.time()\n res = requests.get(\n f\"https://cdn.jin10.com/dc/reports/dc_usa_job_cuts_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}\"\n )\n json_data = json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国挑战者企业裁员人数报告\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值(万人)\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"category\": \"ec\",\n \"attr_id\": \"78\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"x-app-id\": \"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df = temp_df.astype(\"float\")\n temp_df.name = \"usa_job_cuts\"\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-劳动力市场-就业人口-美国非农就业人数报告\ndef macro_usa_non_farm() -> pd.DataFrame:\n \"\"\"\n 美国非农就业人数报告, 数据区间从19700102-至今\n https://datacenter.jin10.com/reportType/dc_nonfarm_payrolls\n 
https://cdn.jin10.com/dc/reports/dc_nonfarm_payrolls_all.js?v=1578742490\n :return: 美国非农就业人数报告-今值(万人)\n :rtype: pandas.Series\n \"\"\"\n t = time.time()\n res = requests.get(\n JS_USA_NON_FARM_URL.format(\n str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)\n )\n )\n json_data = json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国非农就业人数\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值(万人)\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"category\": \"ec\",\n \"attr_id\": \"33\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"x-app-id\": \"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df = temp_df.astype(\"float\")\n temp_df.name = \"non_farm\"\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-劳动力市场-就业人口-美国ADP就业人数报告\ndef macro_usa_adp_employment() -> pd.DataFrame:\n \"\"\"\n 美国ADP就业人数报告, 数据区间从20010601-至今\n https://datacenter.jin10.com/reportType/dc_adp_nonfarm_employment\n https://cdn.jin10.com/dc/reports/dc_adp_nonfarm_employment_all.js?v=1578742564\n :return: 美国ADP就业人数报告-今值(万人)\n :rtype: pandas.Series\n \"\"\"\n t = time.time()\n res = requests.get(\n JS_USA_ADP_NONFARM_URL.format(\n str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)\n )\n )\n json_data = json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国ADP就业人数(万人)\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值(万人)\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"category\": \"ec\",\n \"attr_id\": \"1\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": 
\"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"x-app-id\": \"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df = temp_df.astype(\"float\")\n temp_df.name = \"adp\"\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-劳动力市场-消费者收入与支出-美国核心PCE物价指数年率报告\ndef macro_usa_core_pce_price() -> pd.DataFrame:\n \"\"\"\n 美国核心PCE物价指数年率报告, 数据区间从19700101-至今\n https://datacenter.jin10.com/reportType/dc_usa_core_pce_price\n https://cdn.jin10.com/dc/reports/dc_usa_core_pce_price_all.js?v=1578742641\n :return: 美国核心PCE物价指数年率报告-今值(%)\n :rtype: pandas.Series\n \"\"\"\n t = time.time()\n res = requests.get(\n JS_USA_CORE_PCE_PRICE_URL.format(\n str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)\n )\n )\n json_data = json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国核心PCE物价指数年率\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值(%)\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"category\": \"ec\",\n \"attr_id\": \"80\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"x-app-id\": \"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df = temp_df.astype(\"float\")\n temp_df.name = \"core_pce_price\"\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-劳动力市场-消费者收入与支出-美国实际个人消费支出季率初值报告\ndef macro_usa_real_consumer_spending() -> pd.DataFrame:\n \"\"\"\n 美国实际个人消费支出季率初值报告, 数据区间从20131107-至今\n https://datacenter.jin10.com/reportType/dc_usa_real_consumer_spending\n https://cdn.jin10.com/dc/reports/dc_usa_real_consumer_spending_all.js?v=1578742802\n :return: 美国实际个人消费支出季率初值报告-今值(%)\n :rtype: pandas.Series\n \"\"\"\n t = 
time.time()\n res = requests.get(\n f\"https://cdn.jin10.com/dc/reports/dc_usa_real_consumer_spending_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}\"\n )\n json_data = json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国实际个人消费支出季率初值报告\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值(%)\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"category\": \"ec\",\n \"attr_id\": \"81\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"x-app-id\": \"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df = temp_df.astype(\"float\")\n temp_df.name = \"usa_real_consumer_spending\"\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-贸易状况-美国贸易帐报告\ndef macro_usa_trade_balance() -> pd.DataFrame:\n \"\"\"\n 美国贸易帐报告, 数据区间从19700101-至今\n https://datacenter.jin10.com/reportType/dc_usa_trade_balance\n https://cdn.jin10.com/dc/reports/dc_usa_trade_balance_all.js?v=1578742911\n :return: 美国贸易帐报告-今值(亿美元)\n :rtype: pandas.Series\n \"\"\"\n t = time.time()\n res = requests.get(\n f\"https://cdn.jin10.com/dc/reports/dc_usa_trade_balance_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}\"\n )\n json_data = json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国贸易帐报告\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值(亿美元)\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"category\": \"ec\",\n \"attr_id\": \"42\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; 
x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"x-app-id\": \"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df = temp_df.astype(\"float\")\n temp_df.name = \"usa_trade_balance\"\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-贸易状况-美国经常帐报告\ndef macro_usa_current_account() -> pd.DataFrame:\n \"\"\"\n 美国经常帐报告, 数据区间从20080317-至今\n https://datacenter.jin10.com/reportType/dc_usa_current_account\n https://cdn.jin10.com/dc/reports/dc_usa_current_account_all.js?v=1578743012\n :return: 美国经常帐报告-今值(亿美元)\n :rtype: pandas.Series\n \"\"\"\n t = time.time()\n res = requests.get(\n f\"https://cdn.jin10.com/dc/reports/dc_usa_current_account_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}\"\n )\n json_data = json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国经常账报告\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值(亿美元)\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"category\": \"ec\",\n \"attr_id\": \"12\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"x-app-id\": \"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df = temp_df.astype(\"float\")\n temp_df.name = \"usa_current_account\"\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-产业指标-制造业-贝克休斯钻井报告\ndef macro_usa_rig_count() -> pd.DataFrame:\n \"\"\"\n 贝克休斯钻井报告, 数据区间从20080317-至今\n https://datacenter.jin10.com/reportType/dc_rig_count_summary\n https://cdn.jin10.com/dc/reports/dc_rig_count_summary_all.js?v=1578743203\n :return: 贝克休斯钻井报告-当周\n :rtype: pandas.Series\n \"\"\"\n t = time.time()\n params = {\n \"_\": t\n }\n res = requests.get(\"https://cdn.jin10.com/data_center/reports/baker.json\", 
params=params)\n temp_df = pd.DataFrame(res.json().get(\"values\")).T\n big_df = pd.DataFrame()\n big_df[\"钻井总数_钻井数\"] = temp_df[\"钻井总数\"].apply(lambda x: x[0])\n big_df[\"钻井总数_变化\"] = temp_df[\"钻井总数\"].apply(lambda x: x[1])\n big_df[\"美国石油钻井_钻井数\"] = temp_df[\"美国石油钻井\"].apply(lambda x: x[0])\n big_df[\"美国石油钻井_变化\"] = temp_df[\"美国石油钻井\"].apply(lambda x: x[1])\n big_df[\"混合钻井_钻井数\"] = temp_df[\"混合钻井\"].apply(lambda x: x[0])\n big_df[\"混合钻井_变化\"] = temp_df[\"混合钻井\"].apply(lambda x: x[1])\n big_df[\"美国天然气钻井_钻井数\"] = temp_df[\"美国天然气钻井\"].apply(lambda x: x[0])\n big_df[\"美国天然气钻井_变化\"] = temp_df[\"美国天然气钻井\"].apply(lambda x: x[1])\n big_df = big_df.astype(\"float\")\n return big_df\n\n\n# 金十数据中心-经济指标-美国-产业指标-制造业-美国个人支出月率报告\n\n\n# 金十数据中心-经济指标-美国-产业指标-制造业-美国生产者物价指数(PPI)报告\ndef macro_usa_ppi() -> pd.DataFrame:\n \"\"\"\n 美国生产者物价指数(PPI)报告, 数据区间从20080226-至今\n https://datacenter.jin10.com/reportType/dc_usa_ppi\n https://cdn.jin10.com/dc/reports/dc_usa_ppi_all.js?v=1578743628\n :return: 美国生产者物价指数(PPI)报告-今值(%)\n :rtype: pandas.Series\n \"\"\"\n t = time.time()\n res = requests.get(\n f\"https://cdn.jin10.com/dc/reports/dc_usa_ppi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}\"\n )\n json_data = json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国生产者物价指数(PPI)报告\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值(%)\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"max_date\": \"\",\n \"category\": \"ec\",\n \"attr_id\": \"37\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"x-app-id\": \"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df.name = \"usa_ppi\"\n temp_df = temp_df.astype(\"float\")\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-产业指标-制造业-美国核心生产者物价指数(PPI)报告\ndef macro_usa_core_ppi() -> pd.DataFrame:\n \"\"\"\n 美国核心生产者物价指数(PPI)报告, 数据区间从20080318-至今\n https://datacenter.jin10.com/reportType/dc_usa_core_ppi\n https://cdn.jin10.com/dc/reports/dc_usa_core_ppi_all.js?v=1578743709\n :return: 美国核心生产者物价指数(PPI)报告-今值(%)\n :rtype: pandas.Series\n \"\"\"\n t = time.time()\n res = requests.get(\n f\"https://cdn.jin10.com/dc/reports/dc_usa_core_ppi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 
90)}\"\n )\n json_data = json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国核心生产者物价指数(PPI)报告\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值(%)\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"max_date\": \"\",\n \"category\": \"ec\",\n \"attr_id\": \"7\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"x-app-id\": \"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df.name = \"usa_core_ppi\"\n temp_df = temp_df.astype(\"float\")\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-产业指标-制造业-美国API原油库存报告\ndef macro_usa_api_crude_stock() -> pd.DataFrame:\n \"\"\"\n 美国API原油库存报告, 数据区间从20120328-至今\n https://datacenter.jin10.com/reportType/dc_usa_api_crude_stock\n https://cdn.jin10.com/dc/reports/dc_usa_api_crude_stock_all.js?v=1578743859\n :return: 美国API原油库存报告-今值(万桶)\n :rtype: pandas.Series\n \"\"\"\n t = time.time()\n res = requests.get(\n f\"https://cdn.jin10.com/dc/reports/dc_usa_api_crude_stock_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}\"\n )\n json_data = json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国API原油库存报告\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值(万桶)\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"max_date\": \"\",\n \"category\": \"ec\",\n \"attr_id\": \"69\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"x-app-id\": 
\"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df.name = \"usa_api_crude_stock\"\n temp_df = temp_df.astype(\"float\")\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-产业指标-制造业-美国Markit制造业PMI初值报告\ndef macro_usa_pmi() -> pd.DataFrame:\n \"\"\"\n 美国Markit制造业PMI初值报告, 数据区间从20120601-至今\n https://datacenter.jin10.com/reportType/dc_usa_pmi\n https://cdn.jin10.com/dc/reports/dc_usa_pmi_all.js?v=1578743969\n :return: 美国Markit制造业PMI初值报告-今值\n :rtype: pandas.Series\n \"\"\"\n t = time.time()\n res = requests.get(\n f\"https://cdn.jin10.com/dc/reports/dc_usa_pmi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}\"\n )\n json_data = json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国Markit制造业PMI报告\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"max_date\": \"\",\n \"category\": \"ec\",\n \"attr_id\": \"74\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"x-app-id\": \"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df.name = \"usa_pmi\"\n temp_df = temp_df.astype(\"float\")\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-产业指标-制造业-美国ISM制造业PMI报告\ndef macro_usa_ism_pmi() -> pd.DataFrame:\n \"\"\"\n 美国ISM制造业PMI报告, 数据区间从19700101-至今\n https://datacenter.jin10.com/reportType/dc_usa_ism_pmi\n https://cdn.jin10.com/dc/reports/dc_usa_ism_pmi_all.js?v=1578744071\n :return: 美国ISM制造业PMI报告-今值\n :rtype: pandas.Series\n \"\"\"\n t = time.time()\n res = requests.get(\n f\"https://cdn.jin10.com/dc/reports/dc_usa_ism_pmi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}\"\n )\n json_data = json.loads(res.text[res.text.find(\"{\"): 
res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国ISM制造业PMI报告\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"max_date\": \"\",\n \"category\": \"ec\",\n \"attr_id\": \"28\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"x-app-id\": \"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df.name = \"usa_ism_pmi\"\n temp_df = temp_df.astype(\"float\")\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-产业指标-工业-美国工业产出月率报告\ndef macro_usa_industrial_production() -> pd.DataFrame:\n \"\"\"\n 美国工业产出月率报告, 数据区间从19700101-至今\n https://datacenter.jin10.com/reportType/dc_usa_industrial_production\n https://cdn.jin10.com/dc/reports/dc_usa_industrial_production_all.js?v=1578744188\n :return: 美国工业产出月率报告-今值(%)\n :rtype: pandas.Series\n \"\"\"\n t = time.time()\n res = requests.get(\n f\"https://cdn.jin10.com/dc/reports/dc_usa_industrial_production_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}\"\n )\n json_data = json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国工业产出月率报告\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值(%)\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"max_date\": \"\",\n \"category\": \"ec\",\n \"attr_id\": \"20\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"x-app-id\": \"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": 
\"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df.name = \"usa_industrial_production\"\n temp_df = temp_df.astype(\"float\")\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-产业指标-工业-美国耐用品订单月率报告\ndef macro_usa_durable_goods_orders() -> pd.DataFrame:\n \"\"\"\n 美国耐用品订单月率报告, 数据区间从20080227-至今\n https://datacenter.jin10.com/reportType/dc_usa_durable_goods_orders\n https://cdn.jin10.com/dc/reports/dc_usa_durable_goods_orders_all.js?v=1578744295\n :return: 美国耐用品订单月率报告-今值(%)\n :rtype: pandas.Series\n \"\"\"\n t = time.time()\n res = requests.get(\n f\"https://cdn.jin10.com/dc/reports/dc_usa_durable_goods_orders_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}\"\n )\n json_data = json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国耐用品订单月率报告\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值(%)\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"max_date\": \"\",\n \"category\": \"ec\",\n \"attr_id\": \"13\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"x-app-id\": \"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df.name = \"usa_durable_goods_orders\"\n temp_df = temp_df.astype(\"float\")\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-产业指标-工业-美国工厂订单月率报告\ndef macro_usa_factory_orders() -> pd.DataFrame:\n \"\"\"\n 美国工厂订单月率报告, 数据区间从19920401-至今\n https://datacenter.jin10.com/reportType/dc_usa_factory_orders\n https://cdn.jin10.com/dc/reports/dc_usa_factory_orders_all.js?v=1578744385\n :return: 美国工厂订单月率报告-今值(%)\n :rtype: pandas.Series\n \"\"\"\n t = time.time()\n res = requests.get(\n f\"https://cdn.jin10.com/dc/reports/dc_usa_factory_orders_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}\"\n )\n json_data = 
json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国工厂订单月率报告\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值(%)\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"max_date\": \"\",\n \"category\": \"ec\",\n \"attr_id\": \"16\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"x-app-id\": \"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df.name = \"usa_factory_orders\"\n temp_df = temp_df.astype(\"float\")\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-产业指标-服务业-美国Markit服务业PMI初值报告\ndef macro_usa_services_pmi() -> pd.DataFrame:\n \"\"\"\n 美国Markit服务业PMI初值报告, 数据区间从20120701-至今\n https://datacenter.jin10.com/reportType/dc_usa_services_pmi\n https://cdn.jin10.com/dc/reports/dc_usa_services_pmi_all.js?v=1578744503\n :return: 美国Markit服务业PMI初值报告-今值\n :rtype: pandas.Series\n \"\"\"\n t = time.time()\n res = requests.get(\n f\"https://cdn.jin10.com/dc/reports/dc_usa_services_pmi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}\"\n )\n json_data = json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国Markit服务业PMI初值报告\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"max_date\": \"\",\n \"category\": \"ec\",\n \"attr_id\": \"89\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"x-app-id\": \"rU6QIu7JHe2gOUeR\",\n 
\"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df.name = \"usa_services_pmi\"\n temp_df = temp_df.astype(\"float\")\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-产业指标-服务业-美国商业库存月率报告\ndef macro_usa_business_inventories() -> pd.DataFrame:\n \"\"\"\n 美国商业库存月率报告, 数据区间从19920301-至今\n https://datacenter.jin10.com/reportType/dc_usa_business_inventories\n https://cdn.jin10.com/dc/reports/dc_usa_business_inventories_all.js?v=1578744618\n :return: 美国商业库存月率报告-今值(%)\n :rtype: pandas.Series\n \"\"\"\n t = time.time()\n res = requests.get(\n f\"https://cdn.jin10.com/dc/reports/dc_usa_business_inventories_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}\"\n )\n json_data = json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国商业库存月率报告\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值(%)\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"max_date\": \"\",\n \"category\": \"ec\",\n \"attr_id\": \"4\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"x-app-id\": \"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df.name = \"usa_business_inventories\"\n temp_df = temp_df.astype(\"float\")\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-产业指标-服务业-美国ISM非制造业PMI报告\ndef macro_usa_ism_non_pmi() -> pd.DataFrame:\n \"\"\"\n 美国ISM非制造业PMI报告, 数据区间从19970801-至今\n https://datacenter.jin10.com/reportType/dc_usa_ism_non_pmi\n https://cdn.jin10.com/dc/reports/dc_usa_ism_non_pmi_all.js?v=1578744693\n :return: 美国ISM非制造业PMI报告-今值\n :rtype: pandas.Series\n \"\"\"\n t = time.time()\n res = requests.get(\n f\"https://cdn.jin10.com/dc/reports/dc_usa_ism_non_pmi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}\"\n )\n 
json_data = json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国ISM非制造业PMI报告\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"max_date\": \"\",\n \"category\": \"ec\",\n \"attr_id\": \"29\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"x-app-id\": \"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df.name = \"usa_ism_non_pmi\"\n temp_df = temp_df.astype(\"float\")\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-产业指标-房地产-美国NAHB房产市场指数报告\ndef macro_usa_nahb_house_market_index() -> pd.DataFrame:\n \"\"\"\n 美国NAHB房产市场指数报告, 数据区间从19850201-至今\n https://datacenter.jin10.com/reportType/dc_usa_nahb_house_market_index\n https://cdn.jin10.com/dc/reports/dc_usa_nahb_house_market_index_all.js?v=1578744817\n :return: 美国NAHB房产市场指数报告-今值\n :rtype: pandas.Series\n \"\"\"\n t = time.time()\n res = requests.get(\n f\"https://cdn.jin10.com/dc/reports/dc_usa_nahb_house_market_index_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}\"\n )\n json_data = json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国NAHB房产市场指数报告\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"max_date\": \"\",\n \"category\": \"ec\",\n \"attr_id\": \"31\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n 
\"x-app-id\": \"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df.name = \"usa_nahb_house_market_index\"\n temp_df = temp_df.astype(\"float\")\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-产业指标-房地产-美国新屋开工总数年化报告\ndef macro_usa_house_starts() -> pd.DataFrame:\n \"\"\"\n 美国新屋开工总数年化报告, 数据区间从19700101-至今\n https://datacenter.jin10.com/reportType/dc_usa_house_starts\n https://cdn.jin10.com/dc/reports/dc_usa_house_starts_all.js?v=1578747388\n :return: 美国新屋开工总数年化报告-今值(万户)\n :rtype: pandas.Series\n \"\"\"\n t = time.time()\n res = requests.get(\n f\"https://cdn.jin10.com/dc/reports/dc_usa_house_starts_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}\"\n )\n json_data = json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国新屋开工总数年化报告\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值(万户)\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"max_date\": \"\",\n \"category\": \"ec\",\n \"attr_id\": \"17\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"x-app-id\": \"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df.name = \"usa_house_starts\"\n temp_df = temp_df.astype(\"float\")\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-产业指标-房地产-美国新屋销售总数年化报告\ndef macro_usa_new_home_sales() -> pd.DataFrame:\n \"\"\"\n 美国新屋销售总数年化报告, 数据区间从19700101-至今\n https://datacenter.jin10.com/reportType/dc_usa_new_home_sales\n https://cdn.jin10.com/dc/reports/dc_usa_new_home_sales_all.js?v=1578747501\n :return: 美国新屋销售总数年化报告-今值(万户)\n :rtype: pandas.Series\n \"\"\"\n t = time.time()\n res = requests.get(\n f\"https://cdn.jin10.com/dc/reports/dc_usa_new_home_sales_all.js?v={str(int(round(t * 1000))), str(int(round(t * 
1000)) + 90)}\"\n )\n json_data = json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国新屋销售总数年化报告\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值(万户)\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"max_date\": \"\",\n \"category\": \"ec\",\n \"attr_id\": \"32\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"x-app-id\": \"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df.name = \"usa_new_home_sales\"\n temp_df = temp_df.astype(\"float\")\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-产业指标-房地产-美国营建许可总数报告\ndef macro_usa_building_permits() -> pd.DataFrame:\n \"\"\"\n 美国营建许可总数报告, 数据区间从20080220-至今\n https://datacenter.jin10.com/reportType/dc_usa_building_permits\n https://cdn.jin10.com/dc/reports/dc_usa_building_permits_all.js?v=1578747599\n :return: 美国营建许可总数报告-今值(万户)\n :rtype: pandas.Series\n \"\"\"\n t = time.time()\n res = requests.get(\n f\"https://cdn.jin10.com/dc/reports/dc_usa_building_permits_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}\"\n )\n json_data = json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国营建许可总数报告\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值(万户)\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"max_date\": \"\",\n \"category\": \"ec\",\n \"attr_id\": \"3\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"x-app-id\": 
\"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df.name = \"usa_building_permits\"\n temp_df = temp_df.astype(\"float\")\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-产业指标-房地产-美国成屋销售总数年化报告\ndef macro_usa_exist_home_sales() -> pd.DataFrame:\n \"\"\"\n 美国成屋销售总数年化报告, 数据区间从19700101-至今\n https://datacenter.jin10.com/reportType/dc_usa_exist_home_sales\n https://cdn.jin10.com/dc/reports/dc_usa_exist_home_sales_all.js?v=1578747703\n :return: 美国成屋销售总数年化报告-今值(万户)\n :rtype: pandas.Series\n \"\"\"\n t = time.time()\n res = requests.get(\n f\"https://cdn.jin10.com/dc/reports/dc_usa_exist_home_sales_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}\"\n )\n json_data = json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国成屋销售总数年化报告\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值(万户)\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"max_date\": \"\",\n \"category\": \"ec\",\n \"attr_id\": \"15\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"x-app-id\": \"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df.name = \"usa_exist_home_sales\"\n temp_df = temp_df.astype(\"float\")\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-产业指标-房地产-美国FHFA房价指数月率报告\ndef macro_usa_house_price_index() -> pd.DataFrame:\n \"\"\"\n 美国FHFA房价指数月率报告, 数据区间从19910301-至今\n https://datacenter.jin10.com/reportType/dc_usa_house_price_index\n https://cdn.jin10.com/dc/reports/dc_usa_house_price_index_all.js?v=1578747781\n :return: 美国FHFA房价指数月率报告-今值(%)\n :rtype: pandas.Series\n \"\"\"\n t = time.time()\n res = requests.get(\n f\"https://cdn.jin10.com/dc/reports/dc_usa_house_price_index_all.js?v={str(int(round(t * 1000))), 
str(int(round(t * 1000)) + 90)}\"\n )\n json_data = json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国FHFA房价指数月率报告\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值(%)\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"max_date\": \"\",\n \"category\": \"ec\",\n \"attr_id\": \"51\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"x-app-id\": \"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df.name = \"usa_house_price_index\"\n temp_df = temp_df.astype(\"float\")\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-产业指标-房地产-美国S&P/CS20座大城市房价指数年率报告\ndef macro_usa_spcs20() -> pd.DataFrame:\n \"\"\"\n 美国S&P/CS20座大城市房价指数年率报告, 数据区间从20010201-至今\n https://datacenter.jin10.com/reportType/dc_usa_spcs20\n https://cdn.jin10.com/dc/reports/dc_usa_spcs20_all.js?v=1578747873\n :return: 美国S&P/CS20座大城市房价指数年率报告-今值(%)\n :rtype: pandas.Series\n \"\"\"\n t = time.time()\n res = requests.get(\n f\"https://cdn.jin10.com/dc/reports/dc_usa_spcs20_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}\"\n )\n json_data = json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国S&P/CS20座大城市房价指数年率报告\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值(%)\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"max_date\": \"\",\n \"category\": \"ec\",\n \"attr_id\": \"52\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 
Safari/537.36\",\n \"x-app-id\": \"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", keep=\"last\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df.name = \"usa_spcs20\"\n temp_df = temp_df.astype(float)\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-产业指标-房地产-美国成屋签约销售指数月率报告\ndef macro_usa_pending_home_sales() -> pd.DataFrame:\n \"\"\"\n 美国成屋签约销售指数月率报告, 数据区间从20010301-至今\n https://datacenter.jin10.com/reportType/dc_usa_pending_home_sales\n https://cdn.jin10.com/dc/reports/dc_usa_pending_home_sales_all.js?v=1578747959\n :return: 美国成屋签约销售指数月率报告\n :rtype: pandas.Series\n \"\"\"\n t = time.time()\n res = requests.get(\n f\"https://cdn.jin10.com/dc/reports/dc_usa_pending_home_sales_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}\"\n )\n json_data = json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国成屋签约销售指数月率报告\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值(%)\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"max_date\": \"\",\n \"category\": \"ec\",\n \"attr_id\": \"34\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"x-app-id\": \"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", keep=\"last\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df.name = \"usa_pending_home_sales\"\n temp_df = temp_df.astype(float)\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-领先指标-美国谘商会消费者信心指数报告\ndef macro_usa_cb_consumer_confidence() -> pd.DataFrame:\n \"\"\"\n 金十数据中心-经济指标-美国-领先指标-美国谘商会消费者信心指数报告, 数据区间从 19700101-至今\n https://cdn.jin10.com/dc/reports/dc_usa_cb_consumer_confidence_all.js?v=1578576859\n :return: 美国谘商会消费者信心指数报告-今值\n :rtype: pandas.Series\n \"\"\"\n t = time.time()\n res = requests.get(\n 
f\"https://cdn.jin10.com/dc/reports/dc_usa_cb_consumer_confidence_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}\")\n json_data = json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国谘商会消费者信心指数报告\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值\"]\n\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"max_date\": \"\",\n \"category\": \"ec\",\n \"attr_id\": \"5\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"x-app-id\": \"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", keep=\"last\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df.name = \"cb_consumer_confidence\"\n temp_df = temp_df.astype(float)\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-领先指标-美国NFIB小型企业信心指数报告\ndef macro_usa_nfib_small_business() -> pd.DataFrame:\n \"\"\"\n 美国NFIB小型企业信心指数报告, 数据区间从19750201-至今\n https://cdn.jin10.com/dc/reports/dc_usa_nfib_small_business_all.js?v=1578576631\n :return: 美国NFIB小型企业信心指数报告-今值\n :rtype: pandas.Series\n \"\"\"\n t = time.time()\n res = requests.get(\n f\"https://cdn.jin10.com/dc/reports/dc_usa_nfib_small_business_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}\")\n json_data = json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国NFIB小型企业信心指数报告\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"max_date\": \"\",\n \"category\": \"ec\",\n \"attr_id\": \"63\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) 
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"x-app-id\": \"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", keep=\"last\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df.name = \"nfib_small_business\"\n temp_df = temp_df.astype(float)\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-领先指标-美国密歇根大学消费者信心指数初值报告\ndef macro_usa_michigan_consumer_sentiment() -> pd.DataFrame:\n \"\"\"\n 美国密歇根大学消费者信心指数初值报告, 数据区间从19700301-至今\n https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\n :return: 美国密歇根大学消费者信心指数初值报告-今值\n :rtype: pandas.Series\n \"\"\"\n t = time.time()\n res = requests.get(\n f\"https://cdn.jin10.com/dc/reports/dc_usa_michigan_consumer_sentiment_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}\")\n json_data = json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国密歇根大学消费者信心指数初值报告\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"max_date\": \"\",\n \"category\": \"ec\",\n \"attr_id\": \"50\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"x-app-id\": \"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", keep=\"last\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df.name = \"michigan_consumer_sentiment\"\n temp_df = temp_df.astype(float)\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-其他-美国EIA原油库存报告\ndef macro_usa_eia_crude_rate() -> pd.DataFrame:\n \"\"\"\n 美国EIA原油库存报告, 数据区间从19950801-至今\n https://datacenter.jin10.com/reportType/dc_eia_crude_oil\n :return: pandas.Series\n 1982-09-01 -262.6\n 1982-10-01 -8\n 1982-11-01 -41.3\n 1982-12-01 -87.6\n 1983-01-01 51.3\n ...\n 2019-10-02 310\n 2019-10-09 292.7\n 2019-10-16 0\n 2019-10-17 928.1\n 2019-10-23 0\n \"\"\"\n t = 
time.time()\n res = requests.get(\n JS_USA_EIA_CRUDE_URL.format(\n str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)\n )\n )\n json_data = json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国EIA原油库存(万桶)\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值(万桶)\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"max_date\": \"\",\n \"category\": \"ec\",\n \"attr_id\": \"10\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"x-app-id\": \"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df.name = \"eia_crude_rate\"\n temp_df = temp_df.astype(\"float\")\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-其他-美国初请失业金人数报告\ndef macro_usa_initial_jobless() -> pd.DataFrame:\n \"\"\"\n 美国初请失业金人数报告, 数据区间从19700101-至今\n :return: pandas.Series\n 1970-01-01 22.1087\n 1970-02-01 24.9318\n 1970-03-01 25.85\n 1970-04-01 26.8682\n 1970-05-01 33.1591\n ...\n 2019-09-26 21.5\n 2019-10-03 22\n 2019-10-10 21\n 2019-10-17 21.4\n 2019-10-24 0\n \"\"\"\n t = time.time()\n res = requests.get(\n JS_USA_INITIAL_JOBLESS_URL.format(\n str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)\n )\n )\n json_data = json.loads(res.text[res.text.find(\"{\"): res.text.rfind(\"}\") + 1])\n date_list = [item[\"date\"] for item in json_data[\"list\"]]\n value_list = [item[\"datas\"][\"美国初请失业金人数(万人)\"] for item in json_data[\"list\"]]\n value_df = pd.DataFrame(value_list)\n value_df.columns = json_data[\"kinds\"]\n value_df.index = pd.to_datetime(date_list)\n temp_df = value_df[\"今值(万人)\"]\n url = \"https://datacenter-api.jin10.com/reports/list_v2\"\n params = {\n \"max_date\": \"\",\n \"category\": \"ec\",\n \"attr_id\": \"44\",\n \"_\": str(int(round(t * 1000))),\n }\n headers = {\n \"accept\": \"*/*\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"no-cache\",\n \"origin\": \"https://datacenter.jin10.com\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; 
x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"x-app-id\": \"rU6QIu7JHe2gOUeR\",\n \"x-csrf-token\": \"\",\n \"x-version\": \"1.0.0\",\n }\n r = requests.get(url, params=params, headers=headers)\n temp_se = pd.DataFrame(r.json()[\"data\"][\"values\"]).iloc[:, :2]\n temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])\n temp_se = temp_se.iloc[:, 1]\n temp_df = temp_df.append(temp_se)\n temp_df.dropna(inplace=True)\n temp_df.sort_index(inplace=True)\n temp_df = temp_df.reset_index()\n temp_df.drop_duplicates(subset=\"index\", inplace=True)\n temp_df.set_index(\"index\", inplace=True)\n temp_df = temp_df.squeeze()\n temp_df.index.name = None\n temp_df.name = \"initial_jobless\"\n temp_df = temp_df.astype(\"float\")\n return temp_df\n\n\n# 金十数据中心-经济指标-美国-其他-美国原油产量报告\ndef macro_usa_crude_inner() -> pd.DataFrame:\n \"\"\"\n 美国原油产量报告, 数据区间从19830107-至今\n https://datacenter.jin10.com/reportType/dc_eia_crude_oil_produce\n :return: pandas.Series\n 1983-01-07 863.40\n 1983-01-14 863.40\n 1983-01-21 863.40\n 1983-01-28 863.40\n 1983-02-04 866.00\n ...\n 2019-09-20 1250.00\n 2019-09-27 1240.00\n 2019-10-04 1260.00\n 2019-10-11 1260.00\n 2019-10-18 1260.00\n \"\"\"\n t = time.time()\n params = {\n \"_\": t\n }\n res = requests.get(\"https://cdn.jin10.com/data_center/reports/usa_oil.json\", params=params)\n temp_df = pd.DataFrame(res.json().get(\"values\")).T\n big_df = pd.DataFrame()\n big_df[\"美国国内原油总量_产量\"] = temp_df[\"美国国内原油总量\"].apply(lambda x: x[0])\n big_df[\"美国国内原油总量_变化\"] = temp_df[\"美国国内原油总量\"].apply(lambda x: x[1])\n big_df[\"美国本土48州原油产量_产量\"] = temp_df[\"美国本土48州原油产量\"].apply(lambda x: x[0])\n big_df[\"美国本土48州原油产量_变化\"] = temp_df[\"美国本土48州原油产量\"].apply(lambda x: x[1])\n big_df[\"美国阿拉斯加州原油产量_产量\"] = temp_df[\"美国阿拉斯加州原油产量\"].apply(lambda x: x[0])\n big_df[\"美国阿拉斯加州原油产量_变化\"] = temp_df[\"美国阿拉斯加州原油产量\"].apply(lambda x: x[1])\n big_df = big_df.astype(\"float\")\n return big_df\n\n\n# 金十数据中心-美国商品期货交易委员会CFTC外汇类非商业持仓报告\ndef macro_usa_cftc_nc_holding() -> pd.DataFrame:\n \"\"\"\n 美国商品期货交易委员会CFTC外汇类非商业持仓报告, 数据区间从 19830107-至今\n https://datacenter.jin10.com/reportType/dc_cftc_nc_report\n https://cdn.jin10.com/data_center/reports/cftc_4.json?_=1591535493741\n :return: pandas.DataFrame\n \"\"\"\n t = time.time()\n params = {\n \"_\": str(int(round(t * 1000)))\n }\n r = requests.get(\"https://cdn.jin10.com/data_center/reports/cftc_4.json\", params=params)\n json_data = r.json()\n temp_df = pd.DataFrame(json_data[\"values\"]).T\n temp_df.fillna(\"[0, 0, 0]\", inplace=True)\n big_df = pd.DataFrame()\n for item in temp_df.columns:\n for i in range(3):\n inner_temp_df = temp_df.loc[:, item].apply(lambda x: eval(str(x))[i])\n inner_temp_df.name = inner_temp_df.name + \"-\" + json_data[\"keys\"][i][\"name\"]\n big_df = pd.concat([big_df, inner_temp_df], axis=1)\n big_df.sort_index(inplace=True)\n return big_df\n\n\n# 金十数据中心-美国商品期货交易委员会CFTC商品类非商业持仓报告\ndef macro_usa_cftc_c_holding() -> pd.DataFrame:\n \"\"\"\n 美国商品期货交易委员会CFTC商品类非商业持仓报告, 数据区间从 19830107-至今\n https://datacenter.jin10.com/reportType/dc_cftc_c_report\n https://cdn.jin10.com/data_center/reports/cftc_2.json?_=1591536282271\n :return: pandas.DataFrame\n \"\"\"\n t = time.time()\n params = {\n \"_\": str(int(round(t * 1000)))\n }\n r = requests.get(\"https://cdn.jin10.com/data_center/reports/cftc_2.json\", params=params)\n json_data = r.json()\n temp_df = pd.DataFrame(json_data[\"values\"]).T\n temp_df.fillna(\"[0, 0, 0]\", inplace=True)\n big_df = pd.DataFrame()\n for item in temp_df.columns:\n for i in range(3):\n 
inner_temp_df = temp_df.loc[:, item].apply(lambda x: eval(str(x))[i])\n inner_temp_df.name = inner_temp_df.name + \"-\" + json_data[\"keys\"][i][\"name\"]\n big_df = pd.concat([big_df, inner_temp_df], axis=1)\n big_df.sort_index(inplace=True)\n return big_df\n\n\n# 金十数据中心-美国商品期货交易委员会CFTC外汇类商业持仓报告\ndef macro_usa_cftc_merchant_currency_holding() -> pd.DataFrame:\n \"\"\"\n 美国商品期货交易委员会CFTC外汇类商业持仓报告, 数据区间从 19860115-至今\n https://datacenter.jin10.com/reportType/dc_cftc_merchant_currency\n https://cdn.jin10.com/data_center/reports/cftc_3.json?_=1591536389283\n :return: pandas.DataFrame\n \"\"\"\n t = time.time()\n params = {\n \"_\": str(int(round(t * 1000)))\n }\n r = requests.get(\"https://cdn.jin10.com/data_center/reports/cftc_3.json\", params=params)\n json_data = r.json()\n temp_df = pd.DataFrame(json_data[\"values\"]).T\n temp_df.fillna(\"[0, 0, 0]\", inplace=True)\n big_df = pd.DataFrame()\n for item in temp_df.columns:\n for i in range(3):\n inner_temp_df = temp_df.loc[:, item].apply(lambda x: eval(str(x))[i])\n inner_temp_df.name = inner_temp_df.name + \"-\" + json_data[\"keys\"][i][\"name\"]\n big_df = pd.concat([big_df, inner_temp_df], axis=1)\n big_df.sort_index(inplace=True)\n return big_df\n\n\n# 金十数据中心-美国商品期货交易委员会CFTC商品类商业持仓报告\ndef macro_usa_cftc_merchant_goods_holding() -> pd.DataFrame:\n \"\"\"\n 美国商品期货交易委员会CFTC商品类商业持仓报告, 数据区间从 19860115-至今\n https://datacenter.jin10.com/reportType/dc_cftc_merchant_goods\n https://cdn.jin10.com/data_center/reports/cftc_1.json?_=1591536502095\n :return: 美国商品期货交易委员会CFTC商品类商业持仓报告\n :rtype: pandas.DataFrame\n \"\"\"\n t = time.time()\n params = {\n \"_\": str(int(round(t * 1000)))\n }\n r = requests.get(\"https://cdn.jin10.com/data_center/reports/cftc_1.json\", params=params)\n json_data = r.json()\n temp_df = pd.DataFrame(json_data[\"values\"]).T\n temp_df.fillna(\"[0, 0, 0]\", inplace=True)\n big_df = pd.DataFrame()\n for item in temp_df.columns:\n for i in range(3):\n inner_temp_df = temp_df.loc[:, item].apply(lambda x: eval(str(x))[i])\n inner_temp_df.name = inner_temp_df.name + \"-\" + json_data[\"keys\"][i][\"name\"]\n big_df = pd.concat([big_df, inner_temp_df], axis=1)\n big_df.sort_index(inplace=True)\n return big_df\n\n\n# 金十数据中心-CME-贵金属\n# def macro_usa_cme_merchant_goods_holding():\n# \"\"\"\n# CME-贵金属, 数据区间从 19860115-至今\n# https://datacenter.jin10.com/org\n# https://cdn.jin10.com/data_center/reports/cme_3.json?_=1591536643385\n# :return: pandas.DataFrame\n# \"\"\"\n# t = time.time()\n# params = {\n# \"_\": str(int(round(t * 1000)))\n# }\n# r = requests.get(\"https://cdn.jin10.com/data_center/reports/cme_3.json\", params=params)\n# json_data = r.json()\n# json_data[\"values\"].keys()\n# temp_df = pd.DataFrame(json_data[\"values\"]).T\n# temp_df.fillna(\"[0, 0, 0]\", inplace=True)\n# big_df = pd.DataFrame()\n# for item in temp_df.columns:\n# for i in range(3):\n# inner_temp_df = temp_df.loc[:, item].apply(lambda x: eval(str(x))[i])\n# inner_temp_df.name = inner_temp_df.name + \"-\" + json_data[\"keys\"][i][\"name\"]\n# big_df = pd.concat([big_df, inner_temp_df], axis=1)\n# big_df.sort_index(inplace=True)\n# return big_df\n\n\nif __name__ == \"__main__\":\n # 东方财富-经济指标-美国-未决房屋销售月率\n macro_usa_phs_df = macro_usa_phs()\n print(macro_usa_phs_df)\n\n # 金十数据中心-经济指标-美国-经济状况-美国GDP\n macro_usa_gdp_monthly_df = macro_usa_gdp_monthly()\n print(macro_usa_gdp_monthly_df)\n # 金十数据中心-经济指标-美国-物价水平-美国CPI月率报告\n macro_usa_cpi_monthly_df = macro_usa_cpi_monthly()\n print(macro_usa_cpi_monthly_df)\n # 金十数据中心-经济指标-美国-物价水平-美国核心CPI月率报告\n 
macro_usa_core_cpi_monthly_df = macro_usa_core_cpi_monthly()\n print(macro_usa_core_cpi_monthly_df)\n # 金十数据中心-经济指标-美国-物价水平-美国个人支出月率报告\n macro_usa_personal_spending_df = macro_usa_personal_spending()\n print(macro_usa_personal_spending_df)\n # 金十数据中心-经济指标-美国-物价水平-美国零售销售月率报告\n macro_usa_retail_sales_df = macro_usa_retail_sales()\n print(macro_usa_retail_sales_df)\n # 金十数据中心-经济指标-美国-物价水平-美国进口物价指数报告\n macro_usa_import_price_df = macro_usa_import_price()\n print(macro_usa_import_price_df)\n # 金十数据中心-经济指标-美国-物价水平-美国出口价格指数报告\n macro_usa_export_price_df = macro_usa_export_price()\n print(macro_usa_export_price_df)\n # 金十数据中心-经济指标-美国-劳动力市场-LMCI\n macro_usa_lmci_df = macro_usa_lmci()\n print(macro_usa_lmci_df)\n # 金十数据中心-经济指标-美国-劳动力市场-失业率-美国失业率报告\n macro_usa_unemployment_rate_df = macro_usa_unemployment_rate()\n print(macro_usa_unemployment_rate_df)\n # 金十数据中心-经济指标-美国-劳动力市场-失业率-美国挑战者企业裁员人数报告\n macro_usa_job_cuts_df = macro_usa_job_cuts()\n print(macro_usa_job_cuts_df)\n # 金十数据中心-经济指标-美国-劳动力市场-就业人口-美国非农就业人数报告\n macro_usa_non_farm_df = macro_usa_non_farm()\n print(macro_usa_non_farm_df)\n # 金十数据中心-经济指标-美国-劳动力市场-就业人口-美国ADP就业人数报告\n macro_usa_adp_employment_df = macro_usa_adp_employment()\n print(macro_usa_adp_employment_df)\n # 金十数据中心-经济指标-美国-劳动力市场-消费者收入与支出-美国核心PCE物价指数年率报告\n macro_usa_core_pce_price_df = macro_usa_core_pce_price()\n print(macro_usa_core_pce_price_df)\n # 金十数据中心-经济指标-美国-劳动力市场-消费者收入与支出-美国实际个人消费支出季率初值报告\n macro_usa_real_consumer_spending_df = macro_usa_real_consumer_spending()\n print(macro_usa_real_consumer_spending_df)\n # 金十数据中心-经济指标-美国-贸易状况-美国贸易帐报告\n macro_usa_trade_balance_df = macro_usa_trade_balance()\n print(macro_usa_trade_balance_df)\n # 金十数据中心-经济指标-美国-贸易状况-美国经常帐报告\n macro_usa_current_account_df = macro_usa_current_account()\n print(macro_usa_current_account_df)\n # 金十数据中心-经济指标-美国-产业指标-制造业-贝克休斯钻井报告\n macro_usa_rig_count_df = macro_usa_rig_count()\n print(macro_usa_rig_count_df)\n # 金十数据中心-经济指标-美国-产业指标-制造业-美国个人支出月率报告\n # 金十数据中心-经济指标-美国-产业指标-制造业-美国生产者物价指数(PPI)报告\n macro_usa_ppi_df = macro_usa_ppi()\n print(macro_usa_ppi_df)\n # 金十数据中心-经济指标-美国-产业指标-制造业-美国核心生产者物价指数(PPI)报告\n macro_usa_core_ppi_df = macro_usa_core_ppi()\n print(macro_usa_core_ppi_df)\n # 金十数据中心-经济指标-美国-产业指标-制造业-美国API原油库存报告\n macro_usa_api_crude_stock_df = macro_usa_api_crude_stock()\n print(macro_usa_api_crude_stock_df)\n # 金十数据中心-经济指标-美国-产业指标-制造业-美国Markit制造业PMI初值报告\n macro_usa_pmi_df = macro_usa_pmi()\n print(macro_usa_pmi_df)\n # 金十数据中心-经济指标-美国-产业指标-制造业-美国ISM制造业PMI报告\n macro_usa_ism_pmi_df = macro_usa_ism_pmi()\n print(macro_usa_ism_pmi_df)\n # 金十数据中心-经济指标-美国-产业指标-房地产-美国NAHB房产市场指数报告\n macro_usa_nahb_house_market_index_df = macro_usa_nahb_house_market_index()\n print(macro_usa_nahb_house_market_index_df)\n # 金十数据中心-经济指标-美国-产业指标-房地产-美国新屋开工总数年化报告\n macro_usa_house_starts_df = macro_usa_house_starts()\n print(macro_usa_house_starts_df)\n # 金十数据中心-经济指标-美国-产业指标-房地产-美国新屋销售总数年化报告\n macro_usa_new_home_sales_df = macro_usa_new_home_sales()\n print(macro_usa_new_home_sales_df)\n # 金十数据中心-经济指标-美国-产业指标-房地产-美国营建许可总数报告\n macro_usa_building_permits_df = macro_usa_building_permits()\n print(macro_usa_building_permits_df)\n # 金十数据中心-经济指标-美国-产业指标-房地产-美国成屋销售总数年化报告\n macro_usa_exist_home_sales_df = macro_usa_exist_home_sales()\n print(macro_usa_exist_home_sales_df)\n # 金十数据中心-经济指标-美国-产业指标-房地产-美国FHFA房价指数月率报告\n macro_usa_house_price_index_df = macro_usa_house_price_index()\n print(macro_usa_house_price_index_df)\n # 金十数据中心-经济指标-美国-产业指标-房地产-美国S&P/CS20座大城市房价指数年率报告\n macro_usa_spcs20_df = macro_usa_spcs20()\n print(macro_usa_spcs20_df)\n # 
金十数据中心-经济指标-美国-产业指标-房地产-美国成屋签约销售指数月率报告\n macro_usa_pending_home_sales_df = macro_usa_pending_home_sales()\n print(macro_usa_pending_home_sales_df)\n # 金十数据中心-经济指标-美国-领先指标-美国谘商会消费者信心指数报告\n macro_usa_cb_consumer_confidence_df = macro_usa_cb_consumer_confidence()\n print(macro_usa_cb_consumer_confidence_df)\n # 金十数据中心-经济指标-美国-领先指标-美国NFIB小型企业信心指数报告\n macro_usa_nfib_small_business_df = macro_usa_nfib_small_business()\n print(macro_usa_nfib_small_business_df)\n # 金十数据中心-经济指标-美国-领先指标-美国密歇根大学消费者信心指数初值报告\n macro_usa_michigan_consumer_sentiment_df = macro_usa_michigan_consumer_sentiment()\n print(macro_usa_michigan_consumer_sentiment_df)\n\n # 金十数据中心-经济指标-美国-其他-美国EIA原油库存报告\n macro_usa_eia_crude_rate_df = macro_usa_eia_crude_rate()\n print(macro_usa_eia_crude_rate_df)\n # 金十数据中心-经济指标-美国-其他-美国初请失业金人数报告\n macro_usa_initial_jobless_df = macro_usa_initial_jobless()\n print(macro_usa_initial_jobless_df)\n # import matplotlib.pyplot as plt\n # macro_usa_initial_jobless_df.plot()\n # plt.title(macro_usa_initial_jobless_df.name)\n # plt.xlabel(\"year\")\n # plt.ylabel(\"10 thousand\")\n # plt.show()\n # 金十数据中心-经济指标-美国-其他-美国原油产量报告\n macro_usa_crude_inner_df = macro_usa_crude_inner()\n print(macro_usa_crude_inner_df)\n\n # 金十数据中心-美国商品期货交易委员会CFTC外汇类非商业持仓报告\n macro_usa_cftc_nc_holding_df = macro_usa_cftc_nc_holding()\n print(macro_usa_cftc_nc_holding_df)\n # 金十数据中心-美国商品期货交易委员会CFTC商品类非商业持仓报告\n macro_usa_cftc_c_holding_df = macro_usa_cftc_c_holding()\n print(macro_usa_cftc_c_holding_df)\n # 金十数据中心-美国商品期货交易委员会CFTC外汇类商业持仓报告\n macro_usa_cftc_merchant_currency_holding_df = macro_usa_cftc_merchant_currency_holding()\n print(macro_usa_cftc_merchant_currency_holding_df)\n # 金十数据中心-美国商品期货交易委员会CFTC商品类商业持仓报告\n macro_usa_cftc_merchant_goods_holding_df = macro_usa_cftc_merchant_goods_holding()\n print(macro_usa_cftc_merchant_goods_holding_df)\n" ]
[ [ "pandas.concat", "pandas.to_datetime", "pandas.to_numeric", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
VikrantKamble/lyaf_optdepth
[ "899048ab73e546513b3713b3818abfab3ce3ab05" ]
[ "lyaf_optdepth/utils.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import chi2\nfrom matplotlib.patches import Ellipse\nfrom scipy.special import erf\nfrom astroML.plotting.mcmc import convert_to_stdev as cts\nfrom scipy.optimize import curve_fit\n\n\ndef draw_ellipse(pos, cov, nsig=None, ax=None, label=\"temp\",\n set_label=True, **kwrgs):\n \"\"\"\n Plots an ellipse enclosing *volume* based on the specified covariance\n matrix (*cov*) and location (*pos*). Additional keyword arguments are\n passed on to the ellipse patch artist.\n\n Parameters\n ----------\n cov : The 2x2 covariance matrix to base the ellipse on\n pos : The location of the center of the ellipse. Expects a 2-element\n sequence of [x0, y0].\n volume : The volume inside the ellipse; defaults to 0.5\n ax : The axis that the ellipse will be plotted on. Defaults to the\n current axis.\n \"\"\"\n def eigsorted(cov):\n vals, vecs = np.linalg.eigh(cov)\n order = vals.argsort()[::-1]\n return vals[order], vecs[:, order]\n\n if ax is None:\n fig, ax = plt.subplots(1)\n if nsig is None:\n nsig = [1, 2]\n\n vals, vecs = eigsorted(cov)\n theta = np.degrees(np.arctan2(*vecs[:, 0][::-1]))\n\n # Width and height are \"full\" widths, not radius\n for ele in nsig:\n scale = np.sqrt(chi2.ppf(erf(ele / np.sqrt(2)), df=2))\n width, height = 2 * scale * np.sqrt(vals)\n ellip = Ellipse(xy=pos, width=width, height=height, angle=theta, **kwrgs)\n\n ax.add_artist(ellip)\n ellip.set_clip_box(ax.bbox)\n\n ellip.set_label(label)\n\n # Limit the axes correctly to show the plots\n ax.set_xlim(pos[0] - 2 * width, pos[0] + 2 * width)\n ax.set_ylim(pos[1] - 2 * height, pos[1] + 2 * height)\n\n if set_label:\n ax.legend(handles=[plt.plot([], ls=\"-\")[0]],\n labels=[ellip.get_label()])\n return ax\n\n\ndef fit_exp_model(y_pts, x_pts=None, ax=None, **kwargs):\n \"\"\"\n Fit a simple exponential model to obtain the correlation\n length\n \"\"\"\n if x_pts is None:\n x_pts = np.arange(len(y_pts))\n\n def model_exp(x, cl):\n return np.exp(- x / cl)\n\n corr_length, _ = curve_fit(model_exp, x_pts, y_pts)\n\n if ax is not None:\n ax.plot(x_pts, y_pts, '-ok')\n ax.plot(x_pts, model_exp(x_pts, corr_length), **kwargs)\n ax.axhline(0, ls='--')\n ax.set_xlabel(r\"$|i-j|$\")\n ax.set_ylabel(r\"$\\xi(|i-j|)$\")\n plt.show()\n\n return corr_length\n\n\ndef xfm(pos, shift, tilt, direction='down'):\n \"\"\"\n Perform conversion from one system to another\n\n dir : direction to do the transform\n up : orig to mod\n down(default) : mod to orig\n \"\"\"\n pos = np.atleast_2d(pos)\n if direction == 'up':\n return np.dot((pos - shift), tilt.T)\n elif direction == 'down':\n return np.dot(np.linalg.inv(tilt), pos.T).T + shift\n\n\ndef get_corrfunc(x, x_err, y=None, y_err=None, n_frac=2, viz=True,\n model=False, est=False, sfx=\"corr\", scale_factor=None):\n \"\"\"\n Auto correlation of a signal and mean estimation\n\n Parameters\n x : samples of the first variable\n x_err : error vector for the first variable\n n_frac : number of pixels over which to estimate correlation wrt\n the size of the samples\n viz : plot the correlation function\n model : model the correlation function\n est : Get estimates on the best-fit values using the covariance\n matrices estmated\n\n Returns:\n loc_simple, sig_simple : the mean and uncertainty using simple\n weighted estimation\n loc, sig : the mean and uncertainty on the location parameter\n after incorporating correlations\n \"\"\"\n if y is None:\n y = x.copy()\n y_err = x_err.copy()\n\n # Direct weighted average along each dimension\n 
loc_simple = np.sum(x / x_err ** 2) / np.sum(1 / x_err ** 2)\n sig_simple = 1. / np.sqrt(np.sum(1 / x_err ** 2))\n\n npp = len(x)\n x_data, y_data = x - x.mean(), y - y.mean()\n\n coef = [np.sum(x_data[:npp - j] * y_data[j:]) / \\\n np.sqrt(np.sum(x_data[:npp - j] ** 2) * np.sum(y_data[j:] ** 2)) for j in\n range(npp // n_frac)]\n np.savetxt(sfx + \".dat\", coef)\n\n if model:\n if scale_factor is None:\n scale_factor = fit_exp_model(coef[:5])\n\n if est:\n # Obtain band-diagonal correlation matrix\n from scipy.linalg import toeplitz\n\n rr = np.arange(npp)\n val = np.exp(- rr / scale_factor)\n Xi = toeplitz(val)\n\n cov = np.diag(x_err).dot(Xi.dot(np.diag(y_err)))\n\n if np.any(np.linalg.eigh(cov)[0] < 0):\n raise TypeError(\"The covariance matrix \\\n is not positive definite\")\n\n # Minimization using iminuit\n from iminuit import Minuit\n ico = np.linalg.inv(cov)\n\n def chi2(mu):\n dyi = x - mu\n return dyi.T.dot(ico.dot(dyi))\n\n mm = Minuit(chi2,\n mu=0.2, error_mu=x.std(), fix_mu=False,\n errordef=1., print_level=-1)\n mm.migrad()\n loc, sig = mm.values[\"mu\"], mm.errors[\"mu\"]\n\n if viz:\n fig, ax = plt.subplots(figsize=(9, 3))\n ax.errorbar(rr, x, x_err, fmt='.-', lw=0.6, color='k')\n\n # estimate including correlations\n ax.fill_between(rr, loc + sig,\n loc - sig, color='r', alpha=0.4)\n\n # simple weighted average estimate\n ax.fill_between(rr, loc_simple + sig_simple,\n loc_simple - sig_simple, color='b', alpha=0.4)\n plt.show()\n return loc_simple, sig_simple, loc, sig\n\n\ndef marg_estimates(xx, yy, logL, levels=None, par_labels=[\"x_0\", \"x_1\"],\n ax=None, plot_marg=True, label='temp', **kwargs):\n \"\"\"\n Marginalized statistics that follows from a jont likelihood.\n Simple mean and standard deviation estimates.\n\n Parameters:\n x0 : vector in x-direction of the grid\n x1 : vector in y-direction of the grid\n joint_pdf : posterior log probability on the 2D grid\n\n Returns:\n [loc_x0, sig_x0, loc_x1, sig_x1, sig_x0_x1]\n \"\"\"\n if levels is None:\n levels = [0.683, 0.955]\n\n pdf = np.exp(logL)\n\n # normalize the pdf too --> though not necessary for\n # getting mean and the standard deviation\n x0_pdf = np.sum(pdf, axis=1)\n x0_pdf /= x0_pdf.sum() * (xx[1] - xx[0])\n\n x1_pdf = np.sum(pdf, axis=0)\n x1_pdf /= x1_pdf.sum() * (yy[1] - yy[0])\n\n mu_x0 = (xx * x0_pdf).sum() / x0_pdf.sum()\n mu_x1 = (yy * x1_pdf).sum() / x1_pdf.sum()\n\n sig_x0 = np.sqrt((xx ** 2 * x0_pdf).sum() / x0_pdf.sum() - mu_x0 ** 2)\n sig_x1 = np.sqrt((yy ** 2 * x1_pdf).sum() / x1_pdf.sum() - mu_x1 ** 2)\n\n sig_x0_x1 = ((xx - mu_x0) * (yy[:, None] - mu_x1) * pdf.T).sum() / pdf.sum()\n\n print(\"param1 = %.4f pm %.4f\" % (mu_x0, sig_x0))\n print(\"param2 = %.4f pm %.4f\\n\" % (mu_x1, sig_x1))\n\n if ax is None:\n ax = plt.axes()\n CS = ax.contour(xx, yy, cts(logL.T),\n levels=levels, label=label, **kwargs)\n CS.collections[0].set_label(label)\n\n ax.set_xlim(mu_x0 - 4 * sig_x0, mu_x0 + 4 * sig_x0)\n ax.set_ylim(mu_x1 - 4 * sig_x1, mu_x1 + 4 * sig_x1)\n\n if plot_marg:\n xx_extent = 8 * sig_x0\n yy_extent = 8 * sig_x1\n\n pdf_xx_ext = x0_pdf.max() - x0_pdf.min()\n pdf_yy_ext = x1_pdf.max() - x1_pdf.min()\n\n ax.plot(xx, 0.2 * (x0_pdf - x0_pdf.min()) * yy_extent / pdf_xx_ext\n + ax.get_ylim()[0])\n ax.axvline(mu_x0 - sig_x0)\n ax.axvline(mu_x0 + sig_x0)\n ax.plot(0.2 * (x1_pdf - x1_pdf.min()) * xx_extent / pdf_yy_ext +\n ax.get_xlim()[0], yy)\n ax.axhline(mu_x1 - sig_x1)\n ax.axhline(mu_x1 + sig_x1)\n\n plt.title(r\"$%s = %.3f \\pm %.3f, %s = %.3f \\pm %.3f$\" %\n (par_labels[0], mu_x0, 
sig_x0, par_labels[1], mu_x1, sig_x1))\n plt.legend()\n plt.tight_layout()\n plt.show()\n\n return mu_x0, sig_x0, mu_x1, sig_x1, sig_x0_x1\n\n\ndef get_intrinsic_covariance(locs, covs):\n \"\"\" Computes the intrinsic covariance matrix in 2D\n\n Parameters:\n locs: the central values as a vector\n covs: the corresponding covariances\n\n Returns:\n loc: the best-fit location\n cov: the covariance of the above best-fit\n sys_cov: the estimated intrinsic covariance\n one_sig, two_sig: points along the one-sigma and two-sigma\n confidence intervals\n \"\"\"\n from numpy.linalg import inv, det\n from iminuit import Minuit\n from scipy.stats import chi2 as chisq\n\n def neg_ln_like(x0, x1, lnsig1, rho, lnsig2):\n sig_x0, sig_x1 = np.exp(lnsig1), np.exp(lnsig2)\n sig_x0_x1 = rho * sig_x0 * sig_x1\n\n cov_int = np.array([[sig_x0 ** 2, sig_x0_x1],\n [sig_x0_x1, sig_x1 ** 2]])\n\n mod_cov = covs + cov_int\n temp = [np.dot([x0, x1] - locs[i], np.dot(inv(mod_cov[i]), [x0, x1] - locs[i])) +\\\n np.log(det(mod_cov[i])) for i in range(len(locs))]\n foo = np.sum(temp, 0)\n return foo\n\n # optimize using Minuit\n mm = Minuit(neg_ln_like,\n x0=0, error_x0=0.1,\n x1=0, error_x1=0.01,\n lnsig1=-2, error_lnsig1=0.1, limit_lnsig1=(-6, 2),\n lnsig2=-2, error_lnsig2=0.2, limit_lnsig2=(-6, 2),\n rho=0, error_rho=0.1, limit_rho=(-1, 1),\n errordef=1, print_level=-1)\n\n __ = mm.migrad()\n __ = mm.minos()\n\n # Relevant data for plotting contours from minuit\n mm.set_errordef(chisq.ppf(0.683, 2))\n _, _, one_sig = mm.mncontour('x0', 'x1', numpoints=200)\n mm.set_errordef(chisq.ppf(0.955, 2))\n _, _, two_sig = mm.mncontour('x0', 'x1', numpoints=200)\n\n mm.set_errordef(1.)\n loc = np.array([mm.values['x0'], mm.values['x1']])\n corr = mm.np_matrix(correlation=True)[:2, :2]\n\n # Error bars - from MINOS\n mm_errors_x0 = (mm.merrors[('x0', 1.0)] - mm.merrors[('x0', -1.0)]) / 2.\n mm_errors_x1 = (mm.merrors[('x1', 1.0)] - mm.merrors[('x1', -1.0)]) / 2.\n\n error_mat = np.diag([mm_errors_x0, mm_errors_x1])\n cov = error_mat.T.dot(corr.dot(error_mat))\n\n # intrinsic covariance matrix\n mm_v = mm.np_values()\n\n sys_x0_x1 = mm_v[3] * np.exp(mm_v[2]) * np.exp(mm_v[4])\n sys_cov = np.array([[np.exp(mm_v[2]) ** 2, sys_x0_x1],\n [sys_x0_x1, np.exp(mm_v[4]) ** 2]])\n\n return loc, cov, sys_cov, one_sig, two_sig\n\n\n# =============================================================================\ndef combine_likelihoods(folder_name, indices, xx, yy, ax=None,\n individual=False, **kwargs):\n \"\"\" Computes the combined likelihood surface for a given set of\n restframe wavelength indices\n\n Parameters:\n folder_name: folder containing the individual likelihoods\n indices: which restframe wavelengths to use\n xx: the x vector of the grid\n yy: the y vector of the grid\n ax: axes object to draw the figure on\n individual: whether to draw contours for each wavelength\n\n Returns:\n ax: handle on the axes object for future manipulation\n joint_estimates: Gaussian estimates of the likelihood surface\n \"\"\"\n if ax is None:\n ax = plt.axes()\n\n joint_lnlike = np.zeros((len(xx), len(yy)))\n\n for index in indices:\n ll = np.loadtxt(folder_name + 'lnlike_%s.dat' % str(index))\n if individual:\n marg_estimates(xx, yy, ll.T, ax=ax,\n plot_marg=False, levels=[0.683], label=str(index),\n colors='k')\n joint_lnlike += ll\n joint_lnlike -= joint_lnlike.max()\n\n np.savetxt(folder_name + 'joint_pdf.dat', joint_lnlike)\n\n # Remember: Here we are modeling the combined likelihood in x0-x1 as\n # a 2D Gaussian - these are the actual values 
used for the statistical\n # estiamtes before applying the stretch corrections due to LSS\n joint_estimates = marg_estimates(xx, yy, joint_lnlike.T, ax=ax,\n label='joint', **kwargs)\n\n return ax, joint_estimates\n\n\ndef get_stretch_factor(folder_name, indices, **kwargs):\n \"\"\" Computes the stretch factor using the (16-50-84) percentile estimates\n of x0 - x1 for each restframe wavelength assuming orthogonality\n\n Parameters:\n folder_name: folder containing the individual likelihoods and their\n percentile estimates\n indices: which restframe wavelengths to use\n\n Returns:\n stretch_x0, stretch_x1: the stretch factors along x0 and x1\n \"\"\"\n x0_cen = np.zeros(len(indices))\n x0_err = np.zeros(len(indices))\n x1_cen = np.zeros(len(indices))\n x1_err = np.zeros(len(indices))\n\n for i, index in enumerate(indices):\n _, est_x0, est_x1 = np.loadtxt(folder_name + \\\n 'xx_percentile_est_%d.dat' % index)\n\n x0_cen[i] = est_x0[0]\n x0_err[i] = (est_x0[1] + est_x0[2]) / 2.\n\n x1_cen[i] = est_x1[0]\n x1_err[i] = (est_x1[1] + est_x1[2]) / 2.\n\n res0 = get_corrfunc(x0_cen, x0_err, model=True, est=True,\n sfx=folder_name + \"x0_corr\")\n res1 = get_corrfunc(x1_cen, x1_err, model=True, est=True,\n sfx=folder_name + \"x1_corr\")\n stretch_x0 = res0[3] / res0[1]\n stretch_x1 = res1[3] / res1[1]\n\n return stretch_x0, stretch_x1\n\n\ndef plot_percentile_estimates(folder_name, indices, basis='mod'):\n \"\"\" Plots the (16-50-84) percentile estimates\n of x0 - x1 for each restframe wavelength assuming orthogonality\n\n Parameters:\n folder_name: folder containing the individual likelihoods and their\n percentile estimates\n indices: which restframe wavelengths to use\n\n Returns:\n axs: handle to the axes object\n \"\"\"\n if basis == 'mod':\n n_params = 3\n prefix = folder_name + 'xx_percentile_est_'\n else:\n n_params = 2\n prefix = folder_name + 'tg_percentile_est_'\n\n e_cube = np.empty((len(indices), n_params, 3))\n for i, index in enumerate(indices):\n e_cube[i] = np.loadtxt(prefix + '%d.dat' % index)\n\n fig, axs = plt.subplots(nrows=n_params, sharex=True)\n\n for i in range(n_params):\n axs[i].errorbar(indices, e_cube[:, i, 0], yerr=[e_cube[:, i, 2], e_cube[:, i, 1]],\n fmt='.-', color='k', lw=0.6)\n plt.tight_layout()\n plt.show()\n\n return axs\n\n\ndef get_statistical_estimate(folder_name, indices, xx, yy,\n with_corr=False, **kwargs):\n \"\"\"\n Obtain statistical estimates on the optical depth parameters for a\n given folder with and without correlations\n \"\"\"\n _, estimates_no_stretch = combine_likelihoods(folder_name,\n indices, xx, yy, **kwargs)\n mu_x0, sig_x0, mu_x1, sig_x1, sig_x0_x1 = estimates_no_stretch\n\n loc_vec = np.array([mu_x0, mu_x1])\n cov_mat = np.array([[sig_x0 ** 2, sig_x0_x1],\n [sig_x0_x1, sig_x1 ** 2]])\n\n if with_corr:\n # stretch factor\n st_x0, st_x1 = get_stretch_factor(folder_name, indices, **kwargs)\n\n # expand the confidence intervals\n st_mat = np.diag([st_x0, st_x1])\n cov_new = st_mat.T.dot(cov_mat.dot(st_mat))\n\n return loc_vec, cov_mat, cov_new\n\n return loc_vec, cov_mat\n\n# =============================================================================\n# THE ROUTINES BELOW SHOULD ONLY BE RUN FROM THE CORRECT FOLDER\n\n\ndef save_statistical_estimates(template, indices, xx, yy, n_bins=7):\n # These are obtained from the full 2D likelihood surfaces\n loc_mod = np.zeros((n_bins, 2))\n cov_mod_no_stretch = np.zeros((n_bins, 2, 2))\n cov_mod_with_stretch = np.zeros((n_bins, 2, 2))\n\n print(\"======= COMPUTING STATISTICAL ESTIMATES 
=======\")\n for i in range(n_bins):\n # statistical likelihoods without LSS correlations\n folder_name = template.format(i+1)\n\n results = get_statistical_estimate(folder_name, indices, xx, yy)\n loc_vec, cov_mat, cov_new = results\n\n # Assign to variables to store later\n loc_mod[i] = loc_vec\n cov_mod_no_stretch[i] = cov_mat\n cov_mod_with_stretch[i] = cov_new\n\n # Write all relevant information in the modified basis to files\n print(\"======= WRITING TO FILES =======\")\n np.savetxt(\"central_values_bins.dat\", loc_mod)\n np.savetxt(\"cov_mat_bins_no_stretch.dat\",\n cov_mod_no_stretch.reshape(n_bins, 4))\n np.savetxt(\"cov_mat_bins_with_stretch.dat\",\n cov_mod_with_stretch.reshape(n_bins, 4))\n\n\ndef save_systematic_estimates(template, bins_to_use=None):\n if bins_to_use is None:\n bins_to_use = np.arange(7)\n\n n_bins = len(bins_to_use)\n\n # load in the statistical estimates\n locations = template + \"central_values_bins.dat\"\n covariances = template + \"cov_mat_bins_with_stretch.dat\"\n\n # get the systematic matrix modeled as intrinsic covariance\n res = get_intrinsic_covariance(locations, covariances)\n loc, cov, sys_cov, one_sig, two_sig = res\n\n print(\"======= WRITING TO FILES =======\")\n np.savetxt(\"best_fit_loc_{}.dat\".format(n_bins), loc)\n np.savetxt(\"best_fit_cov_mat_{}.dat\".format(n_bins), cov)\n np.savetxt(\"best_fit_sys_mat_{}.dat\".format(n_bins), sys_cov)\n np.savetxt(\"contour_one_sigma_{}.dat\".format(n_bins), one_sig)\n np.savetxt(\"contour_two_sigma_{}.dat\".format(n_bins), two_sig)\n" ]
[ [ "numpy.diag", "matplotlib.pyplot.legend", "numpy.dot", "numpy.sqrt", "numpy.arctan2", "matplotlib.pyplot.axes", "matplotlib.pyplot.plot", "numpy.exp", "scipy.optimize.curve_fit", "matplotlib.pyplot.tight_layout", "numpy.arange", "numpy.linalg.det", "numpy.zeros", "scipy.stats.chi2.ppf", "matplotlib.pyplot.title", "numpy.linalg.inv", "numpy.atleast_2d", "numpy.linalg.eigh", "numpy.savetxt", "numpy.array", "matplotlib.pyplot.show", "numpy.sum", "matplotlib.patches.Ellipse", "scipy.linalg.toeplitz", "matplotlib.pyplot.subplots", "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.14", "0.15", "0.12", "0.10" ], "tensorflow": [] } ]