Schema:
  repo_name          string (lengths 6-130)
  hexsha             list
  file_path          list
  code               list
  apis               list
  possible_versions  list
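Each row below materializes, per repo, as parallel lists: entry i of file_path pairs with entry i of code and entry i of apis. A minimal sketch of one row as a Python dict (the dict layout and the loop are my assumptions about how the dump is consumed; the values are copied from the first row below, with the code string and most of the version list elided):

# Hypothetical materialized record; values taken from the filipecn/maldives row.
record = {
    "repo_name": "filipecn/maldives",
    "hexsha": ["f20f17d817fc3dcad7f9674753744716d1d4c821"],
    "file_path": ["maldives/bot/exchanges/binance_exchange.py"],
    "code": ["..."],  # full source of the file above, elided here
    "apis": [["pandas.to_datetime", "pandas.to_numeric", "pandas.DataFrame"]],
    "possible_versions": [{"pandas": ["0.23", "0.21", "2.0"]}],  # truncated
}

# The lists are index-aligned: file i goes with code string i and api list i.
for path, src, used in zip(record["file_path"], record["code"], record["apis"]):
    print(path, used)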
filipecn/maldives
[ "f20f17d817fc3dcad7f9674753744716d1d4c821" ]
[ "maldives/bot/exchanges/binance_exchange.py" ]
[ "from datetime import datetime, timezone\nimport logging\nimport os\nimport pandas as pd\nfrom ..models.order import Order\nfrom ..models.price import Price\nfrom ..models.dealer import Dealer\nfrom pandas import DataFrame\n\nfrom binance.client import Client\nfrom binance.enums import *\nfrom binance.websockets import BinanceSocketManager\n\n\ndef unix_time_millis(dt):\n epoch = datetime.utcfromtimestamp(0)\n return (dt - epoch).total_seconds() * 1000\n\n\nclass BinanceExchange(Dealer):\n currency: str\n\n def __init__(self, key: str, secret: str):\n self.apiKey = key\n self.apiSecret = secret\n self.socketManager = None\n self.socket = None\n self.currency = ''\n logging.info(\"connecting to binance api ...\")\n self.client = Client(self.apiKey, self.apiSecret)\n logging.info(\"... done\")\n self.name = self.__class__.__name__\n\n def set_currency(self, symbol: str):\n self.currency = symbol\n\n def compute_symbol_pair(self, asset):\n if type(asset) is str:\n return asset + self.currency\n return [a + self.currency for a in asset]\n\n def get_symbol(self, asset):\n return self.compute_symbol_pair(asset)\n\n def _single_symbol_ticker(self, symbol: str):\n response = self.client.get_symbol_ticker(symbol=self.compute_symbol_pair(symbol))\n return Price(currency=self.currency, symbol=symbol, current=response['price'], date=datetime.now())\n\n def symbol_ticker(self, symbols):\n if type(symbols) is str:\n return self._single_symbol_ticker(symbols)\n return [self._single_symbol_ticker(symbol) for symbol in symbols]\n\n def _single_asset_balance(self, asset):\n response = self.client.get_asset_balance(asset=asset)\n return response['free']\n\n def get_asset_balance(self, assets):\n if type(assets) is str:\n return self._single_asset_balance(assets)\n return [self._single_asset_balance(asset) for asset in assets]\n\n def _single_symbol_ticker_candle(self, symbol, interval_enum):\n return self.client.get_klines(symbol=symbol, interval=interval_enum)\n\n def symbol_ticker_candle(self, symbols, interval: str):\n pairs = self.compute_symbol_pair(symbols)\n interval_enum = interval # check if interval is valid\n if type(symbols) is str:\n return self._single_symbol_ticker_candle(pairs, interval_enum)\n return [self._single_symbol_ticker_candle(pair, interval_enum) for pair in pairs]\n\n def _single_historical_symbol_ticker_candle(self, symbol, start: datetime, end=None, interval: str = '1d'):\n if end:\n end = int(unix_time_millis(end))\n pair = self.compute_symbol_pair(symbol)\n output = []\n for candle in self.client.get_historical_klines_generator(pair, interval,\n int(unix_time_millis(start)), end):\n \"\"\"\n [\n [\n 1499040000000, 0# Open time\n \"0.01634790\", 1# Open\n \"0.80000000\", 2# High\n \"0.01575800\", 3# Low\n \"0.01577100\", 4# Close\n \"148976.11427815\", 5# Volume\n 1499644799999, # Close time\n \"2434.19055334\", # Quote asset volume\n 308, # Number of trades\n \"1756.87402397\", # Taker buy base asset volume\n \"28.46694368\", # Taker buy quote asset volume\n \"17928899.62484339\" # Can be ignored\n ]\n ]\n \"\"\"\n date = datetime.fromtimestamp(int(candle[0]) / 1000)\n open_price = float(candle[1])\n high = float(candle[2])\n low = float(candle[3])\n close = float(candle[4])\n volume = float(candle[5])\n self.historical_data.loc[len(self.historical_data) + 1] = [pair, date, close, open_price, low, high, volume,\n interval]\n output.append(\n Price(pair=pair, currency=self.currency, symbol=symbol,\n current=open_price, low=low, high=high, volume=volume, close=close, 
open=open_price,\n date=date, interval=interval)\n )\n return output\n\n def historical_symbol_ticker_candle(self, symbols, start: datetime, end=None, interval: str = ''):\n self.historical_data = DataFrame(\n columns=['symbol', 'date', 'close', 'open', 'low', 'high', 'volume', 'interval'])\n self.historical_data['date'] = pd.to_datetime(self.historical_data['date'])\n self.historical_data['close'] = pd.to_numeric(self.historical_data['close'])\n self.historical_data['open'] = pd.to_numeric(self.historical_data['open'])\n self.historical_data['low'] = pd.to_numeric(self.historical_data['low'])\n self.historical_data['high'] = pd.to_numeric(self.historical_data['high'])\n self.historical_data['volume'] = pd.to_numeric(self.historical_data['volume'])\n if type(symbols) is str:\n return self._single_historical_symbol_ticker_candle(symbols, start, end, interval)\n candles = {}\n for symbol in symbols:\n candles[symbol] = self._single_historical_symbol_ticker_candle(symbol, start, end, interval)\n return candles\n\n def order(self, order: Order):\n pass\n" ]
[ [ "pandas.to_datetime", "pandas.to_numeric", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
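The apis column for this row names three pandas entry points, all used to build the typed, empty historical_data frame. A minimal stand-alone sketch of that pattern (placeholder candle values, not real market data):

import pandas as pd
from pandas import DataFrame

# Create the empty frame, then coerce each column to its intended dtype,
# as historical_symbol_ticker_candle does before its candle loop.
historical_data = DataFrame(
    columns=['symbol', 'date', 'close', 'open', 'low', 'high', 'volume', 'interval'])
historical_data['date'] = pd.to_datetime(historical_data['date'])
for col in ['close', 'open', 'low', 'high', 'volume']:
    historical_data[col] = pd.to_numeric(historical_data[col])

# Rows are then appended by positional label, one per candle.
historical_data.loc[len(historical_data) + 1] = [
    'BTCUSDT', pd.Timestamp('2021-01-01'), 101.0, 100.0, 99.0, 102.0, 1234.5, '1d']
print(historical_data.dtypes)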
philiptzou/DeepSpeech
[ "eb2de2a5259ab000912eb6ad658651cf743212a8" ]
[ "data_utils/data.py" ]
[ "\"\"\"Contains data generator for orgnaizing various audio data preprocessing\npipeline and offering data reader interface of PaddlePaddle requirements.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport random\nimport tarfile\nimport multiprocessing\nimport numpy as np\nimport paddle.v2 as paddle\nfrom threading import local\nfrom data_utils.utility import read_manifest\nfrom data_utils.utility import xmap_readers_mp\nfrom data_utils.augmentor.augmentation import AugmentationPipeline\nfrom data_utils.featurizer.speech_featurizer import SpeechFeaturizer\nfrom data_utils.speech import SpeechSegment\nfrom data_utils.normalizer import FeatureNormalizer\n\n\nclass DataGenerator(object):\n \"\"\"\n DataGenerator provides basic audio data preprocessing pipeline, and offers\n data reader interfaces of PaddlePaddle requirements.\n\n :param vocab_filepath: Vocabulary filepath for indexing tokenized\n transcripts.\n :type vocab_filepath: basestring\n :param mean_std_filepath: File containing the pre-computed mean and stddev.\n :type mean_std_filepath: None|basestring\n :param augmentation_config: Augmentation configuration in json string.\n Details see AugmentationPipeline.__doc__.\n :type augmentation_config: str\n :param max_duration: Audio with duration (in seconds) greater than\n this will be discarded.\n :type max_duration: float\n :param min_duration: Audio with duration (in seconds) smaller than\n this will be discarded.\n :type min_duration: float\n :param stride_ms: Striding size (in milliseconds) for generating frames.\n :type stride_ms: float\n :param window_ms: Window size (in milliseconds) for generating frames.\n :type window_ms: float\n :param max_freq: Used when specgram_type is 'linear', only FFT bins\n corresponding to frequencies between [0, max_freq] are\n returned.\n :types max_freq: None|float\n :param specgram_type: Specgram feature type. 
Options: 'linear'.\n :type specgram_type: str\n :param use_dB_normalization: Whether to normalize the audio to -20 dB\n before extracting the features.\n :type use_dB_normalization: bool\n :param num_threads: Number of CPU threads for processing data.\n :type num_threads: int\n :param random_seed: Random seed.\n :type random_seed: int\n :param keep_transcription_text: If set to True, transcription text will\n be passed forward directly without\n converting to index sequence.\n :type keep_transcription_text: bool\n \"\"\"\n\n def __init__(self,\n vocab_filepath,\n mean_std_filepath,\n augmentation_config='{}',\n max_duration=float('inf'),\n min_duration=0.0,\n stride_ms=10.0,\n window_ms=20.0,\n max_freq=None,\n specgram_type='linear',\n use_dB_normalization=True,\n num_threads=multiprocessing.cpu_count() // 2,\n random_seed=0,\n keep_transcription_text=False):\n self._max_duration = max_duration\n self._min_duration = min_duration\n self._normalizer = FeatureNormalizer(mean_std_filepath)\n self._augmentation_pipeline = AugmentationPipeline(\n augmentation_config=augmentation_config, random_seed=random_seed)\n self._speech_featurizer = SpeechFeaturizer(\n vocab_filepath=vocab_filepath,\n specgram_type=specgram_type,\n stride_ms=stride_ms,\n window_ms=window_ms,\n max_freq=max_freq,\n use_dB_normalization=use_dB_normalization)\n self._num_threads = num_threads\n self._rng = random.Random(random_seed)\n self._keep_transcription_text = keep_transcription_text\n self._epoch = 0\n # for caching tar files info\n self._local_data = local()\n self._local_data.tar2info = {}\n self._local_data.tar2object = {}\n\n def process_utterance(self, audio_file, transcript):\n \"\"\"Load, augment, featurize and normalize for speech data.\n\n :param audio_file: Filepath or file object of audio file.\n :type audio_file: basestring | file\n :param transcript: Transcription text.\n :type transcript: basestring\n :return: Tuple of audio feature tensor and data of transcription part,\n where transcription part could be token ids or text.\n :rtype: tuple of (2darray, list)\n \"\"\"\n if isinstance(audio_file, basestring) and audio_file.startswith('tar:'):\n speech_segment = SpeechSegment.from_file(\n self._subfile_from_tar(audio_file), transcript)\n else:\n speech_segment = SpeechSegment.from_file(audio_file, transcript)\n self._augmentation_pipeline.transform_audio(speech_segment)\n specgram, transcript_part = self._speech_featurizer.featurize(\n speech_segment, self._keep_transcription_text)\n specgram = self._normalizer.apply(specgram)\n return specgram, transcript_part\n\n def batch_reader_creator(self,\n manifest_path,\n batch_size,\n min_batch_size=1,\n padding_to=-1,\n flatten=False,\n sortagrad=False,\n shuffle_method=\"batch_shuffle\"):\n \"\"\"\n Batch data reader creator for audio data. Return a callable generator\n function to produce batches of data.\n\n Audio features within one batch will be padded with zeros to have the\n same shape, or a user-defined shape.\n\n :param manifest_path: Filepath of manifest for audio files.\n :type manifest_path: basestring\n :param batch_size: Number of instances in a batch.\n :type batch_size: int\n :param min_batch_size: Any batch with batch size smaller than this will\n be discarded. 
(To be deprecated in the future.)\n :type min_batch_size: int\n :param padding_to: If set -1, the maximun shape in the batch\n will be used as the target shape for padding.\n Otherwise, `padding_to` will be the target shape.\n :type padding_to: int\n :param flatten: If set True, audio features will be flatten to 1darray.\n :type flatten: bool\n :param sortagrad: If set True, sort the instances by audio duration\n in the first epoch for speed up training.\n :type sortagrad: bool\n :param shuffle_method: Shuffle method. Options:\n '' or None: no shuffle.\n 'instance_shuffle': instance-wise shuffle.\n 'batch_shuffle': similarly-sized instances are\n put into batches, and then\n batch-wise shuffle the batches.\n For more details, please see\n ``_batch_shuffle.__doc__``.\n 'batch_shuffle_clipped': 'batch_shuffle' with\n head shift and tail\n clipping. For more\n details, please see\n ``_batch_shuffle``.\n If sortagrad is True, shuffle is disabled\n for the first epoch.\n :type shuffle_method: None|str\n :return: Batch reader function, producing batches of data when called.\n :rtype: callable\n \"\"\"\n\n def batch_reader():\n # read manifest\n manifest = read_manifest(\n manifest_path=manifest_path,\n max_duration=self._max_duration,\n min_duration=self._min_duration)\n # sort (by duration) or batch-wise shuffle the manifest\n if self._epoch == 0 and sortagrad:\n manifest.sort(key=lambda x: x[\"duration\"])\n else:\n if shuffle_method == \"batch_shuffle\":\n manifest = self._batch_shuffle(\n manifest, batch_size, clipped=False)\n elif shuffle_method == \"batch_shuffle_clipped\":\n manifest = self._batch_shuffle(\n manifest, batch_size, clipped=True)\n elif shuffle_method == \"instance_shuffle\":\n self._rng.shuffle(manifest)\n elif shuffle_method == None:\n pass\n else:\n raise ValueError(\"Unknown shuffle method %s.\" %\n shuffle_method)\n # prepare batches\n instance_reader, cleanup = self._instance_reader_creator(manifest)\n batch = []\n try:\n for instance in instance_reader():\n batch.append(instance)\n if len(batch) == batch_size:\n yield self._padding_batch(batch, padding_to, flatten)\n batch = []\n if len(batch) >= min_batch_size:\n yield self._padding_batch(batch, padding_to, flatten)\n finally:\n cleanup()\n self._epoch += 1\n\n return batch_reader\n\n @property\n def feeding(self):\n \"\"\"Returns data reader's feeding dict.\n\n :return: Data feeding dict.\n :rtype: dict\n \"\"\"\n feeding_dict = {\"audio_spectrogram\": 0, \"transcript_text\": 1}\n return feeding_dict\n\n @property\n def vocab_size(self):\n \"\"\"Return the vocabulary size.\n\n :return: Vocabulary size.\n :rtype: int\n \"\"\"\n return self._speech_featurizer.vocab_size\n\n @property\n def vocab_list(self):\n \"\"\"Return the vocabulary in list.\n\n :return: Vocabulary in list.\n :rtype: list\n \"\"\"\n return self._speech_featurizer.vocab_list\n\n def _parse_tar(self, file):\n \"\"\"Parse a tar file to get a tarfile object\n and a map containing tarinfoes\n \"\"\"\n result = {}\n f = tarfile.open(file)\n for tarinfo in f.getmembers():\n result[tarinfo.name] = tarinfo\n return f, result\n\n def _subfile_from_tar(self, file):\n \"\"\"Get subfile object from tar.\n\n It will return a subfile object from tar file\n and cached tar file info for next reading request.\n \"\"\"\n tarpath, filename = file.split(':', 1)[1].split('#', 1)\n if 'tar2info' not in self._local_data.__dict__:\n self._local_data.tar2info = {}\n if 'tar2object' not in self._local_data.__dict__:\n self._local_data.tar2object = {}\n if tarpath not in 
self._local_data.tar2info:\n object, infoes = self._parse_tar(tarpath)\n self._local_data.tar2info[tarpath] = infoes\n self._local_data.tar2object[tarpath] = object\n return self._local_data.tar2object[tarpath].extractfile(\n self._local_data.tar2info[tarpath][filename])\n\n def _instance_reader_creator(self, manifest):\n \"\"\"\n Instance reader creator. Create a callable function to produce\n instances of data.\n\n Instance: a tuple of ndarray of audio spectrogram and a list of\n token indices for transcript.\n \"\"\"\n\n def reader():\n for instance in manifest:\n yield instance\n\n reader, cleanup_callback = xmap_readers_mp(\n lambda instance: self.process_utterance(instance[\"audio_filepath\"], instance[\"text\"]),\n reader, self._num_threads, 4096)\n\n return reader, cleanup_callback\n\n def _padding_batch(self, batch, padding_to=-1, flatten=False):\n \"\"\"\n Padding audio features with zeros to make them have the same shape (or\n a user-defined shape) within one bach.\n\n If ``padding_to`` is -1, the maximun shape in the batch will be used\n as the target shape for padding. Otherwise, `padding_to` will be the\n target shape (only refers to the second axis).\n\n If `flatten` is True, features will be flatten to 1darray.\n \"\"\"\n new_batch = []\n # get target shape\n max_length = max([audio.shape[1] for audio, text in batch])\n if padding_to != -1:\n if padding_to < max_length:\n raise ValueError(\"If padding_to is not -1, it should be larger \"\n \"than any instance's shape in the batch\")\n max_length = padding_to\n # padding\n for audio, text in batch:\n padded_audio = np.zeros([audio.shape[0], max_length])\n padded_audio[:, :audio.shape[1]] = audio\n if flatten:\n padded_audio = padded_audio.flatten()\n padded_instance = [padded_audio, text, audio.shape[1]]\n new_batch.append(padded_instance)\n return new_batch\n\n def _batch_shuffle(self, manifest, batch_size, clipped=False):\n \"\"\"Put similarly-sized instances into minibatches for better efficiency\n and make a batch-wise shuffle.\n\n 1. Sort the audio clips by duration.\n 2. Generate a random number `k`, k in [0, batch_size).\n 3. Randomly shift `k` instances in order to create different batches\n for different epochs. Create minibatches.\n 4. Shuffle the minibatches.\n\n :param manifest: Manifest contents. List of dict.\n :type manifest: list\n :param batch_size: Batch size. This size is also used for generate\n a random number for batch shuffle.\n :type batch_size: int\n :param clipped: Whether to clip the heading (small shift) and trailing\n (incomplete batch) instances.\n :type clipped: bool\n :return: Batch shuffled mainifest.\n :rtype: list\n \"\"\"\n manifest.sort(key=lambda x: x[\"duration\"])\n shift_len = self._rng.randint(0, batch_size - 1)\n batch_manifest = zip(*[iter(manifest[shift_len:])] * batch_size)\n self._rng.shuffle(batch_manifest)\n batch_manifest = [item for batch in batch_manifest for item in batch]\n if not clipped:\n res_len = len(manifest) - shift_len - len(batch_manifest)\n batch_manifest.extend(manifest[-res_len:])\n batch_manifest.extend(manifest[0:shift_len])\n return batch_manifest\n" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
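The only third-party call recorded in apis for this row is numpy.zeros, used in DataGenerator._padding_batch. A stand-alone re-expression of that padding logic (the function name pad_batch is mine):

import numpy as np

def pad_batch(batch, padding_to=-1):
    # Pad every (freq, time) spectrogram in the batch with zeros along the
    # time axis, up to the longest instance (or a user-given target shape).
    max_length = max(audio.shape[1] for audio, _ in batch)
    if padding_to != -1:
        if padding_to < max_length:
            raise ValueError("padding_to must cover the longest instance")
        max_length = padding_to
    padded = []
    for audio, text in batch:
        out = np.zeros([audio.shape[0], max_length])
        out[:, :audio.shape[1]] = audio
        padded.append((out, text, audio.shape[1]))  # keep the true length
    return padded

batch = [(np.ones((161, 50)), [1, 2]), (np.ones((161, 80)), [3])]
print([a.shape for a, _, _ in pad_batch(batch)])  # [(161, 80), (161, 80)]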
tarasowski/customer-satisfaction-machine-learning
[ "850d8d2b3ae7eb9e27e82114c0dcfc79347a4a37" ]
[ "src/train_predict/predict.py" ]
[ "import argparse\nimport json\nimport os\nimport pickle\nimport sys\nimport sagemaker_containers\nimport pandas as pd\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.utils.data\n\nfrom model import LSTMClassifier\n\nfrom utils import review_to_words, convert_and_pad\n\ndef model_fn(model_dir):\n \"\"\"Load the PyTorch model from the `model_dir` directory.\"\"\"\n print(\"Loading model.\")\n\n # First, load the parameters used to create the model.\n model_info = {}\n model_info_path = os.path.join(model_dir, 'model_info.pth')\n with open(model_info_path, 'rb') as f:\n model_info = torch.load(f)\n\n print(\"model_info: {}\".format(model_info))\n\n # Determine the device and construct the model.\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model = LSTMClassifier(model_info['embedding_dim'], model_info['hidden_dim'], model_info['vocab_size'])\n\n # Load the stored model parameters.\n model_path = os.path.join(model_dir, 'model.pth')\n with open(model_path, 'rb') as f:\n model.load_state_dict(torch.load(f))\n\n # Load the saved word_dict.\n word_dict_path = os.path.join(model_dir, 'word_dict.pkl')\n with open(word_dict_path, 'rb') as f:\n model.word_dict = pickle.load(f)\n\n model.to(device).eval()\n\n print(\"Done loading model.\")\n return model\n\ndef input_fn(serialized_input_data, content_type):\n print('Deserializing the input data.')\n if content_type == 'text/plain':\n data = serialized_input_data.decode('utf-8')\n return data\n raise Exception('Requested unsupported ContentType in content_type: ' + content_type)\n\ndef output_fn(prediction_output, accept):\n print('Serializing the generated output.')\n return str(prediction_output)\n\ndef predict_fn(input_data, model):\n print('Inferring sentiment of input data.')\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n \n if model.word_dict is None:\n raise Exception('Model has not been loaded properly, no word_dict.')\n \n # Process input_data so that it is ready to be sent to the model:\n # data_X - A sequence of length 500 which represents the converted review\n # data_len - The length of the review\n sentence = review_to_words(input_data)\n data_X, data_len = convert_and_pad(model.word_dict, sentence)\n \n\n # Using data_X and data_len we construct an appropriate input tensor. Remember\n # that our model expects input data of the form 'len, review[500]'.\n data_pack = np.hstack((data_len, data_X))\n data_pack = data_pack.reshape(1, -1)\n \n data = torch.from_numpy(data_pack)\n data = data.to(device)\n\n # Make sure to put the model into evaluation mode\n model.eval()\n \n # Apply the model to the input data; `result` is a numpy array containing a\n # single integer, either 1 or 0. The .cpu() call is required before .numpy()\n # when inference runs on a GPU.\n\n result = np.round(model.forward(data).detach().cpu().numpy()).astype(int)\n\n return result\n" ]
[ [ "numpy.hstack", "torch.from_numpy", "torch.cuda.is_available", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
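The interesting step in predict_fn is how the review is packed for the LSTM: the padded 500-token id sequence is prefixed with its true length, matching the 'len, review[500]' layout the model expects. A minimal sketch with placeholder values:

import numpy as np
import torch

data_len = 42                            # placeholder: true review length
data_X = np.zeros(500, dtype=np.int64)   # placeholder: padded token ids
data_pack = np.hstack((data_len, data_X)).reshape(1, -1)
data = torch.from_numpy(data_pack)
print(data.shape)  # torch.Size([1, 501]) -- length first, then the tokens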
acse-jat20/ci_acse1
[ "9d90efc84f0fb8c4a2030c3b4da7b4c6582d7f8d" ]
[ "simple_functions/constants.py" ]
[ "\"\"\"Docstring to fulfil linting.\"\"\"\n\nfrom functools import lru_cache\nfrom numpy import sqrt\nfrom simple_functions.functions1 import factorial\n\n\n__all__ = ['pi']\n\n\ndef pi(terms=1):\n \"\"\" Calculating pi \"\"\"\n return 1./(2.*sqrt(2.)/9801.*rsum(terms))\n\n\n@lru_cache(maxsize=None) # Note: -> @cache in python >= 3.9\ndef rsum(n):\n \"\"\" Calculating pi using the Ramanujan sum \"\"\"\n t = factorial(4*n)*(1103+26390*n)/(factorial(n)**4*396**(4*n))\n return t + rsum(n-1) if n else t\n" ]
[ [ "numpy.sqrt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
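A runnable stand-alone version of this file's Ramanujan series may be useful, since the row depends on the repo-local simple_functions.factorial; this sketch swaps in math.factorial and math.sqrt (an assumption that plain floats suffice for a demo):

from functools import lru_cache
from math import factorial, sqrt

@lru_cache(maxsize=None)  # memoize the partial sums, as the original does
def rsum(n):
    # n-th term of Ramanujan's 1/pi series, summed recursively down to 0
    t = factorial(4*n)*(1103 + 26390*n)/(factorial(n)**4*396**(4*n))
    return t + rsum(n-1) if n else t

def pi(terms=1):
    return 1./(2.*sqrt(2.)/9801.*rsum(terms))

print(pi())  # 3.141592653589793 -- each extra term adds ~8 correct digits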
A-suozhang/SpatioTemporalSegmentation-ScanNet
[ "479de1793afe6ec20bed6c0f68498b0c49e7315c", "479de1793afe6ec20bed6c0f68498b0c49e7315c" ]
[ "lib/transforms.py", "models_dev/pct_voxel_utils.py" ]
[ "# Copyright (c) Chris Choy ([email protected]). All Rights Reserved.\n#\n# Please cite \"4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural\n# Networks\", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part of\n# the code.\nimport random\n\nimport logging\nimport numpy as np\nimport torch\nimport scipy\nimport scipy.ndimage\nimport scipy.interpolate\nimport torch\nimport MinkowskiEngine as ME\n\n\nclass RandomDropout(object):\n\n def __init__(self, dropout_ratio=0.2, dropout_application_ratio=0.5):\n \"\"\"\n upright_axis: axis index among x,y,z, i.e. 2 for z\n \"\"\"\n self.dropout_ratio = dropout_ratio\n self.dropout_application_ratio = dropout_application_ratio\n\n def __call__(self, coords, feats, labels):\n if random.random() < self.dropout_ratio:\n N = len(coords)\n inds = np.random.choice(N, int(N * (1 - self.dropout_ratio)), replace=False)\n return coords[inds], feats[inds], labels[inds]\n return coords, feats, labels\n\n\nclass RandomHorizontalFlip(object):\n\n def __init__(self, upright_axis, is_temporal):\n \"\"\"\n upright_axis: axis index among x,y,z, i.e. 2 for z\n \"\"\"\n self.is_temporal = is_temporal\n self.D = 4 if is_temporal else 3\n self.upright_axis = {'x': 0, 'y': 1, 'z': 2}[upright_axis.lower()]\n # Use the rest of axes for flipping.\n self.horz_axes = set(range(self.D)) - set([self.upright_axis])\n\n def __call__(self, coords, feats, labels):\n coords = coords.numpy()\n if random.random() < 0.95:\n for curr_ax in self.horz_axes:\n if random.random() < 0.5:\n coord_max = np.max(coords[:, curr_ax])\n coords[:, curr_ax] = coord_max - coords[:, curr_ax]\n return coords, feats, labels\n\n\nclass ChromaticTranslation(object):\n \"\"\"Add random color to the image, input must be an array in [0,255] or a PIL image\"\"\"\n\n def __init__(self, trans_range_ratio=1e-1):\n \"\"\"\n trans_range_ratio: ratio of translation i.e. 
255 * 2 * ratio * rand(-0.5, 0.5)\n \"\"\"\n self.trans_range_ratio = trans_range_ratio\n\n def __call__(self, coords, feats, labels):\n if random.random() < 0.95:\n tr = (np.random.rand(1, 3) - 0.5) * 255 * 2 * self.trans_range_ratio\n feats[:, :3] = np.clip(tr + feats[:, :3], 0, 255)\n return coords, feats, labels\n\n\nclass ChromaticAutoContrast(object):\n\n def __init__(self, randomize_blend_factor=True, blend_factor=0.5):\n self.randomize_blend_factor = randomize_blend_factor\n self.blend_factor = blend_factor\n\n def __call__(self, coords, feats, labels):\n if random.random() < 0.2:\n # mean = np.mean(feats, 0, keepdims=True)\n # std = np.std(feats, 0, keepdims=True)\n # lo = mean - std\n # hi = mean + std\n lo = np.min(feats, 0, keepdims=True)\n hi = np.max(feats, 0, keepdims=True)\n\n scale = 255 / (hi - lo)\n\n contrast_feats = (feats - lo) * scale\n\n blend_factor = random.random() if self.randomize_blend_factor else self.blend_factor\n feats = (1 - blend_factor) * feats + blend_factor * contrast_feats\n return coords, feats, labels\n\n\nclass ChromaticJitter(object):\n\n def __init__(self, std=0.01):\n self.std = std\n\n def __call__(self, coords, feats, labels):\n if random.random() < 0.95:\n noise = np.random.randn(feats.shape[0], 3)\n noise *= self.std * 255\n feats[:, :3] = np.clip(noise + feats[:, :3], 0, 255)\n return coords, feats, labels\n\n\nclass HueSaturationTranslation(object):\n\n @staticmethod\n def rgb_to_hsv(rgb):\n # Translated from source of colorsys.rgb_to_hsv\n # r,g,b should be a numpy arrays with values between 0 and 255\n # rgb_to_hsv returns an array of floats between 0.0 and 1.0.\n rgb = rgb.astype('float')\n hsv = np.zeros_like(rgb)\n # in case an RGBA array was passed, just copy the A channel\n hsv[..., 3:] = rgb[..., 3:]\n r, g, b = rgb[..., 0], rgb[..., 1], rgb[..., 2]\n maxc = np.max(rgb[..., :3], axis=-1)\n minc = np.min(rgb[..., :3], axis=-1)\n hsv[..., 2] = maxc\n mask = maxc != minc\n hsv[mask, 1] = (maxc - minc)[mask] / maxc[mask]\n rc = np.zeros_like(r)\n gc = np.zeros_like(g)\n bc = np.zeros_like(b)\n rc[mask] = (maxc - r)[mask] / (maxc - minc)[mask]\n gc[mask] = (maxc - g)[mask] / (maxc - minc)[mask]\n bc[mask] = (maxc - b)[mask] / (maxc - minc)[mask]\n hsv[..., 0] = np.select([r == maxc, g == maxc], [bc - gc, 2.0 + rc - bc], default=4.0 + gc - rc)\n hsv[..., 0] = (hsv[..., 0] / 6.0) % 1.0\n return hsv\n\n @staticmethod\n def hsv_to_rgb(hsv):\n # Translated from source of colorsys.hsv_to_rgb\n # h,s should be a numpy arrays with values between 0.0 and 1.0\n # v should be a numpy array with values between 0.0 and 255.0\n # hsv_to_rgb returns an array of uints between 0 and 255.\n rgb = np.empty_like(hsv)\n rgb[..., 3:] = hsv[..., 3:]\n h, s, v = hsv[..., 0], hsv[..., 1], hsv[..., 2]\n i = (h * 6.0).astype('uint8')\n f = (h * 6.0) - i\n p = v * (1.0 - s)\n q = v * (1.0 - s * f)\n t = v * (1.0 - s * (1.0 - f))\n i = i % 6\n conditions = [s == 0.0, i == 1, i == 2, i == 3, i == 4, i == 5]\n rgb[..., 0] = np.select(conditions, [v, q, p, p, t, v], default=v)\n rgb[..., 1] = np.select(conditions, [v, v, v, q, p, p], default=t)\n rgb[..., 2] = np.select(conditions, [v, p, t, v, v, q], default=p)\n return rgb.astype('uint8')\n\n def __init__(self, hue_max, saturation_max):\n self.hue_max = hue_max\n self.saturation_max = saturation_max\n\n def __call__(self, coords, feats, labels):\n # Assume feat[:, :3] is rgb\n hsv = HueSaturationTranslation.rgb_to_hsv(feats[:, :3])\n hue_val = (random.random() - 0.5) * 2 * self.hue_max\n sat_ratio = 1 + 
(random.random() - 0.5) * 2 * self.saturation_max\n hsv[..., 0] = np.remainder(hue_val + hsv[..., 0] + 1, 1)\n hsv[..., 1] = np.clip(sat_ratio * hsv[..., 1], 0, 1)\n feats[:, :3] = np.clip(HueSaturationTranslation.hsv_to_rgb(hsv), 0, 255)\n\n # pcd = o3d.PointCloud()\n # pcd.points = o3d.Vector3dVector(coords)\n # pcd.colors = o3d.Vector3dVector(feats / 255)\n # o3d.draw_geometries([pcd])\n\n return coords, feats, labels\n\n\nclass HeightTranslation(object):\n\n def __init__(self, std=0.01):\n self.std = std\n\n def __call__(self, coords, feats, labels):\n if feats.shape[1] > 3 and random.random() < 0.95:\n feats[:, -1] += np.random.randn(1) * self.std\n return coords, feats, labels\n\n\nclass HeightJitter(object):\n\n def __init__(self, std):\n self.std = std\n\n def __call__(self, coords, feats, labels):\n if feats.shape[1] > 3 and random.random() < 0.95:\n feats[:, -1] += np.random.randn(feats.shape[0]) * self.std\n return coords, feats, labels\n\n\nclass NormalJitter(object):\n\n def __init__(self, std):\n self.std = std\n\n def __call__(self, coords, feats, labels):\n # normal jitter\n if feats.shape[1] > 6 and random.random() < 0.95:\n feats[:, 3:6] += np.random.randn(feats.shape[0], 3) * self.std\n return coords, feats, labels\n\n\nclass Compose(object):\n \"\"\"Composes several transforms together.\"\"\"\n\n def __init__(self, transforms):\n self.transforms = transforms\n\n def __call__(self, *args):\n for t in self.transforms:\n args = t(*args)\n return args\n\n\nclass cfl_collate_fn_factory:\n \"\"\"Generates collate function for coords, feats, labels.\n\n Args:\n limit_numpoints: If 0 or False, does not alter batch size. If positive integer, limits batch\n size so that the number of input coordinates is below limit_numpoints.\n \"\"\"\n\n def __init__(self, limit_numpoints):\n self.limit_numpoints = limit_numpoints\n\n def __call__(self, list_data):\n coords, feats, labels, unique_map, inverse_map = list(zip(*list_data))\n coords_batch, feats_batch, labels_batch = [], [], []\n batch_id = 0\n batch_num_points = 0\n\n coords, feats, labels = list(coords), list(feats), list(labels) # convert tuple to list to allow element assignmnet\n for batch_id, _ in enumerate(coords):\n num_points = coords[batch_id].shape[0]\n batch_num_points += num_points\n\n # DEBUG: stupid dropping batch while exceeding limit-points, fix it with random sample(hard, keep old)\n # == Older version, passing the last batach\n # if self.limit_numpoints and batch_num_points > self.limit_numpoints:\n # num_full_points = sum(len(c) for c in coords)\n # num_full_batch_size = len(coords)\n # # logging.warning(\n # print( # show in terminal but dont write into log\n # f'\\t\\tCannot fit {num_full_points} points into {self.limit_numpoints} points '\n # f'limit. Truncating batch size at {batch_id} out of {num_full_batch_size} with {batch_num_points - num_points}.'\n # )\n # break\n\n # === newer version, num-limit points for each batch, if more, random sample ====\n # troublesome with mask!\n if self.limit_numpoints and coords[batch_id].shape[0] > self.limit_numpoints:\n # choices = torch.randint(0,coords[batch_id].shape[0],[self.limit_numpoints]) # DEBUG: STUDID! this is not sample with replacement\n print( # show in terminal but dont write into log\n f'\\t\\tCannot fit {coords[batch_id].shape[0]} points into {self.limit_numpoints} points '\n f'limit. 
random sample the original point cloud'\n )\n\n choices = torch.randperm(coords[batch_id].shape[0])[:self.limit_numpoints]\n\n coords[batch_id] = coords[batch_id][choices,:]\n feats[batch_id] = feats[batch_id][choices,:]\n labels[batch_id] = labels[batch_id][choices]\n\n coords_batch.append(coords[batch_id])\n feats_batch.append(torch.from_numpy(feats[batch_id]))\n labels_batch.append(torch.from_numpy(labels[batch_id]))\n\n # DEBUG: for some extreme cases in S3DIS, points even cannot fit in the 1 batch\n # maybe change the skip in sparse-collate to sample?\n\n coords, feats, labels = tuple(coords), tuple(feats), tuple(labels) # convert back to tuple\n # Concatenate all lists\n coords_batch, feats_batch, labels_batch = ME.utils.sparse_collate(coords_batch, feats_batch, labels_batch)\n\n return coords_batch, feats_batch, labels_batch, unique_map, inverse_map\n\n# class cflp_collate_fn_factory:\n# \"\"\"Generates collate function for coords, feats, labels, and point cloud\n\n# Args:\n# limit_numpoints: If 0 or False, does not alter batch size. If positive integer, limits batch\n# size so that the number of input coordinates is below limit_numpoints.\n# \"\"\"\n\n# def __init__(self, limit_numpoints):\n# self.limit_numpoints = limit_numpoints\n\n# def __call__(self, list_data):\n# coords, feats, labels, unique_map, inverse_map, point_cloud = list(zip(*list_data))\n# coords_batch, feats_batch, labels_batch, pc_batch = [], [], [], []\n# batch_id = 0\n# batch_num_points = 0\n# for batch_id, _ in enumerate(coords):\n# num_points = coords[batch_id].shape[0]\n# batch_num_points += num_points\n\n# if self.limit_numpoints and batch_num_points > self.limit_numpoints:\n# num_full_points = sum(len(c) for c in coords)\n# num_full_batch_size = len(coords)\n# # logging.warning(\n# print( # show in terminal but dont write into log\n# f'\\t\\tCannot fit {num_full_points} points into {self.limit_numpoints} points '\n# f'limit. Truncating batch size at {batch_id} out of {num_full_batch_size} with {batch_num_points - num_points}.'\n# )\n# break\n# coords_batch.append(coords[batch_id])\n# feats_batch.append(torch.from_numpy(feats[batch_id]))\n# labels_batch.append(torch.from_numpy(labels[batch_id]))\n# pc_batch.append(torch.from_numpy(point_cloud[batch_id]))\n\n# # Concatenate all lists\n# coords_batch, feats_batch, labels_batch = \\\n# ME.utils.sparse_collate(coords_batch, feats_batch, labels_batch)\n# pc_batch_coord, pc_batch, _ = ME.utils.sparse_collate(pc_batch, pc_batch, pc_batch)\n# pc_batch_coord = pc_batch_coord.type(torch.float32)\n# pc_batch_coord[:,1:] = pc_batch \n\n# return coords_batch, feats_batch, labels_batch, unique_map, inverse_map, pc_batch_coord\n\nclass cflt_collate_fn_factory:\n \"\"\"Generates collate function for coords, feats, labels, point_clouds, transformations.\n\n Args:\n limit_numpoints: If 0 or False, does not alter batch size. 
If positive integer, limits batch\n size so that the number of input coordinates is below limit_numpoints.\n \"\"\"\n\n def __init__(self, limit_numpoints):\n self.limit_numpoints = limit_numpoints\n\n def __call__(self, list_data):\n coords, feats, labels, pointclouds, transformations = list(zip(*list_data))\n cfl_collate_fn = cfl_collate_fn_factory(limit_numpoints=self.limit_numpoints)\n coords_batch, feats_batch, labels_batch = cfl_collate_fn(list(zip(coords, feats, labels)))\n\n batch_id = 0\n batch_num_points = 0\n pointclouds_batch, transformations_batch = [], []\n for pointcloud, transformation in zip(pointclouds, transformations):\n num_points = len(pointcloud)\n batch_num_points += num_points\n if self.limit_numpoints and batch_num_points > self.limit_numpoints:\n break\n\n pointclouds_batch.append(\n torch.cat((torch.ones(pointcloud.shape[0], 1) * batch_id, torch.from_numpy(pointcloud)), 1))\n transformations_batch.append(\n torch.cat((torch.ones(transformation.shape[0], 1) * batch_id, torch.from_numpy(transformation)), 1))\n\n batch_id += 1\n\n pointclouds_batch = torch.cat(pointclouds_batch, 0).float()\n transformations_batch = torch.cat(transformations_batch, 0).float()\n return coords_batch, feats_batch, labels_batch, pointclouds_batch, transformations_batch\n\n\ndef elastic_distortion(pointcloud, granularity, magnitude):\n \"\"\"Apply elastic distortion on sparse coordinate space.\n\n pointcloud: numpy array of (number of points, at least 3 spatial dims)\n granularity: size of the noise grid (in same scale[m/cm] as the voxel grid)\n magnitude: noise multiplier\n \"\"\"\n blurx = np.ones((3, 1, 1, 1)).astype('float32') / 3\n blury = np.ones((1, 3, 1, 1)).astype('float32') / 3\n blurz = np.ones((1, 1, 3, 1)).astype('float32') / 3\n coords = pointcloud[:, :3]\n coords_min = coords.min(0)\n\n # Create Gaussian noise tensor of the size given by granularity.\n noise_dim = ((coords - coords_min).max(0) // granularity).astype(int) + 3\n noise = np.random.randn(*noise_dim, 3).astype(np.float32)\n\n # Smoothing.\n for _ in range(2):\n noise = scipy.ndimage.filters.convolve(noise, blurx, mode='constant', cval=0)\n noise = scipy.ndimage.filters.convolve(noise, blury, mode='constant', cval=0)\n noise = scipy.ndimage.filters.convolve(noise, blurz, mode='constant', cval=0)\n\n # Trilinear interpolate noise filters for each spatial dimensions.\n ax = [\n np.linspace(d_min, d_max, d)\n for d_min, d_max, d in zip(coords_min - granularity, coords_min + granularity *\n (noise_dim - 2), noise_dim)\n ]\n interp = scipy.interpolate.RegularGridInterpolator(ax, noise, bounds_error=0, fill_value=0)\n pointcloud[:, :3] = coords + interp(coords) * magnitude\n return pointcloud\n\n# ----- the collate_fn used for nuscenes dataset ------\ndef collate_fn_BEV(data):\n data2stack = np.stack([d[0] for d in data]).astype(np.float32)\n label2stack = np.stack([d[1] for d in data]).astype(np.int)\n grid_ind_stack = [d[2] for d in data]\n point_label = [d[3] for d in data]\n xyz = [d[4] for d in data]\n return torch.from_numpy(data2stack), torch.from_numpy(label2stack), grid_ind_stack, point_label, xyz\n\n\ndef collate_fn_BEV_test(data):\n data2stack = np.stack([d[0] for d in data]).astype(np.float32)\n label2stack = np.stack([d[1] for d in data]).astype(np.int)\n grid_ind_stack = [d[2] for d in data]\n point_label = [d[3] for d in data]\n xyz = [d[4] for d in data]\n index = [d[5] for d in data]\n return torch.from_numpy(data2stack), torch.from_numpy(label2stack), grid_ind_stack, point_label, xyz, index\n\n\n", 
"import itertools\nimport operator\nimport time\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport logging\n\nimport numpy as np\nimport numpy.ma as ma\n\nfrom pointnet2_utils import furthest_point_sample as farthest_point_sample_cuda\nfrom pointnet2_utils import gather_operation as index_points_cuda_transpose\nfrom pointnet2_utils import grouping_operation as grouping_operation_cuda\nfrom pointnet2_utils import ball_query as query_ball_point_cuda\nfrom pointnet2_utils import QueryAndGroup\nfrom pointnet2_utils import three_nn\nfrom pointnet2_utils import three_interpolate\n\nfrom knn_cuda import KNN\nimport MinkowskiEngine as ME\nimport MinkowskiEngine.MinkowskiOps as me\n\ndef square_distance(src, dst):\n \"\"\"\n Calculate Euclid distance between each two points.\n src^T * dst = xn * xm + yn * ym + zn * zm;\n sum(src^2, dim=-1) = xn*xn + yn*yn + zn*zn;\n sum(dst^2, dim=-1) = xm*xm + ym*ym + zm*zm;\n dist = (xn - xm)^2 + (yn - ym)^2 + (zn - zm)^2\n = sum(src**2,dim=-1)+sum(dst**2,dim=-1)-2*src^T*dst\n Input:\n src: source points, [B, N, C]\n dst: target points, [B, M, C]\n Output:\n dist: per-point square distance, [B, N, M]\n \"\"\"\n return torch.sum((src[:, :, None] - dst[:, None]) ** 2, dim=-1)\n\ndef index_points_cuda(points, idx):\n \"\"\"\n\n Input:\n points: input points data, [B, N, C]\n idx: sample index data, [B, S]\n Return:\n new_points:, indexed points data, [B, S, C]\n \"\"\"\n points = points.transpose(1,2).contiguous() #[B, C, N]\n new_points = index_points_cuda_transpose(points, idx) #[B, C, S]\n\n return new_points.transpose(1,2).contiguous()\n\ndef sample_and_group_cuda(npoint, k, xyz, points, cat_xyz_feature=True, fps_only=False):\n \"\"\"\n Input:\n npoint:\n k:\n xyz: input points position data, [B, N, 3]\n points: input points data, [B, C, N]\n Return:\n new_xyz: sampled points position data, [B, 3, npoint]\n new_points: sampled points data, [B, C+C_xyz, npoint, k]\n grouped_xyz_norm: sampled relative points position data, [B, 3, npoint, k]\n \"\"\"\n k = min(npoint, k)\n knn = KNN(k=k, transpose_mode=True)\n\n B, N, C_xyz = xyz.shape\n\n if npoint < N:\n # fps_idx = torch.arange(npoint).repeat(xyz.shape[0], 1).int().cuda() # DEBUG ONLY\n fps_idx = farthest_point_sample_cuda(xyz, npoint) # [B, npoint]\n torch.cuda.empty_cache()\n new_xyz = index_points_cuda(xyz, fps_idx) #[B, npoint, 3]\n new_points = index_points_cuda(points.transpose(1,2), fps_idx)\n else:\n new_xyz = xyz\n\n if fps_only:\n return new_xyz.transpose(1,2), new_points.transpose(1,2), fps_idx\n\n torch.cuda.empty_cache()\n _, idx = knn(xyz.contiguous(), new_xyz) # B, npoint, k\n idx = idx.int()\n\n torch.cuda.empty_cache()\n grouped_xyz = grouping_operation_cuda(xyz.transpose(1,2).contiguous(), idx).permute(0,2,3,1) # [B, npoint, k, C_xyz]\n torch.cuda.empty_cache()\n grouped_xyz_norm = grouped_xyz - new_xyz.view(B, npoint, 1, C_xyz) # [B, npoint, k, 3]\n grouped_xyz_norm = grouped_xyz_norm.permute(0,3,1,2).contiguous()# [B, 3, npoint, k]\n torch.cuda.empty_cache()\n\n grouped_points = grouping_operation_cuda(points.contiguous(), idx) #B, C, npoint, k\n\n if cat_xyz_feature:\n new_points = torch.cat([grouped_xyz_norm, grouped_points], dim=1) # [B, C+C_xyz, npoint, k]\n else:\n new_points = grouped_points # [B, C+C_xyz, npoint, k]\n\n return new_xyz.transpose(1,2), grouped_xyz_norm, new_points, idx\n\ndef voxel2points(x: ME.SparseTensor):\n '''\n pack the ME Sparse Tensor feature(batch-dim information within first col of coord)\n [N_voxel_all_batches, dims] -> [bs, 
max_n_voxel_per_batch, dim]\n\n idx are used to denote the mask\n '''\n\n x_c, mask, idx = separate_batch(x.C)\n B = x_c.shape[0]\n N = x_c.shape[1]\n dim = x.F.shape[1]\n idx_ = idx.reshape(-1,1).repeat(1,dim)\n x_f = torch.zeros(B*N, dim).cuda()\n x_f.scatter_(dim=0, index=idx_, src=x.F)\n x_f = x_f.reshape([B,N,dim])\n\n return x_c, x_f, idx\n\ndef points2voxel(x, idx):\n '''\n revert the points into voxel's feature\n returns the new feat\n '''\n # the origi_x provides the cooed_map\n B, N, dim = list(x.shape)\n new_x = torch.gather(x.reshape(B*N, dim), dim=0, index=idx.reshape(-1,1).repeat(1,dim))\n return new_x\n\nclass TDLayer(nn.Module):\n def __init__(self, input_dim, out_dim, k=16, kernel_size=2):\n super().__init__()\n '''\n Transition Down Layer\n npoint: number of input points\n nsample: k in kNN, default 16\n in_dim: feature dimension of the input feature x (output of the PCTLayer)\n out_dim: feature dimension of the TDLayer\n '''\n self.k = k\n self.input_dim = input_dim\n self.out_dim = out_dim\n self.kernel_size = kernel_size\n\n '''a few additional cfg for TDLayer'''\n self.POINT_TR_LIKE = False\n self.FPS_ONLY = True\n self.cat_xyz_feature = True\n\n self.STRIDE = 4\n # self.STRIDE = 2\n\n if self.POINT_TR_LIKE:\n if self.FPS_ONLY:\n if self.cat_xyz_feature:\n self.conv = nn.Sequential(\n ME.MinkowskiConvolution(input_dim+3, out_dim, kernel_size=1, bias=True, dimension=3),\n ME.MinkowskiBatchNorm(out_dim),\n ME.MinkowskiReLU(),\n )\n else:\n self.conv = nn.Sequential(\n ME.MinkowskiConvolution(input_dim, out_dim, kernel_size=1, bias=True, dimension=3),\n ME.MinkowskiBatchNorm(out_dim),\n ME.MinkowskiReLU(),\n )\n\n self.mlp_bns = nn.ModuleList()\n\n if self.cat_xyz_feature:\n self.mlp_convs.append(nn.Conv2d(input_dim+3, input_dim, 1))\n else:\n self.mlp_convs.append(nn.Conv2d(input_dim, input_dim, 1))\n self.mlp_convs.append(nn.Conv2d(input_dim, out_dim, 1))\n self.mlp_bns.append(nn.BatchNorm2d(input_dim))\n self.mlp_bns.append(nn.BatchNorm2d(out_dim))\n\n else:\n self.conv = nn.Sequential(\n ME.MinkowskiConvolution(input_dim,out_dim,kernel_size=self.STRIDE, stride=self.STRIDE,bias=True,dimension=3),\n ME.MinkowskiBatchNorm(out_dim),\n ME.MinkowskiReLU()\n )\n\n def forward(self, x : ME.SparseTensor):\n \"\"\"\n Input:\n xyz: input points position data, [B, 3, N]\n points: input points data, [B, C, N]\n Return:\n new_xyz: sampled points position data, [B, C, S]\n new_points_concat: sample points feature data, [B, D', S]\n \"\"\"\n if self.POINT_TR_LIKE:\n x_c, mask, idx = separate_batch(x.C)\n B = x_c.shape[0]\n N = x_c.shape[1]\n dim = x.F.shape[1]\n idx_ = idx.reshape(-1,1).repeat(1,dim)\n x_f = torch.zeros(B*N, dim).cuda()\n x_f.scatter_(dim=0, index=idx_, src=x.F)\n x_f = x_f.reshape([B,N,dim])\n\n k = 16\n ds_ratio = 4\n npoint = N//ds_ratio\n # x_c = x_c.transpose(1,2).float()\n x_f = x_f.transpose(1,2)\n\n if self.FPS_ONLY:\n # just using the FPS's result for subsample, without projection\n new_xyz, new_points, fps_idx = sample_and_group_cuda(npoint, k, x_c.float(), x_f, cat_xyz_feature=self.cat_xyz_feature, fps_only=True)\n if self.cat_xyz_feature:\n additional_xyz = new_xyz / new_xyz.mean()\n new_points_pooled = torch.cat([new_xyz/new_xyz.mean(),new_points], dim=1) # norm the xyz to some extent\n else:\n new_points_pooled = new_points\n\n B, new_dim, new_N = list(new_points_pooled.shape)\n\n # idx: [N-voxel] -> value in range(0, B*N)\n # fps_idx: [B,N//2] -> check whether in idx(transform): -> fps_mask: [<B*N/2] value: [0, B,N//2]\n\n batch_ids = 
torch.arange(B).unsqueeze(-1).repeat(1,new_N).reshape(-1,1).cuda()\n\n # if no masked points are sampled, we could simply use a full-idx to gather new_feature\n new_idx = torch.arange(B*new_N).cuda()\n new_xyz = torch.gather(new_xyz.transpose(1,2).reshape(B*new_N, 3), dim=0, index=new_idx.reshape(-1,1).repeat(1,3))\n new_xyz = torch.cat([batch_ids, new_xyz], dim=1)\n new_points_pooled = torch.gather(new_points_pooled.transpose(1,2).reshape(B*new_N, new_dim), dim=0, index=new_idx.reshape(-1,1).repeat(1,new_dim))\n\n else:\n new_xyz, grouped_xyz_norm, new_points, new_indices = sample_and_group_cuda(npoint, k, x_c.float(), x_f, cat_xyz_feature=self.cat_xyz_feature)\n\n # --- make the new idx, and ck if all new coord in old_coord ---\n # to_sum = (torch.arange(B).reshape(-1,1)*N).cuda() # the batch-dim\n # new_idx = torch.sort(new_indices[:,:,0],dim=-1)[0]\n # new_idx = new_idx + to_sum\n # new_idx = new_idx.view(-1) # should be roughly half the size of the 'idx'\n # # there should not be a outlier point\n # ck_in = [not i in idx for i in new_idx] # didnt find a torch func to do that\n # assert sum(ck_in) == 0\n\n for i, conv in enumerate(self.mlp_convs):\n bn = self.mlp_bns[i]\n new_points = F.relu(bn(conv(new_points)))\n\n new_points_pooled = torch.max(new_points, 3)[0]\n\n B, new_dim, new_N = new_points_pooled.shape\n new_idx = torch.arange(B*new_N).cuda() # the keep all idxs\n\n new_points_pooled = new_points_pooled.transpose(1,2).reshape(B*new_N, new_dim)\n new_points_pooled = torch.gather(new_points_pooled, dim=0, index=new_idx.reshape(-1,1).repeat(1,new_dim))\n new_xyz = torch.gather(new_xyz.transpose(1,2).reshape(B*new_N, 3), dim=0, index=new_idx.reshape(-1,1).repeat(1,3))\n batch_ids = torch.arange(B).unsqueeze(-1).repeat(1,new_N).reshape(-1,1).cuda()\n new_xyz = torch.cat([batch_ids, new_xyz], dim=1)\n\n y = ME.SparseTensor(features=new_points_pooled,coordinates=new_xyz,coordinate_manager=x.coordinate_manager)\n\n if self.FPS_ONLY:\n y = self.conv(y)\n\n else:\n y = self.conv(x)\n\n return y\n\nclass ResNetLikeTU(nn.Module):\n def __init__(self, input_a_dim, input_b_dim, out_dim, kernel_size=2):\n super().__init__()\n '''\n Deconv x_a\n concat with x_b\n then apply output-projection\n '''\n self.input_a_dim = input_a_dim\n self.input_b_dim = input_b_dim\n self.out_dim = out_dim\n self.conv_a = nn.Sequential(\n ME.MinkowskiConvolutionTranspose(in_channels=input_a_dim, out_channels=input_a_dim ,kernel_size=4,stride=4,dimension=3),\n ME.MinkowskiBatchNorm(input_a_dim),\n ME.MinkowskiReLU(),\n )\n\n self.conv_proj = nn.Sequential(\n ME.MinkowskiConvolution(in_channels=input_a_dim + input_b_dim, out_channels=out_dim,kernel_size=3,stride=1,dimension=3),\n ME.MinkowskiBatchNorm(out_dim),\n ME.MinkowskiReLU(),\n )\n\n def forward(self, x_a, x_b):\n x_a = self.conv_a(x_a)\n x = ME.cat(x_a, x_b)\n x = self.conv_proj(x)\n return x\n\nclass TULayer(nn.Module):\n def __init__(self, input_a_dim, input_b_dim, out_dim,k=3):\n super().__init__()\n '''\n Transition Up Layer\n npoint: number of input points\n nsample: k in kNN, default 3\n input_a_dim: feature dimension of the input a(needs upsampling)\n input_b_dim: feature dimension of the input b (directly concat)\n out_dim: feature dimension of the TDLayer(fixed as the input_a_dim // 2) + input_b_dim\n\n '''\n self.k = k\n self.input_a_dim = input_a_dim\n self.input_b_dim = input_b_dim\n self.intermediate_dim = (input_a_dim // 2) + input_b_dim\n self.out_dim = out_dim\n\n self.POINT_TR_LIKE = False\n self.SUM_FEATURE = True # only used when 
POINTTR_LIKE is False, somehow have some peoblems\n\n # -------- Point TR like -----------\n if self.POINT_TR_LIKE:\n self.linear_a = nn.Linear(input_a_dim, out_dim)\n self.linear_b = nn.Linear(input_b_dim, out_dim)\n else:\n if self.SUM_FEATURE:\n self.conv_a = nn.Sequential(\n ME.MinkowskiConvolutionTranspose(in_channels=input_a_dim, out_channels=out_dim,kernel_size=2,stride=2,dimension=3),\n ME.MinkowskiBatchNorm(out_dim),\n ME.MinkowskiReLU(),\n )\n self.conv_b = nn.Sequential(\n ME.MinkowskiConvolution(in_channels=input_b_dim, out_channels=out_dim,kernel_size=1,stride=1,dimension=3),\n ME.MinkowskiBatchNorm(out_dim),\n ME.MinkowskiReLU(),\n )\n else:\n self.conv = ME.MinkowskiConvolutionTranspose(\n in_channels=input_a_dim,\n out_channels=input_a_dim // 2,\n kernel_size=2,\n stride=2,\n dimension=3\n )\n self.bn = ME.MinkowskiBatchNorm(\n self.input_a_dim // 2\n )\n self.relu = ME.MinkowskiReLU()\n\n # -----------------------------------------\n\n self.out_conv = ME.MinkowskiConvolution(\n in_channels=input_a_dim//2 + input_b_dim,\n out_channels=out_dim,\n kernel_size=3,\n stride=1,\n dimension=3\n )\n self.out_bn = ME.MinkowskiBatchNorm(\n self.out_dim\n )\n self.out_relu = ME.MinkowskiReLU()\n\n def forward(self, x_a : ME.SparseTensor, x_b: ME.SparseTensor):\n \"\"\"\n Input:\n M < N\n xyz_1: input points position data, [B, 3, M]\n xyz_2: input points position data, [B, 3, N]\n points_1: input points data, [B, C, M]\n points_2: input points data, [B, C, N]\n\n interpolate xyz_2's coordinates feature with knn neighbor's features weighted by inverse distance\n\n TODO: For POINT_TR_LIKE, add support for no x_b is fed, simply upsample the x_a\n\n Return:\n new_xyz: sampled points position data, [B, C, S]\n new_points_concat: sample points feature data, [B, D', S]\n \"\"\"\n\n if self.POINT_TR_LIKE:\n\n dim = x_b.F.shape[1]\n assert dim == self.out_dim\n\n x_ac, mask_a, idx_a = separate_batch(x_a.C)\n B = x_ac.shape[0]\n N_a = x_ac.shape[1]\n x_af = torch.zeros(B*N_a, dim).cuda()\n idx_a = idx_a.reshape(-1,1).repeat(1,dim)\n x_af.scatter_(dim=0, index=idx_a, src=self.linear_a(x_a.F))\n x_af = x_af.reshape([B, N_a, dim])\n\n x_bc, mask_b, idx_b = separate_batch(x_b.C)\n B = x_bc.shape[0]\n N_b = x_bc.shape[1]\n x_bf = torch.zeros(B*N_b, dim).cuda()\n idx_b = idx_b.reshape(-1,1).repeat(1,dim)\n x_bf.scatter_(dim=0, index=idx_b, src=self.linear_b(x_b.F))\n x_bf = x_bf.reshape([B, N_b, dim])\n\n dists, idx = three_nn(x_bc.float(), x_ac.float())\n\n mask = (dists.sum(dim=-1)>0).unsqueeze(-1).repeat(1,1,3)\n\n dist_recip = 1.0 / (dists + 1e-1)\n norm = torch.sum(dist_recip, dim=2, keepdim=True)\n weight = dist_recip / norm\n weight = weight*mask # mask the zeros part\n\n interpolated_points = three_interpolate(x_af.transpose(1,2).contiguous(), idx, weight).transpose(1,2) # [B, N_b, dim]\n out = interpolated_points + x_bf\n\n out = torch.gather(out.reshape(B*N_b,dim), dim=0, index=idx_b) # should be the same size with x_a.F\n x = ME.SparseTensor(features = out, coordinate_map_key=x_b.coordinate_map_key, coordinate_manager=x_b.coordinate_manager)\n\n else:\n if self.SUM_FEATURE:\n x_a = self.conv_a(x_a)\n x_b = self.conv_b(x_b)\n x = x_a + x_b\n else:\n x_a = self.conv(x_a)\n x_a = self.bn(x_a)\n x_a = self.relu(x_a)\n x = me.cat(x_a, x_b)\n x = self.out_conv(x)\n x = self.out_bn(x)\n x = self.out_relu(x)\n\n return x\n\ndef index_points(points, idx):\n \"\"\"\n Input:\n points: input points data, [B, N, C]\n idx: sample index data, [B, S, [K]]\n Return:\n new_points:, indexed points data, [B, 
S, [K], C]\n \"\"\"\n raw_size = idx.size()\n idx = idx.reshape(raw_size[0], -1)\n res = torch.gather(points, 1, idx[..., None].expand(-1, -1, points.size(-1)))\n return res.reshape(*raw_size, -1)\n\nclass StackedPTBlock(nn.Module):\n def __init__(self, in_dim, hidden_dim, is_firstlayer=False, n_sample=16, r=10, skip_knn=False, kernel_size=1):\n super().__init__()\n\n self.block1 = PTBlock(in_dim, hidden_dim, is_firstlayer, n_sample, r, skip_knn, kernel_size)\n self.block2 = PTBlock(in_dim, hidden_dim, is_firstlayer, n_sample, r, skip_knn, kernel_size)\n\n def forward(self, x : ME.SparseTensor):\n x = self.block1(x)\n x = self.block2(x)\n return x\n\nclass PTBlock(nn.Module):\n def __init__(self, in_dim, hidden_dim, is_firstlayer=False, n_sample=16, r=10, skip_knn=False, kernel_size=1):\n super().__init__()\n '''\n Point Transformer Layer\n\n in_dim: feature dimension of the input feature x\n out_dim: feature dimension of the Point Transformer Layer(currently same with hidden-dim)\n '''\n\n self.r = r # neighborhood cube radius\n self.skip_knn = skip_knn\n self.kernel_size = kernel_size\n\n self.in_dim = in_dim\n self.hidden_dim = hidden_dim\n self.out_dim = self.hidden_dim\n self.vector_dim = self.out_dim // 1\n self.n_sample = n_sample\n\n self.KS_1 = True\n self.USE_KNN = True\n self.use_vector_attn = True # whether to use the vector att or the original attention\n self.WITH_POSE_ENCODING = True\n self.SKIP_ATTN=False\n\n if self.KS_1:\n self.kernel_size = 1\n\n if not self.use_vector_attn:\n self.nhead = 4\n\n self.linear_top = nn.Sequential(\n ME.MinkowskiConvolution(in_dim, self.hidden_dim, kernel_size=self.kernel_size, dimension=3),\n ME.MinkowskiBatchNorm(self.hidden_dim),\n )\n self.linear_down = nn.Sequential(\n ME.MinkowskiConvolution(self.out_dim, self.in_dim, kernel_size=self.kernel_size, dimension=3),\n ME.MinkowskiBatchNorm(self.in_dim),\n )\n # feature transformations\n self.phi = nn.Sequential(\n ME.MinkowskiConvolution(self.hidden_dim, self.out_dim, kernel_size=self.kernel_size, dimension=3)\n )\n self.psi = nn.Sequential(\n ME.MinkowskiConvolution(self.hidden_dim, self.out_dim, kernel_size=self.kernel_size, dimension=3)\n )\n\n if self.SKIP_ATTN:\n KERNEL_SIZE = 1\n self.alpha = nn.Sequential(\n nn.Conv1d(self.in_dim, self.in_dim, KERNEL_SIZE),\n nn.BatchNorm1d(self.in_dim),\n nn.ReLU(),\n nn.Conv1d(self.in_dim, self.hidden_dim, KERNEL_SIZE),\n nn.BatchNorm1d(self.hidden_dim),\n nn.ReLU(),\n )\n else:\n self.alpha = nn.Sequential(\n ME.MinkowskiConvolution(self.hidden_dim, self.out_dim, kernel_size=self.kernel_size, dimension=3)\n )\n\n self.gamma = nn.Sequential(\n nn.Conv1d(self.out_dim, self.hidden_dim, 1),\n nn.BatchNorm1d(self.hidden_dim),\n nn.ReLU(),\n nn.Conv1d(self.hidden_dim, self.vector_dim, 1),\n nn.BatchNorm1d(self.vector_dim),\n )\n\n self.delta = nn.Sequential(\n nn.Conv2d(3, self.hidden_dim, 1),\n nn.BatchNorm2d(self.hidden_dim),\n nn.ReLU(),\n nn.Conv2d(self.hidden_dim, self.out_dim, 1),\n nn.BatchNorm2d(self.out_dim),\n )\n\n def forward(self, x : ME.SparseTensor, aux=None):\n '''\n input_p: B, 3, npoint\n input_x: B, in_dim, npoint\n '''\n PT_begin = time.perf_counter()\n self.B = (x.C[:,0]).max().item() + 1 # batch size\n npoint, in_dim = tuple(x.F.size())\n self.k = min(self.n_sample, npoint)\n if not self.use_vector_attn:\n h = self.nhead\n\n res = x\n\n if self.skip_knn:\n # --- for debugging only ---\n x = self.linear_top(x)\n y = self.linear_down(x)\n return y+res\n\n else:\n self.cube_query = cube_query(r=self.r, k=self.k, knn=self.USE_KNN)\n\n 
# neighbor: [B*npoint, k, bxyz]\n # mask: [B*npoint, k]\n # idx: [B_nq], used for scatter/gather\n neighbor, mask, idx_ = self.cube_query.get_neighbor(x, x)\n\n self.register_buffer('neighbor_map', neighbor)\n self.register_buffer('input_map', x.C)\n\n # check for duplicate neighbor(not enough voxels within radius that fits k)\n # CHECK_FOR_DUP_NEIGHBOR=True\n # if CHECK_FOR_DUP_NEIGHBOR:\n # dist_map = (neighbor - neighbor[:,0,:].unsqueeze(1))[:,1:,:].abs()\n # num_different = (dist_map.sum(-1)>0).sum(-1) # how many out of ks are the same, of shape [nvoxel]\n # outlier_point = (num_different < int(self.k*1/2)-1).sum()\n # if not (outlier_point < max(npoint//10, 10)): # sometimes npoint//100 could be 3\n # pass\n # logging.info('Detected Abnormal neighbors, num outlier {}, all points {}'.format(outlier_point, x.shape[0]))\n\n x = self.linear_top(x) # [B, in_dim, npoint], such as [16, 32, 4096]\n\n '''\n illustration on dimension notations:\n - B: batch size\n - nvoxel: number of all voxels of the whole batch\n - k: k neighbors\n - feat_dim: feature dimension, or channel as others call it\n - nvoxel_batch: the maximum voxel number of a single SparseTensor in the current batch\n '''\n\n '''Gene the pos_encoding'''\n relative_xyz = neighbor - x.C[:,None,:].repeat(1,self.k,1) # (nvoxel, k, bxyz), we later pad it to [B, xyz, nvoxel_batch, k]\n\n '''\n mask the neighbor when not in the same instance-class\n '''\n if aux is not None:\n neighbor_mask = aux.features_at_coordinates(neighbor.reshape(-1,4).float()).reshape(-1,self.k) # [N, k]\n neighbor_mask = (neighbor_mask - neighbor_mask[:,0].unsqueeze(-1) != 0).int()\n # logging.info('Cur Mask Ratio {}'.format(neighbor_mask.sum()/neighbor_mask.nelement()))\n\n neighbor_mask = torch.ones_like(neighbor_mask) - neighbor_mask\n else:\n neighbor_mask = None\n\n if self.WITH_POSE_ENCODING:\n relative_xyz[:,0,0] = x.C[:,0] # get back the correct batch index, because we messed batch index in the subtraction above\n relative_xyz = pad_zero(relative_xyz, mask) # [B, xyz, nvoxel_batch, k]\n pose_tensor = self.delta(relative_xyz.float()) # (B, feat_dim, nvoxel_batch, k)\n pose_tensor = make_position_tensor(pose_tensor, mask, idx_, x.C.shape[0]) # (nvoxel, k, feat_dim)S\n\n if self.SKIP_ATTN:\n grouped_x = get_neighbor_feature(neighbor, x) # (nvoxel, k, feat_dim)\n if self.WITH_POSE_ENCODING:\n alpha = self.alpha((grouped_x + pose_tensor).transpose(1,2))\n else:\n alpha = self.alpha((grouped_x).transpose(1,2))\n y = alpha.max(dim=-1)[0]\n y = ME.SparseTensor(features = y, coordinate_map_key=x.coordinate_map_key, coordinate_manager=x.coordinate_manager)\n\n y = self.linear_down(y)\n return y+res\n\n phi = self.phi(x).F # (nvoxel, feat_dim)\n phi = phi[:,None,:].repeat(1,self.k,1) # (nvoxel, k, feat_dim)\n psi = get_neighbor_feature(neighbor, self.psi(x)) # (nvoxel, k, feat_dim)\n alpha = get_neighbor_feature(neighbor, self.alpha(x)) # (nvoxel, k, feat_dim)\n\n '''The Self-Attn Part'''\n if self.use_vector_attn:\n '''\n the attn_map: [vector_dim];\n the alpha: [out_dim]\n attn_map = F.softmax(self.gamma(phi - psi + pos_encoding), dim=-1) # [B, in_dim, npoint, k], such as [16, 32, 4096, 16]\n y = attn_map.repeat(1, self.out_dim // self.vector_dim,1,1)*(alpha + pos_encoding) # multiplies attention weight\n self.out_dim and self.vector_dim are all 32 here, so y is still [16, 32, 4096, 16]\n y = y.sum(dim=-1) # feature aggregation, y becomes [B, out_dim, npoint]\n '''\n if self.WITH_POSE_ENCODING:\n attn_map = F.softmax(self.gamma((phi - psi + 
pose_tensor).transpose(1,2)), dim=-1)\n else:\n attn_map = F.softmax(self.gamma((phi - psi).transpose(1,2)), dim=-1)\n if self.WITH_POSE_ENCODING:\n self_feat = (alpha + pose_tensor).permute(0,2,1) # (nvoxel, k, feat_dim) -> (nvoxel, feat_dim, k)\n else:\n self_feat = (alpha).permute(0,2,1) # (nvoxel, k, feat_dim) -> (nvoxel, feat_dim, k)\n\n # use aux info and mask the attn_map\n if neighbor_mask is not None:\n attn_map = attn_map*(neighbor_mask.unsqueeze(1))\n\n y = attn_map.repeat(1, self.out_dim // self.vector_dim, 1, 1) * self_feat # (nvoxel, feat_dim, k)\n y = y.sum(dim=-1).view(x.C.shape[0], -1) # feature aggregation, y becomes (nvoxel, feat_dim)\n y = ME.SparseTensor(features = y, coordinate_map_key=x.coordinate_map_key, coordinate_manager=x.coordinate_manager)\n else:\n phi = phi.permute([2,1,0]) # [out_dim, k, npoint]\n psi = psi.permute([2,0,1]) # [out_dim. npoint, k]\n attn_map = F.softmax(torch.matmul(phi,psi), dim=0) # [out_dim, k, k]\n alpha = (alpha+pose_tensor).permute([2,0,1]) # [out_dim, npoint, k]\n y = torch.matmul(alpha, attn_map) # [out_dim, npoint, k]\n y = y.sum(-1).transpose(0,1) # [out_dim. npoint]\n y = ME.SparseTensor(features = y, coordinate_map_key=x.coordinate_map_key, coordinate_manager=x.coordinate_manager)\n\n y = self.linear_down(y)\n\n self.register_buffer('attn_map', attn_map.detach().cpu().data) # pack it with nn parameter to save in state-dict\n\n return y+res\n\ndef make_position_tensor(pose_encoding : torch.Tensor, mask : torch.Tensor, idx_: torch.Tensor, nvoxel : int):\n \"\"\"\n Mask positional encoding into k ME.SparseTensors\n\n Input:\n pose_encoding: (B, feat_dim, nvoxel_batch, k)\n batch_tensor: (B, N)\n \"\"\"\n\n assert idx_.shape[0] == nvoxel # the idx and the nvoxel should be the same\n\n B, feat_dim, nvoxel_batch, k = pose_encoding.shape\n pose_encoding = pose_encoding.permute(0, 2, 3, 1) # (B, feat_dim, nvoxel_batch, k) -> (B, nvoxel_batch, k, feat_dim)\n\n '''use idx to scatter the result'''\n masked_encoding = torch.gather(\n pose_encoding.reshape(-1, k, feat_dim),\n 0,\n idx_.reshape(-1,1,1).repeat(1, k, feat_dim)\n ).reshape(nvoxel, k, feat_dim)\n return masked_encoding # (nvoxel, k, feat_dim)\n\ndef get_neighbor_feature(neighbor: torch.Tensor, x: ME.SparseTensor):\n \"\"\"\n fetch neighbor voxel's feature tensor.\n Input:\n neighbor: torch.Tensor [B*npoint, k, xyz]\n x: ME.SparseTensor\n \"\"\"\n B_npoint, k, _ = tuple(neighbor.size())\n neighbor = neighbor.view(-1, 4).float() # [B*npoint*k, bxyz]\n features = x.features_at_coordinates(neighbor)\n _, dim = features.shape\n features = features.view(-1, k, dim)\n return features\n\ndef pad_zero(tensor : torch.Tensor, mask: torch.Tensor):\n '''\n input is [B*npoint, k, bxyz], we want [B, xyz, npoint, k]\n need to pad zero because each batch may have different voxel number\n B = int(max(tensor[:,0,0]).item() + 1)\n k = tuple(tensor.shape)[1]\n '''\n B, N = mask.shape\n _, k, bxyz = tensor.shape\n result = torch.zeros([B, N, k, 4], dtype=torch.int, device=tensor.device)\n pointer = 0\n for b_idx in range(B):\n nvoxel = mask.sum(-1)[b_idx]\n result[b_idx, :nvoxel, :, :] = tensor[pointer:pointer+nvoxel, :, :]\n pointer += nvoxel\n result = result[:,:,:,1:] # (B, N, k, 3)\n result = result.permute(0, 3, 1, 2) # (B, N, k, 3) -> (B, 3, N, k)\n return result\n\ndef manhattan_dist(dxyz: tuple):\n dxyz = [abs(v) for v in dxyz]\n return sum(dxyz[1:])\n\ndef separate_batch(coord: torch.Tensor):\n \"\"\"\n Input:\n coord: (N_voxel, 4) coordinate tensor, coord=b,x,y,z\n Return:\n tensor: (B, N(max 
n-voxel cur batch), 3), batch index separated\n mask: (B, N), 1->valid, 0->invalid\n \"\"\"\n\n # Features do not have batch ids\n N_voxel = coord.shape[0]\n B = (coord[:,0].max().item() + 1)\n\n batch_ids = coord[:,0]\n\n # get the splits of different i_batch\n splits_at = torch.stack([torch.where(batch_ids == i)[0][-1] for i in torch.unique(batch_ids)]).int() # iter at i_batch_level\n # the returned indices of torch.where are from [0 ~ N-1], but when we use the x[start:end] style indexing, they should cover [1:N]\n # example: x[0:1] & x[:1] are the same, contain 1 element, but x[:0] is []\n # example: x[:N] would not raise error but x[N] would\n\n splits_at = splits_at+1\n splits_at_leftshift_one = splits_at.roll(shifts=1) # left shift the splits_at\n splits_at_leftshift_one[0] = 0\n\n len_per_batch = splits_at - splits_at_leftshift_one\n # len_per_batch[0] = len_per_batch[0]+1 # DEBUG: dirty fix since 0~1566 has 1567 values\n N = len_per_batch.max().int()\n\n assert len_per_batch.sum() == N_voxel\n\n mask = torch.zeros([B*N], device=coord.device).int()\n new_coord = torch.zeros([B*N, 3], device=coord.device).int() # (B, N, xyz)\n\n '''\n new_coord: [B,N,3]\n coord-part : [n_voxel, 3]\n idx: [b_voxel, 3]\n '''\n idx_ = torch.cat([torch.arange(len_, device=coord.device)+i*N for i, len_ in enumerate(len_per_batch)])\n idx = idx_.reshape(-1,1).repeat(1,3)\n new_coord.scatter_(dim=0, index=idx, src=coord[:,1:])\n mask.scatter_(dim=0, index=idx_, src=torch.ones_like(idx_, device=idx.device).int())\n mask = mask.reshape([B,N])\n new_coord = new_coord.reshape([B,N,3])\n\n return new_coord, mask, idx_\n\n# cube query for sparse tensors\nclass cube_query(object):\n \"\"\"\n Cube query for ME.SparseTensor\n ref : ME.SparseTensor, coord dim = [B * nr, 3]\n reference sparse tensor\n query: ME.SparseTensor, coord dim = [B * nq, 3]\n query sparse tensor, whose neighbors we are looking for\n return:\n result: torch.Tensor [B * nq, k, 4], 4 is B,x,y,z\n mask: torch.Tensor [B * nq, k], zero means less than k neighbors\n\n __init__():\n input:\n r: cube query radius\n k: k neighbors\n \"\"\"\n def __init__(self, r, k, knn=False):\n self.r = r\n self.k = k\n if knn:\n self.use_knn = True\n self.knn = KNN(k=k, transpose_mode=True)\n else:\n self.use_knn = False\n\n def get_neighbor(self, ref : ME.SparseTensor, query : ME.SparseTensor):\n B_nq, _ = query.C.shape\n\n coord = query.C # (N, 4)\n batch_info = coord[:,0]\n coord, mask, idx_ = separate_batch(coord) # (b, n, 3)\n b, n, _ = coord.shape\n\n if self.use_knn:\n _, idx = self.knn(coord.contiguous(), coord)\n grouped_coord = grouping_operation_cuda(coord.float().transpose(1,2).contiguous(), idx.int())\n result_padded = grouped_coord.permute([0,2,3,1])\n else:\n query_and_group_cuda = QueryAndGroup(radius=self.r, nsample=self.k, use_xyz=False)\n coord = coord.float()\n\n idxs = query_and_group_cuda(\n xyz=coord,\n new_xyz=coord,\n features=coord.transpose(1,2).contiguous(),\n ) # idx: [bs, xyz, npoint, nsample]\n idxs = idxs.permute([0,2,3,1]) # idx: [bs, npoint, nsample, xyz]\n result_padded = idxs\n\n # unpad result (b, n, k, 3) -> (B_nq, k, 4) by applying mask\n result = torch.zeros([B_nq, self.k, 4], dtype=torch.int32, device=query.device)\n result[:,:,1:] = torch.gather(\n result_padded.reshape(-1, self.k, 3),\n 0,\n idx_.reshape(-1, 1, 1).repeat(1, self.k, 3)\n )\n result[:,:,0] = batch_info.unsqueeze(-1).repeat(1, self.k)\n\n return result, mask, idx_\n\nif __name__ == \"__main__\":\n import torch\n import MinkowskiEngine as ME\n feature = 
torch.tensor([[0.2, 0.3], [0.4, 0.5]])\n coord = torch.tensor([[0.6, 0.8, 0.3], [0.4, 0.3, 0.5]])\n x = ME.SparseTensor(\n features = feature,\n coordinates = ME.utils.batched_coordinates([coord / 0.1])\n )\n cq = cube_query(1, 2)\n # get_neighbor returns three values (neighbor coords, validity mask, scatter indices)\n result, mask, idx_ = cq.get_neighbor(x, x)\n" ]
[ [ "torch.ones", "numpy.linspace", "numpy.min", "numpy.clip", "numpy.empty_like", "torch.cat", "torch.randperm", "scipy.interpolate.RegularGridInterpolator", "torch.from_numpy", "numpy.stack", "numpy.ones", "numpy.max", "numpy.zeros_like", "numpy.remainder", "numpy.select", "numpy.random.randn", "numpy.random.rand", "scipy.ndimage.filters.convolve" ], [ "torch.nn.BatchNorm1d", "torch.max", "torch.cat", "torch.zeros", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.sum", "torch.cuda.empty_cache", "torch.arange", "torch.tensor", "torch.nn.Linear", "torch.matmul", "torch.unique", "torch.nn.BatchNorm2d", "torch.nn.Conv1d", "torch.where", "torch.nn.ReLU", "torch.ones_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.14", "1.6", "0.15", "1.4", "1.3", "0.19", "1.5", "0.18", "1.2", "1.7", "1.0", "0.17", "0.16" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kasim95/pandas
[ "3526a7104c78ed498a84e778a60314df7daf439e" ]
[ "pandas/tests/series/indexing/test_setitem.py" ]
[ "from datetime import date\n\nimport numpy as np\nimport pytest\n\nfrom pandas import (\n DatetimeIndex,\n Index,\n MultiIndex,\n NaT,\n Series,\n Timestamp,\n date_range,\n period_range,\n)\nimport pandas._testing as tm\nfrom pandas.core.indexing import IndexingError\n\nfrom pandas.tseries.offsets import BDay\n\n\nclass TestSetitemDT64Values:\n def test_setitem_none_nan(self):\n series = Series(date_range(\"1/1/2000\", periods=10))\n series[3] = None\n assert series[3] is NaT\n\n series[3:5] = None\n assert series[4] is NaT\n\n series[5] = np.nan\n assert series[5] is NaT\n\n series[5:7] = np.nan\n assert series[6] is NaT\n\n def test_setitem_multiindex_empty_slice(self):\n # https://github.com/pandas-dev/pandas/issues/35878\n idx = MultiIndex.from_tuples([(\"a\", 1), (\"b\", 2)])\n result = Series([1, 2], index=idx)\n expected = result.copy()\n result.loc[[]] = 0\n tm.assert_series_equal(result, expected)\n\n def test_setitem_with_string_index(self):\n # GH#23451\n ser = Series([1, 2, 3], index=[\"Date\", \"b\", \"other\"])\n ser[\"Date\"] = date.today()\n assert ser.Date == date.today()\n assert ser[\"Date\"] == date.today()\n\n def test_setitem_with_different_tz_casts_to_object(self):\n # GH#24024\n ser = Series(date_range(\"2000\", periods=2, tz=\"US/Central\"))\n ser[0] = Timestamp(\"2000\", tz=\"US/Eastern\")\n expected = Series(\n [\n Timestamp(\"2000-01-01 00:00:00-05:00\", tz=\"US/Eastern\"),\n Timestamp(\"2000-01-02 00:00:00-06:00\", tz=\"US/Central\"),\n ],\n dtype=object,\n )\n tm.assert_series_equal(ser, expected)\n\n def test_setitem_tuple_with_datetimetz_values(self):\n # GH#20441\n arr = date_range(\"2017\", periods=4, tz=\"US/Eastern\")\n index = [(0, 1), (0, 2), (0, 3), (0, 4)]\n result = Series(arr, index=index)\n expected = result.copy()\n result[(0, 1)] = np.nan\n expected.iloc[0] = np.nan\n tm.assert_series_equal(result, expected)\n\n\nclass TestSetitemPeriodDtype:\n @pytest.mark.parametrize(\"na_val\", [None, np.nan])\n def test_setitem_na_period_dtype_casts_to_nat(self, na_val):\n ser = Series(period_range(\"2000-01-01\", periods=10, freq=\"D\"))\n\n ser[3] = na_val\n assert ser[3] is NaT\n\n ser[3:5] = na_val\n assert ser[4] is NaT\n\n\nclass TestSetitemScalarIndexer:\n def test_setitem_negative_out_of_bounds(self):\n ser = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))\n\n msg = \"index -11 is out of bounds for axis 0 with size 10\"\n with pytest.raises(IndexError, match=msg):\n ser[-11] = \"foo\"\n\n\nclass TestSetitemSlices:\n def test_setitem_slice_float_raises(self, datetime_series):\n msg = (\n \"cannot do slice indexing on DatetimeIndex with these indexers \"\n r\"\\[{key}\\] of type float\"\n )\n with pytest.raises(TypeError, match=msg.format(key=r\"4\\.0\")):\n datetime_series[4.0:10.0] = 0\n\n with pytest.raises(TypeError, match=msg.format(key=r\"4\\.5\")):\n datetime_series[4.5:10.0] = 0\n\n\nclass TestSetitemBooleanMask:\n def test_setitem_boolean(self, string_series):\n mask = string_series > string_series.median()\n\n # similar indexed series\n result = string_series.copy()\n result[mask] = string_series * 2\n expected = string_series * 2\n tm.assert_series_equal(result[mask], expected[mask])\n\n # needs alignment\n result = string_series.copy()\n result[mask] = (string_series * 2)[0:5]\n expected = (string_series * 2)[0:5].reindex_like(string_series)\n expected[-mask] = string_series[mask]\n tm.assert_series_equal(result[mask], expected[mask])\n\n def test_setitem_boolean_corner(self, datetime_series):\n ts = datetime_series\n 
mask_shifted = ts.shift(1, freq=BDay()) > ts.median()\n\n msg = (\n r\"Unalignable boolean Series provided as indexer \\(index of \"\n r\"the boolean Series and of the indexed object do not match\"\n )\n with pytest.raises(IndexingError, match=msg):\n ts[mask_shifted] = 1\n\n with pytest.raises(IndexingError, match=msg):\n ts.loc[mask_shifted] = 1\n\n def test_setitem_boolean_different_order(self, string_series):\n ordered = string_series.sort_values()\n\n copy = string_series.copy()\n copy[ordered > 0] = 0\n\n expected = string_series.copy()\n expected[expected > 0] = 0\n\n tm.assert_series_equal(copy, expected)\n\n @pytest.mark.parametrize(\"func\", [list, np.array, Series])\n def test_setitem_boolean_python_list(self, func):\n # GH19406\n ser = Series([None, \"b\", None])\n mask = func([True, False, True])\n ser[mask] = [\"a\", \"c\"]\n expected = Series([\"a\", \"b\", \"c\"])\n tm.assert_series_equal(ser, expected)\n\n @pytest.mark.parametrize(\"value\", [None, NaT, np.nan])\n def test_setitem_boolean_td64_values_cast_na(self, value):\n # GH#18586\n series = Series([0, 1, 2], dtype=\"timedelta64[ns]\")\n mask = series == series[0]\n series[mask] = value\n expected = Series([NaT, 1, 2], dtype=\"timedelta64[ns]\")\n tm.assert_series_equal(series, expected)\n\n def test_setitem_boolean_nullable_int_types(self, any_numeric_dtype):\n # GH: 26468\n ser = Series([5, 6, 7, 8], dtype=any_numeric_dtype)\n ser[ser > 6] = Series(range(4), dtype=any_numeric_dtype)\n expected = Series([5, 6, 2, 3], dtype=any_numeric_dtype)\n tm.assert_series_equal(ser, expected)\n\n ser = Series([5, 6, 7, 8], dtype=any_numeric_dtype)\n ser.loc[ser > 6] = Series(range(4), dtype=any_numeric_dtype)\n tm.assert_series_equal(ser, expected)\n\n ser = Series([5, 6, 7, 8], dtype=any_numeric_dtype)\n loc_ser = Series(range(4), dtype=any_numeric_dtype)\n ser.loc[ser > 6] = loc_ser.loc[loc_ser > 1]\n tm.assert_series_equal(ser, expected)\n\n\nclass TestSetitemViewCopySemantics:\n def test_setitem_invalidates_datetime_index_freq(self):\n # GH#24096 altering a datetime64tz Series inplace invalidates the\n # `freq` attribute on the underlying DatetimeIndex\n\n dti = date_range(\"20130101\", periods=3, tz=\"US/Eastern\")\n ts = dti[1]\n ser = Series(dti)\n assert ser._values is not dti\n assert ser._values._data.base is not dti._data._data.base\n assert dti.freq == \"D\"\n ser.iloc[1] = NaT\n assert ser._values.freq is None\n\n # check that the DatetimeIndex was not altered in place\n assert ser._values is not dti\n assert ser._values._data.base is not dti._data._data.base\n assert dti[1] == ts\n assert dti.freq == \"D\"\n\n def test_dt64tz_setitem_does_not_mutate_dti(self):\n # GH#21907, GH#24096\n dti = date_range(\"2016-01-01\", periods=10, tz=\"US/Pacific\")\n ts = dti[0]\n ser = Series(dti)\n assert ser._values is not dti\n assert ser._values._data.base is not dti._data._data.base\n assert ser._mgr.blocks[0].values is not dti\n assert ser._mgr.blocks[0].values._data.base is not dti._data._data.base\n\n ser[::3] = NaT\n assert ser[0] is NaT\n assert dti[0] == ts\n\n\nclass TestSetitemCallable:\n def test_setitem_callable_key(self):\n # GH#12533\n ser = Series([1, 2, 3, 4], index=list(\"ABCD\"))\n ser[lambda x: \"A\"] = -1\n\n expected = Series([-1, 2, 3, 4], index=list(\"ABCD\"))\n tm.assert_series_equal(ser, expected)\n\n def test_setitem_callable_other(self):\n # GH#13299\n inc = lambda x: x + 1\n\n ser = Series([1, 2, -1, 4])\n ser[ser < 0] = inc\n\n expected = Series([1, 2, inc, 4])\n tm.assert_series_equal(ser, 
expected)\n\n\[email protected](\n \"obj,expected,key\",\n [\n (\n # these induce dtype changes\n Series([2, 3, 4, 5, 6, 7, 8, 9, 10]),\n Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan]),\n slice(None, None, 2),\n ),\n (\n # gets coerced to float, right?\n Series([True, True, False, False]),\n Series([np.nan, 1, np.nan, 0]),\n slice(None, None, 2),\n ),\n (\n # these induce dtype changes\n Series(np.arange(10)),\n Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7, 8, 9]),\n slice(None, 5),\n ),\n (\n # changes dtype GH#4463\n Series([1, 2, 3]),\n Series([np.nan, 2, 3]),\n 0,\n ),\n (\n # changes dtype GH#4463\n Series([False]),\n Series([np.nan]),\n 0,\n ),\n (\n # changes dtype GH#4463\n Series([False, True]),\n Series([np.nan, 1.0]),\n 0,\n ),\n ],\n)\nclass TestSetitemCastingEquivalents:\n \"\"\"\n Check each of several methods that _should_ be equivalent to `obj[key] = np.nan`\n\n We assume that\n - obj.index is the default Index(range(len(obj)))\n - the setitem does not expand the obj\n \"\"\"\n\n def test_int_key(self, obj, key, expected, indexer_sli):\n if not isinstance(key, int):\n return\n\n obj = obj.copy()\n indexer_sli(obj)[key] = np.nan\n tm.assert_series_equal(obj, expected)\n\n def test_slice_key(self, obj, key, expected, indexer_si):\n # Note: no .loc because that handles slice edges differently\n obj = obj.copy()\n indexer_si(obj)[key] = np.nan\n tm.assert_series_equal(obj, expected)\n\n def test_intlist_key(self, obj, key, expected, indexer_sli):\n ilkey = list(range(len(obj)))[key]\n\n obj = obj.copy()\n indexer_sli(obj)[ilkey] = np.nan\n tm.assert_series_equal(obj, expected)\n\n def test_mask_key(self, obj, key, expected, indexer_sli):\n # setitem with boolean mask\n mask = np.zeros(obj.shape, dtype=bool)\n mask[key] = True\n\n obj = obj.copy()\n indexer_sli(obj)[mask] = np.nan\n tm.assert_series_equal(obj, expected)\n\n def test_series_where(self, obj, key, expected):\n mask = np.zeros(obj.shape, dtype=bool)\n mask[key] = True\n\n obj = obj.copy()\n res = obj.where(~mask, np.nan)\n tm.assert_series_equal(res, expected)\n\n def test_index_where(self, obj, key, expected, request):\n mask = np.zeros(obj.shape, dtype=bool)\n mask[key] = True\n\n if obj.dtype == bool and not mask.all():\n # When mask is all True, casting behavior does not apply\n msg = \"Index/Series casting behavior inconsistent GH#38692\"\n mark = pytest.mark.xfail(reason=msg)\n request.node.add_marker(mark)\n\n res = Index(obj).where(~mask, np.nan)\n tm.assert_index_equal(res, Index(expected))\n\n @pytest.mark.xfail(reason=\"Index/Series casting behavior inconsistent GH#38692\")\n def test_index_putmask(self, obj, key, expected):\n mask = np.zeros(obj.shape, dtype=bool)\n mask[key] = True\n\n res = Index(obj).putmask(mask, np.nan)\n tm.assert_index_equal(res, Index(expected))\n\n\nclass TestSetitemWithExpansion:\n def test_setitem_empty_series(self):\n # GH#10193\n key = Timestamp(\"2012-01-01\")\n series = Series(dtype=object)\n series[key] = 47\n expected = Series(47, [key])\n tm.assert_series_equal(series, expected)\n\n def test_setitem_empty_series_datetimeindex_preserves_freq(self):\n # GH#33573 our index should retain its freq\n series = Series([], DatetimeIndex([], freq=\"D\"), dtype=object)\n key = Timestamp(\"2012-01-01\")\n series[key] = 47\n expected = Series(47, DatetimeIndex([key], freq=\"D\"))\n tm.assert_series_equal(series, expected)\n assert series.index.freq == expected.index.freq\n\n\ndef test_setitem_scalar_into_readonly_backing_data():\n # GH#14359: test that you 
cannot mutate a read only buffer\n\n array = np.zeros(5)\n array.flags.writeable = False # make the array immutable\n series = Series(array)\n\n for n in range(len(series)):\n msg = \"assignment destination is read-only\"\n with pytest.raises(ValueError, match=msg):\n series[n] = 1\n\n assert array[n] == 0\n\n\ndef test_setitem_slice_into_readonly_backing_data():\n # GH#14359: test that you cannot mutate a read only buffer\n\n array = np.zeros(5)\n array.flags.writeable = False # make the array immutable\n series = Series(array)\n\n msg = \"assignment destination is read-only\"\n with pytest.raises(ValueError, match=msg):\n series[1:3] = 1\n\n assert not array.any()\n" ]
[ [ "pandas.Series", "pandas.period_range", "numpy.arange", "pandas.MultiIndex.from_tuples", "pandas._testing.rands_array", "pandas.Index", "pandas.DatetimeIndex", "pandas.date_range", "pandas._testing.assert_series_equal", "pandas.Timestamp", "numpy.zeros", "pandas.tseries.offsets.BDay" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "0.19", "0.24", "0.20", "1.0", "0.25" ], "scipy": [], "tensorflow": [] } ]
wtyuan96/Real-time-self-adaptive-deep-stereo
[ "e630bc610c134d348c8c15e660533b2f464bba5f" ]
[ "Nets/Stereo_net.py" ]
[ "import tensorflow as tf\nimport abc\nfrom collections import OrderedDict\n\n\nclass StereoNet(object):\n __metaclass__ = abc.ABCMeta\n \"\"\"\n Meta parent class for all the convnets\n \"\"\"\n #=======================Static Class Fields=============\n _valid_args = [\n (\"split_layer\", \"name of the layer where the network will be splitted\"),\n (\"sequence\", \"flag to use network on a video sequence instead of on single images\"),\n (\"train_portion\", \"one among 'BEGIN' or 'END' specify which portion of the network will be trained, respectivelly before and after split\"),\n (\"is_training\", \"boolean or placeholder to specify if the network is in train or inference mode\")\n ]\n _netName=\"stereoNet\"\n #=====================Static Class Methods==============\n\n @classmethod\n def getPossibleArsg(cls):\n return cls._valid_args\n\n #==================PRIVATE METHODS======================\n def __init__(self, **kwargs):\n self._layers = OrderedDict()\n self._disparities = []\n self._placeholders = []\n self._placeholderable = []\n self._trainable_variables = OrderedDict() \n self._layer_to_var = {}\n self._after_split = False\n print('=' * 50)\n print('Starting Creation of {}'.format(self._netName))\n print('=' * 50)\n\n args = self._validate_args(kwargs)\n print('Args Validated, setting up graph')\n\n self._preprocess_inputs(args)\n print('Meta op to preprocess data created')\n\n self._build_network(args)\n print('Network ready')\n print('=' * 50)\n\n def _get_placeholder_name(self, name):\n \"\"\"\n convert a layer name to its placehodler version and return it\n \"\"\"\n return name + '_placeholder'\n\n def _add_to_layers(self, name, op):\n \"\"\"\n Add the layer to the network ones and check if name is among the layer where the network should split, if so create a placeholder and return it, otherways return the real layer. 
Add teh variables to the trainable colelction \n Args:\n name: name of the layer that need to be addded to the network collection\n op: tensorflow op \n \"\"\"\n self._layers[name] = op\n\n # extract variables\n scope = '/'.join(op.name.split('/')[0:-1])\n variables = tf.get_collection(\n tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope) \n self._layer_to_var[name] = variables\n\n if not self._after_split:\n # append layer name among those that can be turned into placeholder\n self._placeholderable.append(name)\n if self._after_split != self._train_beginning: # XOR\n # add variables in scope to the one that needs to be trained\n for v in variables:\n self._trainable_variables[v] = True\n\n if name in self._split_layers_list:\n # enable flag to mark the first layer after split\n self._after_split = True\n\n def _get_layer_as_input(self, name):\n # Check if a placeholder for this layer already exist\n if self._get_placeholder_name(name) in self._layers:\n return self._layers[self._get_placeholder_name(name)]\n # check if layer need to be transformed into a placeholder\n elif self._after_split and (not self._sequence) and name in self._placeholderable:\n real_op = self._layers[name]\n placeholder_op = tf.placeholder(\n tf.float32, shape=real_op.get_shape())\n self._layers[self._get_placeholder_name(name)] = placeholder_op\n self._placeholders.append((real_op, placeholder_op))\n return self._layers[self._get_placeholder_name(name)]\n # check if real layer exist\n elif name in self._layers:\n return self._layers[name]\n else:\n raise Exception('Trying to fetch an unknown layer!')\n\n def __str__(self):\n \"\"\"to string method\"\"\"\n ss = \"\"\n for k, l in self._layers.items():\n if l in self._disparities:\n ss += \"Prediction Layer {}: {}\\n\".format(k, str(l.shape))\n else:\n ss += \"Layer {}: {}\\n\".format(k, str(l.shape))\n return ss\n\n def __repr__(self):\n \"\"\"to string method\"\"\"\n return self.__str__()\n\n def __getitem__(self, key):\n \"\"\"\n Returns a layer by name\n \"\"\"\n return self._layers[key]\n\n #========================ABSTRACT METHODs============================\n @abc.abstractmethod\n def _preprocess_inputs(self, args):\n \"\"\"\n Abstract method to create metaop that preprocess data before feeding them in the network\n \"\"\"\n pass\n\n @abc.abstractmethod\n def _build_network(self, args):\n \"\"\"\n Should build the elaboration graph\n \"\"\"\n pass\n\n @abc.abstractmethod\n def _validate_args(self, args):\n \"\"\"\n Should validate the argument and add default values\n \"\"\"\n portion_options = ['BEGIN', 'END']\n # Check common args\n if 'split_layers' not in args:\n print(\n 'WARNING: no split points selected, the network will flow without interruption')\n args['split_layers'] = [None]\n if 'train_portion' not in args:\n print('WARNING: train_portion not specified, using default END')\n args['train_portion'] = 'END' if args['split_layers'] != [\n None] else 'BEGIN'\n elif args['train_portion'] not in portion_options:\n raise Exception('Invalid portion options {}'.format(\n args['train_portion']))\n if 'sequence' not in args:\n print('WARNING: sequence flag not setted, configuring the network for single image adaptation')\n args['sequence'] = False\n if 'is_training' not in args:\n print('WARNING: flag for trainign not setted, using default False')\n args['is_training']=False\n\n # save args value\n self._split_layers_list = args['split_layers']\n self._train_beginning = (args['train_portion'] == 'BEGIN')\n self._sequence = args['sequence']\n 
self._isTraining=False\n\n #==============================PUBLIC METHODS==================================\n def get_placeholders(self):\n \"\"\"\n Get all the placeholder defined internally in the network\n Returns:\n list of couples of layers that became placeholder, each couple is (real_layer,placeholder)\n \"\"\"\n return self._placeholders\n\n def get_placeholder(self, name):\n \"\"\"\n Return the placeholder corresponding to the layer named name\n Args:\n name of the layer where there should be a placeholder\n Returns:\n placeholder for the layer\n \"\"\"\n placeholder_name = self._get_placeholder_name(name)\n if placeholder_name not in self._layers:\n raise Exception(\n 'Unable to find placeholder for layer {}'.format(placeholder_name))\n else:\n return self._layers[placeholder_name]\n\n def get_all_layers(self):\n \"\"\"\n Returns all network layers\n \"\"\"\n return self._layers\n \n def get_layers_names(self):\n \"\"\"\n Returns all layers name\n \"\"\"\n return self._layers.keys()\n\n def get_disparities(self):\n \"\"\"\n Return all the disparity predicted with increasing resolution\n \"\"\"\n return self._disparities\n\n def get_trainable_variables(self):\n \"\"\"\n Returns the list of trainable variables\n \"\"\"\n return list(self._trainable_variables.keys())\n\n def get_variables(self, layer_name):\n \"\"\"\n Returns the colelction of variables associated to layer_name\n Args:\n layer_name: name of the layer for which we want to access variables\n \"\"\"\n if layer_name in self._layers and layer_name not in self._layer_to_var:\n return []\n else:\n return self._layer_to_var[layer_name]\n" ]
[ [ "tensorflow.get_collection" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
annusgit/forestcoverUnet
[ "8ba4eafc6e5d637d3b08fa20d029e25173f96074", "8ba4eafc6e5d637d3b08fa20d029e25173f96074" ]
[ "Statistical_Classifiers/inference_statistical_models.py", "utilities/utils/random_code.py" ]
[ "\"\"\"\n Given the path to a single test image, this function generates its corresponding segmentation map\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import division\nimport os\nimport gdal\nimport time\nimport torch\nimport shutil\nimport random\nimport argparse\nimport numpy as np\nimport pickle as cPickle\nfrom torch.utils.data import Dataset, DataLoader\nimport matplotlib.image as matimg\nnp.random.seed(int(time.time()))\nrandom.seed(int(time.time()))\n\nrasterized_shapefiles_path = \"/home/azulfiqar_bee15seecs/District_Shapefiles_as_Clipping_bands/\"\nFOREST_LABEL, NON_FOREST_LABEL, NULL_LABEL = 2, 1, 0\n\n\ndef mask_landsat8_image_using_rasterized_shapefile(district, this_landsat8_bands_list):\n this_shapefile_path = os.path.join(rasterized_shapefiles_path, \"{}_shapefile.tif\".format(district))\n ds = gdal.Open(this_shapefile_path)\n assert ds.RasterCount == 1\n shapefile_mask = np.array(ds.GetRasterBand(1).ReadAsArray(), dtype=np.uint8)\n clipped_full_spectrum = list()\n for idx, this_band in enumerate(this_landsat8_bands_list):\n print(\"{}: Band-{} Size: {}\".format(district, idx, this_band.shape))\n clipped_full_spectrum.append(np.multiply(this_band, shapefile_mask))\n x_prev, y_prev = clipped_full_spectrum[0].shape\n x_fixed, y_fixed = int(128 * np.ceil(x_prev / 128)), int(128 * np.ceil(y_prev / 128))\n diff_x, diff_y = x_fixed - x_prev, y_fixed - y_prev\n diff_x_before, diff_y_before = diff_x//2, diff_y//2\n clipped_full_spectrum_resized = [np.pad(x, [(diff_x_before, diff_x-diff_x_before), (diff_y_before, diff_y-diff_y_before)], mode='constant')\n for x in clipped_full_spectrum]\n clipped_full_spectrum_stacked_image = np.dstack(clipped_full_spectrum_resized)\n print(\"{}: Generated Image Size: {}\".format(district, clipped_full_spectrum_stacked_image.shape))\n return clipped_full_spectrum_stacked_image\n\n\ndef toTensor(**kwargs):\n image = kwargs['image']\n 'will convert image and label from numpy to torch tensor'\n # swap color axis because\n # numpy image: H x W x C\n # torch image: C X H X W\n # image = image.transpose((2, 0, 1))\n return torch.from_numpy(np.nan_to_num(image)).float()\n\n\ndef get_inference_loader(district, image_path, model_input_size=128, num_classes=3, one_hot=False, batch_size=64, num_workers=4):\n\n # This function is faster because we have already saved our data as subset pickle files\n print('inside dataloading code...')\n class dataset(Dataset):\n def __init__(self, image_path, bands, stride=model_input_size, transformation=None):\n super(dataset, self).__init__()\n self.model_input_size = model_input_size\n self.image_path = image_path\n self.all_images = []\n self.total_images = 0\n self.stride = stride\n self.one_hot = one_hot\n self.num_classes = num_classes\n self.transformation = transformation\n self.temp_dir = 'temp_numpy_saves'\n if os.path.exists(self.temp_dir):\n shutil.rmtree(self.temp_dir)\n os.mkdir(self.temp_dir)\n print('LOG: Generating data map now...')\n image_ds = gdal.Open(image_path, gdal.GA_ReadOnly)\n all_raster_bands = [image_ds.GetRasterBand(x).ReadAsArray() for x in bands]\n # mask the image and adjust its size at this point\n test_image = mask_landsat8_image_using_rasterized_shapefile(district=district, this_landsat8_bands_list=all_raster_bands)\n temp_image_path = os.path.join(self.temp_dir, 'temp_image.npy')\n np.save(temp_image_path, test_image)\n self.temp_test_image = np.load(temp_image_path, mmap_mode='r')\n row_limit = self.temp_test_image.shape[0] - model_input_size\n col_limit = 
self.temp_test_image.shape[1] - model_input_size\n test_image, image_ds, all_raster_bands = [None] * 3 # release memory\n for i in range(0, row_limit+1, self.stride):\n for j in range(0, col_limit+1, self.stride):\n self.all_images.append((i, j))\n self.total_images += 1\n self.shape = [i+self.stride, j+self.stride]\n pass\n\n def __getitem__(self, k):\n (this_row, this_col) = self.all_images[k]\n this_example_subset = self.temp_test_image[this_row:this_row + self.model_input_size, this_col:this_col + self.model_input_size, :]\n this_example_subset = toTensor(image=this_example_subset)\n return {'coordinates': np.asarray([this_row, this_row + self.model_input_size, this_col, this_col + self.model_input_size]),\n 'input': this_example_subset}\n\n def __len__(self):\n return self.total_images\n\n def get_image_size(self):\n return self.shape\n\n def clear_mem(self):\n shutil.rmtree(self.temp_dir)\n print('Log: Temporary memory cleared')\n\n ######################################################################################\n transformation = None\n ######################################################################################\n # create dataset class instances\n inference_data = dataset(image_path=image_path, bands=[x for x in range(1, 12)], transformation=transformation)\n print('LOG: inference_data ->', len(inference_data))\n inference_loader = DataLoader(dataset=inference_data, batch_size=batch_size, shuffle=False, num_workers=num_workers)\n return inference_loader\n\n\[email protected]_grad()\ndef run_inference(args):\n trained_classifier = None\n with open(args.model_path, 'rb') as model_file:\n trained_classifier = cPickle.load(model_file)\n print('Log: Loaded pretrained from {}'.format(args.model_path))\n all_districts = [\"abbottabad\", \"battagram\", \"buner\", \"chitral\", \"hangu\", \"haripur\", \"karak\", \"kohat\", \"kohistan\", \"lower_dir\", \"malakand\", \"mansehra\",\n \"nowshehra\", \"shangla\", \"swat\", \"tor_ghar\", \"upper_dir\"]\n years = [2014, 2016, 2017, 2018, 2019, 2020]\n # change this to do this for all the images in that directory\n for district in all_districts:\n for year in years:\n print(\"(LOG): On District: {} @ Year: {}\".format(district, year))\n test_image_path = os.path.join(args.dir_path, 'landsat8_4326_30_{}_region_{}.tif'.format(year, district))\n inference_loader = get_inference_loader(district=district, image_path=test_image_path, model_input_size=128, num_classes=3, one_hot=True,\n batch_size=args.bs, num_workers=4)\n # we need to fill our new generated test image\n generated_map = np.empty(shape=inference_loader.dataset.get_image_size())\n for idx, data in enumerate(inference_loader):\n coordinates, test_x = data['coordinates'].tolist(), data['input']\n # print(test_x.shape)\n pred_numpy = trained_classifier.predict(test_x.reshape(-1, 11))\n # print(pred_numpy.shape)\n pred_numpy = pred_numpy.reshape((-1, 128, 128))\n # print(pred_numpy.shape)\n if idx % 5 == 0:\n print('LOG: on {} of {}'.format(idx, len(inference_loader)))\n for k in range(test_x.shape[0]):\n x, x_, y, y_ = coordinates[k]\n generated_map[x:x_, y:y_] = pred_numpy[k,:,:]\n # save generated map as png image, not numpy array\n forest_map_rband = np.zeros_like(generated_map)\n forest_map_gband = np.zeros_like(generated_map)\n forest_map_bband = np.zeros_like(generated_map)\n forest_map_rband[generated_map == NON_FOREST_LABEL] = 255\n forest_map_gband[generated_map == FOREST_LABEL] = 255\n forest_map_for_visualization = np.dstack([forest_map_rband, forest_map_gband, 
forest_map_bband]).astype(np.uint8)\n save_this_map_path = os.path.join(args.dest, '{}_{}.png'.format(district, year))\n matimg.imsave(save_this_map_path, forest_map_for_visualization)\n print('Saved: {} @ {}'.format(save_this_map_path, forest_map_for_visualization.shape))\n # save_path = os.path.join(args.dest, 'generated_map_{}_{}.npy'.format(district, year))\n # np.save(save_path, generated_map)\n #########################################################################################3\n inference_loader.dataset.clear_mem()\n pass\n pass\n pass\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--m', '--model', dest='model_path', type=str)\n parser.add_argument('--d', dest='dir_path', type=str)\n parser.add_argument('--s', dest='dest', type=str)\n parser.add_argument('--b', dest='bs', type=int)\n parser.add_argument('--cuda', dest='cuda', type=int)\n parser.add_argument('--device', dest='device', type=int)\n args = parser.parse_args()\n run_inference(args)\n\n\nif __name__ == '__main__':\n main()\n", "\n\n\"\"\"\n Get the data in npy files in ~/Desktop/rit18_data and show as png files\n\"\"\"\n\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport os\nimport cv2\nimport numpy as np\nfrom PIL import Image\nfrom scipy.misc import imsave\nimport matplotlib.pyplot as plt\n\n\ndef resize(arr):\n return np.resize(arr, new_shape=(800, 800)).astype(np.uint16)\n\ndef rescale(arr):\n return (255 / 65536 * arr).astype(np.uint16)\n\ndef expand(arr):\n return np.expand_dims(arr, axis=2)\n\n\ndef process(np_arr, name):\n print('================================================')\n print('shape of array ==> ', np_arr.shape)\n c, w,h = np_arr.shape\n arr = np_arr.reshape((w, h, c))\n print('reshaped array ==> ', arr.shape)\n r, g, b = arr[:, :, 4], arr[:, :, 5], arr[:, :, 6]\n # r, g, b = map(rescale, [r,g,b])\n r, g, b = map(resize, [r,g,b])\n r, g, b = map(expand, [r,g,b])\n r, g, b = map(cv2.equalizeHist, [r, g, b])\n # print(map(np.shape, (r,g,b)))\n rgb = np.asarray(cv2.merge((r,g,b)))\n print('rgb shape ==> ', rgb.shape, rgb.dtype)\n cv2.imwrite(name, rgb)\n print('{} saved!'.format(name))\n plt.imshow(rgb)\n plt.axis('off')\n plt.show()\n print(np.mean(np.mean(rgb)), np.mean(np.mean(rgb)))\n print('================================================')\n pass\n\n\ndef read_():\n # set new wd\n os.chdir('/home/annus/Desktop/rit18_data/')\n # read the data files\n # process(np.load('train_data.npy'))\n process(np.load('val_data.npy'), name='train_rgb.png')\n # process(np.load('test_val.npy'))\n pass\n\n\ndef main():\n read_()\n pass\n\n\nif __name__ == '__main__':\n main()\n\n\n\n" ]
[ [ "numpy.pad", "numpy.multiply", "matplotlib.image.imsave", "numpy.asarray", "torch.utils.data.DataLoader", "numpy.nan_to_num", "numpy.dstack", "numpy.save", "numpy.ceil", "torch.no_grad", "numpy.zeros_like", "numpy.load" ], [ "matplotlib.pyplot.imshow", "numpy.expand_dims", "numpy.resize", "numpy.mean", "matplotlib.pyplot.axis", "numpy.load", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SpioradObrach/python-control
[ "a4b4c43e51f0fc2cbf389336a90230a6a741c0dc", "a4b4c43e51f0fc2cbf389336a90230a6a741c0dc" ]
[ "control/tests/modelsimp_test.py", "control/freqplot.py" ]
[ "#!/usr/bin/env python\n#\n# modelsimp_test.py - test model reduction functions\n# RMM, 30 Mar 2011 (based on TestModelSimp from v0.4a)\n\nimport unittest\nimport numpy as np\nfrom control.modelsimp import *\nfrom control.matlab import *\nfrom control.exception import slycot_check\n\nclass TestModelsimp(unittest.TestCase):\n @unittest.skipIf(not slycot_check(), \"slycot not installed\")\n def testHSVD(self):\n A = np.matrix(\"1. -2.; 3. -4.\")\n B = np.matrix(\"5.; 7.\")\n C = np.matrix(\"6. 8.\")\n D = np.matrix(\"9.\")\n sys = ss(A,B,C,D)\n hsv = hsvd(sys)\n hsvtrue = [24.42686, 0.5731395] # from MATLAB\n np.testing.assert_array_almost_equal(hsv, hsvtrue)\n\n def testMarkov(self):\n U = np.matrix(\"1.; 1.; 1.; 1.; 1.\")\n Y = U\n M = 3\n H = markov(Y,U,M)\n Htrue = np.matrix(\"1.; 0.; 0.\")\n np.testing.assert_array_almost_equal( H, Htrue )\n\n def testModredMatchDC(self):\n #balanced realization computed in matlab for the transfer function:\n # num = [1 11 45 32], den = [1 15 60 200 60]\n A = np.matrix('-1.958, -1.194, 1.824, -1.464; \\\n -1.194, -0.8344, 2.563, -1.351; \\\n -1.824, -2.563, -1.124, 2.704; \\\n -1.464, -1.351, -2.704, -11.08')\n B = np.matrix('-0.9057; -0.4068; -0.3263; -0.3474')\n C = np.matrix('-0.9057, -0.4068, 0.3263, -0.3474')\n D = np.matrix('0.')\n sys = ss(A,B,C,D)\n rsys = modred(sys,[2, 3],'matchdc')\n Artrue = np.matrix('-4.431, -4.552; -4.552, -5.361')\n Brtrue = np.matrix('-1.362; -1.031')\n Crtrue = np.matrix('-1.362, -1.031')\n Drtrue = np.matrix('-0.08384')\n np.testing.assert_array_almost_equal(rsys.A, Artrue,decimal=3)\n np.testing.assert_array_almost_equal(rsys.B, Brtrue,decimal=3)\n np.testing.assert_array_almost_equal(rsys.C, Crtrue,decimal=3)\n np.testing.assert_array_almost_equal(rsys.D, Drtrue,decimal=2)\n\n def testModredUnstable(self):\n # Check if an error is thrown when an unstable system is given\n A = np.matrix('4.5418, 3.3999, 5.0342, 4.3808; \\\n 0.3890, 0.3599, 0.4195, 0.1760; \\\n -4.2117, -3.2395, -4.6760, -4.2180; \\\n 0.0052, 0.0429, 0.0155, 0.2743')\n B = np.matrix('1.0, 1.0; 2.0, 2.0; 3.0, 3.0; 4.0, 4.0')\n C = np.matrix('1.0, 2.0, 3.0, 4.0; 1.0, 2.0, 3.0, 4.0')\n D = np.matrix('0.0, 0.0; 0.0, 0.0')\n sys = ss(A,B,C,D)\n np.testing.assert_raises(ValueError, modred, sys, [2, 3])\n\n def testModredTruncate(self):\n #balanced realization computed in matlab for the transfer function:\n # num = [1 11 45 32], den = [1 15 60 200 60]\n A = np.matrix('-1.958, -1.194, 1.824, -1.464; \\\n -1.194, -0.8344, 2.563, -1.351; \\\n -1.824, -2.563, -1.124, 2.704; \\\n -1.464, -1.351, -2.704, -11.08')\n B = np.matrix('-0.9057; -0.4068; -0.3263; -0.3474')\n C = np.matrix('-0.9057, -0.4068, 0.3263, -0.3474')\n D = np.matrix('0.')\n sys = ss(A,B,C,D)\n rsys = modred(sys,[2, 3],'truncate')\n Artrue = np.matrix('-1.958, -1.194; -1.194, -0.8344')\n Brtrue = np.matrix('-0.9057; -0.4068')\n Crtrue = np.matrix('-0.9057, -0.4068')\n Drtrue = np.matrix('0.')\n np.testing.assert_array_almost_equal(rsys.A, Artrue)\n np.testing.assert_array_almost_equal(rsys.B, Brtrue)\n np.testing.assert_array_almost_equal(rsys.C, Crtrue)\n np.testing.assert_array_almost_equal(rsys.D, Drtrue)\n\n\n @unittest.skipIf(not slycot_check(), \"slycot not installed\")\n def testBalredTruncate(self):\n #controlable canonical realization computed in matlab for the transfer function:\n # num = [1 11 45 32], den = [1 15 60 200 60]\n A = np.matrix('-15., -7.5, -6.25, -1.875; \\\n 8., 0., 0., 0.; \\\n 0., 4., 0., 0.; \\\n 0., 0., 1., 0.')\n B = np.matrix('2.; 0.; 0.; 0.')\n C = np.matrix('0.5, 
0.6875, 0.7031, 0.5')\n D = np.matrix('0.')\n sys = ss(A,B,C,D)\n orders = 2\n rsys = balred(sys,orders,method='truncate')\n Artrue = np.matrix('-1.958, -1.194; -1.194, -0.8344')\n Brtrue = np.matrix('0.9057; 0.4068')\n Crtrue = np.matrix('0.9057, 0.4068')\n Drtrue = np.matrix('0.')\n np.testing.assert_array_almost_equal(rsys.A, Artrue,decimal=2)\n np.testing.assert_array_almost_equal(rsys.B, Brtrue,decimal=4)\n np.testing.assert_array_almost_equal(rsys.C, Crtrue,decimal=4)\n np.testing.assert_array_almost_equal(rsys.D, Drtrue,decimal=4)\n\n @unittest.skipIf(not slycot_check(), \"slycot not installed\")\n def testBalredMatchDC(self):\n #controlable canonical realization computed in matlab for the transfer function:\n # num = [1 11 45 32], den = [1 15 60 200 60]\n A = np.matrix('-15., -7.5, -6.25, -1.875; \\\n 8., 0., 0., 0.; \\\n 0., 4., 0., 0.; \\\n 0., 0., 1., 0.')\n B = np.matrix('2.; 0.; 0.; 0.')\n C = np.matrix('0.5, 0.6875, 0.7031, 0.5')\n D = np.matrix('0.')\n sys = ss(A,B,C,D)\n orders = 2\n rsys = balred(sys,orders,method='matchdc')\n Artrue = np.matrix('-4.43094773, -4.55232904; -4.55232904, -5.36195206')\n Brtrue = np.matrix('1.36235673; 1.03114388')\n Crtrue = np.matrix('1.36235673, 1.03114388')\n Drtrue = np.matrix('-0.08383902')\n np.testing.assert_array_almost_equal(rsys.A, Artrue,decimal=2)\n np.testing.assert_array_almost_equal(rsys.B, Brtrue,decimal=4)\n np.testing.assert_array_almost_equal(rsys.C, Crtrue,decimal=4)\n np.testing.assert_array_almost_equal(rsys.D, Drtrue,decimal=4)\n\ndef suite():\n return unittest.TestLoader().loadTestsFromTestCase(TestModelsimp)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "# freqplot.py - frequency domain plots for control systems\n#\n# Author: Richard M. Murray\n# Date: 24 May 09\n#\n# This file contains some standard control system plots: Bode plots,\n# Nyquist plots and pole-zero diagrams. The code for Nichols charts\n# is in nichols.py.\n#\n# Copyright (c) 2010 by California Institute of Technology\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the California Institute of Technology nor\n# the names of its contributors may be used to endorse or promote\n# products derived from this software without specific prior\n# written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL CALTECH\n# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF\n# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT\n# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n# SUCH DAMAGE.\n#\n# $Id$\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport scipy as sp\nimport numpy as np\nimport math\nfrom .ctrlutil import unwrap\nfrom .bdalg import feedback\nfrom .margins import stability_margins\nfrom . import config\n\n__all__ = ['bode_plot', 'nyquist_plot', 'gangof4_plot',\n 'bode', 'nyquist', 'gangof4']\n\n# Default values for module parameter variables\n_freqplot_defaults = {\n 'freqplot.feature_periphery_decades': 1,\n 'freqplot.number_of_samples': None,\n}\n\n#\n# Main plotting functions\n#\n# This section of the code contains the functions for generating\n# frequency domain plots\n#\n\n#\n# Bode plot\n#\n\n# Default values for Bode plot configuration variables\n_bode_defaults = {\n 'bode.dB': False, # Plot gain in dB\n 'bode.deg': True, # Plot phase in degrees\n 'bode.Hz': False, # Plot frequency in Hertz\n 'bode.grid': True, # Turn on grid for gain and phase\n}\n\n\ndef bode_plot(syslist, omega=None,\n Plot=True, omega_limits=None, omega_num=None,\n margins=None, *args, **kwargs):\n \"\"\"Bode plot for a system\n\n Plots a Bode plot for the system over a (optional) frequency range.\n\n Parameters\n ----------\n syslist : linsys\n List of linear input/output systems (single system is OK)\n omega : list\n List of frequencies in rad/sec to be used for frequency response\n dB : bool\n If True, plot result in dB. Default is false.\n Hz : bool\n If True, plot frequency in Hz (omega must be provided in rad/sec).\n Default value (False) set by config.defaults['bode.Hz']\n deg : bool\n If True, plot phase in degrees (else radians). Default value (True)\n config.defaults['bode.deg']\n Plot : bool\n If True, plot magnitude and phase\n omega_limits: tuple, list, ... of two values\n Limits of the to generate frequency vector.\n If Hz=True the limits are in Hz otherwise in rad/s.\n omega_num: int\n Number of samples to plot. Defaults to\n config.defaults['freqplot.number_of_samples'].\n margins : bool\n If True, plot gain and phase margin.\n \\*args, \\**kwargs:\n Additional options to matplotlib (color, linestyle, etc)\n\n Returns\n -------\n mag : array (list if len(syslist) > 1)\n magnitude\n phase : array (list if len(syslist) > 1)\n phase in radians\n omega : array (list if len(syslist) > 1)\n frequency in rad/sec\n\n Other Parameters\n ----------------\n grid : bool\n If True, plot grid lines on gain and phase plots. Default is set by\n config.defaults['bode.grid'].\n\n The default values for Bode plot configuration parameters can be reset\n using the `config.defaults` dictionary, with module name 'bode'.\n\n Notes\n -----\n 1. Alternatively, you may use the lower-level method (mag, phase, freq)\n = sys.freqresp(freq) to generate the frequency response for a system,\n but it returns a MIMO response.\n\n 2. If a discrete time model is given, the frequency response is plotted\n along the upper branch of the unit circle, using the mapping z = exp(j\n \\omega dt) where omega ranges from 0 to pi/dt and dt is the discrete\n timebase. 
If no timebase is specified (dt = True), dt is set to 1.\n\n Examples\n --------\n >>> sys = ss(\"1. -2; 3. -4\", \"5.; 7\", \"6. 8\", \"9.\")\n >>> mag, phase, omega = bode(sys)\n\n \"\"\"\n # Make a copy of the kwargs dictionary since we will modify it\n kwargs = dict(kwargs)\n\n # Get values for params (and pop from list to allow keyword use in plot)\n dB = config._get_param('bode', 'dB', kwargs, _bode_defaults, pop=True)\n deg = config._get_param('bode', 'deg', kwargs, _bode_defaults, pop=True)\n Hz = config._get_param('bode', 'Hz', kwargs, _bode_defaults, pop=True)\n grid = config._get_param('bode', 'grid', kwargs, _bode_defaults, pop=True)\n Plot = config._get_param('bode', 'grid', Plot, True)\n margins = config._get_param('bode', 'margins', margins, False)\n\n # If argument was a singleton, turn it into a list\n if not getattr(syslist, '__iter__', False):\n syslist = (syslist,)\n\n if omega is None:\n if omega_limits is None:\n # Select a default range if none is provided\n omega = default_frequency_range(syslist, Hz=Hz,\n number_of_samples=omega_num)\n else:\n omega_limits = np.array(omega_limits)\n if Hz:\n omega_limits *= 2. * math.pi\n if omega_num:\n omega = sp.logspace(np.log10(omega_limits[0]),\n np.log10(omega_limits[1]),\n num=omega_num,\n endpoint=True)\n else:\n omega = sp.logspace(np.log10(omega_limits[0]),\n np.log10(omega_limits[1]),\n endpoint=True)\n\n mags, phases, omegas, nyquistfrqs = [], [], [], []\n for sys in syslist:\n if sys.inputs > 1 or sys.outputs > 1:\n # TODO: Add MIMO bode plots.\n raise NotImplementedError(\n \"Bode is currently only implemented for SISO systems.\")\n else:\n omega_sys = np.array(omega)\n if sys.isdtime(True):\n nyquistfrq = 2. * math.pi * 1. / sys.dt / 2.\n omega_sys = omega_sys[omega_sys < nyquistfrq]\n # TODO: What distance to the Nyquist frequency is appropriate?\n else:\n nyquistfrq = None\n # Get the magnitude and phase of the system\n mag_tmp, phase_tmp, omega_sys = sys.freqresp(omega_sys)\n mag = np.atleast_1d(np.squeeze(mag_tmp))\n phase = np.atleast_1d(np.squeeze(phase_tmp))\n phase = unwrap(phase)\n\n mags.append(mag)\n phases.append(phase)\n omegas.append(omega_sys)\n nyquistfrqs.append(nyquistfrq)\n # Get the dimensions of the current axis, which we will divide up\n # TODO: Not currently implemented; just use subplot for now\n\n if Plot:\n nyquistfrq_plot = None\n if Hz:\n omega_plot = omega_sys / (2. * math.pi)\n if nyquistfrq:\n nyquistfrq_plot = nyquistfrq / (2. * math.pi)\n else:\n omega_plot = omega_sys\n if nyquistfrq:\n nyquistfrq_plot = nyquistfrq\n\n # Set up the axes with labels so that multiple calls to\n # bode_plot will superimpose the data. 
This was implicit\n # before matplotlib 2.1, but changed after that (See\n # https://github.com/matplotlib/matplotlib/issues/9024).\n # The code below should work on all cases.\n\n # Get the current figure\n\n if 'sisotool' in kwargs:\n fig = kwargs['fig']\n ax_mag = fig.axes[0]\n ax_phase = fig.axes[2]\n sisotool = kwargs['sisotool']\n del kwargs['fig']\n del kwargs['sisotool']\n else:\n fig = plt.gcf()\n ax_mag = None\n ax_phase = None\n sisotool = False\n\n # Get the current axes if they already exist\n for ax in fig.axes:\n if ax.get_label() == 'control-bode-magnitude':\n ax_mag = ax\n elif ax.get_label() == 'control-bode-phase':\n ax_phase = ax\n\n # If no axes present, create them from scratch\n if ax_mag is None or ax_phase is None:\n plt.clf()\n ax_mag = plt.subplot(211,\n label='control-bode-magnitude')\n ax_phase = plt.subplot(212,\n label='control-bode-phase',\n sharex=ax_mag)\n\n # Magnitude plot\n if dB:\n pltline = ax_mag.semilogx(omega_plot, 20 * np.log10(mag),\n *args, **kwargs)\n else:\n pltline = ax_mag.loglog(omega_plot, mag, *args, **kwargs)\n\n if nyquistfrq_plot:\n ax_mag.axvline(nyquistfrq_plot,\n color=pltline[0].get_color())\n\n # Add a grid to the plot + labeling\n ax_mag.grid(grid and not margins, which='both')\n ax_mag.set_ylabel(\"Magnitude (dB)\" if dB else \"Magnitude\")\n\n # Phase plot\n if deg:\n phase_plot = phase * 180. / math.pi\n else:\n phase_plot = phase\n ax_phase.semilogx(omega_plot, phase_plot, *args, **kwargs)\n\n # Show the phase and gain margins in the plot\n if margins:\n margin = stability_margins(sys)\n gm, pm, Wcg, Wcp = \\\n margin[0], margin[1], margin[3], margin[4]\n # TODO: add some documentation describing why this is here\n phase_at_cp = phases[0][(np.abs(omegas[0] - Wcp)).argmin()]\n if phase_at_cp >= 0.:\n phase_limit = 180.\n else:\n phase_limit = -180.\n\n if Hz:\n Wcg, Wcp = Wcg/(2*math.pi), Wcp/(2*math.pi)\n\n ax_mag.axhline(y=0 if dB else 1, color='k', linestyle=':',\n zorder=-20)\n ax_phase.axhline(y=phase_limit if deg else\n math.radians(phase_limit),\n color='k', linestyle=':', zorder=-20)\n mag_ylim = ax_mag.get_ylim()\n phase_ylim = ax_phase.get_ylim()\n\n if pm != float('inf') and Wcp != float('nan'):\n if dB:\n ax_mag.semilogx(\n [Wcp, Wcp], [0., -1e5],\n color='k', linestyle=':', zorder=-20)\n else:\n ax_mag.loglog(\n [Wcp, Wcp], [1., 1e-8],\n color='k', linestyle=':', zorder=-20)\n\n if deg:\n ax_phase.semilogx(\n [Wcp, Wcp], [1e5, phase_limit+pm],\n color='k', linestyle=':', zorder=-20)\n ax_phase.semilogx(\n [Wcp, Wcp], [phase_limit + pm, phase_limit],\n color='k', zorder=-20)\n else:\n ax_phase.semilogx(\n [Wcp, Wcp], [1e5, math.radians(phase_limit) +\n math.radians(pm)],\n color='k', linestyle=':', zorder=-20)\n ax_phase.semilogx(\n [Wcp, Wcp], [math.radians(phase_limit) +\n math.radians(pm),\n math.radians(phase_limit)],\n color='k', zorder=-20)\n\n if gm != float('inf') and Wcg != float('nan'):\n if dB:\n ax_mag.semilogx(\n [Wcg, Wcg], [-20.*np.log10(gm), -1e5],\n color='k', linestyle=':', zorder=-20)\n ax_mag.semilogx(\n [Wcg, Wcg], [0, -20*np.log10(gm)],\n color='k', zorder=-20)\n else:\n ax_mag.loglog(\n [Wcg, Wcg], [1./gm, 1e-8], color='k',\n linestyle=':', zorder=-20)\n ax_mag.loglog(\n [Wcg, Wcg], [1., 1./gm], color='k', zorder=-20)\n\n if deg:\n ax_phase.semilogx(\n [Wcg, Wcg], [1e-8, phase_limit],\n color='k', linestyle=':', zorder=-20)\n else:\n ax_phase.semilogx(\n [Wcg, Wcg], [1e-8, math.radians(phase_limit)],\n color='k', linestyle=':', zorder=-20)\n\n ax_mag.set_ylim(mag_ylim)\n 
ax_phase.set_ylim(phase_ylim)\n\n if sisotool:\n ax_mag.text(\n 0.04, 0.06,\n 'G.M.: %.2f %s\\nFreq: %.2f %s' %\n (20*np.log10(gm) if dB else gm,\n 'dB ' if dB else '',\n Wcg, 'Hz' if Hz else 'rad/s'),\n horizontalalignment='left',\n verticalalignment='bottom',\n transform=ax_mag.transAxes,\n fontsize=8 if int(mpl.__version__[0]) == 1 else 6)\n ax_phase.text(\n 0.04, 0.06,\n 'P.M.: %.2f %s\\nFreq: %.2f %s' %\n (pm if deg else math.radians(pm),\n 'deg' if deg else 'rad',\n Wcp, 'Hz' if Hz else 'rad/s'),\n horizontalalignment='left',\n verticalalignment='bottom',\n transform=ax_phase.transAxes,\n fontsize=8 if int(mpl.__version__[0]) == 1 else 6)\n else:\n plt.suptitle(\n \"Gm = %.2f %s(at %.2f %s), \"\n \"Pm = %.2f %s (at %.2f %s)\" %\n (20*np.log10(gm) if dB else gm,\n 'dB ' if dB else '\\b',\n Wcg, 'Hz' if Hz else 'rad/s',\n pm if deg else math.radians(pm),\n 'deg' if deg else 'rad',\n Wcp, 'Hz' if Hz else 'rad/s'))\n\n if nyquistfrq_plot:\n ax_phase.axvline(\n nyquistfrq_plot, color=pltline[0].get_color())\n\n # Add a grid to the plot + labeling\n ax_phase.set_ylabel(\"Phase (deg)\" if deg else \"Phase (rad)\")\n\n def gen_zero_centered_series(val_min, val_max, period):\n v1 = np.ceil(val_min / period - 0.2)\n v2 = np.floor(val_max / period + 0.2)\n return np.arange(v1, v2 + 1) * period\n if deg:\n ylim = ax_phase.get_ylim()\n ax_phase.set_yticks(gen_zero_centered_series(\n ylim[0], ylim[1], 45.))\n ax_phase.set_yticks(gen_zero_centered_series(\n ylim[0], ylim[1], 15.), minor=True)\n else:\n ylim = ax_phase.get_ylim()\n ax_phase.set_yticks(gen_zero_centered_series(\n ylim[0], ylim[1], math.pi / 4.))\n ax_phase.set_yticks(gen_zero_centered_series(\n ylim[0], ylim[1], math.pi / 12.), minor=True)\n ax_phase.grid(grid and not margins, which='both')\n # ax_mag.grid(which='minor', alpha=0.3)\n # ax_mag.grid(which='major', alpha=0.9)\n # ax_phase.grid(which='minor', alpha=0.3)\n # ax_phase.grid(which='major', alpha=0.9)\n\n # Label the frequency axis\n ax_phase.set_xlabel(\"Frequency (Hz)\" if Hz\n else \"Frequency (rad/sec)\")\n\n if len(syslist) == 1:\n return mags[0], phases[0], omegas[0]\n else:\n return mags, phases, omegas\n\n#\n# Nyquist plot\n#\n\ndef nyquist_plot(syslist, omega=None, Plot=True, color=None,\n labelFreq=0, *args, **kwargs):\n \"\"\"\n Nyquist plot for a system\n\n Plots a Nyquist plot for the system over a (optional) frequency range.\n\n Parameters\n ----------\n syslist : list of LTI\n List of linear input/output systems (single system is OK)\n omega : freq_range\n Range of frequencies (list or bounds) in rad/sec\n Plot : boolean\n If True, plot magnitude\n color : string\n Used to specify the color of the plot\n labelFreq : int\n Label every nth frequency on the plot\n \\*args, \\**kwargs:\n Additional options to matplotlib (color, linestyle, etc)\n\n Returns\n -------\n real : array\n real part of the frequency response array\n imag : array\n imaginary part of the frequency response array\n freq : array\n frequencies\n\n Examples\n --------\n >>> sys = ss(\"1. -2; 3. -4\", \"5.; 7\", \"6. 
8\", \"9.\")\n >>> real, imag, freq = nyquist_plot(sys)\n\n \"\"\"\n # If argument was a singleton, turn it into a list\n if not getattr(syslist, '__iter__', False):\n syslist = (syslist,)\n\n # Select a default range if none is provided\n if omega is None:\n omega = default_frequency_range(syslist)\n\n # Interpolate between wmin and wmax if a tuple or list are provided\n elif isinstance(omega, list) or isinstance(omega, tuple):\n # Only accept tuple or list of length 2\n if len(omega) != 2:\n raise ValueError(\"Supported frequency arguments are (wmin,wmax)\"\n \"tuple or list, or frequency vector. \")\n omega = np.logspace(np.log10(omega[0]), np.log10(omega[1]),\n num=50, endpoint=True, base=10.0)\n\n for sys in syslist:\n if sys.inputs > 1 or sys.outputs > 1:\n # TODO: Add MIMO nyquist plots.\n raise NotImplementedError(\n \"Nyquist is currently only implemented for SISO systems.\")\n else:\n # Get the magnitude and phase of the system\n mag_tmp, phase_tmp, omega = sys.freqresp(omega)\n mag = np.squeeze(mag_tmp)\n phase = np.squeeze(phase_tmp)\n\n # Compute the primary curve\n x = sp.multiply(mag, sp.cos(phase))\n y = sp.multiply(mag, sp.sin(phase))\n\n if Plot:\n # Plot the primary curve and mirror image\n p = plt.plot(x, y, '-', color=color, *args, **kwargs)\n c = p[0].get_color()\n ax = plt.gca()\n # Plot arrow to indicate Nyquist encirclement orientation\n ax.arrow(x[0], y[0], (x[1]-x[0])/2, (y[1]-y[0])/2, fc=c, ec=c,\n head_width=0.2, head_length=0.2)\n\n plt.plot(x, -y, '-', color=c, *args, **kwargs)\n ax.arrow(\n x[-1], -y[-1], (x[-1]-x[-2])/2, (y[-1]-y[-2])/2,\n fc=c, ec=c, head_width=0.2, head_length=0.2)\n\n # Mark the -1 point\n plt.plot([-1], [0], 'r+')\n\n # Label the frequencies of the points\n if labelFreq:\n ind = slice(None, None, labelFreq)\n for xpt, ypt, omegapt in zip(x[ind], y[ind], omega[ind]):\n # Convert to Hz\n f = omegapt / (2 * sp.pi)\n\n # Factor out multiples of 1000 and limit the\n # result to the range [-8, 8].\n pow1000 = max(min(get_pow1000(f), 8), -8)\n\n # Get the SI prefix.\n prefix = gen_prefix(pow1000)\n\n # Apply the text. (Use a space before the text to\n # prevent overlap with the data.)\n #\n # np.round() is used because 0.99... 
appears\n # instead of 1.0, and this would otherwise be\n # truncated to 0.\n plt.text(xpt, ypt, ' ' +\n str(int(np.round(f / 1000 ** pow1000, 0))) + ' ' +\n prefix + 'Hz')\n\n if Plot:\n ax = plt.gca()\n ax.set_xlabel(\"Real axis\")\n ax.set_ylabel(\"Imaginary axis\")\n ax.grid(color=\"lightgray\")\n\n return x, y, omega\n\n#\n# Gang of Four plot\n#\n\n# TODO: think about how (and whether) to handle lists of systems\ndef gangof4_plot(P, C, omega=None):\n \"\"\"Plot the \"Gang of 4\" transfer functions for a system\n\n Generates a 2x2 plot showing the \"Gang of 4\" sensitivity functions\n [T, PS; CS, S]\n\n Parameters\n ----------\n P, C : LTI\n Linear input/output systems (process and control)\n omega : array\n Range of frequencies (list or bounds) in rad/sec\n\n Returns\n -------\n None\n \"\"\"\n if P.inputs > 1 or P.outputs > 1 or C.inputs > 1 or C.outputs > 1:\n # TODO: Add MIMO go4 plots.\n raise NotImplementedError(\n \"Gang of four is currently only implemented for SISO systems.\")\n else:\n\n # Select a default range if none is provided\n # TODO: This needs to be made more intelligent\n if omega is None:\n omega = default_frequency_range((P, C))\n\n # Compute the senstivity functions\n L = P * C\n S = feedback(1, L)\n T = L * S\n\n # Set up the axes with labels so that multiple calls to\n # gangof4_plot will superimpose the data. See details in bode_plot.\n plot_axes = {'t': None, 's': None, 'ps': None, 'cs': None}\n for ax in plt.gcf().axes:\n label = ax.get_label()\n if label.startswith('control-gangof4-'):\n key = label[len('control-gangof4-'):]\n if key not in plot_axes:\n raise RuntimeError(\n \"unknown gangof4 axis type '{}'\".format(label))\n plot_axes[key] = ax\n\n # if any of the axes are missing, start from scratch\n if any((ax is None for ax in plot_axes.values())):\n plt.clf()\n plot_axes = {'t': plt.subplot(221, label='control-gangof4-t'),\n 'ps': plt.subplot(222, label='control-gangof4-ps'),\n 'cs': plt.subplot(223, label='control-gangof4-cs'),\n 's': plt.subplot(224, label='control-gangof4-s')}\n\n #\n # Plot the four sensitivity functions\n #\n\n # TODO: Need to add in the mag = 1 lines\n mag_tmp, phase_tmp, omega = T.freqresp(omega)\n mag = np.squeeze(mag_tmp)\n plot_axes['t'].loglog(omega, mag)\n\n mag_tmp, phase_tmp, omega = (P * S).freqresp(omega)\n mag = np.squeeze(mag_tmp)\n plot_axes['ps'].loglog(omega, mag)\n\n mag_tmp, phase_tmp, omega = (C * S).freqresp(omega)\n mag = np.squeeze(mag_tmp)\n plot_axes['cs'].loglog(omega, mag)\n\n mag_tmp, phase_tmp, omega = S.freqresp(omega)\n mag = np.squeeze(mag_tmp)\n plot_axes['s'].loglog(omega, mag)\n\n#\n# Utility functions\n#\n# This section of the code contains some utility functions for\n# generating frequency domain plots\n#\n\n# Compute reasonable defaults for axes\ndef default_frequency_range(syslist, Hz=None, number_of_samples=None,\n feature_periphery_decades=None):\n \"\"\"Compute a reasonable default frequency range for frequency\n domain plots.\n\n Finds a reasonable default frequency range by examining the features\n (poles and zeros) of the systems in syslist.\n\n Parameters\n ----------\n syslist : list of LTI\n List of linear input/output systems (single system is OK)\n Hz : bool\n If True, the limits (first and last value) of the frequencies\n are set to full decades in Hz so it fits plotting with logarithmic\n scale in Hz otherwise in rad/s. Omega is always returned in rad/sec.\n number_of_samples : int, optional\n Number of samples to generate. 
The default value is read from\n ``config.defaults['freqplot.number_of_samples']. If None, then the\n default from `numpy.logspace` is used.\n feature_periphery_decades : float, optional\n Defines how many decades shall be included in the frequency range on\n both sides of features (poles, zeros). The default value is read from\n ``config.defaults['freqplot.feature_periphery_decades']``.\n\n Returns\n -------\n omega : array\n Range of frequencies in rad/sec\n\n Examples\n --------\n >>> from matlab import ss\n >>> sys = ss(\"1. -2; 3. -4\", \"5.; 7\", \"6. 8\", \"9.\")\n >>> omega = default_frequency_range(sys)\n\n \"\"\"\n # This code looks at the poles and zeros of all of the systems that\n # we are plotting and sets the frequency range to be one decade above\n # and below the min and max feature frequencies, rounded to the nearest\n # integer. It excludes poles and zeros at the origin. If no features\n # are found, it turns logspace(-1, 1)\n\n # Set default values for options\n number_of_samples = config._get_param(\n 'freqplot', 'number_of_samples', number_of_samples)\n feature_periphery_decades = config._get_param(\n 'freqplot', 'feature_periphery_decades', feature_periphery_decades, 1)\n\n # Find the list of all poles and zeros in the systems\n features = np.array(())\n freq_interesting = []\n\n # detect if single sys passed by checking if it is sequence-like\n if not getattr(syslist, '__iter__', False):\n syslist = (syslist,)\n\n for sys in syslist:\n try:\n # Add new features to the list\n if sys.isctime():\n features_ = np.concatenate((np.abs(sys.pole()),\n np.abs(sys.zero())))\n # Get rid of poles and zeros at the origin\n features_ = features_[features_ != 0.0]\n features = np.concatenate((features, features_))\n elif sys.isdtime(strict=True):\n fn = math.pi * 1. / sys.dt\n # TODO: What distance to the Nyquist frequency is appropriate?\n freq_interesting.append(fn * 0.9)\n\n features_ = np.concatenate((sys.pole(),\n sys.zero()))\n # Get rid of poles and zeros\n # * at the origin and real <= 0 & imag==0: log!\n # * at 1.: would result in omega=0. (logaritmic plot!)\n features_ = features_[\n (features_.imag != 0.0) | (features_.real > 0.)]\n features_ = features_[\n np.bitwise_not((features_.imag == 0.0) &\n (np.abs(features_.real - 1.0) < 1.e-10))]\n # TODO: improve\n features__ = np.abs(np.log(features_) / (1.j * sys.dt))\n features = np.concatenate((features, features__))\n else:\n # TODO\n raise NotImplementedError(\n \"type of system in not implemented now\")\n except:\n pass\n\n # Make sure there is at least one point in the range\n if features.shape[0] == 0:\n features = np.array([1.])\n\n if Hz:\n features /= 2. * math.pi\n features = np.log10(features)\n lsp_min = np.floor(np.min(features) - feature_periphery_decades)\n lsp_max = np.ceil(np.max(features) + feature_periphery_decades)\n lsp_min += np.log10(2. * math.pi)\n lsp_max += np.log10(2. 
* math.pi)\n else:\n features = np.log10(features)\n lsp_min = np.floor(np.min(features) - feature_periphery_decades)\n lsp_max = np.ceil(np.max(features) + feature_periphery_decades)\n if freq_interesting:\n lsp_min = min(lsp_min, np.log10(min(freq_interesting)))\n lsp_max = max(lsp_max, np.log10(max(freq_interesting)))\n\n # TODO: Add a check in discrete case to make sure we don't get aliasing\n # (Attention: there is a list of system but only one omega vector)\n\n # Set the range to be an order of magnitude beyond any features\n if number_of_samples:\n omega = sp.logspace(\n lsp_min, lsp_max, num=number_of_samples, endpoint=True)\n else:\n omega = sp.logspace(lsp_min, lsp_max, endpoint=True)\n return omega\n\n#\n# KLD 5/23/11: Two functions to create nice looking labels\n#\n\ndef get_pow1000(num):\n \"\"\"Determine exponent for which significand of a number is within the\n range [1, 1000).\n \"\"\"\n # Based on algorithm from http://www.mail-archive.com/[email protected]/msg14433.html, accessed 2010/11/7\n # by Jason Heeris 2009/11/18\n from decimal import Decimal\n from math import floor\n dnum = Decimal(str(num))\n if dnum == 0:\n return 0\n elif dnum < 0:\n dnum = -dnum\n return int(floor(dnum.log10() / 3))\n\n\ndef gen_prefix(pow1000):\n \"\"\"Return the SI prefix for a power of 1000.\n \"\"\"\n # Prefixes according to Table 5 of [BIPM 2006] (excluding hecto,\n # deca, deci, and centi).\n if pow1000 < -8 or pow1000 > 8:\n raise ValueError(\n \"Value is out of the range covered by the SI prefixes.\")\n return ['Y', # yotta (10^24)\n 'Z', # zetta (10^21)\n 'E', # exa (10^18)\n 'P', # peta (10^15)\n 'T', # tera (10^12)\n 'G', # giga (10^9)\n 'M', # mega (10^6)\n 'k', # kilo (10^3)\n '', # (10^0)\n 'm', # milli (10^-3)\n r'$\\mu$', # micro (10^-6)\n 'n', # nano (10^-9)\n 'p', # pico (10^-12)\n 'f', # femto (10^-15)\n 'a', # atto (10^-18)\n 'z', # zepto (10^-21)\n 'y'][8 - pow1000] # yocto (10^-24)\n\n\ndef find_nearest_omega(omega_list, omega):\n omega_list = np.asarray(omega_list)\n return omega_list[(np.abs(omega_list - omega)).argmin()]\n\n\n# Function aliases\nbode = bode_plot\nnyquist = nyquist_plot\ngangof4 = gangof4_plot\n" ]
[ [ "numpy.matrix", "numpy.testing.assert_raises", "numpy.testing.assert_array_almost_equal" ], [ "numpy.asarray", "numpy.squeeze", "matplotlib.pyplot.plot", "numpy.concatenate", "numpy.max", "numpy.round", "scipy.cos", "scipy.logspace", "matplotlib.pyplot.gca", "numpy.arange", "matplotlib.pyplot.gcf", "numpy.ceil", "matplotlib.pyplot.subplot", "scipy.sin", "numpy.log", "numpy.min", "numpy.log10", "numpy.floor", "numpy.array", "numpy.abs", "matplotlib.pyplot.clf" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MolSSI/dqm_server
[ "ceff64fe032590095e0f865bc1d0c2da4684404e" ]
[ "qcfractal/interface/collections/collection.py" ]
[ "\"\"\"\nMongo QCDB Abstract basic Collection class\n\nHelper\n\"\"\"\n\nimport abc\nimport copy\nimport json\nfrom typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Union\n\nimport pandas as pd\n\nfrom ..models import ProtoModel\n\nif TYPE_CHECKING: # pragma: no cover\n from .. import FractalClient\n from ..models import ObjectId\n\n\nclass Collection(abc.ABC):\n def __init__(self, name: str, client: Optional[\"FractalClient\"] = None, **kwargs: Any):\n \"\"\"\n Initializer for the Collection objects. If no Portal is supplied or the Collection name\n is not present on the server that the Portal is connected to a blank Collection will be\n created.\n\n Parameters\n ----------\n name : str\n The name of the Collection object as ID'ed on the storage backend.\n client : FractalClient, optional\n A FractalClient connected to a server\n **kwargs : Dict[str, Any]\n Additional keywords which are passed to the Collection and the initial data constructor\n It is up to the individual implementations of the Collection to do things with that data\n \"\"\"\n\n self.client = client\n if (self.client is not None) and not (self.client.__class__.__name__ == \"FractalClient\"):\n raise TypeError(\"Expected FractalClient as `client` kwarg, found {}.\".format(type(self.client)))\n\n if \"collection\" not in kwargs:\n kwargs[\"collection\"] = self.__class__.__name__.lower()\n\n kwargs[\"name\"] = name\n\n # Create the data model\n self.data = self.DataModel(**kwargs)\n\n class DataModel(ProtoModel):\n \"\"\"\n Internal Data structure base model typed by PyDantic\n\n This structure validates input, allows server-side validation and data security,\n and will create the information to pass back and forth between server and client\n\n Subclasses of Collection can extend this class internally to change the set of\n additional data defined by the Collection\n \"\"\"\n\n id: str = \"local\"\n name: str\n\n collection: str\n provenance: Dict[str, str] = {}\n\n tags: List[str] = []\n tagline: Optional[str] = None\n description: Optional[str] = None\n\n group: str = \"default\"\n visibility: bool = True\n\n view_url_hdf5: Optional[str] = None\n view_url_plaintext: Optional[str] = None\n view_metadata: Optional[Dict[str, str]] = None\n view_available: bool = False\n\n metadata: Dict[str, Any] = {}\n\n def __str__(self) -> str:\n \"\"\"\n A simple string representation of the Collection.\n\n Returns\n -------\n ret : str\n A representation of the Collection.\n\n Examples\n --------\n\n >>> repr(obj)\n Collection(name=`S22`, id='5b7f1fd57b87872d2c5d0a6d', client=`localhost:8888`)\n \"\"\"\n\n client = None\n if self.client:\n client = self.client.address\n\n class_name = self.__class__.__name__\n ret = \"{}(\".format(class_name)\n ret += \"name=`{}`, \".format(self.data.name)\n ret += \"id='{}', \".format(self.data.id)\n ret += \"client='{}') \".format(client)\n\n return ret\n\n def __repr__(self) -> str:\n return f\"<{self}>\"\n\n def _check_client(self):\n if self.client is None:\n raise AttributeError(\"This method requires a FractalClient and no client was set\")\n\n @property\n def name(self) -> str:\n return self.data.name\n\n @classmethod\n def from_server(cls, client: \"FractalClient\", name: str) -> \"Collection\":\n \"\"\"Creates a new class from a server\n\n Parameters\n ----------\n client : FractalClient\n A FractalClient connected to a server\n name : str\n The name of the collection to pull from.\n\n Returns\n -------\n Collection\n A constructed collection.\n\n \"\"\"\n\n if not 
(client.__class__.__name__ == \"FractalClient\"):\n raise TypeError(\"Expected a FractalClient as first argument, found {}.\".format(type(client)))\n\n class_name = cls.__name__.lower()\n tmp_data = client.get_collection(class_name, name, full_return=True)\n if tmp_data.meta.n_found == 0:\n raise KeyError(\"Warning! `{}: {}` not found.\".format(class_name, name))\n\n return cls.from_json(tmp_data.data[0], client=client)\n\n @classmethod\n def from_json(cls, data: Dict[str, Any], client: \"FractalClient\" = None) -> \"Collection\":\n \"\"\"Creates a new class from a JSON blob\n\n Parameters\n ----------\n data : Dict[str, Any]\n The JSON blob to create a new class from.\n client : FractalClient, optional\n A FractalClient connected to a server\n\n Returns\n -------\n Collection\n A constructed collection.\n\n \"\"\"\n # Check we are building the correct object\n class_name = cls.__name__.lower()\n if \"collection\" not in data:\n raise KeyError(\"Attempted to create Collection from JSON, but no `collection` field found.\")\n\n if data[\"collection\"] != class_name:\n raise KeyError(\n \"Attempted to create Collection from JSON with class {}, but found collection type of {}.\".format(\n class_name, data[\"collection\"]\n )\n )\n\n name = data.pop(\"name\")\n # Allow PyDantic to handle type validation\n ret = cls(name, client=client, **data)\n return ret\n\n def to_json(self, filename: Optional[str] = None):\n \"\"\"\n If a filename is provided, dumps the file to disk. Otherwise returns a copy of the current data.\n\n Parameters\n ----------\n filename : str, Optional, Default: None\n The filename to drop the data to.\n\n Returns\n -------\n ret : dict\n A JSON representation of the Collection\n \"\"\"\n data = self.data.dict()\n if filename is not None:\n with open(filename, \"w\") as open_file:\n json.dump(data, open_file)\n else:\n return copy.deepcopy(data)\n\n @abc.abstractmethod\n def _pre_save_prep(self, client: \"FractalClient\"):\n \"\"\"\n Additional actions to take before saving, done as the last step before data is written.\n\n This does not return anything but can prep the `self.data` field before storing it.\n\n Has access to the `client` in case its needed to do pre-conditioning.\n\n Parameters\n ----------\n client : FractalClient\n A FractalClient connected to a server used for storage access\n \"\"\"\n\n # Setters\n def save(self, client: Optional[\"FractalClient\"] = None) -> \"ObjectId\":\n \"\"\"Uploads the overall structure of the Collection (indices, options, new molecules, etc)\n to the server.\n\n Parameters\n ----------\n client : FractalClient, optional\n A FractalClient connected to a server to upload to\n\n Returns\n -------\n ObjectId\n The ObjectId of the saved collection.\n\n \"\"\"\n class_name = self.__class__.__name__.lower()\n if self.data.name == \"\":\n raise AttributeError(\"Collection:save: {} must have a name!\".format(class_name))\n\n if client is None:\n self._check_client()\n client = self.client\n\n self._pre_save_prep(client)\n\n # Add the database\n if self.data.id == self.data.__fields__[\"id\"].default:\n response = client.add_collection(self.data.dict(), overwrite=False, full_return=True)\n if response.meta.success is False:\n raise KeyError(f\"Error adding collection: \\n{response.meta.error_description}\")\n self.data.__dict__[\"id\"] = response.data\n else:\n response = client.add_collection(self.data.dict(), overwrite=True, full_return=True)\n if response.meta.success is False:\n raise KeyError(f\"Error updating collection: 
\\n{response.meta.error_description}\")\n\n return self.data.id\n\n ### General helpers\n\n @staticmethod\n def _add_molecules_by_dict(client, molecules):\n\n flat_map_keys = []\n flat_map_mols = []\n for k, v in molecules.items():\n flat_map_keys.append(k)\n flat_map_mols.append(v)\n\n CHUNK_SIZE = client.query_limit\n mol_ret = []\n for i in range(0, len(flat_map_mols), CHUNK_SIZE):\n mol_ret.extend(client.add_molecules(flat_map_mols[i : i + CHUNK_SIZE]))\n\n return {k: v for k, v in zip(flat_map_keys, mol_ret)}\n\n\nclass BaseProcedureDataset(Collection):\n def __init__(self, name: str, client: \"FractalClient\" = None, **kwargs):\n if client is None:\n raise KeyError(\"{self.__class__.__name__} must initialize with a client.\")\n\n super().__init__(name, client=client, **kwargs)\n\n self.df = pd.DataFrame(index=self._get_index())\n\n class DataModel(Collection.DataModel):\n\n records: Dict[str, Any] = {}\n history: Set[str] = set()\n specs: Dict[str, Any] = {}\n\n class Config(Collection.DataModel.Config):\n pass\n\n @abc.abstractmethod\n def _internal_compute_add(self, spec: Any, entry: Any, tag: str, priority: str) -> \"ObjectId\":\n pass\n\n def _pre_save_prep(self, client: \"FractalClient\") -> None:\n pass\n\n def _get_index(self):\n\n return [x.name for x in self.data.records.values()]\n\n def _add_specification(self, name: str, spec: Any, overwrite=False) -> None:\n \"\"\"\n Parameters\n ----------\n name : str\n The name of the specification\n spec : Any\n The specification object\n overwrite : bool, optional\n Overwrite existing specification names\n\n \"\"\"\n\n lname = name.lower()\n if (lname in self.data.specs) and (not overwrite):\n raise KeyError(f\"{self.__class__.__name__} '{name}' already present, use `overwrite=True` to replace.\")\n\n self.data.specs[lname] = spec\n self.save()\n\n def _get_procedure_ids(self, spec: str, sieve: Optional[List[str]] = None) -> Dict[str, \"ObjectId\"]:\n \"\"\"Aquires the\n\n Parameters\n ----------\n spec : str\n The specification to get the map of\n sieve : Optional[List[str]], optional\n A\n Description\n\n Returns\n -------\n Dict[str, ObjectId]\n A dictionary of identifier to id mappings.\n\n \"\"\"\n\n spec = self.get_specification(spec)\n\n mapper = {}\n for rec in self.data.records.values():\n if sieve and rec.name not in sieve:\n continue\n\n try:\n td_id = rec.object_map[spec.name]\n mapper[rec.name] = td_id\n except KeyError:\n pass\n\n return mapper\n\n def get_specification(self, name: str) -> Any:\n \"\"\"\n Parameters\n ----------\n name : str\n The name of the specification\n\n Returns\n -------\n Specification\n The requested specification.\n\n \"\"\"\n try:\n return self.data.specs[name.lower()].copy()\n except KeyError:\n raise KeyError(f\"Specification '{name}' not found.\")\n\n def list_specifications(self, description=True) -> Union[List[str], pd.DataFrame]:\n \"\"\"Lists all available specifications\n\n Parameters\n ----------\n description : bool, optional\n If True returns a DataFrame with\n Description\n\n Returns\n -------\n Union[List[str], 'DataFrame']\n A list of known specification names.\n\n \"\"\"\n if description:\n data = [(x.name, x.description) for x in self.data.specs.values()]\n return pd.DataFrame(data, columns=[\"Name\", \"Description\"]).set_index(\"Name\")\n else:\n return [x.name for x in self.data.specs.values()]\n\n def _check_entry_exists(self, name):\n \"\"\"\n Checks if an entry exists or not.\n \"\"\"\n\n if name.lower() in self.data.records:\n raise KeyError(f\"Record {name} already in 
the dataset.\")\n\n def _add_entry(self, name, record, save):\n \"\"\"\n Adds an entry to the records\n \"\"\"\n\n self._check_entry_exists(name)\n self.data.records[name.lower()] = record\n if save:\n self.save()\n\n def get_entry(self, name: str) -> Any:\n \"\"\"Obtains a record from the Dataset\n\n Parameters\n ----------\n name : str\n The record name to pull from.\n\n Returns\n -------\n Record\n The requested record\n \"\"\"\n try:\n return self.data.records[name.lower()]\n except KeyError:\n raise KeyError(f\"Could not find entry name '{name}' in the dataset.\")\n\n def get_record(self, name: str, specification: str) -> Any:\n \"\"\"Pulls an individual computational record of the requested name and column.\n\n Parameters\n ----------\n name : str\n The index name to pull the record of.\n specification : str\n The name of specification to pull the record of.\n\n Returns\n -------\n Any\n The requested Record\n\n \"\"\"\n spec = self.get_specification(specification)\n rec_id = self.get_entry(name).object_map.get(spec.name, None)\n\n if rec_id is None:\n raise KeyError(f\"Could not find a record for ({name}: {specification}).\")\n\n return self.client.query_procedures(id=rec_id)[0]\n\n def compute(\n self, specification: str, subset: Set[str] = None, tag: Optional[str] = None, priority: Optional[str] = None\n ) -> int:\n \"\"\"Computes a specification for all entries in the dataset.\n\n Parameters\n ----------\n specification : str\n The specification name.\n subset : Set[str], optional\n Computes only a subset of the dataset.\n tag : Optional[str], optional\n The queue tag to use when submitting compute requests.\n priority : Optional[str], optional\n The priority of the jobs low, medium, or high.\n\n Returns\n -------\n int\n The number of submitted computations\n \"\"\"\n\n specification = specification.lower()\n spec = self.get_specification(specification)\n if subset:\n subset = set(subset)\n\n submitted = 0\n for entry in self.data.records.values():\n if (subset is not None) and (entry.name not in subset):\n continue\n\n if spec.name in entry.object_map:\n continue\n\n entry.object_map[spec.name] = self._internal_compute_add(spec, entry, tag, priority)\n submitted += 1\n\n self.data.history.add(specification)\n\n # Nothing to save\n if submitted:\n self.save()\n\n return submitted\n\n def query(self, specification: str, force: bool = False) -> pd.Series:\n \"\"\"Queries a given specification from the server\n\n Parameters\n ----------\n specification : str\n The specification name to query\n force : bool, optional\n Force a fresh query if the specification already exists.\n\n Returns\n -------\n pd.Series\n Records collected from the server\n \"\"\"\n # Try to get the specification, will throw if not found.\n spec = self.get_specification(specification)\n\n if not force and (spec.name in self.df):\n return spec.name\n\n mapper = self._get_procedure_ids(spec.name)\n query_ids = list(mapper.values())\n\n # Chunk up the queries\n procedures: List[Dict[str, Any]] = []\n for i in range(0, len(query_ids), self.client.query_limit):\n chunk_ids = query_ids[i : i + self.client.query_limit]\n procedures.extend(self.client.query_procedures(id=chunk_ids))\n\n proc_lookup = {x.id: x for x in procedures}\n\n data = []\n for name, oid in mapper.items():\n try:\n data.append([name, proc_lookup[oid]])\n except KeyError:\n data.append([name, None])\n\n df = pd.DataFrame(data, columns=[\"index\", spec.name])\n df.set_index(\"index\", inplace=True)\n\n self.df[spec.name] = df[spec.name]\n\n return 
df[spec.name]\n\n def status(\n self,\n specs: Union[str, List[str]] = None,\n collapse: bool = True,\n status: Optional[str] = None,\n detail: bool = False,\n ) -> pd.DataFrame:\n \"\"\"Returns the status of all current specifications.\n\n Parameters\n ----------\n collapse : bool, optional\n Collapse the status into summaries per specification or not.\n status : Optional[str], optional\n If not None, only returns results that match the provided status.\n detail : bool, optional\n Shows a detailed description of the current status of incomplete jobs.\n\n Returns\n -------\n DataFrame\n A DataFrame of all known statuses\n\n \"\"\"\n\n # Simple no detail case\n if detail is False:\n # detail = False can handle multiple specifications\n # If specs is None, then use all (via list_specifications)\n if isinstance(specs, str):\n specs = [specs]\n elif specs is None:\n specs = self.list_specifications(description=False)\n\n # Query all of the specs and make sure they are valid\n # Specs may not be loaded to self.df yet. This can be accomplished\n # with self.query, which stores the info in self.df\n for spec in specs:\n self.query(spec)\n\n def get_status(item):\n try:\n return item.status.value\n except AttributeError:\n return None\n\n # apply status by column then by row\n df = self.df[specs].apply(lambda col: col.apply(get_status))\n\n if status:\n df = df[(df == status.upper())]\n\n if collapse:\n return df.apply(lambda x: x.value_counts())\n else:\n return df\n\n if status not in [None, \"INCOMPLETE\"]:\n raise KeyError(\"Detailed status is only available for incomplete procedures.\")\n\n # Can only do detailed status for a single spec\n # If specs is a string, ok. If it is a list, then it should have length = 1\n if not (isinstance(specs, str) or len(specs) == 1):\n raise KeyError(\"Detailed status is only available for a single specification at a time.\")\n\n # If specs is a list (of length = 1, checked above), then make it a string\n # (_get_procedure_ids expects a string)\n if not isinstance(specs, str):\n specs = specs[0]\n\n mapper = self._get_procedure_ids(specs)\n reverse_map = {v: k for k, v in mapper.items()}\n procedures = self.client.query_procedures(id=list(mapper.values()))\n\n data = []\n\n for proc in procedures:\n if proc.status == \"COMPLETE\":\n continue\n\n try:\n blob = proc.detailed_status()\n except:\n raise AttributeError(\"Detailed statuses are not available for this dataset type.\")\n\n blob[\"Name\"] = reverse_map[proc.id]\n data.append(blob)\n\n df = pd.DataFrame(data)\n df.rename(columns={x: x.replace(\"_\", \" \").title() for x in df.columns}, inplace=True)\n if df.shape[0]:\n df = df.set_index(\"Name\")\n\n return df\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
ghisvail/pyoperators
[ "af8bb089e1ac42b649592488dbd49a609e3f833a", "af8bb089e1ac42b649592488dbd49a609e3f833a" ]
[ "pyoperators/fft.py", "test/test_linear.py" ]
[ "from __future__ import absolute_import, division, print_function\n\nimport numpy as np\nimport os\nimport time\n\nfrom .config import LOCAL_PATH\nfrom .core import (\n AdditionOperator, CompositionOperator, DiagonalOperator, HomothetyOperator,\n Operator, _pool)\nfrom .flags import aligned, contiguous, inplace, linear, real, square, unitary\nfrom .memory import empty\nfrom .utils import (complex_dtype, isalias, omp_num_threads, product,\n tointtuple)\nfrom .utils.ufuncs import multiply_conjugate\nfrom .warnings import warn, PyOperatorsWarning\n\n__all__ = ['ConvolutionOperator', 'FFTOperator']\n\ntry:\n import pyfftw\n FFTW_DEFAULT_NUM_THREADS = omp_num_threads()\n FFTW_WISDOM_FILES = tuple(os.path.join(LOCAL_PATH, 'fftw{0}.wisdom'.format(\n t)) for t in ['', 'f', 'l'])\n FFTW_WISDOM_MIN_DELAY = 0.1\n _is_fftw_wisdom_loaded = False\nexcept:\n warn('The pyFFTW library is not installed.', PyOperatorsWarning)\n\n# FFTW out-of-place transforms:\n# PRESERVE_INPUT: default except c2r and hc2r\n# DESTROY_INPUT: default for c2r and hc2r, only possibility for multi c2r\n\nOPERATOR_ATTRIBUTES = ['attrin', 'attrout', 'classin', 'classout', 'commin',\n 'commout', 'reshapein', 'reshapeout', 'shapein',\n 'shapeout', 'toshapein', 'toshapeout', 'validatein',\n 'validateout', 'dtype', 'flags']\n\n\n@linear\n@square\n@inplace\n@aligned\n@contiguous\nclass _FFTWConvolutionOperator(Operator):\n \"\"\"\n Multi-dimensional convolution by a real or complex kernel,\n using the discrete Fourier transform.\n\n \"\"\"\n def __init__(self, kernel, shapein, axes=None, fftw_flag='FFTW_MEASURE',\n nthreads=None, dtype=None, **keywords):\n \"\"\"\n Parameters\n ----------\n kernel : array-like\n The multi-dimensional convolution kernel.\n shapein : tuple\n The shape of the input to be convolved by the kernel.\n axes : tuple\n Axes along which the convolution is performed. Convolution over\n less axes than the operator's input is not yet supported.\n fftw_flag : string\n list of strings and is a subset of the flags that FFTW allows for\n the planners. Specifically, FFTW_ESTIMATE, FFTW_MEASURE,\n FFTW_PATIENT and FFTW_EXHAUSTIVE are supported. These describe the\n increasing amount of effort spent during the planning stage to\n create the fastest possible transform. Usually, FFTW_MEASURE is\n a good compromise and is the default.\n nthreads : int\n Tells how many threads to use when invoking FFTW or MKL. 
Default is\n the number of cores.\n dtype : dtype\n Operator's dtype.\n\n \"\"\"\n kernel = np.array(kernel, dtype=dtype, copy=False)\n dtype = kernel.dtype\n if dtype.kind not in ('f', 'c'):\n kernel = kernel.astype(float)\n dtype = kernel.dtype\n\n if shapein is None:\n raise ValueError('The input shape is not specified.')\n\n shapein = tointtuple(shapein)\n if len(shapein) != kernel.ndim:\n raise ValueError(\n \"The kernel dimension '{0}' is incompatible with that of the s\"\n \"pecified shape '{1}'.\".format(kernel.ndim, len(shapein)))\n\n # if the kernel is larger than the image, we don't crop it since it\n # might affect normalisation of the kernel\n if any([ks > s for ks, s in zip(kernel.shape, shapein)]):\n raise ValueError('The kernel must not be larger than the input.')\n\n if axes is None:\n axes = range(len(shapein))\n axes = tointtuple(axes)\n nthreads = nthreads or FFTW_DEFAULT_NUM_THREADS\n fftw_flag = fftw_flag.upper()\n\n if dtype.kind == 'c':\n n = product(shapein)\n fft = _FFTWComplexForwardOperator(shapein, axes, fftw_flag,\n nthreads, dtype, **keywords)\n kernel_fft = _get_kernel_fft(kernel, shapein, dtype, shapein,\n dtype, fft.oplan)\n kernel_fft /= n\n self.__class__ = CompositionOperator\n self.__init__([n, fft.H, DiagonalOperator(kernel_fft), fft])\n return\n\n dtype_ = complex_dtype(dtype)\n shape_ = self._reshape_to_halfstorage(shapein, axes)\n _load_wisdom()\n aligned = self.flags.aligned_input\n contiguous = True\n with _pool.get(shapein, dtype, aligned, contiguous) as in_:\n with _pool.get(shape_, dtype_, aligned, contiguous) as out:\n t0 = time.time()\n fplan = pyfftw.FFTW(in_, out, axes=axes,\n flags=[fftw_flag],\n direction='FFTW_FORWARD',\n threads=nthreads)\n bplan = pyfftw.FFTW(out, in_, axes=axes,\n flags=[fftw_flag],\n direction='FFTW_BACKWARD',\n threads=nthreads)\n\n if time.time() - t0 > FFTW_WISDOM_MIN_DELAY:\n _save_wisdom()\n\n kernel_fft = _get_kernel_fft(kernel, shapein, dtype, shape_,\n dtype_, fplan)\n kernel_fft /= product(shapein)\n self.__class__ = _FFTWRealConvolutionOperator\n self.__init__(kernel_fft, fplan, bplan, axes, fftw_flag, nthreads,\n shapein, dtype, **keywords)\n\n def _reshape_to_halfstorage(self, shape, axes):\n shape = list(shape)\n shape[axes[-1]] = shape[axes[-1]] // 2 + 1\n return shape\n\n\n@real\n@linear\n@square\n@inplace\n@aligned\n@contiguous\nclass _FFTWRealConvolutionOperator(Operator):\n \"\"\"\n Convolution by a real kernel.\n The first argument is the FFT of the real kernel. 
It is not necessarily\n aligned.\n\n \"\"\"\n def __init__(self, kernel_fft, fplan, bplan, axes, fftw_flag, nthreads,\n shapein=None, dtype=None, **keywords):\n self.kernel = kernel_fft\n self._fplan = fplan\n self._bplan = bplan\n self.axes = axes\n self.nthreads = nthreads\n self.fftw_flag = fftw_flag\n\n Operator.__init__(self, shapein=shapein, dtype=dtype, **keywords)\n self.set_rule('T', lambda s: _FFTWRealConvolutionTransposeOperator(\n s.kernel, s._fplan, s._bplan, s.axes, s.fftw_flag, s.nthreads))\n self.set_rule(('.', HomothetyOperator), self._rule_homothety,\n CompositionOperator)\n self.set_rule(('.', _FFTWRealConvolutionOperator), self.\n _rule_add_real, AdditionOperator)\n self.set_rule(('.', _FFTWRealConvolutionOperator), self.\n _rule_cmp_real, CompositionOperator)\n self.set_rule(('.', _FFTWComplexBackwardOperator), self.\n _rule_complex_backward, CompositionOperator)\n self.set_rule((_FFTWComplexForwardOperator, '.'), self.\n _rule_complex_forward, CompositionOperator)\n\n def direct(self, input, output):\n shape = self.kernel.shape\n dtype = self.kernel.dtype\n aligned = self.flags.aligned_input\n contiguous = True\n with _pool.get(shape, dtype, aligned, contiguous) as buf:\n self._fplan.update_arrays(input, buf)\n self._fplan.execute()\n buf *= self.kernel\n self._bplan.update_arrays(buf, output)\n self._bplan.execute()\n\n def get_kernel(self, out=None):\n if out is not None:\n out[...] = self.kernel\n return self.kernel\n\n @property\n def nbytes(self):\n return self.kernel.nbytes\n\n @staticmethod\n def _rule_homothety(self, scalar):\n kernel = empty(self.kernel.shape, self.kernel.dtype)\n self.get_kernel(kernel)\n kernel *= scalar.data\n result = _FFTWRealConvolutionOperator(\n kernel, self._fplan, self._bplan, self.axes, self.fftw_flag,\n self.nthreads, self.shapein, self.dtype)\n return result\n\n @staticmethod\n def _rule_add_real(self, other):\n if isinstance(other, _FFTWRealConvolutionTransposeOperator):\n # spare allocation in other.get_kernel (if self is not a transpose)\n self, other = other, self\n kernel = empty(self.kernel.shape, self.kernel.dtype)\n self.get_kernel(kernel)\n np.add(kernel, other.get_kernel(), kernel)\n result = _FFTWRealConvolutionOperator(\n kernel, self._fplan, self._bplan, self.axes, self.fftw_flag,\n self.nthreads, self.shapein, self.dtype)\n return result\n\n @staticmethod\n def _rule_cmp_real(self, other):\n if isinstance(other, _FFTWRealConvolutionTransposeOperator):\n # spare allocation in other.get_kernel (if self is not a transpose)\n self, other = other, self\n kernel = empty(self.kernel.shape, self.kernel.dtype)\n self.get_kernel(kernel)\n kernel *= other.get_kernel()\n kernel *= product(self.shapein)\n result = _FFTWRealConvolutionOperator(\n kernel, self._fplan, self._bplan, self.axes, self.fftw_flag,\n self.nthreads, self.shapein, self.dtype)\n return result\n\n @staticmethod\n def _rule_complex_backward(self, other):\n kernel = self._restore_kernel().astype(self.kernel.dtype)\n other.H.direct(kernel, kernel)\n kernel /= product(self.shapein)\n return other, DiagonalOperator(kernel)\n\n @staticmethod\n def _rule_complex_forward(other, self):\n kernel = self._restore_kernel().astype(self.kernel.dtype)\n other.direct(kernel, kernel)\n return DiagonalOperator(kernel), other\n\n def _restore_kernel(self):\n shape = self.kernel.shape\n dtype = self.kernel.dtype\n aligned = self.flags.aligned_input\n contiguous = True\n with _pool.get(shape, dtype, aligned, contiguous) as x:\n self.get_kernel(x)\n y = empty(self.shapein, 
self.dtype)\n self._bplan.update_arrays(x, y)\n self._bplan.execute()\n return y\n\n\nclass _FFTWRealConvolutionTransposeOperator(_FFTWRealConvolutionOperator):\n \"\"\"\n Transpose of the convolution by a real kernel.\n\n \"\"\"\n __name__ = '_FFTW_RealConvolutionOperator.T'\n\n def get_kernel(self, out=None):\n return np.conjugate(self.kernel, out)\n\n def direct(self, input, output):\n with _pool.get(self.kernel.shape, self.kernel.dtype) as buf:\n self._fplan.update_arrays(input, buf)\n self._fplan.execute()\n multiply_conjugate(buf, self.kernel, buf)\n self._bplan.update_arrays(buf, output)\n self._bplan.execute()\n\n\n@linear\n@square\n@inplace\n@aligned\n@contiguous\nclass _FFTWComplexOperator(Operator):\n def __init__(self, shapein, axes=None, fftw_flag='FFTW_MEASURE',\n nthreads=None, dtype=complex, **keywords):\n shapein = tointtuple(shapein)\n if axes is None:\n axes = range(len(shapein))\n self.axes = tointtuple(axes)\n self.fftw_flag = fftw_flag.upper()\n self.nthreads = nthreads or FFTW_DEFAULT_NUM_THREADS\n dtype = np.dtype(dtype)\n _load_wisdom()\n Operator.__init__(self, shapein=shapein, dtype=dtype, **keywords)\n\n def direct(self, input, output):\n if isalias(input, output):\n self.iplan.update_arrays(input, output)\n self.iplan.execute()\n else:\n self.oplan.update_arrays(input, output)\n self.oplan.execute()\n\n\n@unitary\nclass _FFTWComplexForwardOperator(_FFTWComplexOperator):\n \"\"\"\n Complex multi-dimensional forward Discrete Fourier Transform.\n\n \"\"\"\n def __init__(self, shapein, axes=None, fftw_flag='FFTW_MEASURE',\n nthreads=None, dtype=complex, **keywords):\n \"\"\"\n Parameters\n ----------\n shapein : tuple\n The shape of the input to be Fourier-transformed\n axes : tuple\n Axes along which the transform is performed.\n fftw_flag : string\n FFTW flag for the planner: FFTW_ESTIMATE, FFTW_MEASURE,\n FFTW_PATIENT or FFTW_EXHAUSTIVE. These describe the\n increasing amount of effort spent during the planning stage to\n create the fastest possible transform. Usually, FFTW_MEASURE is\n a good compromise and is the default.\n nthreads : int\n Tells how many threads to use when invoking FFTW or MKL. 
Default is\n the number of cores.\n dtype : dtype\n Operator's complex dtype.\n\n \"\"\"\n _FFTWComplexOperator.__init__(self, shapein, axes, fftw_flag,\n nthreads, dtype, **keywords)\n\n self.set_rule('H', lambda s:\n HomothetyOperator(1 / product(s.shapein)) *\n _FFTWComplexBackwardOperator(s.shapein, forward=s))\n self.set_rule((_FFTWComplexBackwardOperator, '.'), lambda o, s:\n HomothetyOperator(product(s.shapein)),\n CompositionOperator)\n\n with _pool.get(shapein, dtype) as in_:\n t0 = time.time()\n self.iplan = pyfftw.FFTW(in_, in_, axes=self.axes,\n flags=[self.fftw_flag],\n direction='FFTW_FORWARD',\n threads=self.nthreads)\n with _pool.get(shapein, dtype) as out:\n self.oplan = pyfftw.FFTW(in_, out, axes=self.axes,\n flags=[self.fftw_flag],\n direction='FFTW_FORWARD',\n threads=self.nthreads)\n if time.time() - t0 > FFTW_WISDOM_MIN_DELAY:\n _save_wisdom()\n\n\nclass _FFTWComplexBackwardOperator(_FFTWComplexOperator):\n \"\"\"\n Complex multi-dimensional backward Discrete Fourier Transform.\n\n \"\"\"\n def __init__(self, shapein, dtype=None, forward=None, **keywords):\n\n dtype = dtype or forward.dtype\n _FFTWComplexOperator.__init__(self, shapein, forward.axes,\n forward.fftw_flag,\n forward.nthreads, dtype, **keywords)\n self.set_rule('H', lambda s:\n HomothetyOperator(product(s.shapein)) * forward)\n self.set_rule((_FFTWComplexForwardOperator, '.'), lambda o, s:\n HomothetyOperator(product(s.shapein)),\n CompositionOperator)\n\n with _pool.get(shapein, dtype) as in_:\n t0 = time.time()\n self.iplan = pyfftw.FFTW(in_, in_, axes=self.axes,\n flags=[self.fftw_flag],\n direction='FFTW_BACKWARD',\n threads=self.nthreads)\n with _pool.get(shapein, dtype) as out:\n self.oplan = pyfftw.FFTW(in_, out, axes=self.axes,\n flags=[self.fftw_flag],\n direction='FFTW_BACKWARD',\n threads=self.nthreads)\n if time.time() - t0 > FFTW_WISDOM_MIN_DELAY:\n _save_wisdom()\n\n\ndef _get_kernel_fft(kernel, shapein, dtypein, shapeout, dtypeout, fft):\n with _pool.get(shapein, dtypein) as kernel_padded:\n ker_slice = [slice(0, s) for s in kernel.shape]\n kernel_padded[...] = 0\n kernel_padded[ker_slice] = kernel\n ker_origin = (np.array(kernel.shape)-1) // 2\n for axis, o in enumerate(ker_origin):\n kernel_padded = np.roll(kernel_padded, int(-o), axis=axis)\n kernel_fft = empty(shapeout, dtypeout)\n fft.update_arrays(kernel_padded, kernel_fft)\n fft.execute()\n return kernel_fft\n\n\ndef _load_wisdom():\n \"\"\" Loads the 3 wisdom files. \"\"\"\n global _is_fftw_wisdom_loaded\n if _is_fftw_wisdom_loaded:\n return\n\n def load(filename):\n try:\n with open(filename, 'rb') as f:\n wisdom = f.read()\n except IOError:\n wisdom = b''\n return wisdom\n\n wisdom = [load(f) for f in FFTW_WISDOM_FILES]\n pyfftw.import_wisdom(wisdom)\n _is_fftw_wisdom_loaded = True\n\n\ndef _save_wisdom():\n \"\"\" Save wisdom as 3 files. 
\"\"\"\n wisdom = pyfftw.export_wisdom()\n for filename, w in zip(FFTW_WISDOM_FILES, wisdom):\n try:\n os.remove(filename)\n except OSError:\n pass\n if len(w) == 0:\n continue\n with open(filename, 'w') as f:\n f.write(w)\n\n\n# make FFTW the default\nConvolutionOperator = _FFTWConvolutionOperator\nFFTOperator = _FFTWComplexForwardOperator\n", "from __future__ import division\n\nimport numpy as np\nimport pyoperators\n\nfrom numpy.testing import assert_allclose\nfrom pyoperators import (\n BlockColumnOperator, BlockDiagonalOperator, DegreesOperator,\n DenseOperator, DiagonalOperator, DiagonalNumexprOperator,\n DifferenceOperator, IntegrationTrapezeOperator, Operator, RadiansOperator,\n Rotation2dOperator, Rotation3dOperator, TridiagonalOperator,\n SymmetricBandToeplitzOperator, SumOperator)\nfrom pyoperators.utils import product\nfrom pyoperators.utils.testing import (\n assert_eq, assert_is_instance, assert_is_none, assert_is_type,\n assert_same)\nfrom .common import IdentityOutplaceOperator, assert_inplace_outplace\n\nSHAPES = ((), (1,), (3,), (2, 3), (2, 3, 4))\n\n\ndef test_degrees():\n def func(dtype):\n d = DegreesOperator(dtype=dtype)\n assert_same(d(1), np.degrees(np.ones((), dtype=dtype)))\n for dtype in (np.float16, np.float32, np.float64, np.float128):\n yield func, dtype\n\n\ndef test_degrees_rules():\n d = DegreesOperator()\n assert_is_type(d.I, RadiansOperator)\n\n\ndef test_diagonal_numexpr():\n diag = np.array([1, 2, 3])\n expr = '(data+1)*3'\n\n def func(broadcast, values):\n if broadcast == 'rightward':\n expected = (values.T*(diag.T+1)*3).T\n else:\n expected = values*(diag+1)*3\n op = DiagonalNumexprOperator(diag, expr, broadcast=broadcast)\n if broadcast in ('leftward', 'rightward'):\n assert op.broadcast == broadcast\n assert_is_none(op.shapein)\n else:\n assert op.broadcast == 'disabled'\n assert_eq(op.shapein, diag.shape)\n assert_eq(op.shapeout, diag.shape)\n assert_inplace_outplace(op, values, expected)\n for broadcast in (None, 'rightward', 'leftward', 'disabled'):\n for values in (np.array([3, 2, 1.]),\n np.array([[1, 2, 3], [2, 3, 4], [3, 4, 5.]])):\n if values.ndim > 1 and broadcast in (None, 'disabled'):\n continue\n yield func, broadcast, values\n\n\ndef test_diagonal_numexpr2():\n d1 = DiagonalNumexprOperator([1, 2, 3], '(data+1)*3',\n broadcast='rightward')\n d2 = DiagonalNumexprOperator([3, 2, 1], '(data+2)*2')\n d = d1 * d2\n assert_is_instance(d, DiagonalOperator)\n assert_eq(d.broadcast, 'disabled')\n assert_eq(d.data, [60, 72, 72])\n c = BlockColumnOperator(3*[IdentityOutplaceOperator()], new_axisout=0)\n v = [1, 2]\n assert_inplace_outplace(d1*c, v, d1(c(v)))\n\n\ndef test_diff_non_optimised():\n def func(shape, axis):\n dX = DifferenceOperator(axis=axis, shapein=shape)\n a = np.arange(product(shape)).reshape(shape)\n assert_eq(dX(a), np.diff(a, axis=axis))\n dX_dense = dX.todense()\n\n dXT_dense = dX.T.todense()\n assert_eq(dX_dense.T, dXT_dense)\n\n for shape in ((3,), (3, 4), (3, 4, 5), (3, 4, 5, 6)):\n for axis in range(len(shape)):\n yield func, shape, axis\n\n\ndef test_integration_trapeze():\n @pyoperators.flags.square\n class Op(Operator):\n \"\"\" output[i] = value ** (i + input[i]) \"\"\"\n def __init__(self, x):\n Operator.__init__(self, dtype=float)\n self.x = x\n\n def direct(self, input, output):\n output[...] 
= self.x ** (np.arange(input.size) + input)\n\n value = list(range(3))\n x = [0.5, 1, 2, 4]\n func_op = BlockColumnOperator([Op(_) for _ in x], new_axisout=0)\n eval_ = func_op(value)\n expected = np.trapz(eval_, x=x, axis=0)\n integ = IntegrationTrapezeOperator(x)(func_op)\n assert_same(integ(value), expected)\n\n\ndef test_radians():\n def func(dtype):\n d = RadiansOperator(dtype=dtype)\n assert_same(d(1), np.radians(np.ones((), dtype=dtype)))\n for dtype in (np.float16, np.float32, np.float64, np.float128):\n yield func, dtype\n\n\ndef test_radians_rules():\n d = RadiansOperator()\n assert_is_type(d.I, DegreesOperator)\n\n\ndef test_rotation_2d():\n def func(shape, degrees):\n angle = np.arange(product(shape)).reshape(shape)\n if degrees:\n angle_ = np.radians(angle)\n else:\n angle_ = angle\n angle_ = angle_.reshape(angle.size)\n r = Rotation2dOperator(angle, degrees=degrees)\n actual = r([1, 0]).reshape((angle.size, 2))\n expected = np.array([np.cos(angle_), np.sin(angle_)]).T\n assert_same(actual, expected)\n for shape in SHAPES:\n for degrees in False, True:\n yield func, shape, degrees\n\n\ndef test_rotation_3d_1axis():\n rx = Rotation3dOperator('X', 90, degrees=True)\n ry = Rotation3dOperator('Y', 90, degrees=True)\n rz = Rotation3dOperator('Z', 90, degrees=True)\n ref = [[1, 0, 0],\n [0, 1, 0],\n [0, 0, 1]]\n\n # single axis rotations\n exps = (\n [[1, 0, 0], [0, 0, 1], [0, -1, 0]],\n [[0, 0, -1], [0, 1, 0], [1, 0, 0]],\n [[0, 1, 0], [-1, 0, 0], [0, 0, 1]])\n\n def func(rot, exp):\n assert_allclose(rot(ref), exp, atol=1e-15)\n for rot, exp in zip((rx, ry, rz), exps):\n yield func, rot, exp\n\n\ndef test_rotation_3d_2axis():\n ref = [[1, 0, 0],\n [0, 1, 0],\n [0, 0, 1]]\n alpha = 0.1\n beta = 0.2\n\n # intrinsic rotations\n conventions = (\"XY'\", \"XZ'\", \"YX'\", \"YZ'\", \"ZX'\", \"ZY'\")\n\n def func(c):\n r = Rotation3dOperator(c, alpha, beta)\n r2 = Rotation3dOperator(c[0], alpha) * \\\n Rotation3dOperator(c[1], beta)\n assert_allclose(r(ref), r2(ref))\n for c in conventions:\n yield func, c\n\n # extrinsic rotations\n conventions = ('XY', 'XZ', 'YX', 'YZ', 'ZX', 'ZY')\n\n def func(c):\n r = Rotation3dOperator(c, alpha, beta)\n r2 = Rotation3dOperator(c[1], beta) * \\\n Rotation3dOperator(c[0], alpha)\n assert_allclose(r(ref), r2(ref))\n for c in conventions:\n yield func, c\n\n\ndef test_rotation_3d_3axis():\n ref = [[1, 0, 0],\n [0, 1, 0],\n [0, 0, 1]]\n alpha = 0.1\n beta = 0.2\n gamma = 0.3\n\n # intrinsic rotations\n conventions = (\"XZ'X''\", \"XZ'Y''\",\n \"XY'X''\", \"XY'Z''\",\n \"YX'Y''\", \"YX'Z''\",\n \"YZ'Y''\", \"YZ'X''\",\n \"ZY'Z''\", \"ZY'X''\",\n \"ZX'Z''\", \"ZX'Y''\")\n\n def func(c):\n r = Rotation3dOperator(c, alpha, beta, gamma)\n r2 = Rotation3dOperator(c[0], alpha) * \\\n Rotation3dOperator(c[1], beta) * \\\n Rotation3dOperator(c[3], gamma)\n assert_allclose(r(ref), r2(ref))\n for c in conventions:\n yield func, c\n\n # extrinsic rotations\n conventions = (\"XZX\", \"XZY\",\n \"XYX\", \"XYZ\",\n \"YXY\", \"YXZ\",\n \"YZY\", \"YZX\",\n \"ZYZ\", \"ZYX\",\n \"ZXZ\", \"ZXY\")\n\n def func(c):\n r = Rotation3dOperator(c, alpha, beta, gamma)\n r2 = Rotation3dOperator(c[2], gamma) * \\\n Rotation3dOperator(c[1], beta) * \\\n Rotation3dOperator(c[0], alpha)\n assert_allclose(r(ref), r2(ref))\n for c in conventions:\n yield func, c\n\n\ndef test_sum_operator():\n for s in SHAPES[1:]:\n for a in [None] + list(range(len(s))):\n op = SumOperator(axis=a)\n d = op.todense(shapein=s)\n t = op.T.todense(shapeout=s)\n assert_eq(d, t.T)\n\n\ndef 
test_symmetric_band_toeplitz_operator():\n def totoeplitz(n, firstrow):\n if isinstance(n, tuple):\n n_ = n[-1]\n return BlockDiagonalOperator(\n [totoeplitz(n_, f_) for f_ in firstrow], new_axisin=0)\n ncorr = len(firstrow) - 1\n dense = np.zeros((n, n))\n for i in range(n):\n for j in range(n):\n if abs(i-j) <= ncorr:\n dense[i, j] = firstrow[abs(i-j)]\n return DenseOperator(dense, shapein=dense.shape[1])\n\n def func(n, firstrow):\n s = SymmetricBandToeplitzOperator(n, firstrow)\n if firstrow == [1] or firstrow == [[2], [1]]:\n assert_is_instance(s, DiagonalOperator)\n assert_same(s.todense(), totoeplitz(n, firstrow).todense(), atol=1)\n\n for n in [2, 3, 4, 5]:\n for firstrow in ([1], [2, 1]):\n yield func, n, firstrow\n for n in ((2, _) for _ in [2, 3, 4, 5]):\n for firstrow in ([[2], [1]], [[2, 1], [3, 2]]):\n yield func, n, firstrow\n\n\ndef test_tridiagonal_operator():\n values = (\n ([1, 1, 0], [2, 1], [2, 2]),\n ([1, 1, 2], [2, 1], None),\n ([1j, 1, 0], [2, 1], [-1j, 2]),\n ([1, 1j, 2], [2j, 1], None))\n expected = ([[1, 2, 0],\n [2, 1, 2],\n [0, 1, 0]],\n [[1, 2, 0],\n [2, 1, 1],\n [0, 1, 2]],\n [[1j,-1j, 0],\n [ 2, 1, 2],\n [ 0, 1, 0]],\n [[ 1,-2j, 0],\n [2j, 1j, 1],\n [ 0, 1, 2]])\n\n def func(v, e):\n o = TridiagonalOperator(v[0], v[1], v[2])\n assert_eq(o.todense(), e)\n assert_eq(o.T.todense(), e.T)\n assert_eq(o.C.todense(), e.conj())\n assert_eq(o.H.todense(), e.T.conj())\n for v, e in zip(values, expected):\n yield func, v, np.array(e)\n" ]
[ [ "numpy.dtype", "numpy.array", "numpy.conjugate" ], [ "numpy.radians", "numpy.arange", "numpy.cos", "numpy.ones", "numpy.sin", "numpy.diff", "numpy.array", "numpy.zeros", "numpy.trapz" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ClandininLab/multistim
[ "9aba24a85317e4cbd81555be45c4df87cfad035c", "9aba24a85317e4cbd81555be45c4df87cfad035c" ]
[ "visanalysis/visanalysis/analysis/volumetric_data.py", "visprotocol/visprotocol/clandinin_data.py" ]
[ "from visanalysis.analysis import imaging_data\nimport numpy as np\nfrom scipy import stats\nimport nibabel as nib\n\n\nclass VolumetricDataObject(imaging_data.ImagingDataObject):\n def __init__(self, file_path, series_number, quiet=False):\n super().__init__(file_path, series_number, quiet=quiet)\n\n def getTrialAlignedVoxelResponses(self, voxels, dff=False):\n n_voxels, t_dim = voxels.shape\n\n # zero values are from registration. Replace with nan\n voxels[np.where(voxels == 0)] = np.nan\n # set to minimum\n voxels[np.where(np.isnan(voxels))] = np.nanmin(voxels)\n\n stimulus_start_times = self.getStimulusTiming()['stimulus_start_times'] # sec\n pre_time = self.getRunParameters()['pre_time'] # sec\n stim_time = self.getRunParameters()['stim_time'] # sec\n tail_time = self.getRunParameters()['tail_time'] # sec\n epoch_start_times = stimulus_start_times - pre_time\n epoch_end_times = stimulus_start_times + stim_time + tail_time\n epoch_time = pre_time + stim_time + tail_time # sec\n\n sample_period = self.getResponseTiming()['sample_period'] # sec\n stack_times = self.getResponseTiming()['time_vector'] # sec\n\n # find how many acquisition frames correspond to pre, stim, tail time\n epoch_frames = int(epoch_time / sample_period) # in acquisition frames\n pre_frames = int(pre_time / sample_period) # in acquisition frames\n tail_frames = int(tail_time / sample_period)\n time_vector = np.arange(0, epoch_frames) * sample_period # sec\n\n no_trials = len(epoch_start_times)\n voxel_trial_matrix = np.ndarray(shape=(n_voxels, epoch_frames, no_trials), dtype='float32') #n_voxels, time_vector, trials\n voxel_trial_matrix[:] = np.nan\n cut_inds = np.empty(0, dtype=int)\n for idx, val in enumerate(epoch_start_times):\n stack_inds = np.where(np.logical_and(stack_times < epoch_end_times[idx], stack_times >= epoch_start_times[idx]))[0]\n if len(stack_inds) == 0: # no imaging acquisitions happened during this epoch presentation\n cut_inds = np.append(cut_inds, idx)\n continue\n if np.any(stack_inds > voxels.shape[1]):\n cut_inds = np.append(cut_inds, idx)\n continue\n if idx == no_trials:\n if len(stack_inds) < epoch_frames: # missed images for the end of the stimulus\n cut_inds = np.append(cut_inds, idx)\n continue\n\n # Get voxel responses for this epoch\n new_resp_chunk = voxels[:, stack_inds] # voxel X time\n\n if dff:\n # calculate baseline using pre frames and last half of tail frames\n baseline_pre = new_resp_chunk[:, 0:pre_frames]\n baseline_tail = new_resp_chunk[:, -int(tail_frames/2):]\n baseline = np.mean(np.concatenate((baseline_pre, baseline_tail), axis=1), axis=1, keepdims=True)\n # to dF/F\n new_resp_chunk = (new_resp_chunk - baseline) / baseline;\n\n try:\n voxel_trial_matrix[:, :, idx] = new_resp_chunk[:, 0:epoch_frames]\n except:\n print('Size mismatch idx = {}'.format(idx)) # the end of a response clipped off\n cut_inds = np.append(cut_inds, idx)\n\n voxel_trial_matrix = np.delete(voxel_trial_matrix, cut_inds, axis=2) # shape = (voxel, time, trial)\n\n return time_vector, voxel_trial_matrix\n\n def getMeanBrainByStimulus(self, voxel_trial_matrix, parameter_key=None):\n run_parameters = self.getRunParameters()\n response_timing = self.getResponseTiming()\n epoch_parameters = self.getEpochParameters()\n\n if parameter_key is None:\n parameter_values = [list(pd.values()) for pd in self.getEpochParameterDicts()]\n elif type(parameter_key) is dict: #for composite stims like panglom suite\n parameter_values = []\n for ind_e, ep in enumerate(epoch_parameters):\n component_stim_type = 
ep.get('component_stim_type')\n e_params = [component_stim_type]\n param_keys = parameter_key[component_stim_type]\n for pk in param_keys:\n e_params.append(ep.get(pk))\n\n parameter_values.append(e_params)\n else:\n parameter_values = [ep.get(parameter_key) for ep in epoch_parameters]\n\n unique_parameter_values = np.unique(np.array(parameter_values, dtype='object'))\n n_stimuli = len(unique_parameter_values)\n\n pre_frames = int(run_parameters['pre_time'] / response_timing.get('sample_period'))\n stim_frames = int(run_parameters['stim_time'] / response_timing.get('sample_period'))\n tail_frames = int(run_parameters['tail_time'] / response_timing.get('sample_period'))\n\n n_voxels, t_dim, trials = voxel_trial_matrix.shape\n\n mean_voxel_response = np.ndarray(shape=(n_voxels, t_dim, n_stimuli)) # voxels x time x stim condition\n p_values = np.ndarray(shape=(n_voxels, n_stimuli))\n response_amp = np.ndarray(shape=(n_voxels, n_stimuli)) # mean voxel resp for each stim condition (voxel x stim)\n trial_response_amp = [] # list (len=n_stimuli), each entry is ndarray of mean response amplitudes (voxels x trials)\n trial_response_by_stimulus = [] # list (len=n_stimuli), each entry is ndarray of trial response (voxel x time x trial)\n\n for p_ind, up in enumerate(unique_parameter_values):\n pull_inds = np.where([up == x for x in parameter_values])[0]\n\n if np.any(pull_inds >= voxel_trial_matrix.shape[2]):\n tmp = np.where(pull_inds >= voxel_trial_matrix.shape[2])[0]\n print('Epoch(s) {} not included in voxel_trial_matrix'.format(pull_inds[tmp]))\n pull_inds = pull_inds[pull_inds < voxel_trial_matrix.shape[2]]\n\n baseline_pts = np.concatenate((voxel_trial_matrix[:, 0:pre_frames, pull_inds],\n voxel_trial_matrix[:, -int(tail_frames/2):, pull_inds]), axis=1)\n response_pts = voxel_trial_matrix[:, pre_frames:(pre_frames+stim_frames), pull_inds]\n\n _, p_values[:, p_ind] = stats.ttest_ind(np.reshape(baseline_pts, (n_voxels, -1)),\n np.reshape(response_pts, (n_voxels, -1)), axis=1)\n\n trial_response_amp.append(np.nanmean(response_pts, axis=1)) # each list entry = timee average. 
(voxels x trials)\n\n response_amp[:, p_ind] = np.mean(response_pts, axis=(1, 2))\n\n mean_voxel_response[:, :, p_ind] = (np.mean(voxel_trial_matrix[:, :, pull_inds], axis=2))\n trial_response_by_stimulus.append(voxel_trial_matrix[:, :, pull_inds])\n\n return mean_voxel_response, unique_parameter_values, p_values, response_amp, trial_response_amp, trial_response_by_stimulus\n\n\ndef loadFunctionalBrain(file_path, x_lim=[0, None], y_lim=[0, None], z_lim=[0, None], t_lim=[0, None], channel=1):\n brain = nib.load(file_path).get_fdata()\n if len(brain.shape) > 4: # multi-channel xyztc\n brain = brain[x_lim[0]:x_lim[1], y_lim[0]:y_lim[1], z_lim[0]:z_lim[1], t_lim[0]:t_lim[1], channel]\n # print('Loaded channel {} of xyztc brain {}'.format(channel, file_path))\n else: # single channel xyzt\n brain = brain[x_lim[0]:x_lim[1], y_lim[0]:y_lim[1], z_lim[0]:z_lim[1], t_lim[0]:t_lim[1]]\n # print('Loaded single channel xyzt brain {}'.format(file_path))\n\n return brain\n", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nData file class\n\nData File structure is:\nyyyy-mm-dd\n Client\n Flies\n Fly_n\n epoch_runs\n series_00n (attrs = protocol_parameters)\n acquisition\n epochs\n epoch_001 (attrs = epoch_parameters, convenience_parameters)\n epoch_002\n rois\n stimulus_timing\n Notes\n\n\"\"\"\nimport h5py\nimport os\nimport inspect\nimport yaml\nfrom datetime import datetime\nimport numpy as np\nimport visprotocol\n\n\nclass Data():\n def __init__(self, cfg):\n self.experiment_file_name = None\n self.series_count = 1\n self.fly_metadata = {} # populated in GUI or user protocol\n self.current_fly = None\n self.user_name = cfg.get('user_name')\n self.rig_name = cfg.get('rig_name')\n self.cfg = cfg\n\n # # # Metadata defaults # # #\n self.experimenter = self.cfg.get('experimenter', '')\n\n # # # Lists of fly metadata # # #\n self.prepChoices = self.cfg.get('prep_choices', [])\n self.driverChoices = self.cfg.get('driver_choices', [])\n self.indicatorChoices = self.cfg.get('indicator_choices', [])\n\n # load rig-specific metadata things\n self.data_directory = self.cfg.get('rig_config').get(self.rig_name).get('data_directory', os.getcwd())\n self.rig = self.cfg.get('rig_config').get(self.rig_name).get('rig', '(rig)')\n self.screen_center = self.cfg.get('rig_config').get(self.rig_name).get('screen_center', [0, 0])\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n# # # # # # # # # Creating experiment file and groups # # # # # # # # # # # #\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n\n def initializeExperimentFile(self):\n \"\"\"\n Create HDF5 data file and initialize top-level hierarchy nodes\n \"\"\"\n with h5py.File(os.path.join(self.data_directory, self.experiment_file_name + '.hdf5'), 'w-') as experiment_file:\n # Experiment date/time\n init_now = datetime.now()\n date = init_now.isoformat()[:-16]\n init_time = init_now.strftime(\"%H:%M:%S\")\n\n # Write experiment metadata as top-level attributes\n experiment_file.attrs['date'] = date\n experiment_file.attrs['init_time'] = init_time\n experiment_file.attrs['data_directory'] = self.data_directory\n experiment_file.attrs['experimenter'] = self.experimenter\n experiment_file.attrs['rig'] = self.rig\n\n # Create a top-level group for epoch runs and user-entered notes\n experiment_file.create_group('Client')\n experiment_file.create_group('Flies')\n experiment_file.create_group('Notes')\n\n def createFly(self, fly_metadata):\n \"\"\"\n \"\"\"\n if fly_metadata.get('fly_id') in 
[x.get('fly_id') for x in self.getExistingFlyData()]:\n print('A fly with this ID already exists')\n return\n\n if self.experimentFileExists():\n with h5py.File(os.path.join(self.data_directory, self.experiment_file_name + '.hdf5'), 'r+') as experiment_file:\n fly_init_time = datetime.now().strftime('%H:%M:%S.%f')[:-4]\n flies_group = experiment_file['/Flies']\n new_fly = flies_group.create_group(fly_metadata.get('fly_id'))\n new_fly.attrs['init_time'] = fly_init_time\n for key in fly_metadata:\n new_fly.attrs[key] = fly_metadata.get(key)\n\n new_fly.create_group('epoch_runs')\n\n self.selectFly(fly_metadata.get('fly_id'))\n else:\n print('Initialize a data file before defining a fly')\n\n def createEpochRun(self, protocol_object):\n \"\"\"\"\n \"\"\"\n # create a new epoch run group in the data file\n if (self.currentFlyExists() and self.experimentFileExists()):\n with h5py.File(os.path.join(self.data_directory, self.experiment_file_name + '.hdf5'), 'r+') as experiment_file:\n run_start_time = datetime.now().strftime('%H:%M:%S.%f')[:-4]\n fly_group = experiment_file['/Flies/{}/epoch_runs'.format(self.current_fly)]\n new_epoch_run = fly_group.create_group('series_{}'.format(str(self.series_count).zfill(3)))\n new_epoch_run.attrs['run_start_time'] = run_start_time\n for key in protocol_object.run_parameters: # add run parameter attributes\n new_epoch_run.attrs[key] = protocol_object.run_parameters[key]\n\n for key in protocol_object.protocol_parameters: # add user-entered protocol params\n new_epoch_run.attrs[key] = protocol_object.protocol_parameters[key]\n\n # add subgroups:\n new_epoch_run.create_group('acquisition')\n new_epoch_run.create_group('epochs')\n new_epoch_run.create_group('rois')\n new_epoch_run.create_group('stimulus_timing')\n\n else:\n print('Create a data file and/or define a fly first')\n\n def createEpoch(self, protocol_object):\n \"\"\"\n \"\"\"\n if (self.currentFlyExists() and self.experimentFileExists()):\n with h5py.File(os.path.join(self.data_directory, self.experiment_file_name + '.hdf5'), 'r+') as experiment_file:\n epoch_time = datetime.now().strftime('%H:%M:%S.%f')[:-4]\n epoch_run_group = experiment_file['/Flies/{}/epoch_runs/series_{}/epochs'.format(self.current_fly, str(self.series_count).zfill(3))]\n new_epoch = epoch_run_group.create_group('epoch_{}'.format(str(protocol_object.num_epochs_completed+1).zfill(3)))\n new_epoch.attrs['epoch_time'] = epoch_time\n\n epochParametersGroup = new_epoch\n if type(protocol_object.epoch_parameters) is tuple: # stimulus is tuple of multiple stims layered on top of one another\n num_stims = len(protocol_object.epoch_parameters)\n for stim_ind in range(num_stims):\n for key in protocol_object.epoch_parameters[stim_ind]:\n newValue = protocol_object.epoch_parameters[stim_ind][key]\n if type(newValue) is dict:\n newValue = str(newValue)\n prefix = 'stim' + str(stim_ind) + '_'\n if newValue is None:\n newValue = 'None'\n epochParametersGroup.attrs[prefix + key] = newValue\n\n elif type(protocol_object.epoch_parameters) is dict:\n for key in protocol_object.epoch_parameters: # save out epoch parameters\n newValue = protocol_object.epoch_parameters[key]\n if type(newValue) is dict: # TODO: Find a way to split this into subgroups. 
Hacky work around.\n newValue = str(newValue)\n if newValue is None:\n newValue = 'None'\n epochParametersGroup.attrs[key] = newValue\n\n convenienceParametersGroup = new_epoch\n for key in protocol_object.convenience_parameters: # save out convenience parameters\n convenienceParametersGroup.attrs[key] = protocol_object.convenience_parameters[key]\n\n else:\n print('Create a data file and/or define a fly first')\n\n def createNote(self, noteText):\n \"\"\n \"\"\n if self.experimentFileExists():\n with h5py.File(os.path.join(self.data_directory, self.experiment_file_name + '.hdf5'), 'r+') as experiment_file:\n noteTime = datetime.now().strftime('%H:%M:%S.%f')[:-4]\n notes = experiment_file['/Notes']\n notes.attrs[noteTime] = noteText\n else:\n print('Initialize a data file before writing a note')\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n# # # # # # # # # Retrieve / query data file # # # # # # # # # # # # # # # # #\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n\n def experimentFileExists(self):\n if self.experiment_file_name is None:\n tf = False\n else:\n tf = os.path.isfile(os.path.join(self.data_directory, self.experiment_file_name + '.hdf5'))\n return tf\n\n def currentFlyExists(self):\n if self.current_fly is None:\n tf = False\n else:\n tf = True\n return tf\n\n def getExistingSeries(self):\n all_series = []\n with h5py.File(os.path.join(self.data_directory, self.experiment_file_name + '.hdf5'), 'r') as experiment_file:\n for fly_id in list(experiment_file['/Flies'].keys()):\n new_series = list(experiment_file['/Flies/{}/epoch_runs'.format(fly_id)].keys())\n all_series.append(new_series)\n all_series = [val for s in all_series for val in s]\n series = [int(x.split('_')[-1]) for x in all_series]\n return series\n\n def getHighestSeriesCount(self):\n series = self.getExistingSeries()\n if len(series) == 0:\n return 0\n else:\n return np.max(series)\n\n def getExistingFlyData(self):\n # return list of dicts for fly metadata already present in experiment file\n fly_data_list = []\n if self.experimentFileExists():\n with h5py.File(os.path.join(self.data_directory, self.experiment_file_name + '.hdf5'), 'r') as experiment_file:\n for fly in experiment_file['/Flies']:\n new_fly = experiment_file['/Flies'][fly]\n new_dict = {}\n for at in new_fly.attrs:\n new_dict[at] = new_fly.attrs[at]\n\n fly_data_list.append(new_dict)\n return fly_data_list\n\n def selectFly(self, fly_id):\n self.current_fly = fly_id\n\n def advanceSeriesCount(self):\n self.series_count += 1\n\n def updateSeriesCount(self, val):\n self.series_count = val\n\n def getSeriesCount(self):\n return self.series_count\n\n def reloadSeriesCount(self):\n all_series = []\n with h5py.File(os.path.join(self.data_directory, self.experiment_file_name + '.hdf5'), 'r') as experiment_file:\n for fly_id in list(experiment_file['/Flies'].keys()):\n new_series = list(experiment_file['/Flies/{}/epoch_runs'.format(fly_id)].keys())\n all_series.append(new_series)\n all_series = [val for s in all_series for val in s]\n series = [int(x.split('_')[-1]) for x in all_series]\n\n if len(series) == 0:\n self.series_count = 0 + 1\n else:\n self.series_count = np.max(series) + 1\n\n\nclass AODscopeData(Data):\n def __init__(self, cfg):\n super().__init__(cfg)\n self.poi_scan = True\n self.poi_count = 1\n self.xyt_count = 1\n\n def advanceSeriesCount(self):\n self.series_count += 1\n if self.poi_scan:\n self.poi_count += 1\n else:\n self.xyt_count += 1\n\n def 
updateSeriesCount(self, val):\n if self.poi_scan:\n self.poi_count = val\n else:\n self.xyt_count = val\n\n def getSeriesCount(self):\n if self.poi_scan:\n return self.poi_count\n else:\n return self.xyt_count\n\n def getExistingSeries(self):\n poi_series = []\n xyt_series = []\n with h5py.File(os.path.join(self.data_directory, self.experiment_file_name + '.hdf5'), 'r') as experiment_file:\n for fly_id in list(experiment_file['/Flies'].keys()):\n for series_id in experiment_file['/Flies/{}/epoch_runs'.format(fly_id)]:\n acq_group = experiment_file['/Flies/{}/epoch_runs/{}/acquisition'.format(fly_id, series_id)]\n if acq_group.attrs['poi_scan']:\n poi_series.append(acq_group.attrs['poi_count'])\n else:\n xyt_series.append(acq_group.attrs['xyt_count'])\n\n poi_series = [int(x) for x in poi_series]\n xyt_series = [int(x) for x in xyt_series]\n\n if self.poi_scan:\n return poi_series\n else:\n return xyt_series\n\n def createEpochRun(self, protocol_object):\n \"\"\"\"\n \"\"\"\n # create a new epoch run group in the data file\n if (self.currentFlyExists() and self.experimentFileExists()):\n with h5py.File(os.path.join(self.data_directory, self.experiment_file_name + '.hdf5'), 'r+') as experiment_file:\n run_start_time = datetime.now().strftime('%H:%M:%S.%f')[:-4]\n fly_group = experiment_file['/Flies/{}/epoch_runs'.format(self.current_fly)]\n new_epoch_run = fly_group.create_group('series_{}'.format(str(self.series_count).zfill(3)))\n new_epoch_run.attrs['run_start_time'] = run_start_time\n for key in protocol_object.run_parameters: # add run parameter attributes\n new_epoch_run.attrs[key] = protocol_object.run_parameters[key]\n\n for key in protocol_object.protocol_parameters: # add user-entered protocol params\n new_epoch_run.attrs[key] = protocol_object.protocol_parameters[key]\n\n # add subgroups:\n new_epoch_run.create_group('acquisition')\n new_epoch_run.create_group('epochs')\n new_epoch_run.create_group('rois')\n new_epoch_run.create_group('stimulus_timing')\n\n # AODscope-specific data stuff:\n new_epoch_run['acquisition'].attrs['poi_scan'] = self.poi_scan\n if self.poi_scan:\n new_epoch_run['acquisition'].attrs['poi_count'] = self.poi_count\n else:\n new_epoch_run['acquisition'].attrs['xyt_count'] = self.xyt_count\n else:\n print('Create a data file and/or define a fly first')\n" ]
[ [ "numpy.logical_and", "numpy.reshape", "numpy.arange", "numpy.isnan", "numpy.nanmin", "numpy.ndarray", "numpy.concatenate", "numpy.delete", "numpy.append", "numpy.mean", "numpy.any", "numpy.nanmean", "numpy.array", "numpy.where", "numpy.empty" ], [ "numpy.max" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nadgeri14/allennlp
[ "2eefffaf71612263a1c20e8ce4107849cfd5efe3", "2eefffaf71612263a1c20e8ce4107849cfd5efe3", "2eefffaf71612263a1c20e8ce4107849cfd5efe3", "2eefffaf71612263a1c20e8ce4107849cfd5efe3", "2eefffaf71612263a1c20e8ce4107849cfd5efe3" ]
[ "allennlp/modules/seq2seq_decoders/lstm_cell_decoder_net.py", "allennlp/models/decomposable_attention.py", "scripts/write_srl_predictions_to_conll_format.py", "allennlp/tests/common/from_params_test.py", "allennlp/tests/training/metrics/spearman_correlation_test.py" ]
[ "from typing import Tuple, Dict, Optional\nfrom overrides import overrides\n\nimport torch\nfrom torch.nn import LSTMCell\n\nfrom allennlp.modules import Attention\nfrom allennlp.modules.seq2seq_decoders.decoder_net import DecoderNet\nfrom allennlp.nn import util\n\n\[email protected](\"lstm_cell\")\nclass LstmCellDecoderNet(DecoderNet):\n \"\"\"\n This decoder net implements simple decoding network with LSTMCell and Attention.\n\n # Parameters\n\n decoding_dim : ``int``, required\n Defines dimensionality of output vectors.\n target_embedding_dim : ``int``, required\n Defines dimensionality of input target embeddings. Since this model takes it's output on a previous step\n as input of following step, this is also an input dimensionality.\n attention : ``Attention``, optional (default = None)\n If you want to use attention to get a dynamic summary of the encoder outputs at each step\n of decoding, this is the function used to compute similarity between the decoder hidden\n state and encoder outputs.\n \"\"\"\n\n def __init__(\n self,\n decoding_dim: int,\n target_embedding_dim: int,\n attention: Optional[Attention] = None,\n bidirectional_input: bool = False,\n ) -> None:\n\n super().__init__(\n decoding_dim=decoding_dim,\n target_embedding_dim=target_embedding_dim,\n decodes_parallel=False,\n )\n\n # In this particular type of decoder output of previous step passes directly to the input of current step\n # We also assume that decoder output dimensionality is equal to the encoder output dimensionality\n decoder_input_dim = self.target_embedding_dim\n\n # Attention mechanism applied to the encoder output for each step.\n self._attention = attention\n\n if self._attention:\n # If using attention, a weighted average over encoder outputs will be concatenated\n # to the previous target embedding to form the input to the decoder at each\n # time step. encoder output dim will be same as decoding_dim\n decoder_input_dim += decoding_dim\n\n # We'll use an LSTM cell as the recurrent cell that produces a hidden state\n # for the decoder at each time step.\n self._decoder_cell = LSTMCell(decoder_input_dim, self.decoding_dim)\n self._bidirectional_input = bidirectional_input\n\n def _prepare_attended_input(\n self,\n decoder_hidden_state: torch.Tensor = None,\n encoder_outputs: torch.Tensor = None,\n encoder_outputs_mask: torch.Tensor = None,\n ) -> torch.Tensor:\n \"\"\"Apply attention over encoder outputs and decoder state.\"\"\"\n # Ensure mask is also a FloatTensor. 
Or else the multiplication within\n # attention will complain.\n # shape: (batch_size, max_input_sequence_length, encoder_output_dim)\n encoder_outputs_mask = encoder_outputs_mask.float()\n\n # shape: (batch_size, max_input_sequence_length)\n input_weights = self._attention(decoder_hidden_state, encoder_outputs, encoder_outputs_mask)\n\n # shape: (batch_size, encoder_output_dim)\n attended_input = util.weighted_sum(encoder_outputs, input_weights)\n\n return attended_input\n\n def init_decoder_state(\n self, encoder_out: Dict[str, torch.LongTensor]\n ) -> Dict[str, torch.Tensor]:\n\n batch_size, _ = encoder_out[\"source_mask\"].size()\n\n # Initialize the decoder hidden state with the final output of the encoder,\n # and the decoder context with zeros.\n # shape: (batch_size, encoder_output_dim)\n final_encoder_output = util.get_final_encoder_states(\n encoder_out[\"encoder_outputs\"],\n encoder_out[\"source_mask\"],\n bidirectional=self._bidirectional_input,\n )\n\n return {\n \"decoder_hidden\": final_encoder_output, # shape: (batch_size, decoder_output_dim)\n \"decoder_context\": final_encoder_output.new_zeros(batch_size, self.decoding_dim)\n # shape: (batch_size, decoder_output_dim)\n }\n\n @overrides\n def forward(\n self,\n previous_state: Dict[str, torch.Tensor],\n encoder_outputs: torch.Tensor,\n source_mask: torch.Tensor,\n previous_steps_predictions: torch.Tensor,\n previous_steps_mask: Optional[torch.Tensor] = None,\n ) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]:\n\n decoder_hidden = previous_state[\"decoder_hidden\"]\n decoder_context = previous_state[\"decoder_context\"]\n\n # shape: (group_size, output_dim)\n last_predictions_embedding = previous_steps_predictions[:, -1]\n\n if self._attention:\n # shape: (group_size, encoder_output_dim)\n attended_input = self._prepare_attended_input(\n decoder_hidden, encoder_outputs, source_mask\n )\n\n # shape: (group_size, decoder_output_dim + target_embedding_dim)\n decoder_input = torch.cat((attended_input, last_predictions_embedding), -1)\n else:\n # shape: (group_size, target_embedding_dim)\n decoder_input = last_predictions_embedding\n\n # shape (decoder_hidden): (batch_size, decoder_output_dim)\n # shape (decoder_context): (batch_size, decoder_output_dim)\n decoder_hidden, decoder_context = self._decoder_cell(\n decoder_input, (decoder_hidden, decoder_context)\n )\n\n return (\n {\"decoder_hidden\": decoder_hidden, \"decoder_context\": decoder_context},\n decoder_hidden,\n )\n", "from typing import Dict, Optional, List, Any\n\nimport torch\n\nfrom allennlp.common.checks import check_dimensions_match\nfrom allennlp.data import TextFieldTensors, Vocabulary\nfrom allennlp.models.model import Model\nfrom allennlp.modules import FeedForward\nfrom allennlp.modules import Seq2SeqEncoder, SimilarityFunction, TimeDistributed, TextFieldEmbedder\nfrom allennlp.modules.matrix_attention.legacy_matrix_attention import LegacyMatrixAttention\nfrom allennlp.nn import InitializerApplicator, RegularizerApplicator\nfrom allennlp.nn.util import get_text_field_mask, masked_softmax, weighted_sum\nfrom allennlp.training.metrics import CategoricalAccuracy\n\n\[email protected](\"decomposable_attention\")\nclass DecomposableAttention(Model):\n \"\"\"\n This ``Model`` implements the Decomposable Attention model described in [A Decomposable\n Attention Model for Natural Language Inference](\n https://www.semanticscholar.org/paper/A-Decomposable-Attention-Model-for-Natural-Languag-Parikh-T%C3%A4ckstr%C3%B6m/07a9478e87a8304fc3267fa16e83e9f3bbd98b27)\n by 
Parikh et al., 2016, with some optional enhancements before the decomposable attention\n actually happens. Parikh's original model allowed for computing an \"intra-sentence\" attention\n before doing the decomposable entailment step. We generalize this to any\n :class:`Seq2SeqEncoder` that can be applied to the premise and/or the hypothesis before\n computing entailment.\n\n The basic outline of this model is to get an embedded representation of each word in the\n premise and hypothesis, align words between the two, compare the aligned phrases, and make a\n final entailment decision based on this aggregated comparison. Each step in this process uses\n a feedforward network to modify the representation.\n\n # Parameters\n\n vocab : ``Vocabulary``\n text_field_embedder : ``TextFieldEmbedder``\n Used to embed the ``premise`` and ``hypothesis`` ``TextFields`` we get as input to the\n model.\n attend_feedforward : ``FeedForward``\n This feedforward network is applied to the encoded sentence representations before the\n similarity matrix is computed between words in the premise and words in the hypothesis.\n similarity_function : ``SimilarityFunction``\n This is the similarity function used when computing the similarity matrix between words in\n the premise and words in the hypothesis.\n compare_feedforward : ``FeedForward``\n This feedforward network is applied to the aligned premise and hypothesis representations,\n individually.\n aggregate_feedforward : ``FeedForward``\n This final feedforward network is applied to the concatenated, summed result of the\n ``compare_feedforward`` network, and its output is used as the entailment class logits.\n premise_encoder : ``Seq2SeqEncoder``, optional (default=``None``)\n After embedding the premise, we can optionally apply an encoder. If this is ``None``, we\n will do nothing.\n hypothesis_encoder : ``Seq2SeqEncoder``, optional (default=``None``)\n After embedding the hypothesis, we can optionally apply an encoder. 
If this is ``None``,\n we will use the ``premise_encoder`` for the encoding (doing nothing if ``premise_encoder``\n is also ``None``).\n initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)\n Used to initialize the model parameters.\n regularizer : ``RegularizerApplicator``, optional (default=``None``)\n If provided, will be used to calculate the regularization penalty during training.\n \"\"\"\n\n def __init__(\n self,\n vocab: Vocabulary,\n text_field_embedder: TextFieldEmbedder,\n attend_feedforward: FeedForward,\n similarity_function: SimilarityFunction,\n compare_feedforward: FeedForward,\n aggregate_feedforward: FeedForward,\n premise_encoder: Optional[Seq2SeqEncoder] = None,\n hypothesis_encoder: Optional[Seq2SeqEncoder] = None,\n initializer: InitializerApplicator = InitializerApplicator(),\n regularizer: Optional[RegularizerApplicator] = None,\n ) -> None:\n super().__init__(vocab, regularizer)\n\n self._text_field_embedder = text_field_embedder\n self._attend_feedforward = TimeDistributed(attend_feedforward)\n self._matrix_attention = LegacyMatrixAttention(similarity_function)\n self._compare_feedforward = TimeDistributed(compare_feedforward)\n self._aggregate_feedforward = aggregate_feedforward\n self._premise_encoder = premise_encoder\n self._hypothesis_encoder = hypothesis_encoder or premise_encoder\n\n self._num_labels = vocab.get_vocab_size(namespace=\"labels\")\n\n check_dimensions_match(\n text_field_embedder.get_output_dim(),\n attend_feedforward.get_input_dim(),\n \"text field embedding dim\",\n \"attend feedforward input dim\",\n )\n check_dimensions_match(\n aggregate_feedforward.get_output_dim(),\n self._num_labels,\n \"final output dimension\",\n \"number of labels\",\n )\n\n self._accuracy = CategoricalAccuracy()\n self._loss = torch.nn.CrossEntropyLoss()\n\n initializer(self)\n\n def forward( # type: ignore\n self,\n premise: TextFieldTensors,\n hypothesis: TextFieldTensors,\n label: torch.IntTensor = None,\n metadata: List[Dict[str, Any]] = None,\n ) -> Dict[str, torch.Tensor]:\n\n \"\"\"\n # Parameters\n\n premise : TextFieldTensors\n From a ``TextField``\n hypothesis : TextFieldTensors\n From a ``TextField``\n label : torch.IntTensor, optional, (default = None)\n From a ``LabelField``\n metadata : ``List[Dict[str, Any]]``, optional, (default = None)\n Metadata containing the original tokenization of the premise and\n hypothesis with 'premise_tokens' and 'hypothesis_tokens' keys respectively.\n # Returns\n\n An output dictionary consisting of:\n\n label_logits : torch.FloatTensor\n A tensor of shape ``(batch_size, num_labels)`` representing unnormalised log\n probabilities of the entailment label.\n label_probs : torch.FloatTensor\n A tensor of shape ``(batch_size, num_labels)`` representing probabilities of the\n entailment label.\n loss : torch.FloatTensor, optional\n A scalar loss to be optimised.\n \"\"\"\n embedded_premise = self._text_field_embedder(premise)\n embedded_hypothesis = self._text_field_embedder(hypothesis)\n premise_mask = get_text_field_mask(premise).float()\n hypothesis_mask = get_text_field_mask(hypothesis).float()\n\n if self._premise_encoder:\n embedded_premise = self._premise_encoder(embedded_premise, premise_mask)\n if self._hypothesis_encoder:\n embedded_hypothesis = self._hypothesis_encoder(embedded_hypothesis, hypothesis_mask)\n\n projected_premise = self._attend_feedforward(embedded_premise)\n projected_hypothesis = self._attend_feedforward(embedded_hypothesis)\n # Shape: (batch_size, 
premise_length, hypothesis_length)\n similarity_matrix = self._matrix_attention(projected_premise, projected_hypothesis)\n\n # Shape: (batch_size, premise_length, hypothesis_length)\n p2h_attention = masked_softmax(similarity_matrix, hypothesis_mask)\n # Shape: (batch_size, premise_length, embedding_dim)\n attended_hypothesis = weighted_sum(embedded_hypothesis, p2h_attention)\n\n # Shape: (batch_size, hypothesis_length, premise_length)\n h2p_attention = masked_softmax(similarity_matrix.transpose(1, 2).contiguous(), premise_mask)\n # Shape: (batch_size, hypothesis_length, embedding_dim)\n attended_premise = weighted_sum(embedded_premise, h2p_attention)\n\n premise_compare_input = torch.cat([embedded_premise, attended_hypothesis], dim=-1)\n hypothesis_compare_input = torch.cat([embedded_hypothesis, attended_premise], dim=-1)\n\n compared_premise = self._compare_feedforward(premise_compare_input)\n compared_premise = compared_premise * premise_mask.unsqueeze(-1)\n # Shape: (batch_size, compare_dim)\n compared_premise = compared_premise.sum(dim=1)\n\n compared_hypothesis = self._compare_feedforward(hypothesis_compare_input)\n compared_hypothesis = compared_hypothesis * hypothesis_mask.unsqueeze(-1)\n # Shape: (batch_size, compare_dim)\n compared_hypothesis = compared_hypothesis.sum(dim=1)\n\n aggregate_input = torch.cat([compared_premise, compared_hypothesis], dim=-1)\n label_logits = self._aggregate_feedforward(aggregate_input)\n label_probs = torch.nn.functional.softmax(label_logits, dim=-1)\n\n output_dict = {\n \"label_logits\": label_logits,\n \"label_probs\": label_probs,\n \"h2p_attention\": h2p_attention,\n \"p2h_attention\": p2h_attention,\n }\n\n if label is not None:\n loss = self._loss(label_logits, label.long().view(-1))\n self._accuracy(label_logits, label)\n output_dict[\"loss\"] = loss\n\n if metadata is not None:\n output_dict[\"premise_tokens\"] = [x[\"premise_tokens\"] for x in metadata]\n output_dict[\"hypothesis_tokens\"] = [x[\"hypothesis_tokens\"] for x in metadata]\n\n return output_dict\n\n def get_metrics(self, reset: bool = False) -> Dict[str, float]:\n return {\"accuracy\": self._accuracy.get_metric(reset)}\n", "import os\nimport sys\n\nimport argparse\n\nimport torch\n\nsys.path.insert(0, os.path.dirname(os.path.abspath(os.path.join(__file__, os.pardir))))\n\nfrom allennlp.common.tqdm import Tqdm\nfrom allennlp.common import Params\nfrom allennlp.models.archival import load_archive\nfrom allennlp.data.iterators import BasicIterator\nfrom allennlp.data import DatasetReader\nfrom allennlp.models.semantic_role_labeler import write_to_conll_eval_file\nfrom allennlp.nn.util import move_to_device\n\n\ndef main(serialization_directory: int, device: int, data: str, prefix: str, domain: str = None):\n \"\"\"\n serialization_directory : str, required.\n The directory containing the serialized weights.\n device: int, default = -1\n The device to run the evaluation on.\n data: str, default = None\n The data to evaluate on. By default, we use the validation data from\n the original experiment.\n prefix: str, default=\"\"\n The prefix to prepend to the generated gold and prediction files, to distinguish\n different models/data.\n domain: str, optional (default = None)\n If passed, filters the ontonotes evaluation/test dataset to only contain the\n specified domain. 
This overwrites the domain in the config file from the model,\n to allow evaluation on domains other than the one the model was trained on.\n \"\"\"\n config = Params.from_file(os.path.join(serialization_directory, \"config.json\"))\n\n if domain is not None:\n # Hack to allow evaluation on different domains than the\n # model was trained on.\n config[\"dataset_reader\"][\"domain_identifier\"] = domain\n prefix = f\"{domain}_{prefix}\"\n else:\n config[\"dataset_reader\"].pop(\"domain_identifier\", None)\n\n dataset_reader = DatasetReader.from_params(config[\"dataset_reader\"])\n evaluation_data_path = data if data else config[\"validation_data_path\"]\n\n archive = load_archive(\n os.path.join(serialization_directory, \"model.tar.gz\"), cuda_device=device\n )\n model = archive.model\n model.eval()\n\n prediction_file_path = os.path.join(serialization_directory, prefix + \"_predictions.txt\")\n gold_file_path = os.path.join(serialization_directory, prefix + \"_gold.txt\")\n prediction_file = open(prediction_file_path, \"w+\")\n gold_file = open(gold_file_path, \"w+\")\n\n # Load the evaluation data and index it.\n print(\"reading evaluation data from {}\".format(evaluation_data_path))\n instances = dataset_reader.read(evaluation_data_path)\n\n with torch.autograd.no_grad():\n iterator = BasicIterator(batch_size=32)\n iterator.index_with(model.vocab)\n\n model_predictions = []\n batches = iterator(instances, num_epochs=1, shuffle=False)\n for batch in Tqdm.tqdm(batches):\n batch = move_to_device(batch, device)\n result = model(**batch)\n predictions = model.decode(result)\n model_predictions.extend(predictions[\"tags\"])\n\n for instance, prediction in zip(instances, model_predictions):\n fields = instance.fields\n verb_index = fields[\"metadata\"][\"verb_index\"]\n gold_tags = fields[\"metadata\"][\"gold_tags\"]\n sentence = fields[\"metadata\"][\"words\"]\n write_to_conll_eval_file(\n prediction_file, gold_file, verb_index, sentence, prediction, gold_tags\n )\n prediction_file.close()\n gold_file.close()\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(\n description=\"write conll format srl predictions to file from a pretrained model.\"\n )\n parser.add_argument(\"--path\", type=str, help=\"the serialization directory.\")\n parser.add_argument(\"--device\", type=int, default=-1, help=\"the device to load the model onto.\")\n parser.add_argument(\n \"--data\", type=str, default=None, help=\"A directory containing a dataset to evaluate on.\"\n )\n parser.add_argument(\n \"--prefix\", type=str, default=\"\", help=\"A prefix to distinguish model outputs.\"\n )\n parser.add_argument(\n \"--domain\",\n type=str,\n default=None,\n help=\"An optional domain to filter by for producing results.\",\n )\n args = parser.parse_args()\n main(args.path, args.device, args.data, args.prefix, args.domain)\n", "from typing import Dict, Optional, List, Set, Tuple, Union\n\nimport pytest\nimport torch\n\nfrom allennlp.common import Params\nfrom allennlp.common.from_params import FromParams, takes_arg, remove_optional, create_kwargs\nfrom allennlp.common.testing import AllenNlpTestCase\nfrom allennlp.data import DatasetReader, Tokenizer\nfrom allennlp.models import Model\nfrom allennlp.models.archival import load_archive\nfrom allennlp.common.checks import ConfigurationError\n\n\nclass MyClass(FromParams):\n def __init__(self, my_int: int, my_bool: bool = False) -> None:\n self.my_int = my_int\n self.my_bool = my_bool\n\n\nclass TestFromParams(AllenNlpTestCase):\n def 
test_takes_arg(self):\n def bare_function(some_input: int) -> int:\n return some_input + 1\n\n assert takes_arg(bare_function, \"some_input\")\n assert not takes_arg(bare_function, \"some_other_input\")\n\n class SomeClass:\n total = 0\n\n def __init__(self, constructor_param: str) -> None:\n self.constructor_param = constructor_param\n\n def check_param(self, check: str) -> bool:\n return self.constructor_param == check\n\n @classmethod\n def set_total(cls, new_total: int) -> None:\n cls.total = new_total\n\n assert takes_arg(SomeClass, \"self\")\n assert takes_arg(SomeClass, \"constructor_param\")\n assert not takes_arg(SomeClass, \"check\")\n\n assert takes_arg(SomeClass.check_param, \"check\")\n assert not takes_arg(SomeClass.check_param, \"other_check\")\n\n assert takes_arg(SomeClass.set_total, \"new_total\")\n assert not takes_arg(SomeClass.set_total, \"total\")\n\n def test_remove_optional(self):\n optional_type = Optional[Dict[str, str]]\n bare_type = remove_optional(optional_type) # type: ignore\n bare_bare_type = remove_optional(bare_type)\n\n assert bare_type == Dict[str, str]\n assert bare_bare_type == Dict[str, str]\n\n assert remove_optional(Optional[str]) == str\n assert remove_optional(str) == str\n\n def test_from_params(self):\n my_class = MyClass.from_params(Params({\"my_int\": 10}), my_bool=True)\n\n assert isinstance(my_class, MyClass)\n assert my_class.my_int == 10\n assert my_class.my_bool\n\n def test_create_kwargs(self):\n kwargs = create_kwargs(MyClass, MyClass, Params({\"my_int\": 5}), my_bool=True, my_float=4.4)\n\n # my_float should not be included because it's not a param of the MyClass constructor\n assert kwargs == {\"my_int\": 5, \"my_bool\": True}\n\n def test_extras(self):\n from allennlp.common.registrable import Registrable\n\n class A(Registrable):\n pass\n\n @A.register(\"b\")\n class B(A):\n def __init__(self, size: int, name: str) -> None:\n self.size = size\n self.name = name\n\n @A.register(\"c\")\n class C(A):\n def __init__(self, size: int, name: str) -> None:\n self.size = size\n self.name = name\n\n # custom from params\n @classmethod\n def from_params(cls, params: Params, size: int, **extras) -> \"C\": # type: ignore\n name = params.pop(\"name\")\n return cls(size=size, name=name)\n\n # Check that extras get passed, even though A doesn't need them.\n params = Params({\"type\": \"b\", \"size\": 10})\n b = A.from_params(params, name=\"extra\")\n\n assert b.name == \"extra\"\n assert b.size == 10\n\n # Check that extra extras don't get passed.\n params = Params({\"type\": \"b\", \"size\": 10})\n b = A.from_params(params, name=\"extra\", unwanted=True)\n\n assert b.name == \"extra\"\n assert b.size == 10\n\n # Now the same with a custom from_params.\n params = Params({\"type\": \"c\", \"name\": \"extra_c\"})\n c = A.from_params(params, size=20)\n assert c.name == \"extra_c\"\n assert c.size == 20\n\n # Check that extra extras don't get passed.\n params = Params({\"type\": \"c\", \"name\": \"extra_c\"})\n c = A.from_params(params, size=20, unwanted=True)\n\n assert c.name == \"extra_c\"\n assert c.size == 20\n\n def test_extras_for_custom_classes(self):\n\n from allennlp.common.registrable import Registrable\n\n class BaseClass(Registrable):\n pass\n\n class BaseClass2(Registrable):\n pass\n\n @BaseClass.register(\"A\")\n class A(BaseClass):\n def __init__(self, a: int, b: int, val: str) -> None:\n self.a = a\n self.b = b\n self.val = val\n\n def __hash__(self):\n return self.b\n\n def __eq__(self, other):\n return self.b == other.b\n\n 
@classmethod\n def from_params(cls, params: Params, a: int, **extras) -> \"A\": # type: ignore\n # A custom from params\n b = params.pop_int(\"b\")\n val = params.pop(\"val\", \"C\")\n params.assert_empty(cls.__name__)\n return cls(a=a, b=b, val=val)\n\n @BaseClass2.register(\"B\")\n class B(BaseClass2):\n def __init__(self, c: int, b: int) -> None:\n self.c = c\n self.b = b\n\n @classmethod\n def from_params(cls, params: Params, c: int, **extras) -> \"B\": # type: ignore\n b = params.pop_int(\"b\")\n params.assert_empty(cls.__name__)\n return cls(c=c, b=b)\n\n @BaseClass.register(\"E\")\n class E(BaseClass):\n def __init__(self, m: int, n: int) -> None:\n self.m = m\n self.n = n\n\n @classmethod\n def from_params(cls, params: Params, **extras2) -> \"E\": # type: ignore\n m = params.pop_int(\"m\")\n params.assert_empty(cls.__name__)\n n = extras2[\"n\"]\n return cls(m=m, n=n)\n\n class C:\n pass\n\n @BaseClass.register(\"D\")\n class D(BaseClass):\n def __init__(\n self,\n arg1: List[BaseClass],\n arg2: Tuple[BaseClass, BaseClass2],\n arg3: Dict[str, BaseClass],\n arg4: Set[BaseClass],\n arg5: List[BaseClass],\n ) -> None:\n self.arg1 = arg1\n self.arg2 = arg2\n self.arg3 = arg3\n self.arg4 = arg4\n self.arg5 = arg5\n\n vals = [1, 2, 3]\n params = Params(\n {\n \"type\": \"D\",\n \"arg1\": [\n {\"type\": \"A\", \"b\": vals[0]},\n {\"type\": \"A\", \"b\": vals[1]},\n {\"type\": \"A\", \"b\": vals[2]},\n ],\n \"arg2\": [{\"type\": \"A\", \"b\": vals[0]}, {\"type\": \"B\", \"b\": vals[0]}],\n \"arg3\": {\n \"class_1\": {\"type\": \"A\", \"b\": vals[0]},\n \"class_2\": {\"type\": \"A\", \"b\": vals[1]},\n },\n \"arg4\": [\n {\"type\": \"A\", \"b\": vals[0], \"val\": \"M\"},\n {\"type\": \"A\", \"b\": vals[1], \"val\": \"N\"},\n {\"type\": \"A\", \"b\": vals[1], \"val\": \"N\"},\n ],\n \"arg5\": [{\"type\": \"E\", \"m\": 9}],\n }\n )\n extra = C()\n tval1 = 5\n tval2 = 6\n d = BaseClass.from_params(params=params, extra=extra, a=tval1, c=tval2, n=10)\n\n # Tests for List # Parameters\n assert len(d.arg1) == len(vals)\n assert isinstance(d.arg1, list)\n assert isinstance(d.arg1[0], A)\n assert all([x.b == y for x, y in zip(d.arg1, vals)])\n assert all([x.a == tval1 for x in d.arg1])\n\n # Tests for Tuple\n assert isinstance(d.arg2, tuple)\n assert isinstance(d.arg2[0], A)\n assert isinstance(d.arg2[1], B)\n assert d.arg2[0].a == tval1\n assert d.arg2[1].c == tval2\n assert d.arg2[0].b == d.arg2[1].b == vals[0]\n\n # Tests for Dict\n assert isinstance(d.arg3, dict)\n assert isinstance(d.arg3[\"class_1\"], A)\n assert d.arg3[\"class_1\"].a == d.arg3[\"class_2\"].a == tval1\n assert d.arg3[\"class_1\"].b == vals[0]\n assert d.arg3[\"class_2\"].b == vals[1]\n\n # Tests for Set\n assert isinstance(d.arg4, set)\n assert len(d.arg4) == 2\n assert any(x.val == \"M\" for x in d.arg4)\n assert any(x.val == \"N\" for x in d.arg4)\n\n # Tests for custom extras parameters\n assert isinstance(d.arg5, list)\n assert isinstance(d.arg5[0], E)\n assert d.arg5[0].m == 9\n assert d.arg5[0].n == 10\n\n def test_no_constructor(self):\n params = Params({\"type\": \"just_spaces\"})\n\n Tokenizer.from_params(params)\n\n def test_union(self):\n class A(FromParams):\n def __init__(self, a: Union[int, List[int]]) -> None:\n self.a = a\n\n class B(FromParams):\n def __init__(self, b: Union[A, List[A]]) -> None:\n # Really you would want to be sure that `self.b` has a consistent type, but for\n # this test we'll ignore that.\n self.b = b\n\n class C(FromParams):\n def __init__(self, c: Union[A, B, Dict[str, A]]) -> None:\n # 
Really you would want to be sure that `self.c` has a consistent type, but for\n # this test we'll ignore that.\n self.c = c\n\n params = Params({\"a\": 3})\n a = A.from_params(params)\n assert a.a == 3\n\n params = Params({\"a\": [3, 4, 5]})\n a = A.from_params(params)\n assert a.a == [3, 4, 5]\n\n params = Params({\"b\": {\"a\": 3}})\n b = B.from_params(params)\n assert isinstance(b.b, A)\n assert b.b.a == 3\n\n params = Params({\"b\": [{\"a\": 3}, {\"a\": [4, 5]}]})\n b = B.from_params(params)\n assert isinstance(b.b, list)\n assert b.b[0].a == 3\n assert b.b[1].a == [4, 5]\n\n # This is a contrived, ugly example (why would you want to duplicate names in a nested\n # structure like this??), but it demonstrates a potential bug when dealing with mutatable\n # parameters. If you're not careful about keeping the parameters un-mutated in two\n # separate places, you'll end up with a B, or with a dict that's missing the 'b' key.\n params = Params({\"c\": {\"a\": {\"a\": 3}, \"b\": {\"a\": [4, 5]}}})\n c = C.from_params(params)\n assert isinstance(c.c, dict)\n assert c.c[\"a\"].a == 3\n assert c.c[\"b\"].a == [4, 5]\n\n def test_dict(self):\n\n from allennlp.common.registrable import Registrable\n\n class A(Registrable):\n pass\n\n @A.register(\"b\")\n class B(A):\n def __init__(self, size: int) -> None:\n self.size = size\n\n class C(Registrable):\n pass\n\n @C.register(\"d\")\n class D(C):\n def __init__(self, items: Dict[str, A]) -> None:\n self.items = items\n\n params = Params(\n {\n \"type\": \"d\",\n \"items\": {\"first\": {\"type\": \"b\", \"size\": 1}, \"second\": {\"type\": \"b\", \"size\": 2}},\n }\n )\n d = C.from_params(params)\n\n assert isinstance(d.items, dict)\n assert len(d.items) == 2\n assert all(isinstance(key, str) for key in d.items.keys())\n assert all(isinstance(value, B) for value in d.items.values())\n assert d.items[\"first\"].size == 1\n assert d.items[\"second\"].size == 2\n\n def test_dict_not_params(self):\n class A(FromParams):\n def __init__(self, counts: Dict[str, int]) -> None:\n self.counts = counts\n\n params = Params({\"counts\": {\"a\": 10, \"b\": 20}})\n a = A.from_params(params)\n\n assert isinstance(a.counts, dict)\n assert not isinstance(a.counts, Params)\n\n def test_list(self):\n\n from allennlp.common.registrable import Registrable\n\n class A(Registrable):\n pass\n\n @A.register(\"b\")\n class B(A):\n def __init__(self, size: int) -> None:\n self.size = size\n\n class C(Registrable):\n pass\n\n @C.register(\"d\")\n class D(C):\n def __init__(self, items: List[A]) -> None:\n self.items = items\n\n params = Params(\n {\"type\": \"d\", \"items\": [{\"type\": \"b\", \"size\": 1}, {\"type\": \"b\", \"size\": 2}]}\n )\n d = C.from_params(params)\n\n assert isinstance(d.items, list)\n assert len(d.items) == 2\n assert all(isinstance(item, B) for item in d.items)\n assert d.items[0].size == 1\n assert d.items[1].size == 2\n\n def test_tuple(self):\n\n from allennlp.common.registrable import Registrable\n\n class A(Registrable):\n pass\n\n @A.register(\"b\")\n class B(A):\n def __init__(self, size: int) -> None:\n self.size = size\n\n class C(Registrable):\n pass\n\n @C.register(\"d\")\n class D(C):\n def __init__(self, name: str) -> None:\n self.name = name\n\n class E(Registrable):\n pass\n\n @E.register(\"f\")\n class F(E):\n def __init__(self, items: Tuple[A, C]) -> None:\n self.items = items\n\n params = Params(\n {\"type\": \"f\", \"items\": [{\"type\": \"b\", \"size\": 1}, {\"type\": \"d\", \"name\": \"item2\"}]}\n )\n f = E.from_params(params)\n\n 
assert isinstance(f.items, tuple)\n assert len(f.items) == 2\n assert isinstance(f.items[0], B)\n assert isinstance(f.items[1], D)\n assert f.items[0].size == 1\n assert f.items[1].name == \"item2\"\n\n def test_set(self):\n\n from allennlp.common.registrable import Registrable\n\n class A(Registrable):\n def __init__(self, name: str) -> None:\n self.name = name\n\n def __eq__(self, other):\n return self.name == other.name\n\n def __hash__(self):\n return hash(self.name)\n\n @A.register(\"b\")\n class B(A):\n pass\n\n class C(Registrable):\n pass\n\n @C.register(\"d\")\n class D(C):\n def __init__(self, items: Set[A]) -> None:\n self.items = items\n\n params = Params(\n {\n \"type\": \"d\",\n \"items\": [\n {\"type\": \"b\", \"name\": \"item1\"},\n {\"type\": \"b\", \"name\": \"item2\"},\n {\"type\": \"b\", \"name\": \"item2\"},\n ],\n }\n )\n d = C.from_params(params)\n\n assert isinstance(d.items, set)\n assert len(d.items) == 2\n assert all(isinstance(item, B) for item in d.items)\n assert any(item.name == \"item1\" for item in d.items)\n assert any(item.name == \"item2\" for item in d.items)\n\n def test_transferring_of_modules(self):\n\n model_archive = str(\n self.FIXTURES_ROOT / \"decomposable_attention\" / \"serialization\" / \"model.tar.gz\"\n )\n trained_model = load_archive(model_archive).model\n\n config_file = str(self.FIXTURES_ROOT / \"decomposable_attention\" / \"experiment.json\")\n model_params = Params.from_file(config_file).pop(\"model\").as_dict(quiet=True)\n\n # Override only text_field_embedder (freeze) and attend_feedforward params (tunable)\n model_params[\"text_field_embedder\"] = {\n \"_pretrained\": {\n \"archive_file\": model_archive,\n \"module_path\": \"_text_field_embedder\",\n \"freeze\": True,\n }\n }\n model_params[\"attend_feedforward\"] = {\n \"_pretrained\": {\n \"archive_file\": model_archive,\n \"module_path\": \"_attend_feedforward._module\",\n \"freeze\": False,\n }\n }\n\n transfer_model = Model.from_params(vocab=trained_model.vocab, params=Params(model_params))\n\n # TextFieldEmbedder and AttendFeedforward parameters should be transferred\n for trained_parameter, transfer_parameter in zip(\n trained_model._text_field_embedder.parameters(),\n transfer_model._text_field_embedder.parameters(),\n ):\n assert torch.all(trained_parameter == transfer_parameter)\n for trained_parameter, transfer_parameter in zip(\n trained_model._attend_feedforward.parameters(),\n transfer_model._attend_feedforward.parameters(),\n ):\n assert torch.all(trained_parameter == transfer_parameter)\n # Any other module's parameters shouldn't be same (eg. 
compare_feedforward)\n for trained_parameter, transfer_parameter in zip(\n trained_model._compare_feedforward.parameters(),\n transfer_model._compare_feedforward.parameters(),\n ):\n assert torch.all(trained_parameter != transfer_parameter)\n\n # TextFieldEmbedder should have requires_grad Off\n for parameter in transfer_model._text_field_embedder.parameters():\n assert not parameter.requires_grad\n\n # # AttendFeedforward should have requires_grad On\n for parameter in transfer_model._attend_feedforward.parameters():\n assert parameter.requires_grad\n\n def test_transferring_of_modules_ensures_type_consistency(self):\n\n model_archive = str(\n self.FIXTURES_ROOT / \"decomposable_attention\" / \"serialization\" / \"model.tar.gz\"\n )\n trained_model = load_archive(model_archive).model\n\n config_file = str(self.FIXTURES_ROOT / \"decomposable_attention\" / \"experiment.json\")\n model_params = Params.from_file(config_file).pop(\"model\").as_dict(quiet=True)\n\n # Override only text_field_embedder and make it load AttendFeedForward\n model_params[\"text_field_embedder\"] = {\n \"_pretrained\": {\n \"archive_file\": model_archive,\n \"module_path\": \"_attend_feedforward._module\",\n }\n }\n with pytest.raises(ConfigurationError):\n Model.from_params(vocab=trained_model.vocab, params=Params(model_params))\n\n def test_kwargs_are_passed_to_superclass(self):\n params = Params(\n {\"type\": \"text_classification_json\", \"lazy\": True, \"cache_directory\": \"tmp\"}\n )\n reader = DatasetReader.from_params(params)\n assert reader.lazy is True\n assert str(reader._cache_directory) == \"tmp\"\n", "import math\r\nimport torch\r\nimport numpy as np\r\nfrom numpy.testing import assert_allclose\r\n\r\nfrom allennlp.common.testing import AllenNlpTestCase\r\nfrom allennlp.training.metrics import SpearmanCorrelation\r\n\r\n# pylint: disable=no-self-use\r\n\r\n\r\ndef spearman_formula(predictions, labels, mask=None):\r\n \"\"\"\r\n This function is spearman formula from:\r\n https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient\r\n \"\"\"\r\n if mask is not None:\r\n predictions = predictions * mask\r\n labels = labels * mask\r\n\r\n # if all number of a set is same, return np.nan\r\n if len(np.unique(predictions)) == 1 or len(np.unique(labels)) == 1:\r\n return np.nan\r\n\r\n len_pre = len(predictions)\r\n\r\n predictions = [(k, v) for k, v in enumerate(predictions)]\r\n predictions.sort(key=lambda x: x[1], reverse=True)\r\n predictions = [(k, v) for k, v in enumerate(predictions)]\r\n predictions.sort(key=lambda x: x[1][0])\r\n\r\n labels = [(k, v) for k, v in enumerate(labels)]\r\n labels.sort(key=lambda x: x[1], reverse=True)\r\n labels = [(k, v) for k, v in enumerate(labels)]\r\n labels.sort(key=lambda x: x[1][0])\r\n\r\n total = 0\r\n for i in range(len_pre):\r\n total += (predictions[i][0] - labels[i][0]) ** 2\r\n expected_spearman_correlation = 1 - float(6 * total) / (len_pre * (len_pre ** 2 - 1))\r\n\r\n return expected_spearman_correlation\r\n\r\n\r\nclass SpearmanCorrelationTest(AllenNlpTestCase):\r\n def test_unmasked_computation(self):\r\n spearman_correlation = SpearmanCorrelation()\r\n batch_size = 10\r\n num_labels = 10\r\n predictions1 = np.random.randn(batch_size, num_labels).astype(\"float32\")\r\n labels1 = 0.5 * predictions1 + np.random.randn(batch_size, num_labels).astype(\"float32\")\r\n\r\n predictions2 = np.random.randn(1).repeat(num_labels).astype(\"float32\")\r\n predictions2 = predictions2[np.newaxis, :].repeat(batch_size, axis=0)\r\n labels2 = 
np.random.randn(1).repeat(num_labels).astype(\"float32\")\r\n labels2 = 0.5 * predictions2 + labels2[np.newaxis, :].repeat(batch_size, axis=0)\r\n\r\n # in most cases, the data is constructed like predictions_1, the data of such a batch different.\r\n # but in a few cases, for example, predictions_2, the data of such a batch is exactly the same.\r\n predictions_labels_ = [(predictions1, labels1), (predictions2, labels2)]\r\n\r\n for predictions, labels in predictions_labels_:\r\n spearman_correlation.reset()\r\n spearman_correlation(torch.FloatTensor(predictions), torch.FloatTensor(labels))\r\n assert_allclose(\r\n spearman_formula(predictions.reshape(-1), labels.reshape(-1)),\r\n spearman_correlation.get_metric(),\r\n rtol=1e-5,\r\n )\r\n\r\n def test_masked_computation(self):\r\n spearman_correlation = SpearmanCorrelation()\r\n batch_size = 10\r\n num_labels = 10\r\n predictions1 = np.random.randn(batch_size, num_labels).astype(\"float32\")\r\n labels1 = 0.5 * predictions1 + np.random.randn(batch_size, num_labels).astype(\"float32\")\r\n\r\n predictions2 = np.random.randn(1).repeat(num_labels).astype(\"float32\")\r\n predictions2 = predictions2[np.newaxis, :].repeat(batch_size, axis=0)\r\n labels2 = np.random.randn(1).repeat(num_labels).astype(\"float32\")\r\n labels2 = 0.5 * predictions2 + labels2[np.newaxis, :].repeat(batch_size, axis=0)\r\n\r\n # in most cases, the data is constructed like predictions_1, the data of such a batch different.\r\n # but in a few cases, for example, predictions_2, the data of such a batch is exactly the same.\r\n predictions_labels_ = [(predictions1, labels1), (predictions2, labels2)]\r\n\r\n # Random binary mask\r\n mask = np.random.randint(0, 2, size=(batch_size, num_labels)).astype(\"float32\")\r\n\r\n for predictions, labels in predictions_labels_:\r\n spearman_correlation.reset()\r\n spearman_correlation(\r\n torch.FloatTensor(predictions), torch.FloatTensor(labels), torch.FloatTensor(mask)\r\n )\r\n expected_spearman_correlation = spearman_formula(\r\n predictions.reshape(-1), labels.reshape(-1), mask=mask.reshape(-1)\r\n )\r\n\r\n # because add mask, a batch of predictions or labels will have many 0,\r\n # spearman correlation algorithm will dependence the sorting position of a set of numbers,\r\n # too many identical numbers will result in different calculation results each time\r\n # but the positive and negative results are the same,\r\n # so here we only test the positive and negative results of the results.\r\n assert (expected_spearman_correlation * spearman_correlation.get_metric()) > 0\r\n\r\n def test_reset(self):\r\n spearman_correlation = SpearmanCorrelation()\r\n batch_size = 10\r\n num_labels = 10\r\n predictions = np.random.randn(batch_size, num_labels).astype(\"float32\")\r\n labels = 0.5 * predictions + np.random.randn(batch_size, num_labels).astype(\"float32\")\r\n\r\n # 1.test spearman_correlation.reset()\r\n spearman_correlation.reset()\r\n spearman_correlation(torch.FloatTensor(predictions), torch.FloatTensor(labels))\r\n temp = spearman_correlation.get_metric()\r\n spearman_correlation.reset()\r\n spearman_correlation(torch.FloatTensor(predictions), torch.FloatTensor(labels))\r\n assert spearman_correlation.get_metric() == temp\r\n\r\n # 2.test spearman_correlation.reset()\r\n spearman_correlation.reset()\r\n spearman_correlation(torch.FloatTensor(predictions), torch.FloatTensor(labels))\r\n\r\n spearman_correlation.get_metric(reset=False)\r\n assert spearman_correlation.get_metric() != np.nan\r\n 
spearman_correlation.get_metric(reset=True)\r\n assert math.isnan(spearman_correlation.get_metric())\r\n" ]
[ [ "torch.nn.LSTMCell", "torch.cat" ], [ "torch.nn.CrossEntropyLoss", "torch.nn.functional.softmax", "torch.cat" ], [ "torch.autograd.no_grad" ], [ "torch.all" ], [ "numpy.random.randint", "numpy.random.randn", "torch.FloatTensor", "numpy.unique" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dumpmemory/tianshou
[ "bc53ead273f6f9d3788a78ecc739249eeb96b8c6", "9c100e07057ad99f0a62d6e329451093dd44300a" ]
[ "tianshou/utils/net/discrete.py", "test/modelbased/test_ppo_icm.py" ]
[ "from typing import Any, Dict, Optional, Sequence, Tuple, Union\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\n\nfrom tianshou.data import Batch, to_torch\nfrom tianshou.utils.net.common import MLP\n\n\nclass Actor(nn.Module):\n \"\"\"Simple actor network.\n\n Will create an actor operated in discrete action space with structure of\n preprocess_net ---> action_shape.\n\n :param preprocess_net: a self-defined preprocess_net which output a\n flattened hidden state.\n :param action_shape: a sequence of int for the shape of action.\n :param hidden_sizes: a sequence of int for constructing the MLP after\n preprocess_net. Default to empty sequence (where the MLP now contains\n only a single linear layer).\n :param bool softmax_output: whether to apply a softmax layer over the last\n layer's output.\n :param int preprocess_net_output_dim: the output dimension of\n preprocess_net.\n\n For advanced usage (how to customize the network), please refer to\n :ref:`build_the_network`.\n\n .. seealso::\n\n Please refer to :class:`~tianshou.utils.net.common.Net` as an instance\n of how preprocess_net is suggested to be defined.\n \"\"\"\n\n def __init__(\n self,\n preprocess_net: nn.Module,\n action_shape: Sequence[int],\n hidden_sizes: Sequence[int] = (),\n softmax_output: bool = True,\n preprocess_net_output_dim: Optional[int] = None,\n device: Union[str, int, torch.device] = \"cpu\",\n ) -> None:\n super().__init__()\n self.device = device\n self.preprocess = preprocess_net\n self.output_dim = int(np.prod(action_shape))\n input_dim = getattr(preprocess_net, \"output_dim\", preprocess_net_output_dim)\n self.last = MLP(\n input_dim, # type: ignore\n self.output_dim,\n hidden_sizes,\n device=self.device\n )\n self.softmax_output = softmax_output\n\n def forward(\n self,\n s: Union[np.ndarray, torch.Tensor],\n state: Any = None,\n info: Dict[str, Any] = {},\n ) -> Tuple[torch.Tensor, Any]:\n r\"\"\"Mapping: s -> Q(s, \\*).\"\"\"\n logits, h = self.preprocess(s, state)\n logits = self.last(logits)\n if self.softmax_output:\n logits = F.softmax(logits, dim=-1)\n return logits, h\n\n\nclass Critic(nn.Module):\n \"\"\"Simple critic network. Will create an actor operated in discrete \\\n action space with structure of preprocess_net ---> 1(q value).\n\n :param preprocess_net: a self-defined preprocess_net which output a\n flattened hidden state.\n :param hidden_sizes: a sequence of int for constructing the MLP after\n preprocess_net. Default to empty sequence (where the MLP now contains\n only a single linear layer).\n :param int last_size: the output dimension of Critic network. Default to 1.\n :param int preprocess_net_output_dim: the output dimension of\n preprocess_net.\n\n For advanced usage (how to customize the network), please refer to\n :ref:`build_the_network`.\n\n .. 
seealso::\n\n Please refer to :class:`~tianshou.utils.net.common.Net` as an instance\n of how preprocess_net is suggested to be defined.\n \"\"\"\n\n def __init__(\n self,\n preprocess_net: nn.Module,\n hidden_sizes: Sequence[int] = (),\n last_size: int = 1,\n preprocess_net_output_dim: Optional[int] = None,\n device: Union[str, int, torch.device] = \"cpu\",\n ) -> None:\n super().__init__()\n self.device = device\n self.preprocess = preprocess_net\n self.output_dim = last_size\n input_dim = getattr(preprocess_net, \"output_dim\", preprocess_net_output_dim)\n self.last = MLP(\n input_dim, # type: ignore\n last_size,\n hidden_sizes,\n device=self.device\n )\n\n def forward(\n self, s: Union[np.ndarray, torch.Tensor], **kwargs: Any\n ) -> torch.Tensor:\n \"\"\"Mapping: s -> V(s).\"\"\"\n logits, _ = self.preprocess(s, state=kwargs.get(\"state\", None))\n return self.last(logits)\n\n\nclass CosineEmbeddingNetwork(nn.Module):\n \"\"\"Cosine embedding network for IQN. Convert a scalar in [0, 1] to a list \\\n of n-dim vectors.\n\n :param num_cosines: the number of cosines used for the embedding.\n :param embedding_dim: the dimension of the embedding/output.\n\n .. note::\n\n From https://github.com/ku2482/fqf-iqn-qrdqn.pytorch/blob/master\n /fqf_iqn_qrdqn/network.py .\n \"\"\"\n\n def __init__(self, num_cosines: int, embedding_dim: int) -> None:\n super().__init__()\n self.net = nn.Sequential(nn.Linear(num_cosines, embedding_dim), nn.ReLU())\n self.num_cosines = num_cosines\n self.embedding_dim = embedding_dim\n\n def forward(self, taus: torch.Tensor) -> torch.Tensor:\n batch_size = taus.shape[0]\n N = taus.shape[1]\n # Calculate i * \\pi (i=1,...,N).\n i_pi = np.pi * torch.arange(\n start=1, end=self.num_cosines + 1, dtype=taus.dtype, device=taus.device\n ).view(1, 1, self.num_cosines)\n # Calculate cos(i * \\pi * \\tau).\n cosines = torch.cos(taus.view(batch_size, N, 1) * i_pi\n ).view(batch_size * N, self.num_cosines)\n # Calculate embeddings of taus.\n tau_embeddings = self.net(cosines).view(batch_size, N, self.embedding_dim)\n return tau_embeddings\n\n\nclass ImplicitQuantileNetwork(Critic):\n \"\"\"Implicit Quantile Network.\n\n :param preprocess_net: a self-defined preprocess_net which output a\n flattened hidden state.\n :param int action_dim: the dimension of action space.\n :param hidden_sizes: a sequence of int for constructing the MLP after\n preprocess_net. Default to empty sequence (where the MLP now contains\n only a single linear layer).\n :param int num_cosines: the number of cosines to use for cosine embedding.\n Default to 64.\n :param int preprocess_net_output_dim: the output dimension of\n preprocess_net.\n\n .. 
note::\n\n Although this class inherits Critic, it is actually a quantile Q-Network\n with output shape (batch_size, action_dim, sample_size).\n\n The second item of the first return value is tau vector.\n \"\"\"\n\n def __init__(\n self,\n preprocess_net: nn.Module,\n action_shape: Sequence[int],\n hidden_sizes: Sequence[int] = (),\n num_cosines: int = 64,\n preprocess_net_output_dim: Optional[int] = None,\n device: Union[str, int, torch.device] = \"cpu\"\n ) -> None:\n last_size = np.prod(action_shape)\n super().__init__(\n preprocess_net, hidden_sizes, last_size, preprocess_net_output_dim, device\n )\n self.input_dim = getattr(\n preprocess_net, \"output_dim\", preprocess_net_output_dim\n )\n self.embed_model = CosineEmbeddingNetwork(\n num_cosines,\n self.input_dim # type: ignore\n ).to(device)\n\n def forward( # type: ignore\n self, s: Union[np.ndarray, torch.Tensor], sample_size: int, **kwargs: Any\n ) -> Tuple[Any, torch.Tensor]:\n r\"\"\"Mapping: s -> Q(s, \\*).\"\"\"\n logits, h = self.preprocess(s, state=kwargs.get(\"state\", None))\n # Sample fractions.\n batch_size = logits.size(0)\n taus = torch.rand(\n batch_size, sample_size, dtype=logits.dtype, device=logits.device\n )\n embedding = (logits.unsqueeze(1) *\n self.embed_model(taus)).view(batch_size * sample_size, -1)\n out = self.last(embedding).view(batch_size, sample_size, -1).transpose(1, 2)\n return (out, taus), h\n\n\nclass FractionProposalNetwork(nn.Module):\n \"\"\"Fraction proposal network for FQF.\n\n :param num_fractions: the number of factions to propose.\n :param embedding_dim: the dimension of the embedding/input.\n\n .. note::\n\n Adapted from https://github.com/ku2482/fqf-iqn-qrdqn.pytorch/blob/master\n /fqf_iqn_qrdqn/network.py .\n \"\"\"\n\n def __init__(self, num_fractions: int, embedding_dim: int) -> None:\n super().__init__()\n self.net = nn.Linear(embedding_dim, num_fractions)\n torch.nn.init.xavier_uniform_(self.net.weight, gain=0.01)\n torch.nn.init.constant_(self.net.bias, 0)\n self.num_fractions = num_fractions\n self.embedding_dim = embedding_dim\n\n def forward(\n self, state_embeddings: torch.Tensor\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n # Calculate (log of) probabilities q_i in the paper.\n m = torch.distributions.Categorical(logits=self.net(state_embeddings))\n taus_1_N = torch.cumsum(m.probs, dim=1)\n # Calculate \\tau_i (i=0,...,N).\n taus = F.pad(taus_1_N, (1, 0))\n # Calculate \\hat \\tau_i (i=0,...,N-1).\n tau_hats = (taus[:, :-1] + taus[:, 1:]).detach() / 2.0\n # Calculate entropies of value distributions.\n entropies = m.entropy()\n return taus, tau_hats, entropies\n\n\nclass FullQuantileFunction(ImplicitQuantileNetwork):\n \"\"\"Full(y parameterized) Quantile Function.\n\n :param preprocess_net: a self-defined preprocess_net which output a\n flattened hidden state.\n :param int action_dim: the dimension of action space.\n :param hidden_sizes: a sequence of int for constructing the MLP after\n preprocess_net. Default to empty sequence (where the MLP now contains\n only a single linear layer).\n :param int num_cosines: the number of cosines to use for cosine embedding.\n Default to 64.\n :param int preprocess_net_output_dim: the output dimension of\n preprocess_net.\n\n .. 
note::\n\n The first return value is a tuple of (quantiles, fractions, quantiles_tau),\n where fractions is a Batch(taus, tau_hats, entropies).\n \"\"\"\n\n def __init__(\n self,\n preprocess_net: nn.Module,\n action_shape: Sequence[int],\n hidden_sizes: Sequence[int] = (),\n num_cosines: int = 64,\n preprocess_net_output_dim: Optional[int] = None,\n device: Union[str, int, torch.device] = \"cpu\",\n ) -> None:\n super().__init__(\n preprocess_net, action_shape, hidden_sizes, num_cosines,\n preprocess_net_output_dim, device\n )\n\n def _compute_quantiles(\n self, obs: torch.Tensor, taus: torch.Tensor\n ) -> torch.Tensor:\n batch_size, sample_size = taus.shape\n embedding = (obs.unsqueeze(1) *\n self.embed_model(taus)).view(batch_size * sample_size, -1)\n quantiles = self.last(embedding).view(batch_size, sample_size,\n -1).transpose(1, 2)\n return quantiles\n\n def forward( # type: ignore\n self, s: Union[np.ndarray, torch.Tensor],\n propose_model: FractionProposalNetwork,\n fractions: Optional[Batch] = None,\n **kwargs: Any\n ) -> Tuple[Any, torch.Tensor]:\n r\"\"\"Mapping: s -> Q(s, \\*).\"\"\"\n logits, h = self.preprocess(s, state=kwargs.get(\"state\", None))\n # Propose fractions\n if fractions is None:\n taus, tau_hats, entropies = propose_model(logits.detach())\n fractions = Batch(taus=taus, tau_hats=tau_hats, entropies=entropies)\n else:\n taus, tau_hats = fractions.taus, fractions.tau_hats\n quantiles = self._compute_quantiles(logits, tau_hats)\n # Calculate quantiles_tau for computing fraction grad\n quantiles_tau = None\n if self.training:\n with torch.no_grad():\n quantiles_tau = self._compute_quantiles(logits, taus[:, 1:-1])\n return (quantiles, fractions, quantiles_tau), h\n\n\nclass NoisyLinear(nn.Module):\n \"\"\"Implementation of Noisy Networks. arXiv:1706.10295.\n\n :param int in_features: the number of input features.\n :param int out_features: the number of output features.\n :param float noisy_std: initial standard deviation of noisy linear layers.\n\n .. 
note::\n\n Adapted from https://github.com/ku2482/fqf-iqn-qrdqn.pytorch/blob/master\n /fqf_iqn_qrdqn/network.py .\n \"\"\"\n\n def __init__(\n self, in_features: int, out_features: int, noisy_std: float = 0.5\n ) -> None:\n super().__init__()\n\n # Learnable parameters.\n self.mu_W = nn.Parameter(torch.FloatTensor(out_features, in_features))\n self.sigma_W = nn.Parameter(torch.FloatTensor(out_features, in_features))\n self.mu_bias = nn.Parameter(torch.FloatTensor(out_features))\n self.sigma_bias = nn.Parameter(torch.FloatTensor(out_features))\n\n # Factorized noise parameters.\n self.register_buffer('eps_p', torch.FloatTensor(in_features))\n self.register_buffer('eps_q', torch.FloatTensor(out_features))\n\n self.in_features = in_features\n self.out_features = out_features\n self.sigma = noisy_std\n\n self.reset()\n self.sample()\n\n def reset(self) -> None:\n bound = 1 / np.sqrt(self.in_features)\n self.mu_W.data.uniform_(-bound, bound)\n self.mu_bias.data.uniform_(-bound, bound)\n self.sigma_W.data.fill_(self.sigma / np.sqrt(self.in_features))\n self.sigma_bias.data.fill_(self.sigma / np.sqrt(self.in_features))\n\n def f(self, x: torch.Tensor) -> torch.Tensor:\n x = torch.randn(x.size(0), device=x.device)\n return x.sign().mul_(x.abs().sqrt_())\n\n def sample(self) -> None:\n self.eps_p.copy_(self.f(self.eps_p)) # type: ignore\n self.eps_q.copy_(self.f(self.eps_q)) # type: ignore\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n if self.training:\n weight = self.mu_W + self.sigma_W * (\n self.eps_q.ger(self.eps_p) # type: ignore\n )\n bias = self.mu_bias + self.sigma_bias * self.eps_q.clone() # type: ignore\n else:\n weight = self.mu_W\n bias = self.mu_bias\n\n return F.linear(x, weight, bias)\n\n\ndef sample_noise(model: nn.Module) -> bool:\n \"\"\"Sample the random noises of NoisyLinear modules in the model.\n\n :param model: a PyTorch module which may have NoisyLinear submodules.\n :returns: True if model has at least one NoisyLinear submodule;\n otherwise, False.\n \"\"\"\n done = False\n for m in model.modules():\n if isinstance(m, NoisyLinear):\n m.sample()\n done = True\n return done\n\n\nclass IntrinsicCuriosityModule(nn.Module):\n \"\"\"Implementation of Intrinsic Curiosity Module. 
arXiv:1705.05363.\n\n :param torch.nn.Module feature_net: a self-defined feature_net which output a\n flattened hidden state.\n :param int feature_dim: input dimension of the feature net.\n :param int action_dim: dimension of the action space.\n :param hidden_sizes: hidden layer sizes for forward and inverse models.\n :param device: device for the module.\n \"\"\"\n\n def __init__(\n self,\n feature_net: nn.Module,\n feature_dim: int,\n action_dim: int,\n hidden_sizes: Sequence[int] = (),\n device: Union[str, torch.device] = \"cpu\"\n ) -> None:\n super().__init__()\n self.feature_net = feature_net\n self.forward_model = MLP(\n feature_dim + action_dim,\n output_dim=feature_dim,\n hidden_sizes=hidden_sizes,\n device=device\n )\n self.inverse_model = MLP(\n feature_dim * 2,\n output_dim=action_dim,\n hidden_sizes=hidden_sizes,\n device=device\n )\n self.feature_dim = feature_dim\n self.action_dim = action_dim\n self.device = device\n\n def forward(\n self, s1: Union[np.ndarray, torch.Tensor],\n act: Union[np.ndarray, torch.Tensor], s2: Union[np.ndarray,\n torch.Tensor], **kwargs: Any\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n r\"\"\"Mapping: s1, act, s2 -> mse_loss, act_hat.\"\"\"\n s1 = to_torch(s1, dtype=torch.float32, device=self.device)\n s2 = to_torch(s2, dtype=torch.float32, device=self.device)\n phi1, phi2 = self.feature_net(s1), self.feature_net(s2)\n act = to_torch(act, dtype=torch.long, device=self.device)\n phi2_hat = self.forward_model(\n torch.cat([phi1, F.one_hot(act, num_classes=self.action_dim)], dim=1)\n )\n mse_loss = 0.5 * F.mse_loss(phi2_hat, phi2, reduction=\"none\").sum(1)\n act_hat = self.inverse_model(torch.cat([phi1, phi2], dim=1))\n return mse_loss, act_hat\n", "import argparse\nimport os\nimport pprint\n\nimport gym\nimport numpy as np\nimport torch\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom tianshou.data import Collector, VectorReplayBuffer\nfrom tianshou.env import SubprocVectorEnv\nfrom tianshou.policy import ICMPolicy, PPOPolicy\nfrom tianshou.trainer import onpolicy_trainer\nfrom tianshou.utils import TensorboardLogger\nfrom tianshou.utils.net.common import MLP, ActorCritic, DataParallelNet, Net\nfrom tianshou.utils.net.discrete import Actor, Critic, IntrinsicCuriosityModule\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--task', type=str, default='CartPole-v0')\n parser.add_argument('--seed', type=int, default=0)\n parser.add_argument('--buffer-size', type=int, default=20000)\n parser.add_argument('--lr', type=float, default=3e-4)\n parser.add_argument('--gamma', type=float, default=0.99)\n parser.add_argument('--epoch', type=int, default=10)\n parser.add_argument('--step-per-epoch', type=int, default=50000)\n parser.add_argument('--step-per-collect', type=int, default=2000)\n parser.add_argument('--repeat-per-collect', type=int, default=10)\n parser.add_argument('--batch-size', type=int, default=64)\n parser.add_argument('--hidden-sizes', type=int, nargs='*', default=[64, 64])\n parser.add_argument('--training-num', type=int, default=20)\n parser.add_argument('--test-num', type=int, default=100)\n parser.add_argument('--logdir', type=str, default='log')\n parser.add_argument('--render', type=float, default=0.)\n parser.add_argument(\n '--device', type=str, default='cuda' if torch.cuda.is_available() else 'cpu'\n )\n # ppo special\n parser.add_argument('--vf-coef', type=float, default=0.5)\n parser.add_argument('--ent-coef', type=float, default=0.0)\n parser.add_argument('--eps-clip', type=float, 
default=0.2)\n parser.add_argument('--max-grad-norm', type=float, default=0.5)\n parser.add_argument('--gae-lambda', type=float, default=0.95)\n parser.add_argument('--rew-norm', type=int, default=0)\n parser.add_argument('--norm-adv', type=int, default=0)\n parser.add_argument('--recompute-adv', type=int, default=0)\n parser.add_argument('--dual-clip', type=float, default=None)\n parser.add_argument('--value-clip', type=int, default=0)\n parser.add_argument(\n '--lr-scale',\n type=float,\n default=1.,\n help='use intrinsic curiosity module with this lr scale'\n )\n parser.add_argument(\n '--reward-scale',\n type=float,\n default=0.01,\n help='scaling factor for intrinsic curiosity reward'\n )\n parser.add_argument(\n '--forward-loss-weight',\n type=float,\n default=0.2,\n help='weight for the forward model loss in ICM'\n )\n args = parser.parse_known_args()[0]\n return args\n\n\ndef test_ppo(args=get_args()):\n env = gym.make(args.task)\n args.state_shape = env.observation_space.shape or env.observation_space.n\n args.action_shape = env.action_space.shape or env.action_space.n\n # train_envs = gym.make(args.task)\n # you can also use tianshou.env.SubprocVectorEnv\n train_envs = SubprocVectorEnv(\n [lambda: gym.make(args.task) for _ in range(args.training_num)]\n )\n # test_envs = gym.make(args.task)\n test_envs = SubprocVectorEnv(\n [lambda: gym.make(args.task) for _ in range(args.test_num)]\n )\n # seed\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n train_envs.seed(args.seed)\n test_envs.seed(args.seed)\n # model\n net = Net(args.state_shape, hidden_sizes=args.hidden_sizes, device=args.device)\n if torch.cuda.is_available():\n actor = DataParallelNet(\n Actor(net, args.action_shape, device=None).to(args.device)\n )\n critic = DataParallelNet(Critic(net, device=None).to(args.device))\n else:\n actor = Actor(net, args.action_shape, device=args.device).to(args.device)\n critic = Critic(net, device=args.device).to(args.device)\n actor_critic = ActorCritic(actor, critic)\n # orthogonal initialization\n for m in actor_critic.modules():\n if isinstance(m, torch.nn.Linear):\n torch.nn.init.orthogonal_(m.weight)\n torch.nn.init.zeros_(m.bias)\n optim = torch.optim.Adam(actor_critic.parameters(), lr=args.lr)\n dist = torch.distributions.Categorical\n policy = PPOPolicy(\n actor,\n critic,\n optim,\n dist,\n discount_factor=args.gamma,\n max_grad_norm=args.max_grad_norm,\n eps_clip=args.eps_clip,\n vf_coef=args.vf_coef,\n ent_coef=args.ent_coef,\n gae_lambda=args.gae_lambda,\n reward_normalization=args.rew_norm,\n dual_clip=args.dual_clip,\n value_clip=args.value_clip,\n action_space=env.action_space,\n deterministic_eval=True,\n advantage_normalization=args.norm_adv,\n recompute_advantage=args.recompute_adv\n )\n feature_dim = args.hidden_sizes[-1]\n feature_net = MLP(\n np.prod(args.state_shape),\n output_dim=feature_dim,\n hidden_sizes=args.hidden_sizes[:-1],\n device=args.device\n )\n action_dim = np.prod(args.action_shape)\n icm_net = IntrinsicCuriosityModule(\n feature_net,\n feature_dim,\n action_dim,\n hidden_sizes=args.hidden_sizes[-1:],\n device=args.device\n ).to(args.device)\n icm_optim = torch.optim.Adam(icm_net.parameters(), lr=args.lr)\n policy = ICMPolicy(\n policy, icm_net, icm_optim, args.lr_scale, args.reward_scale,\n args.forward_loss_weight\n )\n # collector\n train_collector = Collector(\n policy, train_envs, VectorReplayBuffer(args.buffer_size, len(train_envs))\n )\n test_collector = Collector(policy, test_envs)\n # log\n log_path = os.path.join(args.logdir, 
args.task, 'ppo_icm')\n writer = SummaryWriter(log_path)\n logger = TensorboardLogger(writer)\n\n def save_fn(policy):\n torch.save(policy.state_dict(), os.path.join(log_path, 'policy.pth'))\n\n def stop_fn(mean_rewards):\n return mean_rewards >= env.spec.reward_threshold\n\n # trainer\n result = onpolicy_trainer(\n policy,\n train_collector,\n test_collector,\n args.epoch,\n args.step_per_epoch,\n args.repeat_per_collect,\n args.test_num,\n args.batch_size,\n step_per_collect=args.step_per_collect,\n stop_fn=stop_fn,\n save_fn=save_fn,\n logger=logger\n )\n assert stop_fn(result['best_reward'])\n\n if __name__ == '__main__':\n pprint.pprint(result)\n # Let's watch its performance!\n env = gym.make(args.task)\n policy.eval()\n collector = Collector(policy, env)\n result = collector.collect(n_episode=1, render=args.render)\n rews, lens = result[\"rews\"], result[\"lens\"]\n print(f\"Final reward: {rews.mean()}, length: {lens.mean()}\")\n\n\nif __name__ == '__main__':\n test_ppo()\n" ]
[ [ "torch.nn.functional.softmax", "numpy.sqrt", "torch.cat", "torch.nn.init.constant_", "torch.nn.functional.one_hot", "torch.arange", "torch.nn.Linear", "torch.nn.functional.mse_loss", "torch.FloatTensor", "torch.rand", "numpy.prod", "torch.nn.init.xavier_uniform_", "torch.no_grad", "torch.nn.ReLU", "torch.cumsum", "torch.nn.functional.linear", "torch.nn.functional.pad" ], [ "numpy.random.seed", "torch.manual_seed", "torch.utils.tensorboard.SummaryWriter", "numpy.prod", "torch.cuda.is_available", "torch.nn.init.orthogonal_", "torch.nn.init.zeros_" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bpiwowar/capreolus-xpm
[ "5374eb48df96b54d51365fc32441ae50a3e634c2" ]
[ "capreolus/reranker/tests/test_rerankers.py" ]
[ "import pytest\nimport torch\n\nfrom capreolus.reranker.POSITDRMM import POSITDRMM\nfrom capreolus.reranker.KNRM import KNRM\n\n\ndef test_validate_params_for_knrm():\n with pytest.raises(ValueError):\n KNRM.validate_params({\"foo\": \"bar\"})\n\n with pytest.raises(ValueError):\n KNRM.validate_params({\"pad_token\": 0})\n\n config = {\"pad_token\": 0, \"gradkernels\": True, \"singlefc\": False, \"scoretanh\": True}\n KNRM.validate_params(config)\n\n\ndef test_positdrmm_get_exact_match_count():\n query = torch.tensor([1, 2, 3])\n doc = torch.tensor([1, 5, 3, 2, 1, 1, 9])\n query_idf = [0.5, 0.5, 0.5]\n\n exact_count, exact_count_idf = POSITDRMM.get_exact_match_count(query, doc, query_idf)\n assert exact_count == 5 / len(doc)\n assert exact_count_idf == (3 * 0.5 + 0.5 + 0.5) / len(doc)\n\n\ndef test_positdrmm_get_bigrams():\n # Each number in the doc represents an index into the vocabulary\n doc = torch.tensor([1, 2, 3, 4])\n\n doc_bigrams = POSITDRMM.get_bigrams(doc)\n expected_doc_bigrams = torch.tensor([[1, 2], [2, 3], [3, 4]])\n\n assert torch.all(torch.eq(doc_bigrams, expected_doc_bigrams))\n\n\ndef test_positdrmm_get_bigram_match_count():\n doc = torch.tensor([1, 2, 3, 4, 1, 2])\n query = torch.tensor([1, 5, 9, 3, 4])\n\n bigram_match_count = POSITDRMM.get_bigram_match_count(query, doc)\n expected_count = 1 / 5 # The only matching bigram is [3, 4], and length of doc bigrams is 5\n\n assert bigram_match_count == expected_count\n\n\ndef test_positdrmm_get_exact_match_stats():\n # 3 docs, zero padded at the end\n docs = torch.tensor([[1, 2, 3, 4, 0], [2, 3, 1, 5, 0], [3, 4, 5, 6, 0]])\n # 1 query repeated 3 times (i.e, batch size = 3), zero padded at the end\n queries = torch.tensor([[3, 1, 5, 7, 0], [3, 1, 5, 7, 0], [3, 1, 5, 7, 0]])\n query_idf = torch.tensor([[0.5, 0.5, 0.5, 0.5, 0], [0.5, 0.5, 0.5, 0.5, 0], [0.5, 0.5, 0.5, 0.5, 0]])\n\n exact_matches, exact_match_idf, bigram_matches = POSITDRMM.get_exact_match_stats(query_idf, queries, docs)\n\n assert torch.all(torch.eq(exact_matches.reshape(3), torch.tensor([2 / 4, 3 / 4, 2 / 4])))\n assert torch.all(torch.eq(exact_match_idf.reshape(3), torch.tensor([2 * 0.5 / 4, 3 * 0.5 / 4, 2 * 0.5 / 4])))\n\n # The query bigrams are:\n # [[3, 1], [1, 5], [5, 7], [7, 0]] - we don't clean the query\n # The doc bigrams are:\n # [[1, 2], [2, 3], [3, 4]]\n # [[2, 3], [3, 1], [1, 5]]\n # [[3, 4], [4, 5], [5, 6]]\n assert torch.all(torch.eq(bigram_matches.reshape(3), torch.tensor([0, 2 / 3, 0])))\n" ]
[ [ "torch.eq", "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zhen-jia/incubator-tvm
[ "37af1e7fa8aeebb0d996d7018bceb9a1b567e4dd", "37af1e7fa8aeebb0d996d7018bceb9a1b567e4dd" ]
[ "tests/micro/zephyr/test_zephyr_aot.py", "tests/micro/zephyr/test_zephyr.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport io\nimport logging\nimport os\nimport sys\nimport logging\nimport pathlib\nimport tarfile\nimport tempfile\n\nimport pytest\nimport numpy as np\n\nimport tvm\nimport tvm.testing\nfrom tvm.micro.project_api import server\nimport tvm.relay as relay\n\nfrom tvm.contrib.download import download_testdata\nfrom tvm.micro.model_library_format import generate_c_interface_header\n\nimport test_utils\n\n\[email protected]_micro\ndef test_tflite(temp_dir, board, west_cmd, tvm_debug):\n \"\"\"Testing a TFLite model.\"\"\"\n\n if board not in [\n \"qemu_x86\",\n \"mps2_an521\",\n \"nrf5340dk_nrf5340_cpuapp\",\n \"nucleo_l4r5zi\",\n \"qemu_cortex_r5\",\n \"qemu_riscv32\",\n \"qemu_riscv64\",\n ]:\n pytest.skip(msg=\"Model does not fit.\")\n\n model = test_utils.ZEPHYR_BOARDS[board]\n input_shape = (1, 32, 32, 3)\n output_shape = (1, 10)\n build_config = {\"debug\": tvm_debug}\n\n model_url = \"https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/model/image_classification_fp32.tflite\"\n model_path = download_testdata(model_url, \"image_classification_fp32.tflite\", module=\"model\")\n\n # Import TFLite model\n tflite_model_buf = open(model_path, \"rb\").read()\n try:\n import tflite\n\n tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)\n except AttributeError:\n import tflite.Model\n\n tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)\n\n # Load TFLite model and convert to Relay\n relay_mod, params = relay.frontend.from_tflite(\n tflite_model, shape_dict={\"input_1\": input_shape}, dtype_dict={\"input_1 \": \"float32\"}\n )\n\n target = tvm.target.target.micro(\n model, options=[\"-link-params=1\", \"--executor=aot\", \"--unpacked-api=1\", \"--interface-api=c\"]\n )\n with tvm.transform.PassContext(opt_level=3, config={\"tir.disable_vectorize\": True}):\n lowered = relay.build(relay_mod, target, params=params)\n\n # Load sample and generate input/output header files\n sample_url = \"https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/data/testdata_image_classification_fp32_8.npy\"\n sample_path = download_testdata(\n sample_url, \"testdata_image_classification_fp32_8.npy\", module=\"data\"\n )\n sample = np.load(sample_path)\n\n with tempfile.NamedTemporaryFile() as tar_temp_file:\n with tarfile.open(tar_temp_file.name, \"w:gz\") as tf:\n with tempfile.TemporaryDirectory() as tar_temp_dir:\n model_files_path = os.path.join(tar_temp_dir, \"include\")\n os.mkdir(model_files_path)\n header_path = generate_c_interface_header(\n lowered.libmod_name, [\"input_1\"], [\"output\"], model_files_path\n )\n tf.add(header_path, arcname=os.path.relpath(header_path, tar_temp_dir))\n\n test_utils.create_header_file(\"input_data\", sample, \"include\", tf)\n 
test_utils.create_header_file(\n \"output_data\", np.zeros(shape=output_shape, dtype=\"float32\"), \"include\", tf\n )\n\n project, _ = test_utils.build_project(\n temp_dir,\n board,\n west_cmd,\n lowered,\n build_config,\n extra_files_tar=tar_temp_file.name,\n )\n\n project.flash()\n with project.transport() as transport:\n timeout_read = 60\n test_utils.get_message(transport, \"#wakeup\", timeout_sec=timeout_read)\n transport.write(b\"start\\n\", timeout_sec=5)\n result_line = test_utils.get_message(transport, \"#result\", timeout_sec=timeout_read)\n\n result_line = result_line.strip(\"\\n\")\n result_line = result_line.split(\":\")\n result = int(result_line[1])\n time = int(result_line[2])\n logging.info(f\"Result: {result}\\ttime: {time} ms\")\n assert result == 8\n\n\[email protected]_micro\ndef test_qemu_make_fail(temp_dir, board, west_cmd, tvm_debug):\n \"\"\"Testing QEMU make fail.\"\"\"\n if board not in [\"qemu_x86\", \"mps2_an521\"]:\n pytest.skip(msg=\"Only for QEMU targets.\")\n\n model = test_utils.ZEPHYR_BOARDS[board]\n build_config = {\"debug\": tvm_debug}\n shape = (10,)\n dtype = \"float32\"\n\n # Construct Relay program.\n x = relay.var(\"x\", relay.TensorType(shape=shape, dtype=dtype))\n xx = relay.multiply(x, x)\n z = relay.add(xx, relay.const(np.ones(shape=shape, dtype=dtype)))\n func = relay.Function([x], z)\n ir_mod = tvm.IRModule.from_expr(func)\n\n target = tvm.target.target.micro(model, options=[\"-link-params=1\", \"--executor=aot\"])\n with tvm.transform.PassContext(opt_level=3, config={\"tir.disable_vectorize\": True}):\n lowered = relay.build(ir_mod, target)\n\n # Generate input/output header files\n with tempfile.NamedTemporaryFile() as tar_temp_file:\n with tarfile.open(tar_temp_file.name, \"w:gz\") as tf:\n with tempfile.TemporaryDirectory() as tar_temp_dir:\n model_files_path = os.path.join(tar_temp_dir, \"include\")\n os.mkdir(model_files_path)\n header_path = generate_c_interface_header(\n lowered.libmod_name, [\"input_1\"], [\"output\"], model_files_path\n )\n tf.add(header_path, arcname=os.path.relpath(header_path, tar_temp_dir))\n test_utils.create_header_file(\n \"input_data\", np.zeros(shape=shape, dtype=dtype), \"include\", tf\n )\n test_utils.create_header_file(\n \"output_data\", np.zeros(shape=shape, dtype=dtype), \"include\", tf\n )\n\n project, project_dir = test_utils.build_project(\n temp_dir,\n board,\n west_cmd,\n lowered,\n build_config,\n extra_files_tar=tar_temp_file.name,\n )\n\n file_path = (\n pathlib.Path(project_dir) / \"build\" / \"zephyr\" / \"CMakeFiles\" / \"run.dir\" / \"build.make\"\n )\n assert file_path.is_file(), f\"[{file_path}] does not exist.\"\n\n # Remove a file to create make failure.\n os.remove(file_path)\n project.flash()\n with pytest.raises(server.JSONRPCError) as excinfo:\n project.transport().open()\n assert \"QEMU setup failed\" in str(excinfo.value)\n\n\nif __name__ == \"__main__\":\n sys.exit(pytest.main([__file__] + sys.argv[1:]))\n", "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport logging\nimport os\nimport pathlib\nimport subprocess\nimport sys\nimport logging\n\nimport pytest\nimport numpy as np\nimport onnx\nfrom PIL import Image\n\nimport tvm\nimport tvm.relay as relay\nfrom tvm.relay.testing import byoc\nfrom tvm.contrib import utils\nfrom tvm.micro.testing import check_tune_log\n\nimport test_utils\n\n_LOG = logging.getLogger(__name__)\n\n\ndef _make_sess_from_op(\n temp_dir, model, zephyr_board, west_cmd, op_name, sched, arg_bufs, build_config\n):\n target = tvm.target.target.micro(model)\n target = tvm.target.Target(target=target, host=target)\n with tvm.transform.PassContext(opt_level=3, config={\"tir.disable_vectorize\": True}):\n mod = tvm.build(sched, arg_bufs, target=target, name=op_name)\n\n return _make_session(temp_dir, zephyr_board, west_cmd, mod, build_config)\n\n\ndef _make_session(temp_dir, zephyr_board, west_cmd, mod, build_config):\n config_main_stack_size = None\n if test_utils.qemu_boards(zephyr_board):\n config_main_stack_size = 1536\n\n project_options = {\n \"project_type\": \"host_driven\",\n \"west_cmd\": west_cmd,\n \"verbose\": bool(build_config.get(\"debug\")),\n \"zephyr_board\": zephyr_board,\n }\n if config_main_stack_size is not None:\n project_options[\"config_main_stack_size\"] = config_main_stack_size\n\n project = tvm.micro.generate_project(\n str(test_utils.TEMPLATE_PROJECT_DIR),\n mod,\n temp_dir / \"project\",\n project_options,\n )\n project.build()\n project.flash()\n return tvm.micro.Session(project.transport())\n\n\ndef _make_add_sess(temp_dir, model, zephyr_board, west_cmd, build_config, dtype=\"int8\"):\n A = tvm.te.placeholder((2,), dtype=dtype)\n B = tvm.te.placeholder((1,), dtype=dtype)\n C = tvm.te.compute(A.shape, lambda i: A[i] + B[0], name=\"C\")\n sched = tvm.te.create_schedule(C.op)\n return _make_sess_from_op(\n temp_dir, model, zephyr_board, west_cmd, \"add\", sched, [A, B, C], build_config\n )\n\n\n# The same test code can be executed on both the QEMU simulation and on real hardware.\[email protected]_micro\ndef test_add_uint(temp_dir, board, west_cmd, tvm_debug):\n \"\"\"Test compiling the on-device runtime.\"\"\"\n\n model = test_utils.ZEPHYR_BOARDS[board]\n build_config = {\"debug\": tvm_debug}\n\n # NOTE: run test in a nested function so cPython will delete arrays before closing the session.\n def test_basic_add(sess):\n A_data = tvm.nd.array(np.array([2, 3], dtype=\"int8\"), device=sess.device)\n assert (A_data.numpy() == np.array([2, 3])).all()\n B_data = tvm.nd.array(np.array([4], dtype=\"int8\"), device=sess.device)\n assert (B_data.numpy() == np.array([4])).all()\n C_data = tvm.nd.array(np.array([0, 0], dtype=\"int8\"), device=sess.device)\n assert (C_data.numpy() == np.array([0, 0])).all()\n\n system_lib = sess.get_system_lib()\n system_lib.get_function(\"add\")(A_data, B_data, C_data)\n assert (C_data.numpy() == np.array([6, 7])).all()\n\n with _make_add_sess(temp_dir, model, board, west_cmd, build_config) as sess:\n test_basic_add(sess)\n\n\n# The same test code can be executed on both the QEMU simulation and on real hardware.\[email protected]_micro\ndef 
test_add_float(temp_dir, board, west_cmd, tvm_debug):\n \"\"\"Test compiling the on-device runtime.\"\"\"\n model = test_utils.ZEPHYR_BOARDS[board]\n if not test_utils.has_fpu(board):\n pytest.skip(f\"FPU not enabled for {board}\")\n\n build_config = {\"debug\": tvm_debug}\n\n # NOTE: run test in a nested function so cPython will delete arrays before closing the session.\n def test_basic_add(sess):\n A_data = tvm.nd.array(np.array([2.5, 3.5], dtype=\"float32\"), device=sess.device)\n assert (A_data.numpy() == np.array([2.5, 3.5])).all()\n B_data = tvm.nd.array(np.array([4.5], dtype=\"float32\"), device=sess.device)\n assert (B_data.numpy() == np.array([4.5])).all()\n C_data = tvm.nd.array(np.array([0, 0], dtype=\"float32\"), device=sess.device)\n assert (C_data.numpy() == np.array([0, 0])).all()\n\n system_lib = sess.get_system_lib()\n system_lib.get_function(\"add\")(A_data, B_data, C_data)\n assert (C_data.numpy() == np.array([7, 8])).all()\n\n with _make_add_sess(temp_dir, model, board, west_cmd, build_config, dtype=\"float32\") as sess:\n test_basic_add(sess)\n\n\[email protected]_micro\ndef test_platform_timer(temp_dir, board, west_cmd, tvm_debug):\n \"\"\"Test compiling the on-device runtime.\"\"\"\n\n model = test_utils.ZEPHYR_BOARDS[board]\n build_config = {\"debug\": tvm_debug}\n\n # NOTE: run test in a nested function so cPython will delete arrays before closing the session.\n def test_basic_add(sess):\n A_data = tvm.nd.array(np.array([2, 3], dtype=\"int8\"), device=sess.device)\n assert (A_data.numpy() == np.array([2, 3])).all()\n B_data = tvm.nd.array(np.array([4], dtype=\"int8\"), device=sess.device)\n assert (B_data.numpy() == np.array([4])).all()\n C_data = tvm.nd.array(np.array([0, 0], dtype=\"int8\"), device=sess.device)\n assert (C_data.numpy() == np.array([0, 0])).all()\n\n system_lib = sess.get_system_lib()\n time_eval_f = system_lib.time_evaluator(\n \"add\", sess.device, number=20, repeat=3, min_repeat_ms=40\n )\n result = time_eval_f(A_data, B_data, C_data)\n assert (C_data.numpy() == np.array([6, 7])).all()\n assert result.mean > 0\n assert len(result.results) == 3\n\n with _make_add_sess(temp_dir, model, board, west_cmd, build_config) as sess:\n test_basic_add(sess)\n\n\[email protected]_micro\ndef test_relay(temp_dir, board, west_cmd, tvm_debug):\n \"\"\"Testing a simple relay graph\"\"\"\n model = test_utils.ZEPHYR_BOARDS[board]\n build_config = {\"debug\": tvm_debug}\n shape = (10,)\n dtype = \"int8\"\n\n # Construct Relay program.\n x = relay.var(\"x\", relay.TensorType(shape=shape, dtype=dtype))\n xx = relay.multiply(x, x)\n z = relay.add(xx, relay.const(np.ones(shape=shape, dtype=dtype)))\n func = relay.Function([x], z)\n ir_mod = tvm.IRModule.from_expr(func)\n\n target = tvm.target.target.micro(model)\n with tvm.transform.PassContext(opt_level=3, config={\"tir.disable_vectorize\": True}):\n mod = tvm.relay.build(ir_mod, target=target)\n\n with _make_session(temp_dir, board, west_cmd, mod, build_config) as session:\n graph_mod = tvm.micro.create_local_graph_executor(\n mod.get_graph_json(), session.get_system_lib(), session.device\n )\n graph_mod.set_input(**mod.get_params())\n x_in = np.random.randint(10, size=shape[0], dtype=dtype)\n graph_mod.run(x=x_in)\n result = graph_mod.get_output(0).numpy()\n tvm.testing.assert_allclose(graph_mod.get_input(0).numpy(), x_in)\n tvm.testing.assert_allclose(result, x_in * x_in + 1)\n\n\[email protected]_micro\ndef test_onnx(temp_dir, board, west_cmd, tvm_debug):\n \"\"\"Testing a simple ONNX model.\"\"\"\n model = 
test_utils.ZEPHYR_BOARDS[board]\n build_config = {\"debug\": tvm_debug}\n\n this_dir = pathlib.Path(os.path.dirname(__file__))\n mnist_testdata = this_dir.parent / \"testdata\" / \"mnist\"\n digit_2 = Image.open(mnist_testdata / \"digit-2.jpg\").resize((28, 28))\n digit_2 = np.asarray(digit_2).astype(\"float32\")\n digit_2 = np.expand_dims(digit_2, axis=0)\n\n digit_9 = Image.open(mnist_testdata / \"digit-9.jpg\").resize((28, 28))\n digit_9 = np.asarray(digit_9).astype(\"float32\")\n digit_9 = np.expand_dims(digit_9, axis=0)\n\n # Load ONNX model and convert to Relay.\n onnx_model = onnx.load(mnist_testdata / \"mnist-8.onnx\")\n shape = {\"Input3\": (1, 1, 28, 28)}\n relay_mod, params = relay.frontend.from_onnx(onnx_model, shape=shape, freeze_params=True)\n relay_mod = relay.transform.DynamicToStatic()(relay_mod)\n\n # We add the -link-params=1 option to ensure the model parameters are compiled in.\n # There is currently a bug preventing the host_driven environment from receiving\n # the model weights when set using graph_mod.set_input().\n # See: https://github.com/apache/tvm/issues/7567\n target = tvm.target.target.micro(model, options=[\"-link-params=1\"])\n with tvm.transform.PassContext(opt_level=3, config={\"tir.disable_vectorize\": True}):\n lowered = relay.build(relay_mod, target, params=params)\n graph = lowered.get_graph_json()\n\n with _make_session(temp_dir, board, west_cmd, lowered, build_config) as session:\n graph_mod = tvm.micro.create_local_graph_executor(\n graph, session.get_system_lib(), session.device\n )\n\n # Send the digit-2 image and confirm that the correct result is returned.\n graph_mod.set_input(\"Input3\", tvm.nd.array(digit_2))\n graph_mod.run()\n result = graph_mod.get_output(0).numpy()\n assert np.argmax(result) == 2\n\n # Send the digit-9 image and confirm that the correct result is returned.\n graph_mod.set_input(\"Input3\", tvm.nd.array(digit_9))\n graph_mod.run()\n result = graph_mod.get_output(0).numpy()\n assert np.argmax(result) == 9\n\n\ndef check_result(\n temp_dir, relay_mod, model, zephyr_board, west_cmd, map_inputs, out_shape, result, build_config\n):\n \"\"\"Helper function to verify results\"\"\"\n TOL = 1e-5\n target = tvm.target.target.micro(model)\n with tvm.transform.PassContext(opt_level=3, config={\"tir.disable_vectorize\": True}):\n mod = tvm.relay.build(relay_mod, target=target)\n\n with _make_session(temp_dir, zephyr_board, west_cmd, mod, build_config) as session:\n rt_mod = tvm.micro.create_local_graph_executor(\n mod.get_graph_json(), session.get_system_lib(), session.device\n )\n rt_mod.set_input(**mod.get_params())\n for name, data in map_inputs.items():\n rt_mod.set_input(name, data)\n rt_mod.set_input(**mod.get_params())\n rt_mod.run()\n\n out_shapes = out_shape if isinstance(out_shape, list) else [out_shape]\n results = result if isinstance(result, list) else [result]\n\n for idx, shape in enumerate(out_shapes):\n out = tvm.nd.empty(shape, device=session.device)\n out = rt_mod.get_output(idx, out)\n tvm.testing.assert_allclose(out.numpy(), results[idx], rtol=TOL, atol=TOL)\n\n\[email protected]_micro\ndef test_byoc_microtvm(temp_dir, board, west_cmd, tvm_debug):\n \"\"\"This is a simple test case to check BYOC capabilities of microTVM\"\"\"\n model = test_utils.ZEPHYR_BOARDS[board]\n build_config = {\"debug\": tvm_debug}\n x = relay.var(\"x\", shape=(10, 10))\n w0 = relay.var(\"w0\", shape=(10, 10))\n w1 = relay.var(\"w1\", shape=(10, 10))\n w2 = relay.var(\"w2\", shape=(10, 10))\n w3 = relay.var(\"w3\", shape=(10, 10))\n w4 = 
relay.var(\"w4\", shape=(10, 10))\n w5 = relay.var(\"w5\", shape=(10, 10))\n w6 = relay.var(\"w6\", shape=(10, 10))\n w7 = relay.var(\"w7\", shape=(10, 10))\n\n # C compiler\n z0 = relay.add(x, w0)\n p0 = relay.subtract(z0, w1)\n q0 = relay.multiply(p0, w2)\n\n z1 = relay.add(x, w3)\n p1 = relay.subtract(z1, w4)\n q1 = relay.multiply(p1, w5)\n\n # Other parts on TVM\n z2 = relay.add(x, w6)\n q2 = relay.subtract(z2, w7)\n\n r = relay.concatenate((q0, q1, q2), axis=0)\n f = relay.Function([x, w0, w1, w2, w3, w4, w5, w6, w7], r)\n mod = tvm.IRModule()\n ann = byoc.CcompilerAnnotator()\n mod[\"main\"] = ann.visit(f)\n mod = tvm.relay.transform.PartitionGraph()(mod)\n mod = tvm.relay.transform.InferType()(mod)\n\n x_data = np.random.rand(10, 10).astype(\"float32\")\n w_data = []\n for _ in range(8):\n w_data.append(np.random.rand(10, 10).astype(\"float32\"))\n\n map_inputs = {\"w{}\".format(i): w_data[i] for i in range(8)}\n map_inputs[\"x\"] = x_data\n check_result(\n temp_dir=temp_dir,\n relay_mod=mod,\n map_inputs=map_inputs,\n out_shape=(30, 10),\n result=np.concatenate(\n (\n ((x_data + w_data[0]) - w_data[1]) * w_data[2],\n ((x_data + w_data[3]) - w_data[4]) * w_data[5],\n x_data + w_data[6] - w_data[7],\n ),\n axis=0,\n ),\n model=model,\n zephyr_board=board,\n west_cmd=west_cmd,\n build_config=build_config,\n )\n\n\ndef _make_add_sess_with_shape(temp_dir, model, zephyr_board, west_cmd, shape, build_config):\n A = tvm.te.placeholder(shape, dtype=\"int8\")\n C = tvm.te.compute(A.shape, lambda i: A[i] + A[i], name=\"C\")\n sched = tvm.te.create_schedule(C.op)\n return _make_sess_from_op(\n temp_dir, model, zephyr_board, west_cmd, \"add\", sched, [A, C], build_config\n )\n\n\[email protected](\n \"shape,\",\n [\n pytest.param((1 * 1024,), id=\"(1*1024)\"),\n pytest.param((4 * 1024,), id=\"(4*1024)\"),\n pytest.param((16 * 1024,), id=\"(16*1024)\"),\n ],\n)\[email protected]_micro\ndef test_rpc_large_array(temp_dir, board, west_cmd, tvm_debug, shape):\n \"\"\"Test large RPC array transfer.\"\"\"\n model = test_utils.ZEPHYR_BOARDS[board]\n build_config = {\"debug\": tvm_debug}\n\n # NOTE: run test in a nested function so cPython will delete arrays before closing the session.\n def test_tensors(sess):\n a_np = np.random.randint(low=-128, high=127, size=shape, dtype=\"int8\")\n\n A_data = tvm.nd.array(a_np, device=sess.device)\n assert (A_data.numpy() == a_np).all()\n C_data = tvm.nd.array(np.zeros(shape, dtype=\"int8\"), device=sess.device)\n assert (C_data.numpy() == np.zeros(shape)).all()\n\n with _make_add_sess_with_shape(temp_dir, model, board, west_cmd, shape, build_config) as sess:\n test_tensors(sess)\n\n\[email protected]_micro\ndef test_autotune_conv2d(temp_dir, board, west_cmd, tvm_debug):\n \"\"\"Test AutoTune for microTVM Zephyr\"\"\"\n if board in [\"qemu_riscv32\", \"qemu_riscv64\"]:\n pytest.xfail(f\"Autotune fails on {board}.\")\n\n model = test_utils.ZEPHYR_BOARDS[board]\n build_config = {\"debug\": tvm_debug}\n\n # Create a Relay model\n data_shape = (1, 3, 16, 16)\n weight_shape = (8, 3, 5, 5)\n data = relay.var(\"data\", relay.TensorType(data_shape, \"float32\"))\n weight = relay.var(\"weight\", relay.TensorType(weight_shape, \"float32\"))\n y = relay.nn.conv2d(\n data,\n weight,\n padding=(2, 2),\n kernel_size=(5, 5),\n kernel_layout=\"OIHW\",\n out_dtype=\"float32\",\n )\n f = relay.Function([data, weight], y)\n mod = tvm.IRModule.from_expr(f)\n mod = relay.transform.InferType()(mod)\n\n data_sample = np.random.rand(data_shape[0], data_shape[1], data_shape[2], 
data_shape[3]).astype(\n \"float32\"\n )\n weight_sample = np.random.rand(\n weight_shape[0], weight_shape[1], weight_shape[2], weight_shape[3]\n ).astype(\"float32\")\n params = {mod[\"main\"].params[1].name_hint: weight_sample}\n\n target = tvm.target.target.micro(model)\n pass_context = tvm.transform.PassContext(opt_level=3, config={\"tir.disable_vectorize\": True})\n with pass_context:\n tasks = tvm.autotvm.task.extract_from_program(mod[\"main\"], {}, target)\n assert len(tasks) > 0\n\n config_main_stack_size = None\n if test_utils.qemu_boards(board):\n config_main_stack_size = 1536\n\n project_options = {\n \"zephyr_board\": board,\n \"west_cmd\": west_cmd,\n \"verbose\": 1,\n \"project_type\": \"host_driven\",\n }\n if config_main_stack_size is not None:\n project_options[\"config_main_stack_size\"] = config_main_stack_size\n\n module_loader = tvm.micro.AutoTvmModuleLoader(\n template_project_dir=test_utils.TEMPLATE_PROJECT_DIR,\n project_options=project_options,\n )\n\n timeout = 200\n builder = tvm.autotvm.LocalBuilder(\n timeout=timeout,\n n_parallel=1,\n build_kwargs={\"build_option\": {\"tir.disable_vectorize\": True}},\n do_fork=True,\n build_func=tvm.micro.autotvm_build_func,\n )\n runner = tvm.autotvm.LocalRunner(\n number=1, repeat=1, timeout=timeout, module_loader=module_loader\n )\n\n measure_option = tvm.autotvm.measure_option(builder=builder, runner=runner)\n\n log_path = pathlib.Path(\"zephyr_autotune.log\")\n if log_path.exists():\n log_path.unlink()\n\n n_trial = 10\n for task in tasks:\n tuner = tvm.autotvm.tuner.GATuner(task)\n tuner.tune(\n n_trial=n_trial,\n measure_option=measure_option,\n callbacks=[\n tvm.autotvm.callback.log_to_file(str(log_path)),\n tvm.autotvm.callback.progress_bar(n_trial, si_prefix=\"M\"),\n ],\n si_prefix=\"M\",\n )\n assert tuner.best_flops > 0\n\n check_tune_log(log_path)\n\n # Build without tuning\n with pass_context:\n lowered = tvm.relay.build(mod, target=target, params=params)\n\n temp_dir = utils.tempdir()\n with _make_session(temp_dir, board, west_cmd, lowered, build_config) as session:\n graph_mod = tvm.micro.create_local_graph_executor(\n lowered.get_graph_json(), session.get_system_lib(), session.device\n )\n graph_mod.set_input(**lowered.get_params())\n graph_mod.run(data=data_sample)\n expected_output = graph_mod.get_output(0).numpy()\n del graph_mod\n\n # Build using autotune logs\n with tvm.autotvm.apply_history_best(str(log_path)):\n with pass_context:\n lowered_tuned = tvm.relay.build(mod, target=target, params=params)\n\n temp_dir = utils.tempdir()\n with _make_session(temp_dir, board, west_cmd, lowered_tuned, build_config) as session:\n graph_mod = tvm.micro.create_local_graph_executor(\n lowered_tuned.get_graph_json(), session.get_system_lib(), session.device\n )\n graph_mod.set_input(**lowered_tuned.get_params())\n graph_mod.run(data=data_sample)\n output = graph_mod.get_output(0).numpy()\n del graph_mod\n\n tvm.testing.assert_allclose(output, expected_output, rtol=1e-4, atol=1e-5)\n\n\nif __name__ == \"__main__\":\n sys.exit(pytest.main([__file__] + sys.argv[1:]))\n" ]
[ [ "numpy.load", "numpy.zeros", "numpy.ones" ], [ "numpy.expand_dims", "numpy.asarray", "numpy.ones", "numpy.concatenate", "numpy.argmax", "numpy.random.rand", "numpy.array", "numpy.zeros", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sogartar/torch-mlir
[ "19e9fc4ef12d7207eadd3dc9121aebe1555ea8dd", "19e9fc4ef12d7207eadd3dc9121aebe1555ea8dd", "19e9fc4ef12d7207eadd3dc9121aebe1555ea8dd" ]
[ "python/torch_mlir_e2e_test/torchscript/configs/torchscript.py", "e2e_testing/torchscript/basic.py", "python/test/annotations-sugar.py" ]
[ "# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n# See https://llvm.org/LICENSE.txt for license information.\n# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n# Also available under a BSD-style license. See LICENSE.\n\nimport copy\nfrom typing import Any\n\nimport torch\n\nfrom torch_mlir_e2e_test.torchscript.framework import TestConfig, Trace, TraceItem\n\n\nclass TorchScriptTestConfig(TestConfig):\n \"\"\"TestConfig that runs the torch.nn.Module through TorchScript\"\"\"\n def __init__(self):\n super().__init__()\n\n def compile(self, program: torch.nn.Module) -> torch.jit.ScriptModule:\n return torch.jit.script(program)\n\n def run(self, artifact: torch.jit.ScriptModule, trace: Trace) -> Trace:\n # TODO: Deepcopy the torch.jit.ScriptModule, so that if the program is\n # stateful then it does not mutate the original compiled program.\n\n result: Trace = []\n for item in trace:\n attr = artifact\n for part in item.symbol.split('.'):\n attr = getattr(attr, part)\n output = attr(*item.inputs)\n result.append(\n TraceItem(symbol=item.symbol,\n inputs=item.inputs,\n output=output))\n return result\n", "# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n# See https://llvm.org/LICENSE.txt for license information.\n# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n# Also available under a BSD-style license. See LICENSE.\n\nimport torch\n\nfrom torch_mlir_e2e_test.torchscript.framework import TestUtils\nfrom torch_mlir_e2e_test.torchscript.registry import register_test_case\nfrom torch_mlir_e2e_test.torchscript.annotations import annotate_args, export\n\n# ==============================================================================\n\n\nclass MmModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ([-1, -1], torch.float32, True),\n ])\n def forward(self, lhs, rhs):\n return torch.mm(lhs, rhs)\n\n\n@register_test_case(module_factory=lambda: MmModule())\ndef MmModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(4, 4), tu.rand(4, 4))\n\n\n@register_test_case(module_factory=lambda: MmModule())\ndef MmModule_chained(module, tu: TestUtils):\n res = module.forward(tu.rand(4, 4), tu.rand(4, 4))\n module.forward(res, res)\n\n# ==============================================================================\n\n\nclass BmmModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1, -1], torch.float32, True),\n ([-1, -1, -1], torch.float32, True),\n ])\n def forward(self, lhs, rhs):\n return torch.bmm(lhs, rhs)\n\n\n@register_test_case(module_factory=lambda: BmmModule())\ndef BmmModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4, 5), tu.rand(3, 5, 4))\n\n\n# ==============================================================================\n\n\n# A subgraph with multiple mm ops.\nclass MmDagModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([4, 4], torch.float32, True),\n ([4, 4], torch.float32, True),\n ])\n def forward(self, lhs, rhs):\n return torch.mm(lhs, torch.mm(lhs, rhs))\n\n\n@register_test_case(module_factory=lambda: MmDagModule())\ndef MmDagModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(4, 4), tu.rand(4, 4))\n\n\n# ==============================================================================\n\n\nclass MmTanhModule(torch.nn.Module):\n def __init__(self):\n 
super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ([-1, -1], torch.float32, True),\n ])\n def forward(self, lhs, rhs):\n return torch.tanh(self.matmul(lhs, rhs))\n\n def matmul(self, lhs, rhs):\n return torch.mm(lhs, rhs)\n\n\n@register_test_case(module_factory=lambda: MmTanhModule())\ndef MmTanhModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(4, 2), tu.rand(2, 4))\n\n\nclass AdaptiveAvgPool2dModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.aap2d = torch.nn.AdaptiveAvgPool2d((1, 1))\n\n @export\n @annotate_args([\n None,\n ([-1, -1, -1, -1], torch.float32, True),\n ])\n def forward(self, x):\n return self.aap2d(x)\n\n\n@register_test_case(module_factory=lambda: AdaptiveAvgPool2dModule())\ndef AdaptiveAvgPool2dModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(10, 3, 8, 9))\n\n\nclass FlattenStaticModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.flat = torch.nn.Flatten(2, 4)\n\n @export\n @annotate_args([\n None,\n ([10, 3, 8, 9, 3, 4], torch.float32, True),\n ])\n def forward(self, x):\n return self.flat(x)\n\n\n@register_test_case(module_factory=lambda: FlattenStaticModule())\ndef FlattenStaticModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(10, 3, 8, 9, 3, 4))\n\n\nclass FlattenRank0Module(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.flat = torch.nn.Flatten(-1, -1)\n\n @export\n @annotate_args([\n None,\n ([], torch.float32, True),\n ])\n def forward(self, x):\n return self.flat(x)\n\n\n@register_test_case(module_factory=lambda: FlattenRank0Module())\ndef FlattenRank0Module_basic(module, tu: TestUtils):\n module.forward(torch.tensor(4.0))\n\n\nclass FlattenDynamicModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.flat = torch.nn.Flatten(2, 4)\n\n @export\n @annotate_args([\n None,\n ([-1, -1, -1, 9, 3, -1], torch.float32, True),\n ])\n def forward(self, x):\n return self.flat(x)\n\n\n@register_test_case(module_factory=lambda: FlattenDynamicModule())\ndef FlattenDynamicModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(10, 3, 8, 9, 3, 4))\n\n\nclass MaxPool2dModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.mp2d = torch.nn.MaxPool2d(kernel_size=[6, 8],\n stride=[2, 2],\n padding=[3, 4],\n dilation=2)\n\n @export\n @annotate_args([\n None,\n ([-1, -1, -1, -1], torch.float32, True),\n ])\n def forward(self, x):\n return self.mp2d(x)\n\n\n@register_test_case(module_factory=lambda: MaxPool2dModule())\ndef MaxPool2dModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(1, 1, 20, 20) - 0.5)\n\n\nclass TransposeIntModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([3, 4, 2], torch.float32, True),\n ])\n def forward(self, x):\n return torch.transpose(x, 0, 1)\n\n\n@register_test_case(module_factory=lambda: TransposeIntModule())\ndef TransposeIntModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4, 2))\n\n\nclass TensorsConcatModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1, -1], torch.float32, True),\n ([-1, -1, -1], torch.float32, True),\n ([-1, -1, -1], torch.float32, True),\n ])\n def forward(self, x, y, z):\n return torch.cat([x, y, z], 1)\n\n\n@register_test_case(module_factory=lambda: TensorsConcatModule())\ndef TensorsConcatModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(2, 2, 4), tu.rand(2, 1, 4), tu.rand(2, 3, 
4))\n\n\nclass GatherModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1, -1], torch.float32, True),\n ([-1, -1, -1], torch.int64, True),\n ])\n def forward(self, tensor, indices):\n return torch.gather(tensor, 2, indices)\n\n\n@register_test_case(module_factory=lambda: GatherModule())\ndef GatherModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(2, 3, 4), torch.tensor([[[1, 2, 3], [1, 2, 3]]]))\n", "# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n# See https://llvm.org/LICENSE.txt for license information.\n# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n# Also available under a BSD-style license. See LICENSE.\n\n# RUN: %PYTHON %s | FileCheck %s\n\nimport torch\n\nfrom torch_mlir_e2e_test.torchscript.annotations import annotate_args, export\nfrom torch_mlir.dialects.torch.importer.jit_ir import ClassAnnotator\nfrom torch_mlir.dialects.torch.importer.jit_ir.torchscript_annotations import extract_annotations\n\nclass MmModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n @export\n @annotate_args([\n None,\n ([3, 4], torch.float32, False),\n ([4, 5], torch.float32, True),\n ])\n def forward(self, lhs, rhs):\n return torch.mm(lhs, rhs)\n\nmodule = MmModule()\nannotator = ClassAnnotator()\nextract_annotations(module, torch.jit.script(module), annotator)\nprint(annotator)\n\n# CHECK: ClassAnnotator {\n# CHECK: ClassAnnotation('__torch__.MmModule') {\n# CHECK: MethodAnnotation('forward') {\n# CHECK: isExported = true\n# CHECK: argAnnotations =\n# CHECK: ArgAnnotation(0) {\n# CHECK: dtype = <none>\n# CHECK: shape = <none>\n# CHECK: }\n# CHECK: ArgAnnotation(1) {\n# CHECK: dtype = Float\n# CHECK: shape = [3, 4]\n# CHECK: hasValueSemantics = false\n# CHECK: }\n# CHECK: ArgAnnotation(2) {\n# CHECK: dtype = Float\n# CHECK: shape = [4, 5]\n# CHECK: hasValueSemantics = true\n# CHECK: }\n# CHECK: }\n# CHECK: }\n# CHECK: }\n" ]
[ [ "torch.jit.script" ], [ "torch.mm", "torch.transpose", "torch.cat", "torch.nn.Flatten", "torch.tensor", "torch.nn.MaxPool2d", "torch.nn.AdaptiveAvgPool2d", "torch.bmm", "torch.gather" ], [ "torch.jit.script", "torch.mm" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
michellqueiroz-ua/instance-generator
[ "8b431a64898bcf1006464a8394824ab57576811e" ]
[ "REQreate/retrieve_zones.py" ]
[ "import math\nimport matplotlib.pyplot as plt\nimport os\nimport osmnx as ox\nimport pandas as pd\nfrom shapely.geometry import Polygon\n\n\ndef retrieve_zones(G_walk, G_drive, place_name, save_dir, output_folder_base, BBx, BBy):\n\n zones = []\n zone_id = 0\n\n save_dir_csv = os.path.join(save_dir, 'csv')\n if not os.path.isdir(save_dir_csv):\n os.mkdir(save_dir_csv)\n\n save_dir_images = os.path.join(save_dir, 'images')\n zones_folder = os.path.join(save_dir_images, 'zones')\n \n if not os.path.isdir(zones_folder):\n os.mkdir(zones_folder)\n\n path_zones_csv_file = os.path.join(save_dir_csv, output_folder_base+'.zones.csv')\n\n if os.path.isfile(path_zones_csv_file):\n \n print('is file zones')\n zones = pd.read_csv(path_zones_csv_file)\n\n #updates the polygons\n for index, zone in zones.iterrows():\n\n earth_radius = 6371009 # meters\n dist_lat = zone['dist_lat']\n dist_lon = zone['dist_lon'] \n\n lat = zone['center_point_y']\n lng = zone['center_point_x']\n\n delta_lat = (dist_lat / earth_radius) * (180 / math.pi)\n delta_lng = (dist_lon / earth_radius) * (180 / math.pi) / math.cos(lat * math.pi / 180)\n \n north = lat + delta_lat\n south = lat - delta_lat\n east = lng + delta_lng\n west = lng - delta_lng\n \n #north, south, east, west = ox.utils_geo.bbox_from_point(zone_center_point, distance)\n polygon = Polygon([(west, south), (east, south), (east, north), (west, north)])\n \n zones.loc[index, 'polygon'] = polygon\n \n else:\n\n print('creating file zones')\n\n tags = {\n 'place':'borough',\n 'place':'suburb',\n 'place':'quarter',\n 'place':'neighbourhood',\n }\n \n poi_zones = ox.geometries_from_place(place_name, tags=tags)\n print('poi zones len', len(poi_zones))\n\n if len(poi_zones) > 0:\n\n for index, poi in poi_zones.iterrows():\n \n if str(poi['name']) != 'nan':\n zone_name = str(poi['name'])\n \n if not any((z.get('name', None) == zone_name) for z in zones):\n \n #future: see what to do with geometries that are not points\n if poi['geometry'].geom_type == 'Point':\n \n earth_radius = 6371009 # meters\n dist_lat = BBx\n dist_lon = BBy \n\n lat = poi.geometry.centroid.y\n lng = poi.geometry.centroid.x\n\n delta_lat = (dist_lat / earth_radius) * (180 / math.pi)\n delta_lng = (dist_lon / earth_radius) * (180 / math.pi) / math.cos(lat * math.pi / 180)\n \n north = lat + delta_lat\n south = lat - delta_lat\n east = lng + delta_lng\n west = lng - delta_lng\n\n polygon = Polygon([(west, south), (east, south), (east, north), (west, north)])\n\n zone_center_point = (poi.geometry.centroid.y, poi.geometry.centroid.x)\n \n #osmid nearest node walk\n osmid_walk = ox.nearest_nodes(G_walk, zone_center_point[1], zone_center_point[0]) \n\n #osmid nearest node drive\n osmid_drive = ox.nearest_nodes(G_drive, zone_center_point[1], zone_center_point[0])\n\n #plot here the center point zone in the walk network\n nc = ['r' if (node == osmid_walk) else '#336699' for node in G_walk.nodes()]\n ns = [16 if (node == osmid_walk) else 1 for node in G_walk.nodes()]\n zone_filename = str(zone_id)+'_'+zone_name+'_walk.png'\n fig, ax = ox.plot_graph(G_walk, node_size=ns, show=False, node_color=nc, node_zorder=2, save=True, filepath=zones_folder+'/'+zone_filename)\n plt.close(fig)\n\n #plot here the center point zone in the drive network\n nc = ['r' if (node == osmid_drive) else '#336699' for node in G_drive.nodes()]\n ns = [16 if (node == osmid_drive) else 1 for node in G_drive.nodes()]\n zone_filename = str(zone_id)+'_'+zone_name+'_drive.png'\n fig, ax = ox.plot_graph(G_drive, node_size=ns, show=False, 
node_color=nc, node_zorder=2, save=True, filepath=zones_folder+'/'+zone_filename)\n plt.close(fig)\n\n n = {\n 'index': index,\n 'id': zone_id,\n 'name': zone_name,\n 'polygon': polygon,\n 'center_point_y': poi.geometry.centroid.y,\n 'center_point_x': poi.geometry.centroid.x,\n 'osmid_walk': osmid_walk,\n 'osmid_drive': osmid_drive,\n 'dist_lat': dist_lat,\n 'dist_lon': dist_lon,\n }\n\n zone_id += 1\n\n zones.append(n)\n \n zones = pd.DataFrame(zones)\n zones.to_csv(path_zones_csv_file)\n \n if len(zones) > 0:\n zones.set_index(['id'], inplace=True)\n\n return zones\n\n\n\n\n\n" ]
[ [ "matplotlib.pyplot.close", "pandas.read_csv", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
SKKU-ESLAB/Auto-Compression
[ "a54143e97f5ba08daa4150fd880f5be1346f3d71" ]
[ "quantization/lbq-v2/lbq-v1/functions/duq_2.py" ]
[ "from __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\nfrom __future__ import unicode_literals\r\n\r\nimport torch \r\nimport torch.nn as nn \r\nimport torch.nn.functional as F\r\nfrom torch.nn.parameter import Parameter\r\nfrom torch import Tensor\r\nimport numpy as np \r\nfrom collections import OrderedDict\r\n\r\n\r\nclass RoundQuant(torch.autograd.Function):\r\n @staticmethod\r\n def forward(ctx, input, n_lvs): \r\n return input.mul(n_lvs-1).round_().div_(n_lvs-1)\r\n \r\n @staticmethod\r\n def backward(ctx, grad_output):\r\n return grad_output, None\r\n\r\ndef Quantizer(weight, bit):\r\n a = torch.pow(2, torch.tensor([bit]))\r\n \r\n\r\n\r\n\r\ndef softmax_init(bits):\r\n degree = 4\r\n theta = (bits ** degree)/(bits ** degree).sum\r\n return theta \r\n\"\"\"\r\n@inproceedings{\r\n esser2020learned,\r\n title={LEARNED STEP SIZE QUANTIZATION},\r\n author={Steven K. Esser and Jeffrey L. McKinstry and Deepika Bablani and Rathinakumar Appuswamy and Dharmendra S. Modha},\r\n booktitle={International Conference on Learning Representations},\r\n year={2020},\r\n url={https://openreview.net/forum?id=rkgO66VKDS}\r\n}\r\n\"\"\"\r\ndef grad_scale(x, scale):\r\n yOut = x\r\n yGrad = x * scale\r\n return (yOut-yGrad).detach() + yGrad\r\n\r\n\r\nclass Q_ReLU(nn.Module):\r\n def __init__(self, act_func=True, inplace=False):\r\n super(Q_ReLU, self).__init__()\r\n self.n_lvs = [1]\r\n self.bits = [32]\r\n self.act_func = act_func\r\n self.inplace = inplace\r\n self.a = Parameter(Tensor(1))\r\n self.theta = Parameter(Tensor([1]))\r\n self.tau = 1\r\n\r\n def initialize(self, bits, offset, diff):\r\n self.bits = Parameter(Tensor(bits), requires_grad=False)\r\n self.n_lvs = 2 ** self.bits\r\n self.a = Parameter(Tensor(len(self.bits)))\r\n\r\n #self.theta = Parameter(torch.ones(len(self.bits))/len(self.bits))\r\n self.theta = Parameter(F.softmax(self.bits ** 2, dim=0))\r\n self.a.data.fill_(np.log(np.exp(offset + diff)-1))\r\n \r\n def initialize_qonly(self, offset, diff):\r\n self.a.data.fill_(np.log(np.exp(offset + diff)-1))\r\n \r\n def forward(self, x):\r\n if self.act_func:\r\n x = F.relu(x, self.inplace)\r\n \r\n if len(self.bits)==1 and self.bits[0]==32:\r\n #print(\"Q_ReLU\")\r\n return x#, 32\r\n else:\r\n a = F.softplus(self.a)\r\n\r\n if self.train:\r\n softmask = F.gumbel_softmax(self.theta, tau=self.tau, hard=False, dim=0)\r\n else:\r\n softmask = F.softmax(self.theta/self.tau, dim=0)\r\n #x_bar = torch.zeros_like(x)\r\n '''\r\n for i, n_lv in enumerate(self.n_lvs):\r\n \r\n x_temp = F.hardtanh(x / a[i], 0, 1)\r\n x_bar = torch.add(x_bar, RoundQuant.apply(x_temp, n_lv) * c[i] * softmask[i])\r\n '''\r\n a_mean = (softmask * a).sum()\r\n n_lv_mean = (softmask * self.n_lvs.to(softmask.device)).sum()\r\n\r\n x = F.hardtanh(x / a_mean, 0, 1)\r\n x_bar = RoundQuant.apply(x, n_lv_mean) * a_mean\r\n #act_size = (softmask * self.bits).sum()\r\n return x_bar#, act_size\r\n\r\n \r\nclass Q_ReLU6(Q_ReLU):\r\n def __init__(self, act_func=True, inplace=False):\r\n super(Q_ReLU6, self).__init__(act_func, inplace)\r\n\r\n def initialize(self, bits, offset, diff):\r\n self.bits = Parameter(Tensor(bits), requires_grad=False)\r\n self.n_lvs = 2 ** self.bits\r\n self.a = Parameter(Tensor(len(self.bits)))\r\n self.theta = Parameter(torch.ones(len(self.n_lvs))/len(self.n_lvs))\r\n self.theta = Parameter(F.softmax(self.bits ** 2, dim=0))\r\n\r\n if offset + diff > 6:\r\n self.a.data.fill_(np.log(np.exp(6)-1))\r\n else:\r\n 
self.a.data.fill_(np.log(np.exp(offset + diff)-1))\r\n\r\n def initialize_qonly(self, offset, diff):\r\n if offset + diff > 6:\r\n self.a.data.fill_(np.log(np.exp(6)-1))\r\n else:\r\n self.a.data.fill_(np.log(np.exp(offset + diff)-1))\r\n\r\n\r\nclass Q_Sym(nn.Module):\r\n def __init__(self):\r\n super(Q_Sym, self).__init__()\r\n self.n_lvs = [1]\r\n self.bits = [32] #Parameter(Tensor([32]), requires_grad=False)\r\n self.a = Parameter(Tensor(1))\r\n self.theta = Parameter(Tensor([1]))\r\n self.tau = 1\r\n\r\n def initialize(self, bits, offset, diff):\r\n self.bits = Parameter(Tensor(bits), requires_grad=False)\r\n self.n_lvs = 2 ** self.bits\r\n self.a = Parameter(Tensor(len(self.bits)))\r\n\r\n #self.theta = Parameter(torch.ones(len(self.bits))/len(self.bits))\r\n self.theta = Parameter(F.softmax(self.bits ** 2, dim=0))\r\n self.a.data.fill_(np.log(np.exp(offset + diff)-1))\r\n \r\n def initialize_qonly(self, offset, diff):\r\n self.a.data.fill_(np.log(np.exp(offset + diff)-1))\r\n\r\n def forward(self, x):\r\n if len(self.bits)==1 and self.bits[0]==32:\r\n #print(\"Q_Sym\")\r\n return x#, 32\r\n else:\r\n a = F.softplus(self.a)\r\n \r\n if self.train:\r\n softmask = F.gumbel_softmax(self.theta, tau=self.tau, hard=False, dim=0)\r\n else:\r\n softmask = F.softmax(self.theta/self.tau, dim=0)\r\n '''\r\n x_bar = torch.zeros_like(x)\r\n for i, n_lv in enumerate(self.n_lvs):\r\n x_temp = F.hardtanh(x / a[i], -1, 1)\r\n x_bar = torch.add(x_bar, RoundQuant.apply(x_temp, n_lv // 2) * c[i] * softmask[i])\r\n '''\r\n a_mean = (softmask * a).sum()\r\n n_lv_mean = (softmask * self.n_lvs.to(softmask.device)).sum()\r\n\r\n x = F.hardtanh(x / a_mean, -1, 1)\r\n x_bar = RoundQuant.apply(x, torch.round(n_lv_mean / 2)) * a_mean\r\n #act_size = (softmask * self.bits).sum()\r\n return x_bar#, act_size\r\n\r\n\r\n################## didn't modify Q_HSwish #################\r\nclass Q_HSwish(nn.Module):\r\n def __init__(self, act_func=True):\r\n super(Q_HSwish, self).__init__()\r\n self.n_lvs = [1]\r\n self.bits = [32]\r\n self.act_func = act_func\r\n self.a = Parameter(Tensor(1))\r\n self.b = 3/8\r\n self.c = Parameter(Tensor(1))\r\n self.d = -3/8\r\n\r\n def initialize(self, n_lvs, offset, diff):\r\n self.n_lvs = n_lvs\r\n self.a.data.fill_(np.log(np.exp(offset + diff)-1))\r\n \r\n def forward(self, x):\r\n if self.act_func:\r\n x = x * (F.hardtanh(x + 3, 0, 6) / 6)\r\n\r\n if len(self.bits)==1 and self.bits[0]==32:\r\n return x\r\n else:\r\n a = F.softplus(self.a)\r\n c = F.softplus(self.c)\r\n x = x + self.b\r\n x = F.hardtanh(x / a, 0, 1)\r\n x = RoundQuant.apply(x, self.n_lvs) * c\r\n x = x + self.d\r\n return x \r\n##########################################################\r\n\r\n\r\nclass Q_Conv2d(nn.Conv2d):\r\n def __init__(self, *args, **kargs):\r\n super(Q_Conv2d, self).__init__(*args, **kargs)\r\n self.n_lvs = [1]\r\n self.bits = [32]\r\n self.a = Parameter(Tensor(1))\r\n self.weight_old = None\r\n self.theta = Parameter(Tensor([1]))\r\n self.computation = 0\r\n self.tau = 1\r\n\r\n def initialize(self, bits):\r\n self.bits = Parameter(Tensor(bits), requires_grad=False)\r\n self.n_lvs = 2 ** self.bits\r\n self.a = Parameter(Tensor(len(self.bits)))\r\n \r\n #self.theta = Parameter(torch.ones(len(self.bits))/len(self.bits))\r\n self.theta = Parameter(F.softmax(self.bits ** 2, dim=0))\r\n max_val = self.weight.data.abs().max().item()\r\n self.a.data.fill_(np.log(np.exp(max_val * 0.9)-1))\r\n\r\n def initialize_qonly(self):\r\n max_val = self.weight.data.abs().max().item()\r\n 
self.a.data.fill_(np.log(np.exp(max_val * 0.9)-1))\r\n\r\n def _weight_quant(self):\r\n \"\"\"\r\n a = F.softplus(self.a)\r\n c = F.softplus(self.c)\r\n \r\n if self.train:\r\n softmask = F.gumbel_softmax(self.theta, tau=self.tau, hard=False, dim=0)\r\n else:\r\n softmask = F.softmax(self.theta/self.tau, dim=0)\r\n '''\r\n w_bar = torch.zeros_like(self.weight)\r\n for i, n_lv in enumerate(self.n_lvs):\r\n weight = F.hardtanh(self.weight / a[i], -1, 1)\r\n w_bar = torch.add(w_bar, RoundQuant.apply(weight, n_lv // 2) * c[i] * softmask[i])\r\n '''\r\n a_mean = (softmask * a).sum()\r\n c_mean = (softmask * c).sum()\r\n n_lv_mean = (softmask * self.n_lvs.to(softmask.device)).sum()\r\n\r\n w_bar = F.hardtanh(self.weight / a_mean, -1, 1)\r\n w_bar = RoundQuant.apply(w_bar, torch.round(n_lv_mean / 2)) * c_mean\r\n \"\"\"\r\n \r\n softmask = F.softmax(self.theta/self.tau, dim=0)\r\n n_lv_mean = (softmask * self.n_lvs.to(softmask.device)).sum()\r\n\r\n max_w = torch.max(torch.abs(torch.tanh(self.weight)))\r\n weight = torch.tanh(self.weight) / max_w\r\n weight.add_(1.0)\r\n weight.div_(2.0)\r\n\r\n k = torch.round(n_lv_mean/2)-1\r\n weight = torch.round(weight * k)\r\n weight.div(k)\r\n \r\n weight.mul_(2.0)\r\n weight.sub_(1.0)\r\n weight.mul_(max_w)\r\n\r\n #bitwidth = (softmask * self.bits).sum()\r\n return weight#, bitwidth\r\n\r\n def forward(self, x):#, cost, act_size=None):\r\n if len(self.bits)==1 and self.bits[0]==32:\r\n #print(\"Q_Conv2d\")\r\n #cost += act_size * 32 * self.computation\r\n return F.conv2d(x, self.weight, self.bias,\r\n self.stride, self.padding, self.dilation, self.groups)#, cost\r\n else:\r\n weight = self._weight_quant() #, bitwidth\r\n #cost += act_size * bitwidth * self.computation\r\n return F.conv2d(x, weight, self.bias,\r\n self.stride, self.padding, self.dilation, self.groups)#, cost\r\n\r\n\r\nclass Q_Linear(nn.Linear):\r\n def __init__(self, *args, **kargs):\r\n super(Q_Linear, self).__init__(*args, **kargs)\r\n self.n_lvs = [0]\r\n self.bits = [32]\r\n self.a = Parameter(Tensor(1))\r\n self.weight_old = None\r\n self.theta = Parameter(Tensor([1]))\r\n self.computation = 0\r\n self.tau = 1\r\n\r\n def initialize(self, bits):\r\n self.bits = Parameter(Tensor(bits), requires_grad=False)\r\n self.n_lvs = 2 ** self.bits\r\n self.a = Parameter(Tensor(len(self.bits)))\r\n\r\n #self.theta = Parameter(torch.ones(len(self.bits))/len(self.bits))\r\n self.theta = Parameter(F.softmax(self.bits ** 2, dim=0))\r\n max_val = self.weight.data.abs().max().item()\r\n self.a.data.fill_(np.log(np.exp(max_val * 0.9)-1))\r\n \r\n def initialize_qonly(self):\r\n max_val = self.weight.data.abs().max().item()\r\n self.a.data.fill_(np.log(np.exp(max_val * 0.9)-1))\r\n\r\n def _weight_quant(self):\r\n \"\"\"\r\n a = F.softplus(self.a)\r\n c = F.softplus(self.c)\r\n\r\n if self.train:\r\n softmask = F.gumbel_softmax(self.theta, tau=self.tau, hard=False, dim=0)\r\n else:\r\n softmask = F.softmax(self.theta/self.tau, dim=0)\r\n '''\r\n w_bar = torch.zeros_like(self.weight)\r\n for i, n_lv in enumerate(self.n_lvs):\r\n weight = F.hardtanh(self.weight / a[i], -1, 1) \r\n w_bar = torch.add(w_bar, RoundQuant.apply(weight, n_lv // 2) * c[i] * softmask[i])\r\n '''\r\n a_mean = (softmask * a).sum()\r\n c_mean = (softmask * c).sum()\r\n n_lv_mean = (softmask * self.n_lvs.to(softmask.device)).sum()\r\n\r\n w_bar = F.hardtanh(self.weight / a_mean, -1, 1)\r\n w_bar = RoundQuant.apply(w_bar, torch.round(n_lv_mean / 2)) * c_mean\r\n \"\"\"\r\n \r\n softmask = F.softmax(self.theta/self.tau, dim=0)\r\n 
n_lv_mean = (softmask * self.n_lvs.to(softmask.device)).sum()\r\n\r\n max_w = torch.max(torch.abs(torch.tanh(self.weight)))\r\n weight = torch.tanh(self.weight) / max_w\r\n weight.add_(1.0)\r\n weight.div_(2.0)\r\n\r\n k = torch.round(n_lv_mean/2)-1\r\n weight = torch.round(weight * k)\r\n weight.div(k)\r\n \r\n weight.mul_(2.0)\r\n weight.sub_(1.0)\r\n weight.mul_(max_w)\r\n\r\n #bitwidth = (softmask * self.bits).sum()\r\n return weight#, bitwidth\r\n \r\n\r\n def forward(self, x):#, cost, act_size=None):\r\n if len(self.bits)==1 and self.bits[0]==32:\r\n #print(\"Q_Linear\")\r\n #cost += act_size * 32 * self.computation\r\n return F.linear(x, self.weight, self.bias)#, cost\r\n else:\r\n weight = self._weight_quant() #, bitwidth\r\n #cost += act_size * bitwidth * self.computation\r\n return F.linear(x, weight, self.bias)#, cost\r\n\r\n\r\nclass Q_Conv2dPad(Q_Conv2d):\r\n def __init__(self, mode, *args, **kargs):\r\n super(Q_Conv2dPad, self).__init__(*args, **kargs)\r\n self.mode = mode\r\n\r\n def forward(self, inputs):\r\n if self.mode == \"HS\":\r\n inputs = F.pad(inputs, self.padding + self.padding, value=-3/8)\r\n elif self.mode == \"RE\":\r\n inputs = F.pad(inputs, self.padding + self.padding, value=0)\r\n else:\r\n raise LookupError(\"Unknown nonlinear\")\r\n\r\n if self.n_lvs == 0:\r\n return F.conv2d(inputs, self.weight, self.bias,\r\n self.stride, 0, self.dilation, self.groups)\r\n else:\r\n weight = self._weight_quant()\r\n\r\n return F.conv2d(inputs, weight, self.bias,\r\n self.stride, 0, self.dilation, self.groups)\r\n\r\n\r\ndef initialize(model, loader, bits, act=False, weight=False, eps=0.05):\r\n if weight: \r\n print('==> set up weight bitwidth..')\r\n elif act: \r\n print('==> set up activation bitwidth..')\r\n if isinstance(bits, int):\r\n bits = [bits]\r\n def initialize_hook(module, input, output):\r\n if isinstance(module, (Q_ReLU, Q_Sym, Q_HSwish)) and act:\r\n if not isinstance(input, list):\r\n input = input[0]\r\n input = input.detach().cpu().numpy()\r\n\r\n if isinstance(input, Q_Sym):\r\n input = np.abs(input)\r\n elif isinstance(input, Q_HSwish):\r\n input = input + 3/8\r\n\r\n input = input.reshape(-1)\r\n input = input[input > 0]\r\n input = np.sort(input)\r\n \r\n if len(input) == 0:\r\n small, large = 0, 1e-3\r\n else:\r\n small, large = input[int(len(input) * eps)], input[int(len(input) * (1-eps))]\r\n module.initialize(bits, small, large - small)\r\n\r\n if isinstance(module, (Q_Conv2d, Q_Linear)) and weight:\r\n module.initialize(bits)\r\n \r\n if isinstance(module, Q_Conv2d) and weight:\r\n O, I, K1, K2 = module.weight.shape\r\n N, C, H, W = input[0].shape\r\n s = module.stride[0]\r\n module.computation = O * I * K1 * K2 * H * W / s / s\r\n\r\n if isinstance(module, Q_Linear) and weight:\r\n O, I = module.weight.shape\r\n N, I = input[0].shape\r\n module.computation = O * I\r\n\r\n hooks = []\r\n for name, module in model.named_modules():\r\n hook = module.register_forward_hook(initialize_hook)\r\n hooks.append(hook)\r\n\r\n model.train()\r\n model.cuda()\r\n for i, (input, target) in enumerate(loader):\r\n with torch.no_grad():\r\n if isinstance(model, nn.DataParallel):\r\n output = model.module(input.cuda())\r\n else:\r\n output = model(input.cuda())\r\n break\r\n\r\n model.cuda()\r\n for hook in hooks:\r\n hook.remove()\r\n\r\n\r\ndef sample_search_result(model, hard=True, print=True):\r\n if hard:\r\n for name, module in model.named_modules():\r\n if isinstance(module, (Q_Conv2d, Q_Linear, Q_ReLU, Q_Sym, Q_HSwish)):\r\n idx = 
torch.argmax(module.theta)\r\n for var in ['a', 'c']:\r\n setattr(module, var, Parameter(getattr(module, var)[idx].view(1)))\r\n for var in ['bits', 'n_lvs']:\r\n setattr(module, var, Parameter(getattr(module, var)[idx].view(1), requires_grad=False))\r\n module.theta=Parameter(torch.Tensor([1]), requires_grad=False)\r\n else: \r\n # TODO: stochastic sampling\r\n raise NotImplementedError\r\n\r\n\r\ndef extract_bitwidth(model, weight_or_act=None, tau=1):\r\n assert weight_or_act != None\r\n if weight_or_act == \"weight\" or weight_or_act == 0:\r\n i = 1\r\n module_set = (Q_Conv2d, Q_Linear)\r\n elif weight_or_act == \"act\" or weight_or_act == 1:\r\n i = 2\r\n module_set = (Q_ReLU, Q_Sym, Q_HSwish)\r\n else:\r\n print(f'[ValueError] weight_or_act: {weight_or_act}')\r\n raise ValueError\r\n \r\n list_select = []\r\n list_prob = []\r\n str_prob = ''\r\n for _, m in enumerate(model.modules()):\r\n if isinstance(m, module_set):\r\n prob = F.softmax(m.theta / tau, dim=0)\r\n list_select.append(int(m.bits[torch.argmax(prob)].item()))\r\n list_prob.append(prob)\r\n\r\n prob = [f'{i:.5f}' for i in prob.cpu().tolist()]\r\n str_prob += f'layer {i} [{\", \".join(prob)}]\\n'\r\n i += 1\r\n str_select = f'{weight_or_act} bitwidth select: \\n' + \", \".join(map(str, list_select))\r\n return list_select, list_prob, str_select, str_prob\r\n\r\n\r\ndef initialize_quantizer(model, loader, eps=0.05):\r\n def initialize_hook(module, input, output):\r\n if isinstance(module, (Q_ReLU, Q_Sym, Q_HSwish)):\r\n if not isinstance(input, list):\r\n input = input[0]\r\n input = input.detach().cpu().numpy()\r\n\r\n if isinstance(input, Q_Sym):\r\n input = np.abs(input)\r\n elif isinstance(input, Q_HSwish):\r\n input = input + 3/8\r\n\r\n input = input.reshape(-1)\r\n input = input[input > 0]\r\n input = np.sort(input)\r\n \r\n if len(input) == 0:\r\n small, large = 0, 1e-3\r\n else:\r\n small, large = input[int(len(input) * eps)], input[int(len(input) * (1-eps))]\r\n module.initialize_qonly(small, large - small)\r\n\r\n if isinstance(module, (Q_Conv2d, Q_Linear)):\r\n module.initialize_qonly()\r\n \r\n if isinstance(module, Q_Conv2d):\r\n O, I, K1, K2 = module.weight.shape\r\n N, C, H, W = input[0].shape\r\n s = module.stride[0]\r\n module.computation = O * I * K1 * K2 * H * W / s / s\r\n\r\n if isinstance(module, Q_Linear):\r\n O, I = module.weight.shape\r\n N, I = input[0].shape\r\n module.computation = O * I\r\n\r\n hooks = []\r\n for name, module in model.named_modules():\r\n hook = module.register_forward_hook(initialize_hook)\r\n hooks.append(hook)\r\n\r\n model.train()\r\n for i, (input, target) in enumerate(loader):\r\n with torch.no_grad():\r\n if isinstance(model, nn.DataParallel):\r\n output = model.module(input.cuda())\r\n else:\r\n output = model(input.cuda())\r\n break\r\n\r\n model.cuda()\r\n for hook in hooks:\r\n hook.remove()\r\n\r\n\r\nclass Q_Sequential(nn.Sequential):\r\n def __init__(self, *args):\r\n super(Q_Sequential, self).__init__()\r\n\r\n if len(args) == 1 and isinstance(args[0], OrderedDict):\r\n for key, module in args[0].items():\r\n self.add_module(key, module)\r\n else:\r\n idx = 0 \r\n for module in args:\r\n if isinstance(module, Q_Sym) or (isinstance(module, Q_HSwish) and idx == 0):\r\n self.add_module('-' + str(idx), module)\r\n else:\r\n self.add_module(str(idx), module)\r\n idx += 1\r\n\r\ndef transfer_bitwidth(model_src, model_dst): \r\n n_lvs_dict={}\r\n bit_dict={}\r\n for name, module in model_src.named_modules():\r\n if isinstance(module, (Q_Conv2d, Q_Linear, Q_ReLU, Q_Sym, 
Q_HSwish)):\r\n n_lvs_dict[name] = module.n_lvs.data\r\n bit_dict[name] = module.bits.data\r\n for name, module in model_dst.named_modules():\r\n if isinstance(module, (Q_Conv2d, Q_Linear, Q_ReLU, Q_Sym, Q_HSwish)):\r\n module.n_lvs.data = n_lvs_dict[name]\r\n module.bits.data = bit_dict[name]\r\n print(name)\r\n\r\n\r\nclass QuantOps(object):\r\n initialize = initialize\r\n initialize_quantizer = initialize_quantizer\r\n transfer_bitwidth = transfer_bitwidth\r\n \r\n Conv2d = Q_Conv2d\r\n ReLU = Q_ReLU\r\n ReLU6 = Q_ReLU6\r\n Sym = Q_Sym\r\n HSwish = Q_HSwish\r\n Conv2dPad = Q_Conv2dPad \r\n Sequential = Q_Sequential\r\n Linear = Q_Linear\r\n" ]
[ [ "torch.nn.functional.softmax", "torch.nn.functional.gumbel_softmax", "numpy.abs", "torch.Tensor", "torch.round", "torch.nn.functional.conv2d", "numpy.sort", "torch.tensor", "torch.tanh", "torch.nn.functional.relu", "torch.no_grad", "torch.nn.functional.hardtanh", "numpy.exp", "torch.nn.functional.linear", "torch.nn.functional.softplus", "torch.nn.functional.pad", "torch.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
GMDennis/claf
[ "d1e064e593127e5d654f000f5506c5ae1caab5ce", "d1e064e593127e5d654f000f5506c5ae1caab5ce", "d1e064e593127e5d654f000f5506c5ae1caab5ce" ]
[ "claf/data/dataset/bert/multi_task.py", "claf/tokens/embedding/cove_embedding.py", "claf/modules/layer/normalization.py" ]
[ "\nimport json\nfrom overrides import overrides\nimport torch\nimport random\n\nfrom claf.config.factory.data_loader import make_data_loader\nfrom claf.data.dataset.base import DatasetBase\n\n\nclass MultiTaskBertDataset(DatasetBase):\n \"\"\"\n Dataset for Multi-Task GLUE using BERT\n\n * Args:\n batch: Batch DTO (claf.data.batch)\n\n * Kwargs:\n helper: helper from data_reader\n \"\"\"\n\n def __init__(self, batches, vocab, helper=None):\n super(MultiTaskBertDataset, self).__init__()\n\n self.name = \"multitask_bert\"\n self.vocab = vocab\n\n task_helpers = helper[\"task_helpers\"]\n\n self.multi_dataset_size = 0\n self.batch_sizes = []\n self.task_datasets = []\n\n for b, h in zip(batches, task_helpers):\n batch_size = h[\"batch_size\"]\n self.batch_sizes.append(batch_size)\n\n dataset_cls = h[\"dataset\"]\n dataset = dataset_cls(b, vocab, helper=h)\n self.task_datasets.append(dataset)\n\n task_dataset_size, remain = divmod(len(dataset), batch_size)\n if remain > 0:\n task_dataset_size += 1\n self.multi_dataset_size += task_dataset_size\n\n self.init_iterators()\n\n def init_iterators(self):\n cuda_device_id = None\n if torch.cuda.is_available():\n cuda_device_id = 0 # TODO: Hard-code\n\n self.iterators = []\n for batch_size, dataset in zip(self.batch_sizes, self.task_datasets):\n data_loader = make_data_loader(dataset, batch_size=batch_size, cuda_device_id=cuda_device_id) # TODO: cuda_device_id\n self.iterators.append(iter(data_loader))\n\n self.available_iterators = list(range(len(self.iterators)))\n\n @overrides\n def collate_fn(self, cuda_device_id=None):\n\n def pass_tensor(data):\n task_idx, tensor_datas = zip(*data)\n tensor_batch = tensor_datas[0]\n\n task_id_tensor = torch.LongTensor(list(task_idx))\n if torch.cuda.is_available():\n task_id_tensor.cuda(cuda_device_id)\n tensor_batch.features[\"task_index\"] = task_id_tensor\n return tensor_batch\n return pass_tensor\n\n @overrides\n def __getitem__(self, index):\n # self.lazy_evaluation(index)\n if len(self.available_iterators) == 0:\n self.init_iterators()\n\n random_index = random.choice(self.available_iterators)\n task_iterator = self.iterators[random_index]\n try:\n return random_index, next(task_iterator)\n except StopIteration:\n self.available_iterators.remove(random_index)\n return self.__getitem__(index)\n\n def __len__(self):\n return self.multi_dataset_size\n\n def __repr__(self):\n dataset_properties = {\n \"name\": self.name,\n \"total_count\": self.__len__(),\n \"dataset_count\": len(self.iterators),\n \"task_dataset_sizes\": [len(dataset) for dataset in self.task_datasets],\n }\n return json.dumps(dataset_properties, indent=4)\n", "\n\nfrom overrides import overrides\n\nimport torch.nn as nn\n\nfrom claf.tokens.cove import MTLSTM\n\nfrom .base import TokenEmbedding\nfrom .word_embedding import WordEmbedding\n\n\nclass CoveEmbedding(TokenEmbedding):\n \"\"\"\n Cove Embedding\n\n Learned in Translation: Contextualized Word Vectors\n (http://papers.nips.cc/paper/7209-learned-in-translation-contextualized-word-vectors.pdf)\n\n * Args:\n vocab: Vocab (claf.tokens.vocab)\n\n * Kwargs:\n dropout: The number of dropout probability\n pretrained_path: pretrained vector path (eg. 
GloVe)\n trainable: finetune or fixed\n project_dim: The number of project (linear) dimension\n \"\"\"\n\n def __init__(\n self,\n vocab,\n glove_pretrained_path=None,\n model_pretrained_path=None,\n dropout=0.2,\n trainable=False,\n project_dim=None,\n ):\n super(CoveEmbedding, self).__init__(vocab)\n\n self.embed_dim = 600 # MTLSTM (hidden_size=300 + bidirectional => 600)\n word_embedding = WordEmbedding(\n vocab, dropout=0, embed_dim=300, pretrained_path=glove_pretrained_path\n )\n self.cove = MTLSTM(\n word_embedding, pretrained_path=model_pretrained_path, requires_grad=trainable\n )\n\n if dropout and dropout > 0:\n self.dropout = nn.Dropout(p=dropout)\n else:\n self.dropout = lambda x: x\n\n self.project_dim = project_dim\n self.project_linear = None\n if project_dim:\n self.project_linear = nn.Linear(self.elmo.get_output_dim(), project_dim)\n\n @overrides\n def forward(self, words):\n embedded_words = self.cove(words)\n return self.dropout(embedded_words)\n\n @overrides\n def get_output_dim(self):\n if self.project_linear:\n return self.project_dim\n return self.embed_dim\n", "\nimport torch\nimport torch.nn as nn\n\n\nclass LayerNorm(nn.Module):\n \"\"\"\n Layer Normalization\n (https://arxiv.org/abs/1607.06450)\n \"\"\"\n\n def __init__(self, normalized_shape, eps=1e-5):\n super(LayerNorm, self).__init__()\n self.gamma = nn.Parameter(torch.ones(normalized_shape))\n self.beta = nn.Parameter(torch.zeros(normalized_shape))\n self.eps = eps\n\n def forward(self, x):\n mean = x.mean(-1, keepdim=True)\n std = x.std(-1, keepdim=True)\n return self.gamma * (x - mean) / (std + self.eps) + self.beta\n" ]
[ [ "torch.cuda.is_available" ], [ "torch.nn.Dropout" ], [ "torch.ones", "torch.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yzh119/dgl
[ "6a7c1eb2323383739585259c70c8b9065ca95d1e" ]
[ "examples/pytorch/graphsage/experimental/train_dist_unsupervised.py" ]
[ "import os\nos.environ['DGLBACKEND']='pytorch'\nfrom multiprocessing import Process\nimport argparse, time, math\nimport numpy as np\nfrom functools import wraps\nimport tqdm\nimport sklearn.linear_model as lm\nimport sklearn.metrics as skm\n\nimport dgl\nfrom dgl import DGLGraph\nfrom dgl.data import register_data_args, load_data\nfrom dgl.data.utils import load_graphs\nimport dgl.function as fn\nimport dgl.nn.pytorch as dglnn\n\nimport torch as th\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.multiprocessing as mp\nfrom torch.utils.data import DataLoader\n#from pyinstrument import Profiler\nfrom train_sampling import SAGE\n\nclass NegativeSampler(object):\n def __init__(self, g, neg_nseeds):\n self.neg_nseeds = neg_nseeds\n\n def __call__(self, num_samples):\n # select local neg nodes as seeds\n return self.neg_nseeds[th.randint(self.neg_nseeds.shape[0], (num_samples,))]\n\nclass NeighborSampler(object):\n def __init__(self, g, fanouts, neg_nseeds, sample_neighbors, num_negs, remove_edge):\n self.g = g\n self.fanouts = fanouts\n self.sample_neighbors = sample_neighbors\n self.neg_sampler = NegativeSampler(g, neg_nseeds)\n self.num_negs = num_negs\n self.remove_edge = remove_edge\n\n def sample_blocks(self, seed_edges):\n n_edges = len(seed_edges)\n seed_edges = th.LongTensor(np.asarray(seed_edges))\n heads, tails = self.g.find_edges(seed_edges)\n\n neg_tails = self.neg_sampler(self.num_negs * n_edges)\n neg_heads = heads.view(-1, 1).expand(n_edges, self.num_negs).flatten()\n\n # Maintain the correspondence between heads, tails and negative tails as two\n # graphs.\n # pos_graph contains the correspondence between each head and its positive tail.\n # neg_graph contains the correspondence between each head and its negative tails.\n # Both pos_graph and neg_graph are first constructed with the same node space as\n # the original graph. 
Then they are compacted together with dgl.compact_graphs.\n pos_graph = dgl.graph((heads, tails), num_nodes=self.g.number_of_nodes())\n neg_graph = dgl.graph((neg_heads, neg_tails), num_nodes=self.g.number_of_nodes())\n pos_graph, neg_graph = dgl.compact_graphs([pos_graph, neg_graph])\n\n seeds = pos_graph.ndata[dgl.NID]\n blocks = []\n for fanout in self.fanouts:\n # For each seed node, sample ``fanout`` neighbors.\n frontier = self.sample_neighbors(self.g, seeds, fanout, replace=True)\n if self.remove_edge:\n # Remove all edges between heads and tails, as well as heads and neg_tails.\n _, _, edge_ids = frontier.edge_ids(\n th.cat([heads, tails, neg_heads, neg_tails]),\n th.cat([tails, heads, neg_tails, neg_heads]),\n return_uv=True)\n frontier = dgl.remove_edges(frontier, edge_ids)\n # Then we compact the frontier into a bipartite graph for message passing.\n block = dgl.to_block(frontier, seeds)\n\n # Obtain the seed nodes for next layer.\n seeds = block.srcdata[dgl.NID]\n\n blocks.insert(0, block)\n\n # Pre-generate CSR format that it can be used in training directly\n return pos_graph, neg_graph, blocks\n\nclass PosNeighborSampler(object):\n def __init__(self, g, fanouts, sample_neighbors):\n self.g = g\n self.fanouts = fanouts\n self.sample_neighbors = sample_neighbors\n\n def sample_blocks(self, seeds):\n seeds = th.LongTensor(np.asarray(seeds))\n blocks = []\n for fanout in self.fanouts:\n # For each seed node, sample ``fanout`` neighbors.\n frontier = self.sample_neighbors(self.g, seeds, fanout, replace=True)\n # Then we compact the frontier into a bipartite graph for message passing.\n block = dgl.to_block(frontier, seeds)\n # Obtain the seed nodes for next layer.\n seeds = block.srcdata[dgl.NID]\n\n blocks.insert(0, block)\n return blocks\n\nclass DistSAGE(SAGE):\n def __init__(self, in_feats, n_hidden, n_classes, n_layers,\n activation, dropout):\n super(DistSAGE, self).__init__(in_feats, n_hidden, n_classes, n_layers,\n activation, dropout)\n\n def inference(self, g, x, batch_size, device):\n \"\"\"\n Inference with the GraphSAGE model on full neighbors (i.e. without neighbor sampling).\n g : the entire graph.\n x : the input of entire node set.\n\n The inference code is written in a fashion that it could handle any number of nodes and\n layers.\n \"\"\"\n # During inference with sampling, multi-layer blocks are very inefficient because\n # lots of computations in the first few layers are repeated.\n # Therefore, we compute the representation of all nodes layer by layer. 
The nodes\n # on each layer are of course splitted in batches.\n # TODO: can we standardize this?\n nodes = dgl.distributed.node_split(np.arange(g.number_of_nodes()),\n g.get_partition_book(), force_even=True)\n y = dgl.distributed.DistTensor(g, (g.number_of_nodes(), self.n_hidden), th.float32, 'h',\n persistent=True)\n for l, layer in enumerate(self.layers):\n if l == len(self.layers) - 1:\n y = dgl.distributed.DistTensor(g, (g.number_of_nodes(), self.n_classes),\n th.float32, 'h_last', persistent=True)\n\n sampler = PosNeighborSampler(g, [-1], dgl.distributed.sample_neighbors)\n print('|V|={}, eval batch size: {}'.format(g.number_of_nodes(), batch_size))\n # Create PyTorch DataLoader for constructing blocks\n dataloader = DataLoader(\n dataset=nodes,\n batch_size=batch_size,\n collate_fn=sampler.sample_blocks,\n shuffle=False,\n drop_last=False,\n num_workers=args.num_workers)\n\n for blocks in tqdm.tqdm(dataloader):\n block = blocks[0]\n input_nodes = block.srcdata[dgl.NID]\n output_nodes = block.dstdata[dgl.NID]\n h = x[input_nodes].to(device)\n h_dst = h[:block.number_of_dst_nodes()]\n h = layer(block, (h, h_dst))\n if l != len(self.layers) - 1:\n h = self.activation(h)\n h = self.dropout(h)\n\n y[output_nodes] = h.cpu()\n\n x = y\n g.barrier()\n return y\n\ndef load_subtensor(g, input_nodes, device):\n \"\"\"\n Copys features and labels of a set of nodes onto GPU.\n \"\"\"\n batch_inputs = g.ndata['features'][input_nodes].to(device)\n return batch_inputs\n\nclass CrossEntropyLoss(nn.Module):\n def forward(self, block_outputs, pos_graph, neg_graph):\n with pos_graph.local_scope():\n pos_graph.ndata['h'] = block_outputs\n pos_graph.apply_edges(fn.u_dot_v('h', 'h', 'score'))\n pos_score = pos_graph.edata['score']\n with neg_graph.local_scope():\n neg_graph.ndata['h'] = block_outputs\n neg_graph.apply_edges(fn.u_dot_v('h', 'h', 'score'))\n neg_score = neg_graph.edata['score']\n\n score = th.cat([pos_score, neg_score])\n label = th.cat([th.ones_like(pos_score), th.zeros_like(neg_score)]).long()\n loss = F.binary_cross_entropy_with_logits(score, label.float())\n return loss\n\ndef generate_emb(model, g, inputs, batch_size, device):\n \"\"\"\n Generate embeddings for each node\n g : The entire graph.\n inputs : The features of all the nodes.\n batch_size : Number of nodes to compute at the same time.\n device : The GPU device to evaluate on.\n \"\"\"\n model.eval()\n with th.no_grad():\n pred = model.inference(g, inputs, batch_size, device)\n\n return pred\n\ndef compute_acc(emb, labels, train_nids, val_nids, test_nids):\n \"\"\"\n Compute the accuracy of prediction given the labels.\n \n We will fist train a LogisticRegression model using the trained embeddings,\n the training set, validation set and test set is provided as the arguments.\n\n The final result is predicted by the lr model.\n\n emb: The pretrained embeddings\n labels: The ground truth\n train_nids: The training set node ids\n val_nids: The validation set node ids\n test_nids: The test set node ids\n \"\"\"\n\n emb = emb[np.arange(labels.shape[0])].cpu().numpy()\n train_nids = train_nids.cpu().numpy()\n val_nids = val_nids.cpu().numpy()\n test_nids = test_nids.cpu().numpy()\n labels = labels.cpu().numpy()\n\n emb = (emb - emb.mean(0, keepdims=True)) / emb.std(0, keepdims=True)\n lr = lm.LogisticRegression(multi_class='multinomial', max_iter=10000)\n lr.fit(emb[train_nids], labels[train_nids])\n\n pred = lr.predict(emb)\n eval_acc = skm.accuracy_score(labels[val_nids], pred[val_nids])\n test_acc = 
skm.accuracy_score(labels[test_nids], pred[test_nids])\n return eval_acc, test_acc\n\ndef run(args, device, data):\n # Unpack data\n train_eids, train_nids, in_feats, g, global_train_nid, global_valid_nid, global_test_nid, labels = data\n # Create sampler\n sampler = NeighborSampler(g, [int(fanout) for fanout in args.fan_out.split(',')], train_nids,\n dgl.distributed.sample_neighbors, args.num_negs, args.remove_edge)\n\n # Create PyTorch DataLoader for constructing blocks\n dataloader = DataLoader(\n dataset=train_eids.numpy(),\n batch_size=args.batch_size,\n collate_fn=sampler.sample_blocks,\n shuffle=True,\n drop_last=False,\n num_workers=args.num_workers)\n\n # Define model and optimizer\n model = DistSAGE(in_feats, args.num_hidden, args.num_hidden, args.num_layers, F.relu, args.dropout)\n model = model.to(device)\n if not args.standalone:\n model = th.nn.parallel.DistributedDataParallel(model)\n loss_fcn = CrossEntropyLoss()\n loss_fcn = loss_fcn.to(device)\n optimizer = optim.Adam(model.parameters(), lr=args.lr)\n\n # Training loop\n #profiler = Profiler()\n #profiler.start()\n epoch = 0\n for epoch in range(args.num_epochs):\n sample_time = 0\n copy_time = 0\n forward_time = 0\n backward_time = 0\n update_time = 0\n num_seeds = 0\n num_inputs = 0\n\n step_time = []\n iter_t = []\n sample_t = []\n feat_copy_t = []\n forward_t = []\n backward_t = []\n update_t = []\n iter_tput = []\n\n start = time.time()\n # Loop over the dataloader to sample the computation dependency graph as a list of\n # blocks.\n for step, (pos_graph, neg_graph, blocks) in enumerate(dataloader):\n tic_step = time.time()\n sample_t.append(tic_step - start)\n\n # The nodes for input lies at the LHS side of the first block.\n # The nodes for output lies at the RHS side of the last block.\n input_nodes = blocks[0].srcdata[dgl.NID]\n\n # Load the input features as well as output labels\n batch_inputs = load_subtensor(g, input_nodes, device)\n copy_time = time.time()\n feat_copy_t.append(copy_time - tic_step)\n\n # Compute loss and prediction\n batch_pred = model(blocks, batch_inputs)\n loss = loss_fcn(batch_pred, pos_graph, neg_graph)\n forward_end = time.time()\n optimizer.zero_grad()\n loss.backward()\n compute_end = time.time()\n forward_t.append(forward_end - copy_time)\n backward_t.append(compute_end - forward_end)\n\n # Aggregate gradients in multiple nodes.\n optimizer.step()\n update_t.append(time.time() - compute_end)\n\n pos_edges = pos_graph.number_of_edges()\n neg_edges = neg_graph.number_of_edges()\n\n step_t = time.time() - start\n step_time.append(step_t)\n iter_tput.append(pos_edges / step_t)\n num_seeds += pos_edges\n if step % args.log_every == 0:\n print('[{}] Epoch {:05d} | Step {:05d} | Loss {:.4f} | Speed (samples/sec) {:.4f} | time {:.3f} s' \\\n '| sample {:.3f} | copy {:.3f} | forward {:.3f} | backward {:.3f} | update {:.3f}'.format(\n g.rank(), epoch, step, loss.item(), np.mean(iter_tput[3:]), np.sum(step_time[-args.log_every:]),\n np.sum(sample_t[-args.log_every:]), np.sum(feat_copy_t[-args.log_every:]), np.sum(forward_t[-args.log_every:]),\n np.sum(backward_t[-args.log_every:]), np.sum(update_t[-args.log_every:])))\n start = time.time()\n\n print('[{}]Epoch Time(s): {:.4f}, sample: {:.4f}, data copy: {:.4f}, forward: {:.4f}, backward: {:.4f}, update: {:.4f}, #seeds: {}, #inputs: {}'.format(\n g.rank(), np.sum(step_time), np.sum(sample_t), np.sum(feat_copy_t), np.sum(forward_t), np.sum(backward_t), np.sum(update_t), num_seeds, num_inputs))\n epoch += 1\n\n # evaluate the embedding using 
LogisticRegression\n if args.standalone:\n pred = generate_emb(model,g, g.ndata['features'], args.batch_size_eval, device)\n else:\n pred = generate_emb(model.module, g, g.ndata['features'], args.batch_size_eval, device)\n if g.rank() == 0:\n eval_acc, test_acc = compute_acc(pred, labels, global_train_nid, global_valid_nid, global_test_nid)\n print('eval acc {:.4f}; test acc {:.4f}'.format(eval_acc, test_acc))\n\n # sync for eval and test\n if not args.standalone:\n th.distributed.barrier()\n\n if not args.standalone:\n g._client.barrier()\n\n # save features into file\n if g.rank() == 0:\n th.save(pred, 'emb.pt')\n else:\n feat = g.ndata['features']\n th.save(pred, 'emb.pt')\n\ndef main(args):\n if not args.standalone:\n th.distributed.init_process_group(backend='gloo')\n g = dgl.distributed.DistGraph(args.ip_config, args.graph_name, conf_file=args.conf_path)\n print('rank:', g.rank())\n print('number of edges', g.number_of_edges())\n\n train_eids = dgl.distributed.edge_split(th.ones((g.number_of_edges(),), dtype=th.bool), g.get_partition_book(), force_even=True)\n train_nids = dgl.distributed.node_split(th.ones((g.number_of_nodes(),), dtype=th.bool), g.get_partition_book())\n global_train_nid = th.LongTensor(np.nonzero(g.ndata['train_mask'][np.arange(g.number_of_nodes())]))\n global_valid_nid = th.LongTensor(np.nonzero(g.ndata['val_mask'][np.arange(g.number_of_nodes())]))\n global_test_nid = th.LongTensor(np.nonzero(g.ndata['test_mask'][np.arange(g.number_of_nodes())]))\n labels = g.ndata['labels'][np.arange(g.number_of_nodes())]\n device = th.device('cpu')\n\n # Pack data\n in_feats = g.ndata['features'].shape[1]\n global_train_nid = global_train_nid.squeeze()\n global_valid_nid = global_valid_nid.squeeze()\n global_test_nid = global_test_nid.squeeze()\n print(\"number of train {}\".format(global_train_nid.shape[0]))\n print(\"number of valid {}\".format(global_valid_nid.shape[0]))\n print(\"number of test {}\".format(global_test_nid.shape[0]))\n data = train_eids, train_nids, in_feats, g, global_train_nid, global_valid_nid, global_test_nid, labels\n run(args, device, data)\n print(\"parent ends\")\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='GCN')\n register_data_args(parser)\n parser.add_argument('--graph-name', type=str, help='graph name')\n parser.add_argument('--id', type=int, help='the partition id')\n parser.add_argument('--ip_config', type=str, help='The file for IP configuration')\n parser.add_argument('--conf_path', type=str, help='The path to the partition config file')\n parser.add_argument('--num-client', type=int, help='The number of clients')\n parser.add_argument('--n-classes', type=int, help='the number of classes')\n parser.add_argument('--gpu', type=int, default=0,\n help=\"GPU device ID. Use -1 for CPU training\")\n parser.add_argument('--num-epochs', type=int, default=20)\n parser.add_argument('--num-hidden', type=int, default=16)\n parser.add_argument('--num-layers', type=int, default=2)\n parser.add_argument('--fan-out', type=str, default='10,25')\n parser.add_argument('--batch-size', type=int, default=1000)\n parser.add_argument('--batch-size-eval', type=int, default=100000)\n parser.add_argument('--log-every', type=int, default=20)\n parser.add_argument('--eval-every', type=int, default=5)\n parser.add_argument('--lr', type=float, default=0.003)\n parser.add_argument('--dropout', type=float, default=0.5)\n parser.add_argument('--num-workers', type=int, default=0,\n help=\"Number of sampling processes. 
Use 0 for no extra process.\")\n parser.add_argument('--local_rank', type=int, help='get rank of the process')\n parser.add_argument('--standalone', action='store_true', help='run in the standalone mode')\n parser.add_argument('--num-negs', type=int, default=1)\n parser.add_argument('--neg-share', default=False, action='store_true',\n help=\"sharing neg nodes for positive nodes\")\n parser.add_argument('--remove-edge', default=False, action='store_true',\n help=\"whether to remove edges during sampling\")\n args = parser.parse_args()\n\n print(args)\n main(args)\n" ]
[ [ "torch.ones_like", "torch.randint", "sklearn.linear_model.LogisticRegression", "torch.distributed.init_process_group", "torch.cat", "numpy.asarray", "numpy.arange", "torch.utils.data.DataLoader", "torch.zeros_like", "torch.distributed.barrier", "torch.no_grad", "torch.save", "numpy.mean", "torch.device", "numpy.sum", "torch.nn.parallel.DistributedDataParallel", "sklearn.metrics.accuracy_score" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rahulvenkk/pytorch3d
[ "68bfac3394f9a87fb268165d1c9dd264e1d9316b", "68bfac3394f9a87fb268165d1c9dd264e1d9316b" ]
[ "tests/test_meshes.py", "pytorch3d/transforms/transform3d.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\nimport random\nimport unittest\n\nimport numpy as np\nimport torch\nfrom common_testing import TestCaseMixin\nfrom pytorch3d.structures.meshes import Meshes\n\n\nclass TestMeshes(TestCaseMixin, unittest.TestCase):\n def setUp(self) -> None:\n np.random.seed(42)\n torch.manual_seed(42)\n\n @staticmethod\n def init_mesh(\n num_meshes: int = 10,\n max_v: int = 100,\n max_f: int = 300,\n lists_to_tensors: bool = False,\n device: str = \"cpu\",\n requires_grad: bool = False,\n ):\n \"\"\"\n Function to generate a Meshes object of N meshes with\n random numbers of vertices and faces.\n\n Args:\n num_meshes: Number of meshes to generate.\n max_v: Max number of vertices per mesh.\n max_f: Max number of faces per mesh.\n lists_to_tensors: Determines whether the generated meshes should be\n constructed from lists (=False) or\n a tensor (=True) of faces/verts.\n\n Returns:\n Meshes object.\n \"\"\"\n device = torch.device(device)\n\n verts_list = []\n faces_list = []\n\n # Randomly generate numbers of faces and vertices in each mesh.\n if lists_to_tensors:\n # If we define faces/verts with tensors, f/v has to be the\n # same for each mesh in the batch.\n f = torch.randint(max_f, size=(1,), dtype=torch.int32)\n v = torch.randint(3, high=max_v, size=(1,), dtype=torch.int32)\n f = f.repeat(num_meshes)\n v = v.repeat(num_meshes)\n else:\n # For lists of faces and vertices, we can sample different v/f\n # per mesh.\n f = torch.randint(max_f, size=(num_meshes,), dtype=torch.int32)\n v = torch.randint(3, high=max_v, size=(num_meshes,), dtype=torch.int32)\n\n # Generate the actual vertices and faces.\n for i in range(num_meshes):\n verts = torch.rand(\n (v[i], 3),\n dtype=torch.float32,\n device=device,\n requires_grad=requires_grad,\n )\n faces = torch.randint(\n v[i], size=(f[i], 3), dtype=torch.int64, device=device\n )\n verts_list.append(verts)\n faces_list.append(faces)\n\n if lists_to_tensors:\n verts_list = torch.stack(verts_list)\n faces_list = torch.stack(faces_list)\n\n return Meshes(verts=verts_list, faces=faces_list)\n\n @staticmethod\n def init_simple_mesh(device: str = \"cpu\"):\n \"\"\"\n Returns a Meshes data structure of simple mesh examples.\n\n Returns:\n Meshes object.\n \"\"\"\n device = torch.device(device)\n\n verts = [\n torch.tensor(\n [[0.1, 0.3, 0.5], [0.5, 0.2, 0.1], [0.6, 0.8, 0.7]],\n dtype=torch.float32,\n device=device,\n ),\n torch.tensor(\n [[0.1, 0.3, 0.3], [0.6, 0.7, 0.8], [0.2, 0.3, 0.4], [0.1, 0.5, 0.3]],\n dtype=torch.float32,\n device=device,\n ),\n torch.tensor(\n [\n [0.7, 0.3, 0.6],\n [0.2, 0.4, 0.8],\n [0.9, 0.5, 0.2],\n [0.2, 0.3, 0.4],\n [0.9, 0.3, 0.8],\n ],\n dtype=torch.float32,\n device=device,\n ),\n ]\n faces = [\n torch.tensor([[0, 1, 2]], dtype=torch.int64, device=device),\n torch.tensor([[0, 1, 2], [1, 2, 3]], dtype=torch.int64, device=device),\n torch.tensor(\n [\n [1, 2, 0],\n [0, 1, 3],\n [2, 3, 1],\n [4, 3, 2],\n [4, 0, 1],\n [4, 3, 1],\n [4, 2, 1],\n ],\n dtype=torch.int64,\n device=device,\n ),\n ]\n return Meshes(verts=verts, faces=faces)\n\n def test_simple(self):\n mesh = TestMeshes.init_simple_mesh(\"cuda:0\")\n\n # Check that faces/verts per mesh are set in init:\n self.assertClose(mesh._num_faces_per_mesh.cpu(), torch.tensor([1, 2, 7]))\n self.assertClose(mesh._num_verts_per_mesh.cpu(), torch.tensor([3, 4, 5]))\n\n # Check computed tensors\n self.assertClose(\n mesh.verts_packed_to_mesh_idx().cpu(),\n torch.tensor([0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2]),\n )\n 
self.assertClose(\n mesh.mesh_to_verts_packed_first_idx().cpu(), torch.tensor([0, 3, 7])\n )\n self.assertClose(\n mesh.verts_padded_to_packed_idx().cpu(),\n torch.tensor([0, 1, 2, 5, 6, 7, 8, 10, 11, 12, 13, 14]),\n )\n self.assertClose(\n mesh.faces_packed_to_mesh_idx().cpu(),\n torch.tensor([0, 1, 1, 2, 2, 2, 2, 2, 2, 2]),\n )\n self.assertClose(\n mesh.mesh_to_faces_packed_first_idx().cpu(), torch.tensor([0, 1, 3])\n )\n self.assertClose(\n mesh.num_edges_per_mesh().cpu(), torch.tensor([3, 5, 10], dtype=torch.int32)\n )\n self.assertClose(\n mesh.mesh_to_edges_packed_first_idx().cpu(),\n torch.tensor([0, 3, 8], dtype=torch.int64),\n )\n\n def test_init_error(self):\n # Check if correct errors are raised when verts/faces are on\n # different devices\n\n mesh = TestMeshes.init_mesh(10, 10, 100)\n verts_list = mesh.verts_list() # all tensors on cpu\n verts_list = [\n v.to(\"cuda:0\") if random.uniform(0, 1) > 0.5 else v for v in verts_list\n ]\n faces_list = mesh.faces_list()\n\n with self.assertRaises(ValueError) as cm:\n Meshes(verts=verts_list, faces=faces_list)\n self.assertTrue(\"same device\" in cm.msg)\n\n verts_padded = mesh.verts_padded() # on cpu\n verts_padded = verts_padded.to(\"cuda:0\")\n faces_padded = mesh.faces_padded()\n\n with self.assertRaises(ValueError) as cm:\n Meshes(verts=verts_padded, faces=faces_padded)\n self.assertTrue(\"same device\" in cm.msg)\n\n def test_simple_random_meshes(self):\n\n # Define the test mesh object either as a list or tensor of faces/verts.\n for lists_to_tensors in (False, True):\n N = 10\n mesh = TestMeshes.init_mesh(N, 100, 300, lists_to_tensors=lists_to_tensors)\n verts_list = mesh.verts_list()\n faces_list = mesh.faces_list()\n\n # Check batch calculations.\n verts_padded = mesh.verts_padded()\n faces_padded = mesh.faces_padded()\n verts_per_mesh = mesh.num_verts_per_mesh()\n faces_per_mesh = mesh.num_faces_per_mesh()\n for n in range(N):\n v = verts_list[n].shape[0]\n f = faces_list[n].shape[0]\n self.assertClose(verts_padded[n, :v, :], verts_list[n])\n if verts_padded.shape[1] > v:\n self.assertTrue(verts_padded[n, v:, :].eq(0).all())\n self.assertClose(faces_padded[n, :f, :], faces_list[n])\n if faces_padded.shape[1] > f:\n self.assertTrue(faces_padded[n, f:, :].eq(-1).all())\n self.assertEqual(verts_per_mesh[n], v)\n self.assertEqual(faces_per_mesh[n], f)\n\n # Check compute packed.\n verts_packed = mesh.verts_packed()\n vert_to_mesh = mesh.verts_packed_to_mesh_idx()\n mesh_to_vert = mesh.mesh_to_verts_packed_first_idx()\n faces_packed = mesh.faces_packed()\n face_to_mesh = mesh.faces_packed_to_mesh_idx()\n mesh_to_face = mesh.mesh_to_faces_packed_first_idx()\n\n curv, curf = 0, 0\n for n in range(N):\n v = verts_list[n].shape[0]\n f = faces_list[n].shape[0]\n self.assertClose(verts_packed[curv : curv + v, :], verts_list[n])\n self.assertClose(faces_packed[curf : curf + f, :] - curv, faces_list[n])\n self.assertTrue(vert_to_mesh[curv : curv + v].eq(n).all())\n self.assertTrue(face_to_mesh[curf : curf + f].eq(n).all())\n self.assertTrue(mesh_to_vert[n] == curv)\n self.assertTrue(mesh_to_face[n] == curf)\n curv += v\n curf += f\n\n # Check compute edges and compare with numpy unique.\n edges = mesh.edges_packed().cpu().numpy()\n edge_to_mesh_idx = mesh.edges_packed_to_mesh_idx().cpu().numpy()\n num_edges_per_mesh = mesh.num_edges_per_mesh().cpu().numpy()\n\n npfaces_packed = mesh.faces_packed().cpu().numpy()\n e01 = npfaces_packed[:, [0, 1]]\n e12 = npfaces_packed[:, [1, 2]]\n e20 = npfaces_packed[:, [2, 0]]\n npedges = 
np.concatenate((e12, e20, e01), axis=0)\n npedges = np.sort(npedges, axis=1)\n\n unique_edges, unique_idx = np.unique(npedges, return_index=True, axis=0)\n self.assertTrue(np.allclose(edges, unique_edges))\n temp = face_to_mesh.cpu().numpy()\n temp = np.concatenate((temp, temp, temp), axis=0)\n edge_to_mesh = temp[unique_idx]\n self.assertTrue(np.allclose(edge_to_mesh_idx, edge_to_mesh))\n num_edges = np.bincount(edge_to_mesh, minlength=N)\n self.assertTrue(np.allclose(num_edges_per_mesh, num_edges))\n mesh_to_edges_packed_first_idx = (\n mesh.mesh_to_edges_packed_first_idx().cpu().numpy()\n )\n self.assertTrue(\n np.allclose(mesh_to_edges_packed_first_idx[1:], num_edges.cumsum()[:-1])\n )\n self.assertTrue(mesh_to_edges_packed_first_idx[0] == 0)\n\n def test_allempty(self):\n verts_list = []\n faces_list = []\n mesh = Meshes(verts=verts_list, faces=faces_list)\n self.assertEqual(len(mesh), 0)\n self.assertEqual(mesh.verts_padded().shape[0], 0)\n self.assertEqual(mesh.faces_padded().shape[0], 0)\n self.assertEqual(mesh.verts_packed().shape[0], 0)\n self.assertEqual(mesh.faces_packed().shape[0], 0)\n self.assertEqual(mesh.num_faces_per_mesh().shape[0], 0)\n self.assertEqual(mesh.num_verts_per_mesh().shape[0], 0)\n\n def test_empty(self):\n N, V, F = 10, 100, 300\n device = torch.device(\"cuda:0\")\n verts_list = []\n faces_list = []\n valid = torch.randint(2, size=(N,), dtype=torch.uint8, device=device)\n for n in range(N):\n if valid[n]:\n v = torch.randint(\n 3, high=V, size=(1,), dtype=torch.int32, device=device\n )[0]\n f = torch.randint(F, size=(1,), dtype=torch.int32, device=device)[0]\n verts = torch.rand((v, 3), dtype=torch.float32, device=device)\n faces = torch.randint(v, size=(f, 3), dtype=torch.int64, device=device)\n else:\n verts = torch.tensor([], dtype=torch.float32, device=device)\n faces = torch.tensor([], dtype=torch.int64, device=device)\n verts_list.append(verts)\n faces_list.append(faces)\n\n mesh = Meshes(verts=verts_list, faces=faces_list)\n verts_padded = mesh.verts_padded()\n faces_padded = mesh.faces_padded()\n verts_per_mesh = mesh.num_verts_per_mesh()\n faces_per_mesh = mesh.num_faces_per_mesh()\n for n in range(N):\n v = len(verts_list[n])\n f = len(faces_list[n])\n if v > 0:\n self.assertClose(verts_padded[n, :v, :], verts_list[n])\n if verts_padded.shape[1] > v:\n self.assertTrue(verts_padded[n, v:, :].eq(0).all())\n if f > 0:\n self.assertClose(faces_padded[n, :f, :], faces_list[n])\n if faces_padded.shape[1] > f:\n self.assertTrue(faces_padded[n, f:, :].eq(-1).all())\n self.assertTrue(verts_per_mesh[n] == v)\n self.assertTrue(faces_per_mesh[n] == f)\n\n def test_padding(self):\n N, V, F = 10, 100, 300\n device = torch.device(\"cuda:0\")\n verts, faces = [], []\n valid = torch.randint(2, size=(N,), dtype=torch.uint8, device=device)\n num_verts, num_faces = (\n torch.zeros(N, dtype=torch.int32),\n torch.zeros(N, dtype=torch.int32),\n )\n for n in range(N):\n verts.append(torch.rand((V, 3), dtype=torch.float32, device=device))\n this_faces = torch.full((F, 3), -1, dtype=torch.int64, device=device)\n if valid[n]:\n v = torch.randint(\n 3, high=V, size=(1,), dtype=torch.int32, device=device\n )[0]\n f = torch.randint(F, size=(1,), dtype=torch.int32, device=device)[0]\n this_faces[:f, :] = torch.randint(\n v, size=(f, 3), dtype=torch.int64, device=device\n )\n num_verts[n] = v\n num_faces[n] = f\n faces.append(this_faces)\n\n mesh = Meshes(verts=torch.stack(verts), faces=torch.stack(faces))\n\n # Check verts/faces per mesh are set correctly in init.\n 
self.assertListEqual(mesh._num_faces_per_mesh.tolist(), num_faces.tolist())\n self.assertListEqual(mesh._num_verts_per_mesh.tolist(), [V] * N)\n\n for n, (vv, ff) in enumerate(zip(mesh.verts_list(), mesh.faces_list())):\n self.assertClose(ff, faces[n][: num_faces[n]])\n self.assertClose(vv, verts[n])\n\n new_faces = [ff.clone() for ff in faces]\n v = torch.randint(3, high=V, size=(1,), dtype=torch.int32, device=device)[0]\n f = torch.randint(F - 10, size=(1,), dtype=torch.int32, device=device)[0]\n this_faces = torch.full((F, 3), -1, dtype=torch.int64, device=device)\n this_faces[10 : f + 10, :] = torch.randint(\n v, size=(f, 3), dtype=torch.int64, device=device\n )\n new_faces[3] = this_faces\n\n with self.assertRaisesRegex(ValueError, \"Padding of faces\"):\n Meshes(verts=torch.stack(verts), faces=torch.stack(new_faces))\n\n def test_clone(self):\n N = 5\n mesh = TestMeshes.init_mesh(N, 10, 100)\n for force in [0, 1]:\n if force:\n # force mesh to have computed attributes\n mesh.verts_packed()\n mesh.edges_packed()\n mesh.verts_padded()\n\n new_mesh = mesh.clone()\n\n # Modify tensors in both meshes.\n new_mesh._verts_list[0] = new_mesh._verts_list[0] * 5\n\n # Check cloned and original Meshes objects do not share tensors.\n self.assertFalse(\n torch.allclose(new_mesh._verts_list[0], mesh._verts_list[0])\n )\n self.assertSeparate(new_mesh.verts_packed(), mesh.verts_packed())\n self.assertSeparate(new_mesh.verts_padded(), mesh.verts_padded())\n self.assertSeparate(new_mesh.faces_packed(), mesh.faces_packed())\n self.assertSeparate(new_mesh.faces_padded(), mesh.faces_padded())\n self.assertSeparate(new_mesh.edges_packed(), mesh.edges_packed())\n\n def test_detach(self):\n N = 5\n mesh = TestMeshes.init_mesh(N, 10, 100, requires_grad=True)\n for force in [0, 1]:\n if force:\n # force mesh to have computed attributes\n mesh.verts_packed()\n mesh.edges_packed()\n mesh.verts_padded()\n\n new_mesh = mesh.detach()\n\n self.assertFalse(new_mesh.verts_packed().requires_grad)\n self.assertClose(new_mesh.verts_packed(), mesh.verts_packed())\n self.assertFalse(new_mesh.verts_padded().requires_grad)\n self.assertClose(new_mesh.verts_padded(), mesh.verts_padded())\n for v, newv in zip(mesh.verts_list(), new_mesh.verts_list()):\n self.assertFalse(newv.requires_grad)\n self.assertClose(newv, v)\n\n def test_laplacian_packed(self):\n def naive_laplacian_packed(meshes):\n verts_packed = meshes.verts_packed()\n edges_packed = meshes.edges_packed()\n V = verts_packed.shape[0]\n\n L = torch.zeros((V, V), dtype=torch.float32, device=meshes.device)\n for e in edges_packed:\n L[e[0], e[1]] = 1\n # symetric\n L[e[1], e[0]] = 1\n\n deg = L.sum(1).view(-1, 1)\n deg[deg > 0] = 1.0 / deg[deg > 0]\n L = L * deg\n diag = torch.eye(V, dtype=torch.float32, device=meshes.device)\n L.masked_fill_(diag > 0, -1)\n return L\n\n # Note that we don't test with random meshes for this case, as the\n # definition of Laplacian is defined for simple graphs (aka valid meshes)\n meshes = TestMeshes.init_simple_mesh(\"cuda:0\")\n\n lapl_naive = naive_laplacian_packed(meshes)\n lapl = meshes.laplacian_packed().to_dense()\n # check with naive\n self.assertClose(lapl, lapl_naive)\n\n def test_offset_verts(self):\n def naive_offset_verts(mesh, vert_offsets_packed):\n # new Meshes class\n new_verts_packed = mesh.verts_packed() + vert_offsets_packed\n new_verts_list = list(\n new_verts_packed.split(mesh.num_verts_per_mesh().tolist(), 0)\n )\n new_faces_list = [f.clone() for f in mesh.faces_list()]\n return Meshes(verts=new_verts_list, 
faces=new_faces_list)\n\n N = 5\n mesh = TestMeshes.init_mesh(N, 10, 100)\n all_v = mesh.verts_packed().size(0)\n verts_per_mesh = mesh.num_verts_per_mesh()\n for force in [0, 1]:\n if force:\n # force mesh to have computed attributes\n mesh._compute_packed(refresh=True)\n mesh._compute_padded()\n mesh._compute_edges_packed()\n mesh.verts_padded_to_packed_idx()\n mesh._compute_face_areas_normals(refresh=True)\n mesh._compute_vertex_normals(refresh=True)\n\n deform = torch.rand((all_v, 3), dtype=torch.float32, device=mesh.device)\n # new meshes class to hold the deformed mesh\n new_mesh_naive = naive_offset_verts(mesh, deform)\n\n new_mesh = mesh.offset_verts(deform)\n\n # check verts_list & faces_list\n verts_cumsum = torch.cumsum(verts_per_mesh, 0).tolist()\n verts_cumsum.insert(0, 0)\n for i in range(N):\n self.assertClose(\n new_mesh.verts_list()[i],\n mesh.verts_list()[i]\n + deform[verts_cumsum[i] : verts_cumsum[i + 1]],\n )\n self.assertClose(\n new_mesh.verts_list()[i], new_mesh_naive.verts_list()[i]\n )\n self.assertClose(mesh.faces_list()[i], new_mesh_naive.faces_list()[i])\n self.assertClose(\n new_mesh.faces_list()[i], new_mesh_naive.faces_list()[i]\n )\n # check faces and vertex normals\n self.assertClose(\n new_mesh.verts_normals_list()[i],\n new_mesh_naive.verts_normals_list()[i],\n )\n self.assertClose(\n new_mesh.faces_normals_list()[i],\n new_mesh_naive.faces_normals_list()[i],\n )\n\n # check padded & packed\n self.assertClose(new_mesh.faces_padded(), new_mesh_naive.faces_padded())\n self.assertClose(new_mesh.verts_padded(), new_mesh_naive.verts_padded())\n self.assertClose(new_mesh.faces_packed(), new_mesh_naive.faces_packed())\n self.assertClose(new_mesh.verts_packed(), new_mesh_naive.verts_packed())\n self.assertClose(new_mesh.edges_packed(), new_mesh_naive.edges_packed())\n self.assertClose(\n new_mesh.verts_packed_to_mesh_idx(),\n new_mesh_naive.verts_packed_to_mesh_idx(),\n )\n self.assertClose(\n new_mesh.mesh_to_verts_packed_first_idx(),\n new_mesh_naive.mesh_to_verts_packed_first_idx(),\n )\n self.assertClose(\n new_mesh.num_verts_per_mesh(), new_mesh_naive.num_verts_per_mesh()\n )\n self.assertClose(\n new_mesh.faces_packed_to_mesh_idx(),\n new_mesh_naive.faces_packed_to_mesh_idx(),\n )\n self.assertClose(\n new_mesh.mesh_to_faces_packed_first_idx(),\n new_mesh_naive.mesh_to_faces_packed_first_idx(),\n )\n self.assertClose(\n new_mesh.num_faces_per_mesh(), new_mesh_naive.num_faces_per_mesh()\n )\n self.assertClose(\n new_mesh.edges_packed_to_mesh_idx(),\n new_mesh_naive.edges_packed_to_mesh_idx(),\n )\n self.assertClose(\n new_mesh.verts_padded_to_packed_idx(),\n new_mesh_naive.verts_padded_to_packed_idx(),\n )\n self.assertTrue(all(new_mesh.valid == new_mesh_naive.valid))\n self.assertTrue(new_mesh.equisized == new_mesh_naive.equisized)\n\n # check face areas, normals and vertex normals\n self.assertClose(\n new_mesh.verts_normals_packed(), new_mesh_naive.verts_normals_packed()\n )\n self.assertClose(\n new_mesh.verts_normals_padded(), new_mesh_naive.verts_normals_padded()\n )\n self.assertClose(\n new_mesh.faces_normals_packed(), new_mesh_naive.faces_normals_packed()\n )\n self.assertClose(\n new_mesh.faces_normals_padded(), new_mesh_naive.faces_normals_padded()\n )\n self.assertClose(\n new_mesh.faces_areas_packed(), new_mesh_naive.faces_areas_packed()\n )\n self.assertClose(\n new_mesh.mesh_to_edges_packed_first_idx(),\n new_mesh_naive.mesh_to_edges_packed_first_idx(),\n )\n\n def test_scale_verts(self):\n def naive_scale_verts(mesh, scale):\n if not 
torch.is_tensor(scale):\n scale = torch.ones(len(mesh)).mul_(scale)\n # new Meshes class\n new_verts_list = [\n scale[i] * v.clone() for (i, v) in enumerate(mesh.verts_list())\n ]\n new_faces_list = [f.clone() for f in mesh.faces_list()]\n return Meshes(verts=new_verts_list, faces=new_faces_list)\n\n N = 5\n for test in [\"tensor\", \"scalar\"]:\n for force in (False, True):\n mesh = TestMeshes.init_mesh(N, 10, 100)\n if force:\n # force mesh to have computed attributes\n mesh.verts_packed()\n mesh.edges_packed()\n mesh.verts_padded()\n mesh._compute_face_areas_normals(refresh=True)\n mesh._compute_vertex_normals(refresh=True)\n\n if test == \"tensor\":\n scales = torch.rand(N)\n elif test == \"scalar\":\n scales = torch.rand(1)[0].item()\n new_mesh_naive = naive_scale_verts(mesh, scales)\n new_mesh = mesh.scale_verts(scales)\n for i in range(N):\n if test == \"tensor\":\n self.assertClose(\n scales[i] * mesh.verts_list()[i], new_mesh.verts_list()[i]\n )\n else:\n self.assertClose(\n scales * mesh.verts_list()[i], new_mesh.verts_list()[i]\n )\n self.assertClose(\n new_mesh.verts_list()[i], new_mesh_naive.verts_list()[i]\n )\n self.assertClose(\n mesh.faces_list()[i], new_mesh_naive.faces_list()[i]\n )\n self.assertClose(\n new_mesh.faces_list()[i], new_mesh_naive.faces_list()[i]\n )\n # check face and vertex normals\n self.assertClose(\n new_mesh.verts_normals_list()[i],\n new_mesh_naive.verts_normals_list()[i],\n )\n self.assertClose(\n new_mesh.faces_normals_list()[i],\n new_mesh_naive.faces_normals_list()[i],\n )\n\n # check padded & packed\n self.assertClose(new_mesh.faces_padded(), new_mesh_naive.faces_padded())\n self.assertClose(new_mesh.verts_padded(), new_mesh_naive.verts_padded())\n self.assertClose(new_mesh.faces_packed(), new_mesh_naive.faces_packed())\n self.assertClose(new_mesh.verts_packed(), new_mesh_naive.verts_packed())\n self.assertClose(new_mesh.edges_packed(), new_mesh_naive.edges_packed())\n self.assertClose(\n new_mesh.verts_packed_to_mesh_idx(),\n new_mesh_naive.verts_packed_to_mesh_idx(),\n )\n self.assertClose(\n new_mesh.mesh_to_verts_packed_first_idx(),\n new_mesh_naive.mesh_to_verts_packed_first_idx(),\n )\n self.assertClose(\n new_mesh.num_verts_per_mesh(), new_mesh_naive.num_verts_per_mesh()\n )\n self.assertClose(\n new_mesh.faces_packed_to_mesh_idx(),\n new_mesh_naive.faces_packed_to_mesh_idx(),\n )\n self.assertClose(\n new_mesh.mesh_to_faces_packed_first_idx(),\n new_mesh_naive.mesh_to_faces_packed_first_idx(),\n )\n self.assertClose(\n new_mesh.num_faces_per_mesh(), new_mesh_naive.num_faces_per_mesh()\n )\n self.assertClose(\n new_mesh.edges_packed_to_mesh_idx(),\n new_mesh_naive.edges_packed_to_mesh_idx(),\n )\n self.assertClose(\n new_mesh.verts_padded_to_packed_idx(),\n new_mesh_naive.verts_padded_to_packed_idx(),\n )\n self.assertTrue(all(new_mesh.valid == new_mesh_naive.valid))\n self.assertTrue(new_mesh.equisized == new_mesh_naive.equisized)\n\n # check face areas, normals and vertex normals\n self.assertClose(\n new_mesh.verts_normals_packed(),\n new_mesh_naive.verts_normals_packed(),\n )\n self.assertClose(\n new_mesh.verts_normals_padded(),\n new_mesh_naive.verts_normals_padded(),\n )\n self.assertClose(\n new_mesh.faces_normals_packed(),\n new_mesh_naive.faces_normals_packed(),\n )\n self.assertClose(\n new_mesh.faces_normals_padded(),\n new_mesh_naive.faces_normals_padded(),\n )\n self.assertClose(\n new_mesh.faces_areas_packed(), new_mesh_naive.faces_areas_packed()\n )\n self.assertClose(\n new_mesh.mesh_to_edges_packed_first_idx(),\n 
new_mesh_naive.mesh_to_edges_packed_first_idx(),\n )\n\n def test_extend_list(self):\n N = 10\n mesh = TestMeshes.init_mesh(5, 10, 100)\n for force in [0, 1]:\n if force:\n # force some computes to happen\n mesh._compute_packed(refresh=True)\n mesh._compute_padded()\n mesh._compute_edges_packed()\n mesh.verts_padded_to_packed_idx()\n new_mesh = mesh.extend(N)\n self.assertEqual(len(mesh) * 10, len(new_mesh))\n for i in range(len(mesh)):\n for n in range(N):\n self.assertClose(\n mesh.verts_list()[i], new_mesh.verts_list()[i * N + n]\n )\n self.assertClose(\n mesh.faces_list()[i], new_mesh.faces_list()[i * N + n]\n )\n self.assertTrue(mesh.valid[i] == new_mesh.valid[i * N + n])\n self.assertAllSeparate(\n mesh.verts_list()\n + new_mesh.verts_list()\n + mesh.faces_list()\n + new_mesh.faces_list()\n )\n self.assertTrue(new_mesh._verts_packed is None)\n self.assertTrue(new_mesh._faces_packed is None)\n self.assertTrue(new_mesh._verts_padded is None)\n self.assertTrue(new_mesh._faces_padded is None)\n self.assertTrue(new_mesh._edges_packed is None)\n\n with self.assertRaises(ValueError):\n mesh.extend(N=-1)\n\n def test_to(self):\n mesh = TestMeshes.init_mesh(5, 10, 100, device=torch.device(\"cuda:0\"))\n device = torch.device(\"cuda:1\")\n\n new_mesh = mesh.to(device)\n self.assertTrue(new_mesh.device == device)\n self.assertTrue(mesh.device == torch.device(\"cuda:0\"))\n\n def test_split_mesh(self):\n mesh = TestMeshes.init_mesh(5, 10, 100)\n split_sizes = [2, 3]\n split_meshes = mesh.split(split_sizes)\n self.assertTrue(len(split_meshes[0]) == 2)\n self.assertTrue(\n split_meshes[0].verts_list()\n == [mesh.get_mesh_verts_faces(0)[0], mesh.get_mesh_verts_faces(1)[0]]\n )\n self.assertTrue(len(split_meshes[1]) == 3)\n self.assertTrue(\n split_meshes[1].verts_list()\n == [\n mesh.get_mesh_verts_faces(2)[0],\n mesh.get_mesh_verts_faces(3)[0],\n mesh.get_mesh_verts_faces(4)[0],\n ]\n )\n\n split_sizes = [2, 0.3]\n with self.assertRaises(ValueError):\n mesh.split(split_sizes)\n\n def test_update_padded(self):\n # Define the test mesh object either as a list or tensor of faces/verts.\n N = 10\n for lists_to_tensors in (False, True):\n for force in (True, False):\n mesh = TestMeshes.init_mesh(\n N, 100, 300, lists_to_tensors=lists_to_tensors\n )\n num_verts_per_mesh = mesh.num_verts_per_mesh()\n if force:\n # force mesh to have computed attributes\n mesh.verts_packed()\n mesh.edges_packed()\n mesh.laplacian_packed()\n mesh.faces_areas_packed()\n\n new_verts = torch.rand((mesh._N, mesh._V, 3), device=mesh.device)\n new_verts_list = [\n new_verts[i, : num_verts_per_mesh[i]] for i in range(N)\n ]\n new_mesh = mesh.update_padded(new_verts)\n\n # check the attributes assigned at construction time\n self.assertEqual(new_mesh._N, mesh._N)\n self.assertEqual(new_mesh._F, mesh._F)\n self.assertEqual(new_mesh._V, mesh._V)\n self.assertEqual(new_mesh.equisized, mesh.equisized)\n self.assertTrue(all(new_mesh.valid == mesh.valid))\n self.assertNotSeparate(\n new_mesh.num_verts_per_mesh(), mesh.num_verts_per_mesh()\n )\n self.assertClose(\n new_mesh.num_verts_per_mesh(), mesh.num_verts_per_mesh()\n )\n self.assertNotSeparate(\n new_mesh.num_faces_per_mesh(), mesh.num_faces_per_mesh()\n )\n self.assertClose(\n new_mesh.num_faces_per_mesh(), mesh.num_faces_per_mesh()\n )\n\n # check that the following attributes are not assigned\n self.assertIsNone(new_mesh._verts_list)\n self.assertIsNone(new_mesh._faces_areas_packed)\n self.assertIsNone(new_mesh._faces_normals_packed)\n 
self.assertIsNone(new_mesh._verts_normals_packed)\n\n check_tensors = [\n \"_faces_packed\",\n \"_verts_packed_to_mesh_idx\",\n \"_faces_packed_to_mesh_idx\",\n \"_mesh_to_verts_packed_first_idx\",\n \"_mesh_to_faces_packed_first_idx\",\n \"_edges_packed\",\n \"_edges_packed_to_mesh_idx\",\n \"_mesh_to_edges_packed_first_idx\",\n \"_faces_packed_to_edges_packed\",\n \"_num_edges_per_mesh\",\n ]\n for k in check_tensors:\n v = getattr(new_mesh, k)\n if not force:\n self.assertIsNone(v)\n else:\n v_old = getattr(mesh, k)\n self.assertNotSeparate(v, v_old)\n self.assertClose(v, v_old)\n\n # check verts/faces padded\n self.assertClose(new_mesh.verts_padded(), new_verts)\n self.assertNotSeparate(new_mesh.verts_padded(), new_verts)\n self.assertClose(new_mesh.faces_padded(), mesh.faces_padded())\n self.assertNotSeparate(new_mesh.faces_padded(), mesh.faces_padded())\n # check verts/faces list\n for i in range(N):\n self.assertNotSeparate(\n new_mesh.faces_list()[i], mesh.faces_list()[i]\n )\n self.assertClose(new_mesh.faces_list()[i], mesh.faces_list()[i])\n self.assertSeparate(new_mesh.verts_list()[i], mesh.verts_list()[i])\n self.assertClose(new_mesh.verts_list()[i], new_verts_list[i])\n # check verts/faces packed\n self.assertClose(new_mesh.verts_packed(), torch.cat(new_verts_list))\n self.assertSeparate(new_mesh.verts_packed(), mesh.verts_packed())\n self.assertClose(new_mesh.faces_packed(), mesh.faces_packed())\n # check pad_to_packed\n self.assertClose(\n new_mesh.verts_padded_to_packed_idx(),\n mesh.verts_padded_to_packed_idx(),\n )\n # check edges\n self.assertClose(new_mesh.edges_packed(), mesh.edges_packed())\n\n def test_get_mesh_verts_faces(self):\n device = torch.device(\"cuda:0\")\n verts_list = []\n faces_list = []\n verts_faces = [(10, 100), (20, 200)]\n for (V, F) in verts_faces:\n verts = torch.rand((V, 3), dtype=torch.float32, device=device)\n faces = torch.randint(V, size=(F, 3), dtype=torch.int64, device=device)\n verts_list.append(verts)\n faces_list.append(faces)\n\n mesh = Meshes(verts=verts_list, faces=faces_list)\n\n for i, (V, F) in enumerate(verts_faces):\n verts, faces = mesh.get_mesh_verts_faces(i)\n self.assertTrue(len(verts) == V)\n self.assertClose(verts, verts_list[i])\n self.assertTrue(len(faces) == F)\n self.assertClose(faces, faces_list[i])\n\n with self.assertRaises(ValueError):\n mesh.get_mesh_verts_faces(5)\n with self.assertRaises(ValueError):\n mesh.get_mesh_verts_faces(0.2)\n\n def test_get_bounding_boxes(self):\n device = torch.device(\"cuda:0\")\n verts_list = []\n faces_list = []\n for (V, F) in [(10, 100)]:\n verts = torch.rand((V, 3), dtype=torch.float32, device=device)\n faces = torch.randint(V, size=(F, 3), dtype=torch.int64, device=device)\n verts_list.append(verts)\n faces_list.append(faces)\n\n mins = torch.min(verts, dim=0)[0]\n maxs = torch.max(verts, dim=0)[0]\n bboxes_gt = torch.stack([mins, maxs], dim=1).unsqueeze(0)\n mesh = Meshes(verts=verts_list, faces=faces_list)\n bboxes = mesh.get_bounding_boxes()\n self.assertClose(bboxes_gt, bboxes)\n\n def test_padded_to_packed_idx(self):\n device = torch.device(\"cuda:0\")\n verts_list = []\n faces_list = []\n verts_faces = [(10, 100), (20, 200), (30, 300)]\n for (V, F) in verts_faces:\n verts = torch.rand((V, 3), dtype=torch.float32, device=device)\n faces = torch.randint(V, size=(F, 3), dtype=torch.int64, device=device)\n verts_list.append(verts)\n faces_list.append(faces)\n\n mesh = Meshes(verts=verts_list, faces=faces_list)\n verts_padded_to_packed_idx = mesh.verts_padded_to_packed_idx()\n 
verts_packed = mesh.verts_packed()\n verts_padded = mesh.verts_padded()\n verts_padded_flat = verts_padded.view(-1, 3)\n\n self.assertClose(verts_padded_flat[verts_padded_to_packed_idx], verts_packed)\n\n idx = verts_padded_to_packed_idx.view(-1, 1).expand(-1, 3)\n self.assertClose(verts_padded_flat.gather(0, idx), verts_packed)\n\n def test_getitem(self):\n device = torch.device(\"cuda:0\")\n verts_list = []\n faces_list = []\n verts_faces = [(10, 100), (20, 200), (30, 300)]\n for (V, F) in verts_faces:\n verts = torch.rand((V, 3), dtype=torch.float32, device=device)\n faces = torch.randint(V, size=(F, 3), dtype=torch.int64, device=device)\n verts_list.append(verts)\n faces_list.append(faces)\n\n mesh = Meshes(verts=verts_list, faces=faces_list)\n\n def check_equal(selected, indices):\n for selectedIdx, index in enumerate(indices):\n self.assertClose(\n selected.verts_list()[selectedIdx], mesh.verts_list()[index]\n )\n self.assertClose(\n selected.faces_list()[selectedIdx], mesh.faces_list()[index]\n )\n\n # int index\n index = 1\n mesh_selected = mesh[index]\n self.assertTrue(len(mesh_selected) == 1)\n check_equal(mesh_selected, [index])\n\n # list index\n index = [1, 2]\n mesh_selected = mesh[index]\n self.assertTrue(len(mesh_selected) == len(index))\n check_equal(mesh_selected, index)\n\n # slice index\n index = slice(0, 2, 1)\n mesh_selected = mesh[index]\n check_equal(mesh_selected, [0, 1])\n\n # bool tensor\n index = torch.tensor([1, 0, 1], dtype=torch.bool, device=device)\n mesh_selected = mesh[index]\n self.assertTrue(len(mesh_selected) == index.sum())\n check_equal(mesh_selected, [0, 2])\n\n # int tensor\n index = torch.tensor([1, 2], dtype=torch.int64, device=device)\n mesh_selected = mesh[index]\n self.assertTrue(len(mesh_selected) == index.numel())\n check_equal(mesh_selected, index.tolist())\n\n # invalid index\n index = torch.tensor([1, 0, 1], dtype=torch.float32, device=device)\n with self.assertRaises(IndexError):\n mesh_selected = mesh[index]\n index = 1.2\n with self.assertRaises(IndexError):\n mesh_selected = mesh[index]\n\n def test_compute_faces_areas(self):\n verts = torch.tensor(\n [\n [0.0, 0.0, 0.0],\n [0.5, 0.0, 0.0],\n [0.5, 0.5, 0.0],\n [0.5, 0.0, 0.0],\n [0.25, 0.8, 0.0],\n ],\n dtype=torch.float32,\n )\n faces = torch.tensor([[0, 1, 2], [0, 3, 4]], dtype=torch.int64)\n mesh = Meshes(verts=[verts], faces=[faces])\n\n face_areas = mesh.faces_areas_packed()\n expected_areas = torch.tensor([0.125, 0.2])\n self.assertClose(face_areas, expected_areas)\n\n def test_compute_normals(self):\n\n # Simple case with one mesh where normals point in either +/- ijk\n verts = torch.tensor(\n [\n [0.1, 0.3, 0.0],\n [0.5, 0.2, 0.0],\n [0.6, 0.8, 0.0],\n [0.0, 0.3, 0.2],\n [0.0, 0.2, 0.5],\n [0.0, 0.8, 0.7],\n [0.5, 0.0, 0.2],\n [0.6, 0.0, 0.5],\n [0.8, 0.0, 0.7],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n ],\n dtype=torch.float32,\n )\n faces = torch.tensor(\n [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]], dtype=torch.int64\n )\n mesh = Meshes(verts=[verts], faces=[faces])\n\n verts_normals_expected = torch.tensor(\n [\n [0.0, 0.0, 1.0],\n [0.0, 0.0, 1.0],\n [0.0, 0.0, 1.0],\n [-1.0, 0.0, 0.0],\n [-1.0, 0.0, 0.0],\n [-1.0, 0.0, 0.0],\n [0.0, 1.0, 0.0],\n [0.0, 1.0, 0.0],\n [0.0, 1.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n ]\n )\n faces_normals_expected = verts_normals_expected[[0, 3, 6, 9], :]\n\n self.assertTrue(\n torch.allclose(mesh.verts_normals_list()[0], verts_normals_expected)\n )\n self.assertTrue(\n 
torch.allclose(mesh.faces_normals_list()[0], faces_normals_expected)\n )\n self.assertTrue(\n torch.allclose(mesh.verts_normals_packed(), verts_normals_expected)\n )\n self.assertTrue(\n torch.allclose(mesh.faces_normals_packed(), faces_normals_expected)\n )\n\n # Multiple meshes in the batch with equal sized meshes\n meshes_extended = mesh.extend(3)\n for m in meshes_extended.verts_normals_list():\n self.assertClose(m, verts_normals_expected)\n for f in meshes_extended.faces_normals_list():\n self.assertClose(f, faces_normals_expected)\n\n # Multiple meshes in the batch with different sized meshes\n # Check padded and packed normals are the correct sizes.\n verts2 = torch.tensor(\n [\n [0.1, 0.3, 0.0],\n [0.5, 0.2, 0.0],\n [0.6, 0.8, 0.0],\n [0.0, 0.3, 0.2],\n [0.0, 0.2, 0.5],\n [0.0, 0.8, 0.7],\n ],\n dtype=torch.float32,\n )\n faces2 = torch.tensor([[0, 1, 2], [3, 4, 5]], dtype=torch.int64)\n verts_list = [verts, verts2]\n faces_list = [faces, faces2]\n meshes = Meshes(verts=verts_list, faces=faces_list)\n verts_normals_padded = meshes.verts_normals_padded()\n faces_normals_padded = meshes.faces_normals_padded()\n\n for n in range(len(meshes)):\n v = verts_list[n].shape[0]\n f = faces_list[n].shape[0]\n if verts_normals_padded.shape[1] > v:\n self.assertTrue(verts_normals_padded[n, v:, :].eq(0).all())\n self.assertTrue(\n torch.allclose(\n verts_normals_padded[n, :v, :].view(-1, 3),\n verts_normals_expected[:v, :],\n )\n )\n if faces_normals_padded.shape[1] > f:\n self.assertTrue(faces_normals_padded[n, f:, :].eq(0).all())\n self.assertTrue(\n torch.allclose(\n faces_normals_padded[n, :f, :].view(-1, 3),\n faces_normals_expected[:f, :],\n )\n )\n\n verts_normals_packed = meshes.verts_normals_packed()\n faces_normals_packed = meshes.faces_normals_packed()\n self.assertTrue(\n list(verts_normals_packed.shape) == [verts.shape[0] + verts2.shape[0], 3]\n )\n self.assertTrue(\n list(faces_normals_packed.shape) == [faces.shape[0] + faces2.shape[0], 3]\n )\n\n # Single mesh where two faces share one vertex so the normal is\n # the weighted sum of the two face normals.\n verts = torch.tensor(\n [\n [0.1, 0.3, 0.0],\n [0.5, 0.2, 0.0],\n [0.0, 0.3, 0.2], # vertex is shared between two faces\n [0.0, 0.2, 0.5],\n [0.0, 0.8, 0.7],\n ],\n dtype=torch.float32,\n )\n faces = torch.tensor([[0, 1, 2], [2, 3, 4]], dtype=torch.int64)\n mesh = Meshes(verts=[verts], faces=[faces])\n\n verts_normals_expected = torch.tensor(\n [\n [-0.2408, -0.9631, -0.1204],\n [-0.2408, -0.9631, -0.1204],\n [-0.9389, -0.3414, -0.0427],\n [-1.0000, 0.0000, 0.0000],\n [-1.0000, 0.0000, 0.0000],\n ]\n )\n faces_normals_expected = torch.tensor(\n [[-0.2408, -0.9631, -0.1204], [-1.0000, 0.0000, 0.0000]]\n )\n self.assertTrue(\n torch.allclose(\n mesh.verts_normals_list()[0], verts_normals_expected, atol=4e-5\n )\n )\n self.assertTrue(\n torch.allclose(\n mesh.faces_normals_list()[0], faces_normals_expected, atol=4e-5\n )\n )\n\n # Check empty mesh has empty normals\n meshes = Meshes(verts=[], faces=[])\n self.assertEqual(meshes.verts_normals_packed().shape[0], 0)\n self.assertEqual(meshes.verts_normals_padded().shape[0], 0)\n self.assertEqual(meshes.verts_normals_list(), [])\n self.assertEqual(meshes.faces_normals_packed().shape[0], 0)\n self.assertEqual(meshes.faces_normals_padded().shape[0], 0)\n self.assertEqual(meshes.faces_normals_list(), [])\n\n def test_compute_faces_areas_cpu_cuda(self):\n num_meshes = 10\n max_v = 100\n max_f = 300\n mesh_cpu = TestMeshes.init_mesh(num_meshes, max_v, max_f, device=\"cpu\")\n device = 
torch.device(\"cuda:0\")\n mesh_cuda = mesh_cpu.to(device)\n\n face_areas_cpu = mesh_cpu.faces_areas_packed()\n face_normals_cpu = mesh_cpu.faces_normals_packed()\n face_areas_cuda = mesh_cuda.faces_areas_packed()\n face_normals_cuda = mesh_cuda.faces_normals_packed()\n self.assertClose(face_areas_cpu, face_areas_cuda.cpu(), atol=1e-6)\n # because of the normalization of the normals with arbitrarily small values,\n # normals can become unstable. Thus only compare normals, for faces\n # with areas > eps=1e-6\n nonzero = face_areas_cpu > 1e-6\n self.assertClose(\n face_normals_cpu[nonzero], face_normals_cuda.cpu()[nonzero], atol=1e-6\n )\n\n @staticmethod\n def compute_packed_with_init(\n num_meshes: int = 10, max_v: int = 100, max_f: int = 300, device: str = \"cpu\"\n ):\n mesh = TestMeshes.init_mesh(num_meshes, max_v, max_f, device=device)\n torch.cuda.synchronize()\n\n def compute_packed():\n mesh._compute_packed(refresh=True)\n torch.cuda.synchronize()\n\n return compute_packed\n\n @staticmethod\n def compute_padded_with_init(\n num_meshes: int = 10, max_v: int = 100, max_f: int = 300, device: str = \"cpu\"\n ):\n mesh = TestMeshes.init_mesh(num_meshes, max_v, max_f, device=device)\n torch.cuda.synchronize()\n\n def compute_padded():\n mesh._compute_padded(refresh=True)\n torch.cuda.synchronize()\n\n return compute_padded\n", "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\nimport math\nimport warnings\nfrom typing import List, Optional, Union\n\nimport torch\n\nfrom .rotation_conversions import _axis_angle_rotation\n\n\nclass Transform3d:\n \"\"\"\n A Transform3d object encapsulates a batch of N 3D transformations, and knows\n how to transform points and normal vectors. Suppose that t is a Transform3d;\n then we can do the following:\n\n .. code-block:: python\n\n N = len(t)\n points = torch.randn(N, P, 3)\n normals = torch.randn(N, P, 3)\n points_transformed = t.transform_points(points) # => (N, P, 3)\n normals_transformed = t.transform_normals(normals) # => (N, P, 3)\n\n\n BROADCASTING\n Transform3d objects supports broadcasting. Suppose that t1 and tN are\n Transform3D objects with len(t1) == 1 and len(tN) == N respectively. Then we\n can broadcast transforms like this:\n\n .. code-block:: python\n\n t1.transform_points(torch.randn(P, 3)) # => (P, 3)\n t1.transform_points(torch.randn(1, P, 3)) # => (1, P, 3)\n t1.transform_points(torch.randn(M, P, 3)) # => (M, P, 3)\n tN.transform_points(torch.randn(P, 3)) # => (N, P, 3)\n tN.transform_points(torch.randn(1, P, 3)) # => (N, P, 3)\n\n\n COMBINING TRANSFORMS\n Transform3d objects can be combined in two ways: composing and stacking.\n Composing is function composition. Given Transform3d objects t1, t2, t3,\n the following all compute the same thing:\n\n .. code-block:: python\n\n y1 = t3.transform_points(t2.transform_points(t1.transform_points(x)))\n y2 = t1.compose(t2).compose(t3).transform_points(x)\n y3 = t1.compose(t2, t3).transform_points(x)\n\n\n Composing transforms should broadcast.\n\n .. code-block:: python\n\n if len(t1) == 1 and len(t2) == N, then len(t1.compose(t2)) == N.\n\n We can also stack a sequence of Transform3d objects, which represents\n composition along the batch dimension; then the following should compute the\n same thing.\n\n .. 
code-block:: python\n\n N, M = len(tN), len(tM)\n xN = torch.randn(N, P, 3)\n xM = torch.randn(M, P, 3)\n y1 = torch.cat([tN.transform_points(xN), tM.transform_points(xM)], dim=0)\n y2 = tN.stack(tM).transform_points(torch.cat([xN, xM], dim=0))\n\n BUILDING TRANSFORMS\n We provide convenience methods for easily building Transform3d objects\n as compositions of basic transforms.\n\n .. code-block:: python\n\n # Scale by 0.5, then translate by (1, 2, 3)\n t1 = Transform3d().scale(0.5).translate(1, 2, 3)\n\n # Scale each axis by a different amount, then translate, then scale\n t2 = Transform3d().scale(1, 3, 3).translate(2, 3, 1).scale(2.0)\n\n t3 = t1.compose(t2)\n tN = t1.stack(t3, t3)\n\n\n BACKPROP THROUGH TRANSFORMS\n When building transforms, we can also parameterize them by Torch tensors;\n in this case we can backprop through the construction and application of\n Transform objects, so they could be learned via gradient descent or\n predicted by a neural network.\n\n .. code-block:: python\n\n s1_params = torch.randn(N, requires_grad=True)\n t_params = torch.randn(N, 3, requires_grad=True)\n s2_params = torch.randn(N, 3, requires_grad=True)\n\n t = Transform3d().scale(s1_params).translate(t_params).scale(s2_params)\n x = torch.randn(N, 3)\n y = t.transform_points(x)\n loss = compute_loss(y)\n loss.backward()\n\n with torch.no_grad():\n s1_params -= lr * s1_params.grad\n t_params -= lr * t_params.grad\n s2_params -= lr * s2_params.grad\n\n CONVENTIONS\n We adopt a right-hand coordinate system, meaning that rotation about an axis\n with a positive angle results in a counter clockwise rotation.\n\n This class assumes that transformations are applied on inputs which\n are row vectors. The internal representation of the Nx4x4 transformation\n matrix is of the form:\n\n .. code-block:: python\n\n M = [\n [Rxx, Ryx, Rzx, 0],\n [Rxy, Ryy, Rzy, 0],\n [Rxz, Ryz, Rzz, 0],\n [Tx, Ty, Tz, 1],\n ]\n\n To apply the transformation to points which are row vectors, the M matrix\n can be pre multiplied by the points:\n\n .. 
code-block:: python\n\n points = [[0, 1, 2]] # (1 x 3) xyz coordinates of a point\n transformed_points = points * M\n\n \"\"\"\n\n def __init__(\n self,\n dtype: torch.dtype = torch.float32,\n device=\"cpu\",\n matrix: Optional[torch.Tensor] = None,\n ):\n \"\"\"\n Args:\n dtype: The data type of the transformation matrix.\n to be used if `matrix = None`.\n device: The device for storing the implemented transformation.\n If `matrix != None`, uses the device of input `matrix`.\n matrix: A tensor of shape (4, 4) or of shape (minibatch, 4, 4)\n representing the 4x4 3D transformation matrix.\n If `None`, initializes with identity using\n the specified `device` and `dtype`.\n \"\"\"\n\n if matrix is None:\n self._matrix = torch.eye(4, dtype=dtype, device=device).view(1, 4, 4)\n else:\n if matrix.ndim not in (2, 3):\n raise ValueError('\"matrix\" has to be a 2- or a 3-dimensional tensor.')\n if matrix.shape[-2] != 4 or matrix.shape[-1] != 4:\n raise ValueError(\n '\"matrix\" has to be a tensor of shape (minibatch, 4, 4)'\n )\n # set the device from matrix\n device = matrix.device\n self._matrix = matrix.view(-1, 4, 4)\n\n self._transforms = [] # store transforms to compose\n self._lu = None\n self.device = device\n\n def __len__(self):\n return self.get_matrix().shape[0]\n\n def __getitem__(\n self, index: Union[int, List[int], slice, torch.Tensor]\n ) -> \"Transform3d\":\n \"\"\"\n Args:\n index: Specifying the index of the transform to retrieve.\n Can be an int, slice, list of ints, boolean, long tensor.\n Supports negative indices.\n\n Returns:\n Transform3d object with selected transforms. The tensors are not cloned.\n \"\"\"\n if isinstance(index, int):\n index = [index]\n return self.__class__(matrix=self.get_matrix()[index])\n\n def compose(self, *others):\n \"\"\"\n Return a new Transform3d with the tranforms to compose stored as\n an internal list.\n\n Args:\n *others: Any number of Transform3d objects\n\n Returns:\n A new Transform3d with the stored transforms\n \"\"\"\n out = Transform3d(device=self.device)\n out._matrix = self._matrix.clone()\n for other in others:\n if not isinstance(other, Transform3d):\n msg = \"Only possible to compose Transform3d objects; got %s\"\n raise ValueError(msg % type(other))\n out._transforms = self._transforms + list(others)\n return out\n\n def get_matrix(self):\n \"\"\"\n Return a matrix which is the result of composing this transform\n with others stored in self.transforms. Where necessary transforms\n are broadcast against each other.\n For example, if self.transforms contains transforms t1, t2, and t3, and\n given a set of points x, the following should be true:\n\n .. code-block:: python\n\n y1 = t1.compose(t2, t3).transform(x)\n y2 = t3.transform(t2.transform(t1.transform(x)))\n y1.get_matrix() == y2.get_matrix()\n\n Returns:\n A transformation matrix representing the composed inputs.\n \"\"\"\n composed_matrix = self._matrix.clone()\n if len(self._transforms) > 0:\n for other in self._transforms:\n other_matrix = other.get_matrix()\n composed_matrix = _broadcast_bmm(composed_matrix, other_matrix)\n return composed_matrix\n\n def _get_matrix_inverse(self):\n \"\"\"\n Return the inverse of self._matrix.\n \"\"\"\n return torch.inverse(self._matrix)\n\n def inverse(self, invert_composed: bool = False):\n \"\"\"\n Returns a new Transform3D object that represents an inverse of the\n current transformation.\n\n Args:\n invert_composed:\n - True: First compose the list of stored transformations\n and then apply inverse to the result. 
This is\n potentially slower for classes of transformations\n with inverses that can be computed efficiently\n (e.g. rotations and translations).\n - False: Invert the individual stored transformations\n independently without composing them.\n\n Returns:\n A new Transform3D object containing the inverse of the original\n transformation.\n \"\"\"\n\n tinv = Transform3d(device=self.device)\n\n if invert_composed:\n # first compose then invert\n tinv._matrix = torch.inverse(self.get_matrix())\n else:\n # self._get_matrix_inverse() implements efficient inverse\n # of self._matrix\n i_matrix = self._get_matrix_inverse()\n\n # 2 cases:\n if len(self._transforms) > 0:\n # a) Either we have a non-empty list of transforms:\n # Here we take self._matrix and append its inverse at the\n # end of the reverted _transforms list. After composing\n # the transformations with get_matrix(), this correctly\n # right-multiplies by the inverse of self._matrix\n # at the end of the composition.\n tinv._transforms = [t.inverse() for t in reversed(self._transforms)]\n last = Transform3d(device=self.device)\n last._matrix = i_matrix\n tinv._transforms.append(last)\n else:\n # b) Or there are no stored transformations\n # we just set inverted matrix\n tinv._matrix = i_matrix\n\n return tinv\n\n def stack(self, *others):\n transforms = [self] + list(others)\n matrix = torch.cat([t._matrix for t in transforms], dim=0)\n out = Transform3d()\n out._matrix = matrix\n return out\n\n def transform_points(self, points, eps: Optional[float] = None):\n \"\"\"\n Use this transform to transform a set of 3D points. Assumes row major\n ordering of the input points.\n\n Args:\n points: Tensor of shape (P, 3) or (N, P, 3)\n eps: If eps!=None, the argument is used to clamp the\n last coordinate before performing the final division.\n The clamping corresponds to:\n last_coord := (last_coord.sign() + (last_coord==0)) *\n torch.clamp(last_coord.abs(), eps),\n i.e. 
the last coordinates that are exactly 0 will\n be clamped to +eps.\n\n Returns:\n points_out: points of shape (N, P, 3) or (P, 3) depending\n on the dimensions of the transform\n \"\"\"\n points_batch = points.clone()\n if points_batch.dim() == 2:\n points_batch = points_batch[None] # (P, 3) -> (1, P, 3)\n if points_batch.dim() != 3:\n msg = \"Expected points to have dim = 2 or dim = 3: got shape %r\"\n raise ValueError(msg % repr(points.shape))\n\n N, P, _3 = points_batch.shape\n ones = torch.ones(N, P, 1, dtype=points.dtype, device=points.device)\n points_batch = torch.cat([points_batch, ones], dim=2)\n\n composed_matrix = self.get_matrix()\n points_out = _broadcast_bmm(points_batch, composed_matrix)\n denom = points_out[..., 3:] # denominator\n if eps is not None:\n denom_sign = denom.sign() + (denom == 0.0).type_as(denom)\n denom = denom_sign * torch.clamp(denom.abs(), eps)\n points_out = points_out[..., :3] / denom\n\n # When transform is (1, 4, 4) and points is (P, 3) return\n # points_out of shape (P, 3)\n if points_out.shape[0] == 1 and points.dim() == 2:\n points_out = points_out.reshape(points.shape)\n\n return points_out\n\n def transform_normals(self, normals):\n \"\"\"\n Use this transform to transform a set of normal vectors.\n\n Args:\n normals: Tensor of shape (P, 3) or (N, P, 3)\n\n Returns:\n normals_out: Tensor of shape (P, 3) or (N, P, 3) depending\n on the dimensions of the transform\n \"\"\"\n if normals.dim() not in [2, 3]:\n msg = \"Expected normals to have dim = 2 or dim = 3: got shape %r\"\n raise ValueError(msg % (normals.shape,))\n composed_matrix = self.get_matrix()\n\n # TODO: inverse is bad! Solve a linear system instead\n mat = composed_matrix[:, :3, :3]\n normals_out = _broadcast_bmm(normals, mat.transpose(1, 2).inverse())\n\n # This doesn't pass unit tests. TODO investigate further\n # if self._lu is None:\n # self._lu = self._matrix[:, :3, :3].transpose(1, 2).lu()\n # normals_out = normals.lu_solve(*self._lu)\n\n # When transform is (1, 4, 4) and normals is (P, 3) return\n # normals_out of shape (P, 3)\n if normals_out.shape[0] == 1 and normals.dim() == 2:\n normals_out = normals_out.reshape(normals.shape)\n\n return normals_out\n\n def translate(self, *args, **kwargs):\n return self.compose(Translate(device=self.device, *args, **kwargs))\n\n def scale(self, *args, **kwargs):\n return self.compose(Scale(device=self.device, *args, **kwargs))\n\n def rotate(self, *args, **kwargs):\n return self.compose(Rotate(device=self.device, *args, **kwargs))\n\n def rotate_axis_angle(self, *args, **kwargs):\n return self.compose(RotateAxisAngle(device=self.device, *args, **kwargs))\n\n def clone(self):\n \"\"\"\n Deep copy of Transforms object. All internal tensors are cloned\n individually.\n\n Returns:\n new Transforms object.\n \"\"\"\n other = Transform3d(device=self.device)\n if self._lu is not None:\n other._lu = [elem.clone() for elem in self._lu]\n other._matrix = self._matrix.clone()\n other._transforms = [t.clone() for t in self._transforms]\n return other\n\n def to(self, device, copy: bool = False, dtype=None):\n \"\"\"\n Match functionality of torch.Tensor.to()\n If copy = True or the self Tensor is on a different device, the\n returned tensor is a copy of self with the desired torch.device.\n If copy = False and the self Tensor already has the correct torch.device,\n then self is returned.\n\n Args:\n device: Device id for the new tensor.\n copy: Boolean indicator whether or not to clone self. 
Default False.\n dtype: If not None, casts the internal tensor variables\n to a given torch.dtype.\n\n Returns:\n Transform3d object.\n \"\"\"\n if not copy and self.device == device:\n return self\n other = self.clone()\n if self.device != device:\n other.device = device\n other._matrix = self._matrix.to(device=device, dtype=dtype)\n for t in other._transforms:\n t.to(device, copy=copy, dtype=dtype)\n return other\n\n def cpu(self):\n return self.to(torch.device(\"cpu\"))\n\n def cuda(self):\n return self.to(torch.device(\"cuda\"))\n\n\nclass Translate(Transform3d):\n def __init__(self, x, y=None, z=None, dtype=torch.float32, device=\"cpu\"):\n \"\"\"\n Create a new Transform3d representing 3D translations.\n\n Option I: Translate(xyz, dtype=torch.float32, device='cpu')\n xyz should be a tensor of shape (N, 3)\n\n Option II: Translate(x, y, z, dtype=torch.float32, device='cpu')\n Here x, y, and z will be broadcast against each other and\n concatenated to form the translation. Each can be:\n - A python scalar\n - A torch scalar\n - A 1D torch tensor\n \"\"\"\n super().__init__(device=device)\n xyz = _handle_input(x, y, z, dtype, device, \"Translate\")\n N = xyz.shape[0]\n\n mat = torch.eye(4, dtype=dtype, device=device)\n mat = mat.view(1, 4, 4).repeat(N, 1, 1)\n mat[:, 3, :3] = xyz\n self._matrix = mat\n\n def _get_matrix_inverse(self):\n \"\"\"\n Return the inverse of self._matrix.\n \"\"\"\n inv_mask = self._matrix.new_ones([1, 4, 4])\n inv_mask[0, 3, :3] = -1.0\n i_matrix = self._matrix * inv_mask\n return i_matrix\n\n\nclass Scale(Transform3d):\n def __init__(self, x, y=None, z=None, dtype=torch.float32, device=\"cpu\"):\n \"\"\"\n A Transform3d representing a scaling operation, with different scale\n factors along each coordinate axis.\n\n Option I: Scale(s, dtype=torch.float32, device='cpu')\n s can be one of\n - Python scalar or torch scalar: Single uniform scale\n - 1D torch tensor of shape (N,): A batch of uniform scale\n - 2D torch tensor of shape (N, 3): Scale differently along each axis\n\n Option II: Scale(x, y, z, dtype=torch.float32, device='cpu')\n Each of x, y, and z can be one of\n - python scalar\n - torch scalar\n - 1D torch tensor\n \"\"\"\n super().__init__(device=device)\n xyz = _handle_input(x, y, z, dtype, device, \"scale\", allow_singleton=True)\n N = xyz.shape[0]\n\n # TODO: Can we do this all in one go somehow?\n mat = torch.eye(4, dtype=dtype, device=device)\n mat = mat.view(1, 4, 4).repeat(N, 1, 1)\n mat[:, 0, 0] = xyz[:, 0]\n mat[:, 1, 1] = xyz[:, 1]\n mat[:, 2, 2] = xyz[:, 2]\n self._matrix = mat\n\n def _get_matrix_inverse(self):\n \"\"\"\n Return the inverse of self._matrix.\n \"\"\"\n xyz = torch.stack([self._matrix[:, i, i] for i in range(4)], dim=1)\n ixyz = 1.0 / xyz\n imat = torch.diag_embed(ixyz, dim1=1, dim2=2)\n return imat\n\n\nclass Rotate(Transform3d):\n def __init__(\n self, R, dtype=torch.float32, device=\"cpu\", orthogonal_tol: float = 1e-5\n ):\n \"\"\"\n Create a new Transform3d representing 3D rotation using a rotation\n matrix as the input.\n\n Args:\n R: a tensor of shape (3, 3) or (N, 3, 3)\n orthogonal_tol: tolerance for the test of the orthogonality of R\n\n \"\"\"\n super().__init__(device=device)\n if R.dim() == 2:\n R = R[None]\n if R.shape[-2:] != (3, 3):\n msg = \"R must have shape (3, 3) or (N, 3, 3); got %s\"\n raise ValueError(msg % repr(R.shape))\n R = R.to(dtype=dtype).to(device=device)\n _check_valid_rotation_matrix(R, tol=orthogonal_tol)\n N = R.shape[0]\n mat = torch.eye(4, dtype=dtype, device=device)\n mat = mat.view(1, 
4, 4).repeat(N, 1, 1)\n mat[:, :3, :3] = R\n self._matrix = mat\n\n def _get_matrix_inverse(self):\n \"\"\"\n Return the inverse of self._matrix.\n \"\"\"\n return self._matrix.permute(0, 2, 1).contiguous()\n\n\nclass RotateAxisAngle(Rotate):\n def __init__(\n self,\n angle,\n axis: str = \"X\",\n degrees: bool = True,\n dtype=torch.float64,\n device=\"cpu\",\n ):\n \"\"\"\n Create a new Transform3d representing 3D rotation about an axis\n by an angle.\n\n Assuming a right-hand coordinate system, positive rotation angles result\n in a counter clockwise rotation.\n\n Args:\n angle:\n - A torch tensor of shape (N,)\n - A python scalar\n - A torch scalar\n axis:\n string: one of [\"X\", \"Y\", \"Z\"] indicating the axis about which\n to rotate.\n NOTE: All batch elements are rotated about the same axis.\n \"\"\"\n axis = axis.upper()\n if axis not in [\"X\", \"Y\", \"Z\"]:\n msg = \"Expected axis to be one of ['X', 'Y', 'Z']; got %s\"\n raise ValueError(msg % axis)\n angle = _handle_angle_input(angle, dtype, device, \"RotateAxisAngle\")\n angle = (angle / 180.0 * math.pi) if degrees else angle\n # We assume the points on which this transformation will be applied\n # are row vectors. The rotation matrix returned from _axis_angle_rotation\n # is for transforming column vectors. Therefore we transpose this matrix.\n # R will always be of shape (N, 3, 3)\n R = _axis_angle_rotation(axis, angle).transpose(1, 2)\n super().__init__(device=device, R=R)\n\n\ndef _handle_coord(c, dtype, device):\n \"\"\"\n Helper function for _handle_input.\n\n Args:\n c: Python scalar, torch scalar, or 1D torch tensor\n\n Returns:\n c_vec: 1D torch tensor\n \"\"\"\n if not torch.is_tensor(c):\n c = torch.tensor(c, dtype=dtype, device=device)\n if c.dim() == 0:\n c = c.view(1)\n return c\n\n\ndef _handle_input(x, y, z, dtype, device, name: str, allow_singleton: bool = False):\n \"\"\"\n Helper function to handle parsing logic for building transforms. The output\n is always a tensor of shape (N, 3), but there are several types of allowed\n input.\n\n Case I: Single Matrix\n In this case x is a tensor of shape (N, 3), and y and z are None. 
Here just\n return x.\n\n Case II: Vectors and Scalars\n In this case each of x, y, and z can be one of the following\n - Python scalar\n - Torch scalar\n - Torch tensor of shape (N, 1) or (1, 1)\n In this case x, y and z are broadcast to tensors of shape (N, 1)\n and concatenated to a tensor of shape (N, 3)\n\n Case III: Singleton (only if allow_singleton=True)\n In this case y and z are None, and x can be one of the following:\n - Python scalar\n - Torch scalar\n - Torch tensor of shape (N, 1) or (1, 1)\n Here x will be duplicated 3 times, and we return a tensor of shape (N, 3)\n\n Returns:\n xyz: Tensor of shape (N, 3)\n \"\"\"\n # If x is actually a tensor of shape (N, 3) then just return it\n if torch.is_tensor(x) and x.dim() == 2:\n if x.shape[1] != 3:\n msg = \"Expected tensor of shape (N, 3); got %r (in %s)\"\n raise ValueError(msg % (x.shape, name))\n if y is not None or z is not None:\n msg = \"Expected y and z to be None (in %s)\" % name\n raise ValueError(msg)\n return x\n\n if allow_singleton and y is None and z is None:\n y = x\n z = x\n\n # Convert all to 1D tensors\n xyz = [_handle_coord(c, dtype, device) for c in [x, y, z]]\n\n # Broadcast and concatenate\n sizes = [c.shape[0] for c in xyz]\n N = max(sizes)\n for c in xyz:\n if c.shape[0] != 1 and c.shape[0] != N:\n msg = \"Got non-broadcastable sizes %r (in %s)\" % (sizes, name)\n raise ValueError(msg)\n xyz = [c.expand(N) for c in xyz]\n xyz = torch.stack(xyz, dim=1)\n return xyz\n\n\ndef _handle_angle_input(x, dtype, device, name: str):\n \"\"\"\n Helper function for building a rotation function using angles.\n The output is always of shape (N,).\n\n The input can be one of:\n - Torch tensor of shape (N,)\n - Python scalar\n - Torch scalar\n \"\"\"\n if torch.is_tensor(x) and x.dim() > 1:\n msg = \"Expected tensor of shape (N,); got %r (in %s)\"\n raise ValueError(msg % (x.shape, name))\n else:\n return _handle_coord(x, dtype, device)\n\n\ndef _broadcast_bmm(a, b):\n \"\"\"\n Batch multiply two matrices and broadcast if necessary.\n\n Args:\n a: torch tensor of shape (P, K) or (M, P, K)\n b: torch tensor of shape (N, K, K)\n\n Returns:\n a and b broadcast multipled. The output batch dimension is max(N, M).\n\n To broadcast transforms across a batch dimension if M != N then\n expect that either M = 1 or N = 1. The tensor with batch dimension 1 is\n expanded to have shape N or M.\n \"\"\"\n if a.dim() == 2:\n a = a[None]\n if len(a) != len(b):\n if not ((len(a) == 1) or (len(b) == 1)):\n msg = \"Expected batch dim for bmm to be equal or 1; got %r, %r\"\n raise ValueError(msg % (a.shape, b.shape))\n if len(a) == 1:\n a = a.expand(len(b), -1, -1)\n if len(b) == 1:\n b = b.expand(len(a), -1, -1)\n return a.bmm(b)\n\n\ndef _check_valid_rotation_matrix(R, tol: float = 1e-7):\n \"\"\"\n Determine if R is a valid rotation matrix by checking it satisfies the\n following conditions:\n\n ``RR^T = I and det(R) = 1``\n\n Args:\n R: an (N, 3, 3) matrix\n\n Returns:\n None\n\n Emits a warning if R is an invalid rotation matrix.\n \"\"\"\n N = R.shape[0]\n eye = torch.eye(3, dtype=R.dtype, device=R.device)\n eye = eye.view(1, 3, 3).expand(N, -1, -1)\n orthogonal = torch.allclose(R.bmm(R.transpose(1, 2)), eye, atol=tol)\n det_R = torch.det(R)\n no_distortion = torch.allclose(det_R, torch.ones_like(det_R))\n if not (orthogonal and no_distortion):\n msg = \"R is not a valid rotation matrix\"\n warnings.warn(msg)\n return\n" ]
[ [ "torch.randint", "torch.max", "torch.zeros", "torch.cat", "numpy.concatenate", "torch.device", "torch.allclose", "torch.cuda.synchronize", "numpy.allclose", "numpy.unique", "torch.eye", "torch.tensor", "torch.rand", "torch.full", "torch.min", "torch.is_tensor", "torch.stack", "numpy.random.seed", "torch.manual_seed", "numpy.sort", "numpy.bincount", "torch.cumsum" ], [ "torch.ones", "torch.cat", "torch.det", "torch.diag_embed", "torch.eye", "torch.is_tensor", "torch.inverse", "torch.tensor", "torch.stack", "torch.device", "torch.ones_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vivaan-park/go
[ "f90004eccebf83e21f181f6c84b160b7b6a21ba6", "f90004eccebf83e21f181f6c84b160b7b6a21ba6" ]
[ "dlgo/rl/zero_experience.py", "test_ac.py" ]
[ "# © 2020 지성. all rights reserved.\n# <[email protected]>\n# MIT License\n\nimport numpy as np\n\nclass ZeroExperienceCollector:\n def __init__(self):\n self.states = []\n self.visit_counts = []\n self.rewards = []\n self._current_episode_states = []\n self._current_episode_visit_counts = []\n\n def begin_episode(self):\n self._current_episode_states = []\n self._current_episode_visit_counts = []\n\n def record_decision(self, state, visit_counts):\n self._current_episode_states.append(state)\n self._current_episode_visit_counts.append(visit_counts)\n\n def complete_episode(self, reward):\n num_states = len(self._current_episode_states)\n self.states += self._current_episode_states\n self.visit_counts += self._current_episode_visit_counts\n self.rewards += [reward for _ in range(num_states)]\n\n self._current_episode_states = []\n self._current_episode_visit_counts = []\n\nclass ZeroExperienceBuffer(object):\n def __init__(self, states, visit_counts, rewards):\n self.states = states\n self.visit_counts = visit_counts\n self.rewards = rewards\n\n def serialize(self, h5file):\n h5file.create_group('experience')\n h5file['experience'].create_dataset(\n 'states', data=self.states)\n h5file['experience'].create_dataset(\n 'visit_counts', data=self.visit_counts)\n h5file['experience'].create_dataset(\n 'rewards', data=self.rewards)\n\ndef combine_experience(collectors):\n combined_states = np.concatenate([np.array(c.states) for c in collectors])\n combined_visit_counts = np.concatenate([np.array(c.visit_counts) for c in collectors])\n combined_rewards = np.concatenate([np.array(c.rewards) for c in collectors])\n\n return ZeroExperienceBuffer(\n combined_states,\n combined_visit_counts,\n combined_rewards)\n\ndef load_experience(h5file):\n return ZeroExperienceBuffer(\n states=np.array(h5file['experience']['states']),\n visit_counts=np.array(h5file['experience']['visit_counts']),\n rewards=np.array(h5file['experience']['rewards']))", "# © 2020 지성. all rights reserved.\n# <[email protected]>\n# MIT License\n\nimport argparse\nimport h5py\n\nfrom tensorflow.keras.layers import ZeroPadding2D, Conv2D, Dense, Flatten, Input\nfrom tensorflow.keras.models import Model\n\nfrom dlgo import rl\nfrom dlgo import encoders\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--board-size', type=int, default=19)\n parser.add_argument('output_file')\n args = parser.parse_args()\n\n encoder = encoders.get_encoder_by_name('simple', args.board_size)\n\n board_input = Input(shape=encoder.shape(), name='board_input')\n\n conv1a = ZeroPadding2D((2, 2))(board_input)\n conv1b = Conv2D(64, (5, 5), activation='relu')(conv1a)\n\n conv2a = ZeroPadding2D((1, 1))(conv1b)\n conv2b = Conv2D(64, (3, 3), activation='relu')(conv2a)\n\n flat = Flatten()(conv2b)\n processed_board = Dense(512)(flat)\n\n policy_hidden_layer = Dense(\n 512,\n activation='relu'\n )(processed_board)\n policy_output = Dense(\n encoder.num_points(),\n activation='softmax'\n )(policy_hidden_layer)\n\n value_hidden_layer = Dense(\n 512,\n activation='relu'\n )(processed_board)\n value_output = Dense(\n 1,\n activation='tanh'\n )(value_hidden_layer)\n\n model = Model(\n inputs=board_input,\n outputs=[policy_output, value_output]\n )\n\n new_agent = rl.ACAgent(model, encoder)\n with h5py.File(args.output_file, 'w') as outf:\n new_agent.serialize(outf)\n\nif __name__ == '__main__':\n main()" ]
[ [ "numpy.array" ], [ "tensorflow.keras.models.Model", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.layers.ZeroPadding2D", "tensorflow.keras.layers.Flatten" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] } ]
llxcn/conformer_Informer
[ "717cc9edf6a65dbef4ad53d14e2e2811c57fe24b" ]
[ "loss/dilate_loss.py" ]
[ "import torch\nfrom . import soft_dtw\nfrom . import path_soft_dtw \n\ndef dilate_loss(outputs, targets, alpha, gamma, device):\n\t# outputs, targets: shape (batch_size, N_output, 1)\n\tbatch_size, N_output = outputs.shape[0:2]\n\tloss_shape = 0\n\tsoftdtw_batch = soft_dtw.SoftDTWBatch.apply\n\tD = torch.zeros((batch_size, N_output,N_output )).to(device)\n\tfor k in range(batch_size):\n\t\tDk = soft_dtw.pairwise_distances(targets[k,:,:].view(-1,1),outputs[k,:,:].view(-1,1))\n\t\tD[k:k+1,:,:] = Dk \n\tloss_shape = softdtw_batch(D,gamma)\n\t\n\tpath_dtw = path_soft_dtw.PathDTWBatch.apply\n\tpath = path_dtw(D,gamma) \n\tOmega = soft_dtw.pairwise_distances(torch.range(1,N_output).view(N_output,1)).to(device)\n\tloss_temporal = torch.sum( path*Omega ) / (N_output*N_output) \n\tloss = alpha*loss_shape+ (1-alpha)*loss_temporal\n\treturn loss, loss_shape, loss_temporal" ]
[ [ "torch.range", "torch.sum", "torch.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ukaea/ALC_UQ
[ "a2747c94036b04f1279abb5683c6a225a878aea3" ]
[ "user_interface/python/container.py" ]
[ "# A simple container class with parents and children used to construct the DAKOTA input file.\n# Mulitple instances can be used to implement a tree which keeps track of the tiered structure \n# of the variables in the file. \n\n# Each line of the file is an instance of this class with the following member data:\n# KEY : The 'name' of the entry\n# ATTRIBUTE : An attribute for lines of the form 'key = attribute' or 'key attribute'\n# EQUALS : Indicates whether the line is 'key = attribute' or simply 'key attribute'\n# CHILDREN : A list of child container classes beneath this one\n# PARENT : The parent class above this one\n\nimport numpy as np\nfrom exceptions import *\n\nclass Container:\n\n def __init__(self,key,parent=None,attribute=None,equals=True):\n\n self.key = key\n self.attribute = attribute\n self.equals = equals\n self.parent = parent\n\n # Initialise list of children to an empty list\n self.children = []\n\n # Set as a child of the parent\n if parent is not None:\n parent.add_children( [self] )\n\n # Just adds existing objects to list of children of this object\n def add_children(self,children):\n \n if not ( isinstance( children, list ) or isinstance( children, Container ) ):\n raise ContainerError('Objects passed to add_children must be containers or lists of containers.')\n\n if not isinstance( children,list ):\n self.children.append(children)\n else:\n \n # Check every entry of list is a container\n type_arr = [ isinstance(x,Container) for x in children ]\n if not np.all(type_arr):\n raise ContainerError('All elements of list passed to add_children must be containers.')\n\n self.children = self.children + children\n\n # Return the instance with a given key from beneath this one\n # This will not work reiably if there are multiple instances \n # of the same key beneath the instance this is called from.\n def get(self,key):\n\n waiting = [self]\n\n while len(waiting) != 0:\n\n # Get current line from waiting list\n current = waiting.pop(-1)\n\n # Check if this is the desired instance\n if current.key == key:\n return current\n else:\n # Add children of current line to waiting list\n waiting = waiting + current.children\n\n return None\n\n # Add a new child beneath the object with name 'key' somewhere down the tree\n def add_child(self,key,child):\n\n if not isinstance( child, Container ):\n raise ContainerError('Objects passed to add_child must be containers.')\n\n instance = self.get(key)\n if instance is not None:\n child.parent = instance\n instance.children.append( child )\n else:\n raise ContainerError('Instance with key '+key+' does not exist.')\n\n # Find out how far down the tree this instance is\n def get_depth(self):\n\n current = self\n depth = 0\n\n while current.parent is not None:\n\n current = current.parent\n depth = depth + 1\n\n return depth\n\n # Return the attribute corresponding to a given key\n def get_attribute(self,key):\n\n instance = self.get(key)\n if instance is not None:\n return instance.attribute\n else:\n raise ContainerError('Instance with key '+key+' does not exist.')\n\n # Set attribute of an instance\n def set_attribute(self,key,attribute):\n\n instance = self.get(key)\n if instance is not None:\n instance.attribute = attribute\n else:\n raise ContainerError('Instance with key '+key+' does not exist.')\n\n def append_attribute(self,key,attribute):\n\n instance = self.get(key)\n if instance is None:\n raise ContainerError('Instance with key '+key+' does not exist.')\n\n # Check if attribute is already a list, if not make it one. 
\n if not isinstance( instance.attribute,list ):\n instance.attribute = [ instance.attribute ]\n\n if not isinstance( attribute,list ):\n instance.attribute.append( attribute )\n else:\n instance.attribute += attribute\n\n # Write the current instances line to the DAKOTA input file\n def write_line(self,filehandle):\n\n # Don't write the top level dakota instance\n if self.parent is None:\n return\n\n depth = self.get_depth()\n\n line = ' '*(depth-1) + self.key\n if self.attribute is not None:\n\n if self.equals:\n line = line + ' = '\n else:\n line = line + ' '\n\n if isinstance( self.attribute, list ):\n line = line + \" \".join( [ str(x) for x in self.attribute ] )\n else:\n line = line + str(self.attribute)\n\n line = line + '\\n'\n\n filehandle.write(str(line))\n\n # Loop over all children and write out lines\n def write_all(self,filehandle):\n\n waiting = [self]\n\n while len(waiting) != 0:\n\n # Get current line from waiting list\n current = waiting.pop(-1)\n\n # Write current line\n current.write_line(filehandle)\n\n # Add children of current line to waiting list\n waiting = waiting + current.children\n" ]
[ [ "numpy.all" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
BrokenShell/LabsStarter
[ "04c11aa4d7149f38ee5597cab46ea3ed0408ccf3" ]
[ "model/builder.py" ]
[ "from sklearn import svm, datasets\nfrom joblib import dump\nfrom sklearn.model_selection import train_test_split\n\n\nX, y = datasets.load_iris(return_X_y=True)\nX_train, X_test, y_train, y_test = train_test_split(\n X, y,\n test_size=0.2,\n stratify=y,\n random_state=42,\n)\nmodel = svm.SVC(\n class_weight='balanced',\n probability=True,\n random_state=42,\n)\nmodel.fit(X_train, y_train)\ndump(model, '../project/app/model.joblib')\n\nprint(f\"Training Accuracy: {100 * model.score(X_train, y_train):.2f}%\")\nprint(f\"Validation Accuracy: {100 * model.score(X_test, y_test):.2f}%\")\n" ]
[ [ "sklearn.datasets.load_iris", "sklearn.model_selection.train_test_split", "sklearn.svm.SVC" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Peefy/PythonsWithVSCode
[ "711e710d2903e056ffe2ff22e279b86bd950a925" ]
[ "src/neurolab/chnn.py" ]
[ "\n# python python/chnn.py\n# python3 python/chnn.py\n\nimport numpy as np\nimport neurolab as nl\nimport pylab as pl\nimport matplotlib.pyplot as plt\n# num of input layer\nn = 9\n# num of output layer\nm = 3\n# num of hidden layer\nH = 3\n# input and output data range min\ndata_min = 0\n# input and output data range max\ndata_max = 1\n# input and output data range \ninput_range = [data_min, data_max]\n# num of input layer data range array \ninput_range_array = np.array(input_range)\n\n# Create train samples\n# input = np.random.uniform(-0.5, 0.5, (10, 9))\n# target = (input[:, 0] + input[:, 1] + input[:, 2] + input[:, 4] + input[:, 5] + \n# input[:, 6] + input[:, 7] + input[:, 8]).reshape(10, 1)\n# Create network with 9 inputs, 3 neurons in input layer and 3 in output layer\nnet = nl.net.newhop([[0, 1], [0, 1], [0, 1], [0, 1], [0, 1]\n , [0, 1], [0, 1], [0, 1], [0, 1]])\n# print num of input layer\nprint(net.ci)\n\nprint(net.co)\n# err = net.train(input, target, show=15)\nx = np.arange(0, 5, 0.1)\ny = np.sin(x)\nplt.scatter(x, y, alpha=0.5)\nplt.show()\n# net.sim([[0.2, 0.1, 0.1, 0.2, 0.3, 0.4, 0.4, 0.34, 0.2]]) " ]
[ [ "matplotlib.pyplot.scatter", "numpy.arange", "numpy.sin", "numpy.array", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gbdrt/lale
[ "291f824a6b96f088e787979ca768f50d7758424e", "291f824a6b96f088e787979ca768f50d7758424e", "291f824a6b96f088e787979ca768f50d7758424e", "291f824a6b96f088e787979ca768f50d7758424e", "291f824a6b96f088e787979ca768f50d7758424e" ]
[ "lale/lib/autogen/max_abs_scaler.py", "lale/lib/sklearn/gradient_boosting_classifier.py", "test/test_type_checking.py", "lale/lib/sklearn/linear_regression.py", "lale/lib/autogen/random_forest_regressor.py" ]
[ "from numpy import inf, nan\nfrom sklearn.preprocessing import MaxAbsScaler as Op\n\nfrom lale.docstrings import set_docstrings\nfrom lale.operators import make_operator\n\n\nclass MaxAbsScalerImpl:\n def __init__(self, **hyperparams):\n self._hyperparams = hyperparams\n self._wrapped_model = Op(**self._hyperparams)\n\n def fit(self, X, y=None):\n if y is not None:\n self._wrapped_model.fit(X, y)\n else:\n self._wrapped_model.fit(X)\n return self\n\n def transform(self, X):\n return self._wrapped_model.transform(X)\n\n\n_hyperparams_schema = {\n \"$schema\": \"http://json-schema.org/draft-04/schema#\",\n \"description\": \"inherited docstring for MaxAbsScaler Scale each feature by its maximum absolute value.\",\n \"allOf\": [\n {\n \"type\": \"object\",\n \"required\": [\"copy\"],\n \"relevantToOptimizer\": [\"copy\"],\n \"additionalProperties\": False,\n \"properties\": {\n \"copy\": {\n \"XXX TODO XXX\": \"boolean, optional, default is True\",\n \"description\": \"Set to False to perform inplace scaling and avoid a copy (if the input is already a numpy array).\",\n \"type\": \"boolean\",\n \"default\": True,\n }\n },\n }\n ],\n}\n_input_fit_schema = {\n \"$schema\": \"http://json-schema.org/draft-04/schema#\",\n \"description\": \"Compute the maximum absolute value to be used for later scaling.\",\n \"type\": \"object\",\n \"required\": [\"X\"],\n \"properties\": {\n \"X\": {\n \"type\": \"array\",\n \"items\": {\"type\": \"array\", \"items\": {\"type\": \"number\"}},\n \"description\": \"The data used to compute the per-feature minimum and maximum used for later scaling along the features axis.\",\n }\n },\n}\n_input_transform_schema = {\n \"$schema\": \"http://json-schema.org/draft-04/schema#\",\n \"description\": \"Scale the data\",\n \"type\": \"object\",\n \"required\": [\"X\"],\n \"properties\": {\n \"X\": {\n \"type\": \"array\",\n \"items\": {\"laleType\": \"Any\", \"XXX TODO XXX\": \"item type\"},\n \"XXX TODO XXX\": \"{array-like, sparse matrix}\",\n \"description\": \"The data that should be scaled.\",\n }\n },\n}\n_output_transform_schema = {\n \"$schema\": \"http://json-schema.org/draft-04/schema#\",\n \"description\": \"Scale the data\",\n \"laleType\": \"Any\",\n}\n_combined_schemas = {\n \"$schema\": \"http://json-schema.org/draft-04/schema#\",\n \"description\": \"Combined schema for expected data and hyperparameters.\",\n \"documentation_url\": \"https://scikit-learn.org/0.20/modules/generated/sklearn.preprocessing.MaxAbsScaler#sklearn-preprocessing-maxabsscaler\",\n \"import_from\": \"sklearn.preprocessing\",\n \"type\": \"object\",\n \"tags\": {\"pre\": [], \"op\": [\"transformer\"], \"post\": []},\n \"properties\": {\n \"hyperparams\": _hyperparams_schema,\n \"input_fit\": _input_fit_schema,\n \"input_transform\": _input_transform_schema,\n \"output_transform\": _output_transform_schema,\n },\n}\nset_docstrings(MaxAbsScalerImpl, _combined_schemas)\nMaxAbsScaler = make_operator(MaxAbsScalerImpl, _combined_schemas)\n", "# Copyright 2019 IBM Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions 
and\n# limitations under the License.\n\nimport sklearn\nimport sklearn.ensemble\n\nimport lale.docstrings\nimport lale.operators\n\n\nclass GradientBoostingClassifierImpl:\n def __init__(self, **hyperparams):\n self._hyperparams = hyperparams\n self._wrapped_model = sklearn.ensemble.GradientBoostingClassifier(\n **self._hyperparams\n )\n\n def fit(self, X, y, **fit_params):\n self._wrapped_model.fit(X, y, **fit_params)\n return self\n\n def predict(self, X):\n return self._wrapped_model.predict(X)\n\n def predict_proba(self, X):\n return self._wrapped_model.predict_proba(X)\n\n def decision_function(self, X):\n return self._wrapped_model.decision_function(X)\n\n\n_hyperparams_schema = {\n \"description\": \"Gradient Boosting for classification.\",\n \"allOf\": [\n {\n \"type\": \"object\",\n \"required\": [\"init\", \"presort\"],\n \"relevantToOptimizer\": [\n \"loss\",\n \"n_estimators\",\n \"min_samples_split\",\n \"min_samples_leaf\",\n \"max_depth\",\n \"max_features\",\n \"presort\",\n ],\n \"additionalProperties\": False,\n \"properties\": {\n \"loss\": {\n \"enum\": [\"deviance\", \"exponential\"],\n \"default\": \"deviance\",\n \"description\": \"loss function to be optimized. 'deviance' refers to\",\n },\n \"learning_rate\": {\n \"type\": \"number\",\n \"minimumForOptimizer\": 0.01,\n \"maximumForOptimizer\": 1.0,\n \"distribution\": \"loguniform\",\n \"default\": 0.1,\n \"description\": \"learning rate shrinks the contribution of each tree by `learning_rate`.\",\n },\n \"n_estimators\": {\n \"type\": \"integer\",\n \"minimumForOptimizer\": 10,\n \"maximumForOptimizer\": 100,\n \"distribution\": \"uniform\",\n \"default\": 100,\n \"description\": \"The number of boosting stages to perform. Gradient boosting\",\n },\n \"subsample\": {\n \"type\": \"number\",\n \"minimumForOptimizer\": 0.01,\n \"maximumForOptimizer\": 1.0,\n \"distribution\": \"uniform\",\n \"default\": 1.0,\n \"description\": \"The fraction of samples to be used for fitting the individual base\",\n },\n \"criterion\": {\n \"enum\": [\"friedman_mse\", \"mse\", \"mae\"],\n \"default\": \"friedman_mse\",\n \"description\": \"The function to measure the quality of a split. Supported criteria\",\n },\n \"min_samples_split\": {\n \"anyOf\": [\n {\n \"type\": \"integer\",\n \"minimumForOptimizer\": 2,\n \"maximumForOptimizer\": 20,\n \"distribution\": \"uniform\",\n },\n {\n \"type\": \"number\",\n \"minimumForOptimizer\": 0.01,\n \"maximumForOptimizer\": 0.5,\n \"default\": 0.05,\n },\n ],\n \"default\": 2,\n \"description\": \"The minimum number of samples required to split an internal node:\",\n },\n \"min_samples_leaf\": {\n \"anyOf\": [\n {\n \"type\": \"integer\",\n \"minimumForOptimizer\": 1,\n \"maximumForOptimizer\": 20,\n \"distribution\": \"uniform\",\n },\n {\n \"type\": \"number\",\n \"minimumForOptimizer\": 0.01,\n \"maximumForOptimizer\": 0.5,\n \"default\": 0.05,\n },\n ],\n \"default\": 1,\n \"description\": \"The minimum number of samples required to be at a leaf node.\",\n },\n \"min_weight_fraction_leaf\": {\n \"type\": \"number\",\n \"default\": 0.0,\n \"description\": \"The minimum weighted fraction of the sum total of weights (of all\",\n },\n \"max_depth\": {\n \"type\": \"integer\",\n \"minimumForOptimizer\": 3,\n \"maximumForOptimizer\": 5,\n \"default\": 3,\n \"description\": \"maximum depth of the individual regression estimators. 
The maximum\",\n },\n \"min_impurity_decrease\": {\n \"type\": \"number\",\n \"default\": 0.0,\n \"description\": \"A node will be split if this split induces a decrease of the impurity\",\n },\n \"min_impurity_split\": {\n \"anyOf\": [{\"type\": \"number\"}, {\"enum\": [None]}],\n \"default\": None,\n \"description\": \"Threshold for early stopping in tree growth. A node will split\",\n },\n \"init\": {\n \"anyOf\": [{\"type\": \"object\"}, {\"enum\": [\"zero\", None]}],\n \"default\": None,\n \"description\": \"An estimator object that is used to compute the initial\",\n },\n \"random_state\": {\n \"anyOf\": [\n {\"type\": \"integer\"},\n {\"laleType\": \"numpy.random.RandomState\"},\n {\"enum\": [None]},\n ],\n \"default\": None,\n \"description\": \"If int, random_state is the seed used by the random number generator;\",\n },\n \"max_features\": {\n \"anyOf\": [\n {\"type\": \"integer\", \"minimum\": 1, \"forOptimizer\": False},\n {\n \"type\": \"number\",\n \"minimum\": 0.0,\n \"exclusiveMinimum\": True,\n \"maximum\": 1.0,\n \"exclusiveMaximum\": True,\n \"minimumForOptimizer\": 0.01,\n \"default\": 0.5,\n \"distribution\": \"uniform\",\n },\n {\"enum\": [\"auto\", \"sqrt\", \"log2\", None]},\n ],\n \"default\": None,\n \"description\": \"The number of features to consider when looking for the best split.\",\n },\n \"verbose\": {\n \"type\": \"integer\",\n \"default\": 0,\n \"description\": \"Enable verbose output. If 1 then it prints progress and performance\",\n },\n \"max_leaf_nodes\": {\n \"anyOf\": [{\"type\": \"integer\"}, {\"enum\": [None]}],\n \"default\": None,\n \"description\": \"Grow trees with ``max_leaf_nodes`` in best-first fashion.\",\n },\n \"warm_start\": {\n \"type\": \"boolean\",\n \"default\": False,\n \"description\": \"When set to ``True``, reuse the solution of the previous call to fit\",\n },\n \"presort\": {\n \"anyOf\": [{\"type\": \"boolean\"}, {\"enum\": [\"auto\"]}],\n \"default\": \"auto\",\n \"description\": \"Whether to presort the data to speed up the finding of best splits in\",\n },\n \"validation_fraction\": {\n \"type\": \"number\",\n \"default\": 0.1,\n \"description\": \"The proportion of training data to set aside as validation set for\",\n },\n \"n_iter_no_change\": {\n \"anyOf\": [\n {\n \"type\": \"integer\",\n \"minimumForOptimizer\": 5,\n \"maximumForOptimizer\": 10,\n },\n {\"enum\": [None]},\n ],\n \"default\": None,\n \"description\": \"``n_iter_no_change`` is used to decide if early stopping will be used\",\n },\n \"tol\": {\n \"type\": \"number\",\n \"minimumForOptimizer\": 1e-08,\n \"maximumForOptimizer\": 0.01,\n \"distribution\": \"loguniform\",\n \"default\": 0.0001,\n \"description\": \"Tolerance for the early stopping. When the loss is not improving\",\n },\n },\n }\n ],\n}\n\n_input_fit_schema = {\n \"description\": \"Fit the gradient boosting model.\",\n \"type\": \"object\",\n \"required\": [\"X\", \"y\"],\n \"properties\": {\n \"X\": {\n \"type\": \"array\",\n \"items\": {\"type\": \"array\", \"items\": {\"type\": \"number\"},},\n \"description\": \"The input samples. 
Internally, it will be converted to\",\n },\n \"y\": {\n \"anyOf\": [\n {\"type\": \"array\", \"items\": {\"type\": \"number\"}},\n {\"type\": \"array\", \"items\": {\"type\": \"string\"}},\n {\"type\": \"array\", \"items\": {\"type\": \"boolean\"}},\n ],\n \"description\": \"Target values (strings or integers in classification, real numbers\",\n },\n \"sample_weight\": {\n \"anyOf\": [\n {\"type\": \"array\", \"items\": {\"type\": \"number\"},},\n {\"enum\": [None]},\n ],\n \"default\": None,\n \"description\": \"Sample weights. If None, then samples are equally weighted. Splits\",\n },\n \"monitor\": {\n \"anyOf\": [{\"laleType\": \"callable\"}, {\"enum\": [None]}],\n \"default\": None,\n \"description\": \"The monitor is called after each iteration with the current iteration, a reference to the estimator and the local variables of _fit_stages as keyword arguments callable(i, self, locals()).\",\n },\n },\n}\n_input_predict_schema = {\n \"description\": \"Predict class for X.\",\n \"type\": \"object\",\n \"properties\": {\n \"X\": {\n \"type\": \"array\",\n \"items\": {\"type\": \"array\", \"items\": {\"type\": \"number\"},},\n \"description\": \"The input samples. Internally, it will be converted to\",\n },\n },\n}\n_output_predict_schema = {\n \"description\": \"The predicted values.\",\n \"anyOf\": [\n {\"type\": \"array\", \"items\": {\"type\": \"number\"}},\n {\"type\": \"array\", \"items\": {\"type\": \"string\"}},\n {\"type\": \"array\", \"items\": {\"type\": \"boolean\"}},\n ],\n}\n\n_input_predict_proba_schema = {\n \"description\": \"Predict class probabilities for X.\",\n \"type\": \"object\",\n \"properties\": {\n \"X\": {\n \"type\": \"array\",\n \"items\": {\"type\": \"array\", \"items\": {\"type\": \"number\"},},\n \"description\": \"The input samples. Internally, it will be converted to\",\n },\n },\n}\n_output_predict_proba_schema = {\n \"description\": \"The class probabilities of the input samples. The order of the\",\n \"type\": \"array\",\n \"items\": {\"type\": \"array\", \"items\": {\"type\": \"number\"},},\n}\n\n_input_decision_function_schema = {\n \"type\": \"object\",\n \"required\": [\"X\"],\n \"additionalProperties\": False,\n \"properties\": {\n \"X\": {\n \"description\": \"Features; the outer array is over samples.\",\n \"type\": \"array\",\n \"items\": {\"type\": \"array\", \"items\": {\"type\": \"number\"}},\n }\n },\n}\n\n_output_decision_function_schema = {\n \"description\": \"Confidence scores for samples for each class in the model.\",\n \"anyOf\": [\n {\n \"description\": \"In the multi-way case, score per (sample, class) combination.\",\n \"type\": \"array\",\n \"items\": {\"type\": \"array\", \"items\": {\"type\": \"number\"}},\n },\n {\n \"description\": \"In the binary case, score for `self._classes[1]`.\",\n \"type\": \"array\",\n \"items\": {\"type\": \"number\"},\n },\n ],\n}\n\n_combined_schemas = {\n \"$schema\": \"http://json-schema.org/draft-04/schema#\",\n \"description\": \"\"\"`Gradient boosting classifier`_ from scikit-learn.\n\n.. 
_`Gradient boosting classifier`: https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html\n\"\"\",\n \"documentation_url\": \"https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.gradient_boosting_classifier.html\",\n \"import_from\": \"sklearn.ensemble\",\n \"type\": \"object\",\n \"tags\": {\"pre\": [], \"op\": [\"estimator\", \"classifier\"], \"post\": []},\n \"properties\": {\n \"hyperparams\": _hyperparams_schema,\n \"input_fit\": _input_fit_schema,\n \"input_predict\": _input_predict_schema,\n \"output_predict\": _output_predict_schema,\n \"input_predict_proba\": _input_predict_proba_schema,\n \"output_predict_proba\": _output_predict_proba_schema,\n \"input_decision_function\": _input_decision_function_schema,\n \"output_decision_function\": _output_decision_function_schema,\n },\n}\n\nGradientBoostingClassifier: lale.operators.PlannedIndividualOp\nGradientBoostingClassifier = lale.operators.make_operator(\n GradientBoostingClassifierImpl, _combined_schemas\n)\n\nif sklearn.__version__ >= \"0.22\":\n # old: https://scikit-learn.org/0.20/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html\n # new: https://scikit-learn.org/0.22/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html\n from lale.schemas import AnyOf, Bool, Enum, Float\n\n GradientBoostingClassifier = GradientBoostingClassifier.customize_schema(\n presort=AnyOf(\n types=[Bool(), Enum([\"deprecated\", \"auto\"])],\n desc=\"This parameter is deprecated and will be removed in v0.24.\",\n default=\"deprecated\",\n ),\n ccp_alpha=Float(\n desc=\"Complexity parameter used for Minimal Cost-Complexity Pruning. The subtree with the largest cost complexity that is smaller than ccp_alpha will be chosen. By default, no pruning is performed.\",\n default=0.0,\n forOptimizer=False,\n min=0.0,\n maxForOptimizer=0.1,\n ),\n )\n\nif sklearn.__version__ >= \"0.24\":\n # old: https://scikit-learn.org/0.22/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html\n # new: https://scikit-learn.org/0.24/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html\n from lale.schemas import JSON\n\n GradientBoostingClassifier = GradientBoostingClassifier.customize_schema(\n presort=None,\n criterion=JSON(\n {\n \"description\": \"Function to measure the quality of a split.\",\n \"anyOf\": [\n {\"enum\": [\"mse\", \"friedman_mse\"]},\n {\n \"description\": \"Deprecated since version 0.24.\",\n \"enum\": [\"mae\"],\n \"forOptimizer\": False,\n },\n ],\n \"default\": \"friedman_mse\",\n }\n ),\n )\n\nlale.docstrings.set_docstrings(\n GradientBoostingClassifierImpl, GradientBoostingClassifier._schemas\n)\n", "# Copyright 2019 IBM Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nimport jsonschema\n\nimport lale.lib.lale\nfrom lale.lib.lale import ConcatFeatures, IdentityWrapper, NoOp\nfrom lale.lib.sklearn import NMF, LogisticRegression, TfidfVectorizer\nfrom lale.settings import (\n disable_data_schema_validation,\n 
disable_hyperparams_schema_validation,\n set_disable_data_schema_validation,\n set_disable_hyperparams_schema_validation,\n)\n\n\nclass TestDatasetSchemas(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n from sklearn.datasets import load_iris\n\n existing_flag = disable_data_schema_validation\n set_disable_data_schema_validation(False)\n irisArr = load_iris()\n cls._irisArr = {\"X\": irisArr.data, \"y\": irisArr.target}\n from lale.datasets import sklearn_to_pandas\n\n (train_X, train_y), (test_X, test_y) = sklearn_to_pandas.load_iris_df()\n cls._irisDf = {\"X\": train_X, \"y\": train_y}\n (train_X, train_y), (test_X, test_y) = sklearn_to_pandas.digits_df()\n cls._digits = {\"X\": train_X, \"y\": train_y}\n (train_X, train_y), (test_X, test_y) = sklearn_to_pandas.california_housing_df()\n cls._housing = {\"X\": train_X, \"y\": train_y}\n from lale.datasets import openml\n\n (train_X, train_y), (test_X, test_y) = openml.fetch(\n \"credit-g\", \"classification\", preprocess=False\n )\n cls._creditG = {\"X\": train_X, \"y\": train_y}\n from lale.datasets import load_movie_review\n\n train_X, train_y = load_movie_review()\n cls._movies = {\"X\": train_X, \"y\": train_y}\n from lale.datasets.uci.uci_datasets import fetch_drugscom\n\n train_X, train_y, test_X, test_y = fetch_drugscom()\n cls._drugRev = {\"X\": train_X, \"y\": train_y}\n set_disable_data_schema_validation(existing_flag)\n\n @classmethod\n def tearDownClass(cls):\n cls._irisArr = None\n cls._irisDf = None\n cls._digits = None\n cls._housing = None\n cls._creditG = None\n cls._movies = None\n cls._drugRev = None\n\n def test_datasets_with_own_schemas(self):\n from lale.datasets.data_schemas import to_schema\n from lale.type_checking import validate_schema\n\n for name in [\n \"irisArr\",\n \"irisDf\",\n \"digits\",\n \"housing\",\n \"creditG\",\n \"movies\",\n \"drugRev\",\n ]:\n dataset = getattr(self, f\"_{name}\")\n data_X, data_y = dataset[\"X\"], dataset[\"y\"]\n schema_X, schema_y = to_schema(data_X), to_schema(data_y)\n validate_schema(data_X, schema_X, subsample_array=False)\n validate_schema(data_y, schema_y, subsample_array=False)\n\n def test_ndarray_to_schema(self):\n from lale.datasets.data_schemas import to_schema\n from lale.type_checking import validate_schema\n\n all_X, all_y = self._irisArr[\"X\"], self._irisArr[\"y\"]\n assert not hasattr(all_X, \"json_schema\")\n all_X_schema = to_schema(all_X)\n validate_schema(all_X, all_X_schema, subsample_array=False)\n assert not hasattr(all_y, \"json_schema\")\n all_y_schema = to_schema(all_y)\n validate_schema(all_y, all_y_schema, subsample_array=False)\n all_X_expected = {\n \"type\": \"array\",\n \"minItems\": 150,\n \"maxItems\": 150,\n \"items\": {\n \"type\": \"array\",\n \"minItems\": 4,\n \"maxItems\": 4,\n \"items\": {\"type\": \"number\"},\n },\n }\n all_y_expected = {\n \"type\": \"array\",\n \"minItems\": 150,\n \"maxItems\": 150,\n \"items\": {\"type\": \"integer\"},\n }\n self.maxDiff = None\n self.assertEqual(all_X_schema, all_X_expected)\n self.assertEqual(all_y_schema, all_y_expected)\n\n def test_pandas_to_schema(self):\n import pandas as pd\n\n from lale.datasets.data_schemas import to_schema\n from lale.type_checking import validate_schema\n\n train_X, train_y = self._irisDf[\"X\"], self._irisDf[\"y\"]\n assert isinstance(train_X, pd.DataFrame)\n assert not hasattr(train_X, \"json_schema\")\n train_X_schema = to_schema(train_X)\n validate_schema(train_X, train_X_schema, subsample_array=False)\n assert isinstance(train_y, pd.Series)\n assert 
not hasattr(train_y, \"json_schema\")\n train_y_schema = to_schema(train_y)\n validate_schema(train_y, train_y_schema, subsample_array=False)\n train_X_expected = {\n \"type\": \"array\",\n \"minItems\": 120,\n \"maxItems\": 120,\n \"items\": {\n \"type\": \"array\",\n \"minItems\": 4,\n \"maxItems\": 4,\n \"items\": [\n {\"description\": \"sepal length (cm)\", \"type\": \"number\"},\n {\"description\": \"sepal width (cm)\", \"type\": \"number\"},\n {\"description\": \"petal length (cm)\", \"type\": \"number\"},\n {\"description\": \"petal width (cm)\", \"type\": \"number\"},\n ],\n },\n }\n train_y_expected = {\n \"type\": \"array\",\n \"minItems\": 120,\n \"maxItems\": 120,\n \"items\": {\"description\": \"target\", \"type\": \"integer\"},\n }\n self.maxDiff = None\n self.assertEqual(train_X_schema, train_X_expected)\n self.assertEqual(train_y_schema, train_y_expected)\n\n def test_arff_to_schema(self):\n from lale.datasets.data_schemas import to_schema\n from lale.type_checking import validate_schema\n\n train_X, train_y = self._creditG[\"X\"], self._creditG[\"y\"]\n assert hasattr(train_X, \"json_schema\")\n train_X_schema = to_schema(train_X)\n validate_schema(train_X, train_X_schema, subsample_array=False)\n assert hasattr(train_y, \"json_schema\")\n train_y_schema = to_schema(train_y)\n validate_schema(train_y, train_y_schema, subsample_array=False)\n train_X_expected = {\n \"type\": \"array\",\n \"minItems\": 670,\n \"maxItems\": 670,\n \"items\": {\n \"type\": \"array\",\n \"minItems\": 20,\n \"maxItems\": 20,\n \"items\": [\n {\n \"description\": \"checking_status\",\n \"enum\": [\"<0\", \"0<=X<200\", \">=200\", \"no checking\"],\n },\n {\"description\": \"duration\", \"type\": \"number\"},\n {\n \"description\": \"credit_history\",\n \"enum\": [\n \"no credits/all paid\",\n \"all paid\",\n \"existing paid\",\n \"delayed previously\",\n \"critical/other existing credit\",\n ],\n },\n {\n \"description\": \"purpose\",\n \"enum\": [\n \"new car\",\n \"used car\",\n \"furniture/equipment\",\n \"radio/tv\",\n \"domestic appliance\",\n \"repairs\",\n \"education\",\n \"vacation\",\n \"retraining\",\n \"business\",\n \"other\",\n ],\n },\n {\"description\": \"credit_amount\", \"type\": \"number\"},\n {\n \"description\": \"savings_status\",\n \"enum\": [\n \"<100\",\n \"100<=X<500\",\n \"500<=X<1000\",\n \">=1000\",\n \"no known savings\",\n ],\n },\n {\n \"description\": \"employment\",\n \"enum\": [\"unemployed\", \"<1\", \"1<=X<4\", \"4<=X<7\", \">=7\"],\n },\n {\"description\": \"installment_commitment\", \"type\": \"number\"},\n {\n \"description\": \"personal_status\",\n \"enum\": [\n \"male div/sep\",\n \"female div/dep/mar\",\n \"male single\",\n \"male mar/wid\",\n \"female single\",\n ],\n },\n {\n \"description\": \"other_parties\",\n \"enum\": [\"none\", \"co applicant\", \"guarantor\"],\n },\n {\"description\": \"residence_since\", \"type\": \"number\"},\n {\n \"description\": \"property_magnitude\",\n \"enum\": [\n \"real estate\",\n \"life insurance\",\n \"car\",\n \"no known property\",\n ],\n },\n {\"description\": \"age\", \"type\": \"number\"},\n {\n \"description\": \"other_payment_plans\",\n \"enum\": [\"bank\", \"stores\", \"none\"],\n },\n {\"description\": \"housing\", \"enum\": [\"rent\", \"own\", \"for free\"]},\n {\"description\": \"existing_credits\", \"type\": \"number\"},\n {\n \"description\": \"job\",\n \"enum\": [\n \"unemp/unskilled non res\",\n \"unskilled resident\",\n \"skilled\",\n \"high qualif/self emp/mgmt\",\n ],\n },\n {\"description\": 
\"num_dependents\", \"type\": \"number\"},\n {\"description\": \"own_telephone\", \"enum\": [\"none\", \"yes\"]},\n {\"description\": \"foreign_worker\", \"enum\": [\"yes\", \"no\"]},\n ],\n },\n }\n train_y_expected = {\n \"type\": \"array\",\n \"minItems\": 670,\n \"maxItems\": 670,\n \"items\": {\"description\": \"class\", \"enum\": [\"good\", \"bad\"]},\n }\n self.maxDiff = None\n self.assertEqual(train_X_schema, train_X_expected)\n self.assertEqual(train_y_schema, train_y_expected)\n\n def test_keep_numbers(self):\n from lale.datasets.data_schemas import to_schema\n from lale.lib.lale import Project\n\n train_X = self._creditG[\"X\"]\n trainable = Project(columns={\"type\": \"number\"})\n trained = trainable.fit(train_X)\n transformed = trained.transform(train_X)\n transformed_schema = to_schema(transformed)\n transformed_expected = {\n \"type\": \"array\",\n \"minItems\": 670,\n \"maxItems\": 670,\n \"items\": {\n \"type\": \"array\",\n \"minItems\": 7,\n \"maxItems\": 7,\n \"items\": [\n {\"description\": \"duration\", \"type\": \"number\"},\n {\"description\": \"credit_amount\", \"type\": \"number\"},\n {\"description\": \"installment_commitment\", \"type\": \"number\"},\n {\"description\": \"residence_since\", \"type\": \"number\"},\n {\"description\": \"age\", \"type\": \"number\"},\n {\"description\": \"existing_credits\", \"type\": \"number\"},\n {\"description\": \"num_dependents\", \"type\": \"number\"},\n ],\n },\n }\n self.maxDiff = None\n self.assertEqual(transformed_schema, transformed_expected)\n\n def test_keep_non_numbers(self):\n existing_flag = disable_data_schema_validation\n set_disable_data_schema_validation(False)\n\n from lale.datasets.data_schemas import to_schema\n from lale.lib.lale import Project\n\n train_X = self._creditG[\"X\"]\n trainable = Project(columns={\"not\": {\"type\": \"number\"}})\n trained = trainable.fit(train_X)\n transformed = trained.transform(train_X)\n transformed_schema = to_schema(transformed)\n transformed_expected = {\n \"type\": \"array\",\n \"minItems\": 670,\n \"maxItems\": 670,\n \"items\": {\n \"type\": \"array\",\n \"minItems\": 13,\n \"maxItems\": 13,\n \"items\": [\n {\n \"description\": \"checking_status\",\n \"enum\": [\"<0\", \"0<=X<200\", \">=200\", \"no checking\"],\n },\n {\n \"description\": \"credit_history\",\n \"enum\": [\n \"no credits/all paid\",\n \"all paid\",\n \"existing paid\",\n \"delayed previously\",\n \"critical/other existing credit\",\n ],\n },\n {\n \"description\": \"purpose\",\n \"enum\": [\n \"new car\",\n \"used car\",\n \"furniture/equipment\",\n \"radio/tv\",\n \"domestic appliance\",\n \"repairs\",\n \"education\",\n \"vacation\",\n \"retraining\",\n \"business\",\n \"other\",\n ],\n },\n {\n \"description\": \"savings_status\",\n \"enum\": [\n \"<100\",\n \"100<=X<500\",\n \"500<=X<1000\",\n \">=1000\",\n \"no known savings\",\n ],\n },\n {\n \"description\": \"employment\",\n \"enum\": [\"unemployed\", \"<1\", \"1<=X<4\", \"4<=X<7\", \">=7\"],\n },\n {\n \"description\": \"personal_status\",\n \"enum\": [\n \"male div/sep\",\n \"female div/dep/mar\",\n \"male single\",\n \"male mar/wid\",\n \"female single\",\n ],\n },\n {\n \"description\": \"other_parties\",\n \"enum\": [\"none\", \"co applicant\", \"guarantor\"],\n },\n {\n \"description\": \"property_magnitude\",\n \"enum\": [\n \"real estate\",\n \"life insurance\",\n \"car\",\n \"no known property\",\n ],\n },\n {\n \"description\": \"other_payment_plans\",\n \"enum\": [\"bank\", \"stores\", \"none\"],\n },\n {\"description\": 
\"housing\", \"enum\": [\"rent\", \"own\", \"for free\"]},\n {\n \"description\": \"job\",\n \"enum\": [\n \"unemp/unskilled non res\",\n \"unskilled resident\",\n \"skilled\",\n \"high qualif/self emp/mgmt\",\n ],\n },\n {\"description\": \"own_telephone\", \"enum\": [\"none\", \"yes\"]},\n {\"description\": \"foreign_worker\", \"enum\": [\"yes\", \"no\"]},\n ],\n },\n }\n self.maxDiff = None\n self.assertEqual(transformed_schema, transformed_expected)\n set_disable_data_schema_validation(existing_flag)\n\n def test_input_schema_fit(self):\n self.maxDiff = None\n self.assertEqual(\n LogisticRegression.input_schema_fit(),\n LogisticRegression.get_schema(\"input_fit\"),\n )\n self.assertEqual(\n (NMF >> LogisticRegression).input_schema_fit(), NMF.get_schema(\"input_fit\")\n )\n self.assertEqual(\n IdentityWrapper(op=LogisticRegression).input_schema_fit(),\n LogisticRegression.get_schema(\"input_fit\"),\n )\n actual = (TfidfVectorizer | NMF).input_schema_fit()\n expected = {\n \"anyOf\": [\n {\n \"type\": \"object\",\n \"required\": [\"X\"],\n \"additionalProperties\": False,\n \"properties\": {\n \"X\": {\n \"anyOf\": [\n {\"type\": \"array\", \"items\": {\"type\": \"string\"}},\n {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"array\",\n \"minItems\": 1,\n \"maxItems\": 1,\n \"items\": {\"type\": \"string\"},\n },\n },\n ]\n },\n \"y\": {},\n },\n },\n {\n \"type\": \"object\",\n \"required\": [\"X\"],\n \"additionalProperties\": False,\n \"properties\": {\n \"X\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"array\",\n \"items\": {\"type\": \"number\", \"minimum\": 0.0},\n },\n },\n \"y\": {},\n },\n },\n ]\n }\n self.assertEqual(actual, expected)\n\n def test_transform_schema_NoOp(self):\n from lale.datasets.data_schemas import to_schema\n\n existing_flag = disable_data_schema_validation\n set_disable_data_schema_validation(False)\n\n for ds in [\n self._irisArr,\n self._irisDf,\n self._digits,\n self._housing,\n self._creditG,\n self._movies,\n self._drugRev,\n ]:\n s_input = to_schema(ds[\"X\"])\n s_output = NoOp.transform_schema(s_input)\n self.assertIs(s_input, s_output)\n set_disable_data_schema_validation(existing_flag)\n\n def test_transform_schema_pipeline(self):\n from lale.datasets.data_schemas import to_schema\n\n existing_flag = disable_data_schema_validation\n set_disable_data_schema_validation(False)\n pipeline = NMF >> LogisticRegression\n input_schema = to_schema(self._digits[\"X\"])\n transformed_schema = pipeline.transform_schema(input_schema)\n transformed_expected = {\n \"description\": \"Probability of the sample for each class in the model.\",\n \"type\": \"array\",\n \"items\": {\"type\": \"array\", \"items\": {\"type\": \"number\"}},\n }\n self.maxDiff = None\n self.assertEqual(transformed_schema, transformed_expected)\n set_disable_data_schema_validation(existing_flag)\n\n def test_transform_schema_choice(self):\n from lale.datasets.data_schemas import to_schema\n\n existing_flag = disable_data_schema_validation\n set_disable_data_schema_validation(False)\n\n choice = NMF | LogisticRegression\n input_schema = to_schema(self._digits[\"X\"])\n transformed_schema = choice.transform_schema(input_schema)\n transformed_expected = {\n \"type\": \"array\",\n \"items\": {\"type\": \"array\", \"items\": {\"type\": \"number\"}},\n }\n self.maxDiff = None\n self.assertEqual(transformed_schema, transformed_expected)\n set_disable_data_schema_validation(existing_flag)\n\n def test_transform_schema_higher_order(self):\n from lale.datasets.data_schemas import 
to_schema\n\n existing_flag = disable_data_schema_validation\n set_disable_data_schema_validation(False)\n\n inner = LogisticRegression\n outer = IdentityWrapper(op=LogisticRegression)\n input_schema = to_schema(self._digits[\"X\"])\n transformed_inner = inner.transform_schema(input_schema)\n transformed_outer = outer.transform_schema(input_schema)\n self.maxDiff = None\n self.assertEqual(transformed_inner, transformed_outer)\n set_disable_data_schema_validation(existing_flag)\n\n def test_transform_schema_Concat_irisArr(self):\n from lale.datasets.data_schemas import to_schema\n\n existing_flag = disable_data_schema_validation\n set_disable_data_schema_validation(False)\n\n data_X, data_y = self._irisArr[\"X\"], self._irisArr[\"y\"]\n s_in_X, s_in_y = to_schema(data_X), to_schema(data_y)\n\n def check(s_actual, n_expected, s_expected):\n assert s_actual[\"items\"][\"minItems\"] == n_expected, str(s_actual)\n assert s_actual[\"items\"][\"maxItems\"] == n_expected, str(s_actual)\n assert s_actual[\"items\"][\"items\"] == s_expected, str(s_actual)\n\n s_out_X = ConcatFeatures.transform_schema({\"items\": [s_in_X]})\n check(s_out_X, 4, {\"type\": \"number\"})\n s_out_y = ConcatFeatures.transform_schema({\"items\": [s_in_y]})\n check(s_out_y, 1, {\"type\": \"integer\"})\n s_out_XX = ConcatFeatures.transform_schema({\"items\": [s_in_X, s_in_X]})\n check(s_out_XX, 8, {\"type\": \"number\"})\n s_out_yy = ConcatFeatures.transform_schema({\"items\": [s_in_y, s_in_y]})\n check(s_out_yy, 2, {\"type\": \"integer\"})\n s_out_Xy = ConcatFeatures.transform_schema({\"items\": [s_in_X, s_in_y]})\n check(s_out_Xy, 5, {\"type\": \"number\"})\n s_out_XXX = ConcatFeatures.transform_schema({\"items\": [s_in_X, s_in_X, s_in_X]})\n check(s_out_XXX, 12, {\"type\": \"number\"})\n set_disable_data_schema_validation(existing_flag)\n\n def test_transform_schema_Concat_irisDf(self):\n from lale.datasets.data_schemas import to_schema\n\n existing_flag = disable_data_schema_validation\n set_disable_data_schema_validation(False)\n\n data_X, data_y = self._irisDf[\"X\"], self._irisDf[\"y\"]\n s_in_X, s_in_y = to_schema(data_X), to_schema(data_y)\n\n def check(s_actual, n_expected, s_expected):\n assert s_actual[\"items\"][\"minItems\"] == n_expected, str(s_actual)\n assert s_actual[\"items\"][\"maxItems\"] == n_expected, str(s_actual)\n assert s_actual[\"items\"][\"items\"] == s_expected, str(s_actual)\n\n s_out_X = ConcatFeatures.transform_schema({\"items\": [s_in_X]})\n check(s_out_X, 4, {\"type\": \"number\"})\n s_out_y = ConcatFeatures.transform_schema({\"items\": [s_in_y]})\n check(s_out_y, 1, {\"description\": \"target\", \"type\": \"integer\"})\n s_out_XX = ConcatFeatures.transform_schema({\"items\": [s_in_X, s_in_X]})\n check(s_out_XX, 8, {\"type\": \"number\"})\n s_out_yy = ConcatFeatures.transform_schema({\"items\": [s_in_y, s_in_y]})\n check(s_out_yy, 2, {\"type\": \"integer\"})\n s_out_Xy = ConcatFeatures.transform_schema({\"items\": [s_in_X, s_in_y]})\n check(s_out_Xy, 5, {\"type\": \"number\"})\n s_out_XXX = ConcatFeatures.transform_schema({\"items\": [s_in_X, s_in_X, s_in_X]})\n check(s_out_XXX, 12, {\"type\": \"number\"})\n set_disable_data_schema_validation(existing_flag)\n\n def test_lr_with_all_datasets(self):\n existing_flag = disable_data_schema_validation\n set_disable_data_schema_validation(False)\n\n should_succeed = [\"irisArr\", \"irisDf\", \"digits\", \"housing\"]\n should_fail = [\"creditG\", \"movies\", \"drugRev\"]\n for name in should_succeed:\n dataset = getattr(self, f\"_{name}\")\n 
LogisticRegression.validate_schema(**dataset)\n for name in should_fail:\n dataset = getattr(self, f\"_{name}\")\n with self.assertRaises(ValueError):\n LogisticRegression.validate_schema(**dataset)\n set_disable_data_schema_validation(existing_flag)\n\n def test_project_with_all_datasets(self):\n existing_flag = disable_data_schema_validation\n set_disable_data_schema_validation(False)\n\n should_succeed = [\n \"irisArr\",\n \"irisDf\",\n \"digits\",\n \"housing\",\n \"creditG\",\n \"drugRev\",\n ]\n should_fail = [\"movies\"]\n for name in should_succeed:\n dataset = getattr(self, f\"_{name}\")\n lale.lib.lale.Project.validate_schema(**dataset)\n for name in should_fail:\n dataset = getattr(self, f\"_{name}\")\n with self.assertRaises(ValueError):\n lale.lib.lale.Project.validate_schema(**dataset)\n set_disable_data_schema_validation(existing_flag)\n\n def test_nmf_with_all_datasets(self):\n existing_flag = disable_data_schema_validation\n set_disable_data_schema_validation(False)\n\n should_succeed = [\"digits\"]\n should_fail = [\"irisArr\", \"irisDf\", \"housing\", \"creditG\", \"movies\", \"drugRev\"]\n for name in should_succeed:\n dataset = getattr(self, f\"_{name}\")\n NMF.validate_schema(**dataset)\n for name in should_fail:\n dataset = getattr(self, f\"_{name}\")\n with self.assertRaises(ValueError):\n NMF.validate_schema(**dataset)\n set_disable_data_schema_validation(existing_flag)\n\n def test_tfidf_with_all_datasets(self):\n existing_flag = disable_data_schema_validation\n set_disable_data_schema_validation(False)\n\n should_succeed = [\"movies\"]\n should_fail = [\"irisArr\", \"irisDf\", \"digits\", \"housing\", \"creditG\", \"drugRev\"]\n for name in should_succeed:\n dataset = getattr(self, f\"_{name}\")\n TfidfVectorizer.validate_schema(**dataset)\n for name in should_fail:\n dataset = getattr(self, f\"_{name}\")\n with self.assertRaises(ValueError):\n TfidfVectorizer.validate_schema(**dataset)\n set_disable_data_schema_validation(existing_flag)\n\n def test_decision_function_binary(self):\n from lale.lib.lale import Project\n\n train_X, train_y = self._creditG[\"X\"], self._creditG[\"y\"]\n trainable = Project(columns={\"type\": \"number\"}) >> LogisticRegression()\n trained = trainable.fit(train_X, train_y)\n _ = trained.decision_function(train_X)\n\n\nclass TestErrorMessages(unittest.TestCase):\n def test_wrong_cont(self):\n with self.assertRaises(jsonschema.ValidationError) as cm:\n LogisticRegression(C=-1)\n summary = cm.exception.message.split(\"\\n\")[0]\n self.assertEqual(\n summary,\n \"Invalid configuration for LogisticRegression(C=-1) due to invalid value C=-1.\",\n )\n\n def test_wrong_cat(self):\n with self.assertRaises(jsonschema.ValidationError) as cm:\n LogisticRegression(solver=\"adam\")\n summary = cm.exception.message.split(\"\\n\")[0]\n self.assertEqual(\n summary,\n \"Invalid configuration for LogisticRegression(solver='adam') due to invalid value solver=adam.\",\n )\n\n def test_unknown_arg(self):\n with self.assertRaises(jsonschema.ValidationError) as cm:\n LogisticRegression(activation=\"relu\")\n summary = cm.exception.message.split(\"\\n\")[0]\n self.assertEqual(\n summary,\n \"Invalid configuration for LogisticRegression(activation='relu') due to argument 'activation' was unexpected.\",\n )\n\n def test_constraint(self):\n with self.assertRaises(jsonschema.ValidationError) as cm:\n LogisticRegression(solver=\"sag\", penalty=\"l1\")\n summary = cm.exception.message.split(\"\\n\")[0]\n self.assertEqual(\n summary,\n \"Invalid configuration for 
LogisticRegression(solver='sag', penalty='l1') due to constraint the newton-cg, sag, and lbfgs solvers support only l2 or no penalties.\",\n )\n\n\nclass TestSchemaValidation(unittest.TestCase):\n def test_any(self):\n from lale.type_checking import is_subschema\n\n num_schema = {\"type\": \"number\"}\n any_schema = {\"laleType\": \"Any\"}\n jsonschema.validate(42, num_schema)\n jsonschema.validate(42, any_schema)\n self.assertTrue(is_subschema(num_schema, any_schema))\n self.assertTrue(is_subschema(any_schema, num_schema))\n\n def test_bool_label(self):\n import pandas as pd\n\n data_records = [\n {\n \"IS_TENT\": False,\n \"GENDER\": \"M\",\n \"AGE\": 20,\n \"MARITAL_STATUS\": \"Single\",\n \"PROFESSION\": \"Sales\",\n },\n {\n \"IS_TENT\": False,\n \"GENDER\": \"M\",\n \"AGE\": 20,\n \"MARITAL_STATUS\": \"Single\",\n \"PROFESSION\": \"Sales\",\n },\n {\n \"IS_TENT\": False,\n \"GENDER\": \"F\",\n \"AGE\": 37,\n \"MARITAL_STATUS\": \"Single\",\n \"PROFESSION\": \"Other\",\n },\n {\n \"IS_TENT\": False,\n \"GENDER\": \"M\",\n \"AGE\": 42,\n \"MARITAL_STATUS\": \"Married\",\n \"PROFESSION\": \"Other\",\n },\n {\n \"IS_TENT\": True,\n \"GENDER\": \"F\",\n \"AGE\": 24,\n \"MARITAL_STATUS\": \"Married\",\n \"PROFESSION\": \"Retail\",\n },\n {\n \"IS_TENT\": False,\n \"GENDER\": \"F\",\n \"AGE\": 24,\n \"MARITAL_STATUS\": \"Married\",\n \"PROFESSION\": \"Retail\",\n },\n {\n \"IS_TENT\": False,\n \"GENDER\": \"M\",\n \"AGE\": 29,\n \"MARITAL_STATUS\": \"Single\",\n \"PROFESSION\": \"Retail\",\n },\n {\n \"IS_TENT\": False,\n \"GENDER\": \"M\",\n \"AGE\": 29,\n \"MARITAL_STATUS\": \"Single\",\n \"PROFESSION\": \"Retail\",\n },\n {\n \"IS_TENT\": True,\n \"GENDER\": \"M\",\n \"AGE\": 43,\n \"MARITAL_STATUS\": \"Married\",\n \"PROFESSION\": \"Trades\",\n },\n {\n \"IS_TENT\": False,\n \"GENDER\": \"M\",\n \"AGE\": 43,\n \"MARITAL_STATUS\": \"Married\",\n \"PROFESSION\": \"Trades\",\n },\n ]\n df = pd.DataFrame.from_records(data_records)\n X = df.drop([\"IS_TENT\"], axis=1).values\n y = df[\"IS_TENT\"].values\n from lale.lib.sklearn import GradientBoostingClassifier as Clf\n from lale.lib.sklearn import OneHotEncoder as Enc\n\n trainable = Enc() >> Clf()\n _ = trainable.fit(X, y)\n\n\nclass TestWithScorer(unittest.TestCase):\n def test_bare_array(self):\n import sklearn.datasets\n import sklearn.metrics\n from numpy import ndarray\n\n from lale.datasets.data_schemas import NDArrayWithSchema\n\n X, y = sklearn.datasets.load_iris(return_X_y=True)\n self.assertIsInstance(X, ndarray)\n self.assertIsInstance(y, ndarray)\n self.assertNotIsInstance(X, NDArrayWithSchema)\n self.assertNotIsInstance(y, NDArrayWithSchema)\n trainable = LogisticRegression()\n trained = trainable.fit(X, y)\n scorer = sklearn.metrics.make_scorer(sklearn.metrics.accuracy_score)\n out = scorer(trained, X, y)\n self.assertIsInstance(out, float)\n self.assertNotIsInstance(out, NDArrayWithSchema)\n\n\nclass TestDisablingSchemaValidation(unittest.TestCase):\n def setUp(self):\n from sklearn.datasets import load_iris\n from sklearn.model_selection import train_test_split\n\n data = load_iris()\n X, y = data.data, data.target\n self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y)\n\n def test_disable_schema_validation_individual_op(self):\n existing_flag = disable_data_schema_validation\n set_disable_data_schema_validation(True)\n import lale.schemas as schemas\n from lale.lib.sklearn import PCA\n\n pca_input = schemas.Object(\n X=schemas.AnyOf(\n [\n schemas.Array(schemas.Array(schemas.String())),\n 
schemas.Array(schemas.String()),\n ]\n )\n )\n\n foo = PCA.customize_schema(input_fit=pca_input)\n\n pca_output = schemas.Object(\n X=schemas.AnyOf(\n [\n schemas.Array(schemas.Array(schemas.String())),\n schemas.Array(schemas.String()),\n ]\n )\n )\n\n foo = foo.customize_schema(output_transform=pca_output)\n\n abc = foo()\n trained_pca = abc.fit(self.X_train)\n trained_pca.transform(self.X_test)\n set_disable_data_schema_validation(existing_flag)\n\n def test_enable_schema_validation_individual_op(self):\n existing_flag = disable_data_schema_validation\n set_disable_data_schema_validation(False)\n import lale.schemas as schemas\n from lale.lib.sklearn import PCA\n\n pca_input = schemas.Object(\n X=schemas.AnyOf(\n [\n schemas.Array(schemas.Array(schemas.String())),\n schemas.Array(schemas.String()),\n ]\n )\n )\n\n foo = PCA.customize_schema(input_fit=pca_input)\n\n pca_output = schemas.Object(\n X=schemas.AnyOf(\n [\n schemas.Array(schemas.Array(schemas.String())),\n schemas.Array(schemas.String()),\n ]\n )\n )\n\n foo = foo.customize_schema(output_transform=pca_output)\n\n abc = foo()\n with self.assertRaises(ValueError):\n trained_pca = abc.fit(self.X_train)\n trained_pca.transform(self.X_test)\n set_disable_data_schema_validation(existing_flag)\n\n def test_disable_schema_validation_pipeline(self):\n existing_flag = disable_data_schema_validation\n set_disable_data_schema_validation(True)\n\n import lale.schemas as schemas\n from lale.lib.sklearn import PCA, LogisticRegression\n\n lr_input = schemas.Object(\n required=[\"X\", \"y\"],\n X=schemas.AnyOf(\n [\n schemas.Array(schemas.Array(schemas.String())),\n schemas.Array(schemas.String()),\n ]\n ),\n y=schemas.Array(schemas.String()),\n )\n\n foo = LogisticRegression.customize_schema(input_fit=lr_input)\n abc = foo()\n pipeline = PCA() >> abc\n trained_pipeline = pipeline.fit(self.X_train, self.y_train)\n trained_pipeline.predict(self.X_test)\n set_disable_data_schema_validation(existing_flag)\n\n def test_enable_schema_validation_pipeline(self):\n existing_flag = disable_data_schema_validation\n set_disable_data_schema_validation(False)\n\n import lale.schemas as schemas\n from lale.lib.sklearn import PCA, LogisticRegression\n\n lr_input = schemas.Object(\n required=[\"X\", \"y\"],\n X=schemas.AnyOf(\n [\n schemas.Array(schemas.Array(schemas.String())),\n schemas.Array(schemas.String()),\n ]\n ),\n y=schemas.Array(schemas.String()),\n )\n\n foo = LogisticRegression.customize_schema(input_fit=lr_input)\n abc = foo()\n pipeline = PCA() >> abc\n with self.assertRaises(ValueError):\n trained_pipeline = pipeline.fit(self.X_train, self.y_train)\n trained_pipeline.predict(self.X_test)\n set_disable_data_schema_validation(existing_flag)\n\n def test_disable_enable_hyperparam_validation(self):\n from lale.lib.sklearn import PCA\n\n existing_flag = disable_hyperparams_schema_validation\n set_disable_hyperparams_schema_validation(True)\n PCA(n_components=True)\n set_disable_hyperparams_schema_validation(False)\n with self.assertRaises(jsonschema.exceptions.ValidationError):\n PCA(n_components=True)\n set_disable_hyperparams_schema_validation(existing_flag)\n", "# Copyright 2019 IBM Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on 
an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sklearn\nimport sklearn.linear_model\n\nimport lale.docstrings\nimport lale.operators\n\n\nclass LinearRegressionImpl:\n def __init__(self, **hyperparams):\n self._hyperparams = hyperparams\n self._wrapped_model = sklearn.linear_model.LinearRegression(**self._hyperparams)\n\n def fit(self, X, y, **fit_params):\n self._wrapped_model.fit(X, y, **fit_params)\n return self\n\n def predict(self, X):\n return self._wrapped_model.predict(X)\n\n\n_hyperparams_schema = {\n \"allOf\": [\n {\n \"type\": \"object\",\n \"required\": [\"fit_intercept\", \"normalize\", \"copy_X\"],\n \"relevantToOptimizer\": [\"fit_intercept\", \"normalize\", \"copy_X\"],\n \"additionalProperties\": False,\n \"properties\": {\n \"fit_intercept\": {\n \"type\": \"boolean\",\n \"default\": True,\n \"description\": \"Whether to calculate the intercept for this model.\",\n },\n \"normalize\": {\n \"type\": \"boolean\",\n \"default\": False,\n \"description\": \"If True, the regressors X will be normalized before regression by subtracting the mean and dividing by the l2-norm.\",\n },\n \"copy_X\": {\n \"type\": \"boolean\",\n \"default\": True,\n \"description\": \"If True, X will be copied; else, it may be overwritten.\",\n },\n \"n_jobs\": {\n \"anyOf\": [\n {\n \"description\": \"1 unless in joblib.parallel_backend context.\",\n \"enum\": [None],\n },\n {\"description\": \"Use all processors.\", \"enum\": [-1]},\n {\n \"description\": \"Number of CPU cores.\",\n \"type\": \"integer\",\n \"minimum\": 1,\n },\n ],\n \"default\": None,\n \"description\": \"The number of jobs to run in parallel.\",\n },\n },\n }\n ]\n}\n\n_input_fit_schema = {\n \"type\": \"object\",\n \"required\": [\"X\", \"y\"],\n \"properties\": {\n \"X\": {\n \"description\": \"Features; the outer array is over samples.\",\n \"type\": \"array\",\n \"items\": {\"type\": \"array\", \"items\": {\"type\": \"number\"}},\n },\n \"y\": {\n \"anyOf\": [\n {\n \"type\": \"array\",\n \"items\": {\"type\": \"array\", \"items\": {\"type\": \"number\"}},\n },\n {\"type\": \"array\", \"items\": {\"type\": \"number\"}},\n ],\n \"description\": \"Target values. Will be cast to X's dtype if necessary\",\n },\n \"sample_weight\": {\n \"anyOf\": [\n {\"type\": \"array\", \"items\": {\"type\": \"number\"}},\n {\"enum\": [None], \"description\": \"Samples are equally weighted.\"},\n ],\n \"description\": \"Sample weights.\",\n },\n },\n}\n\n_input_predict_schema = {\n \"type\": \"object\",\n \"properties\": {\n \"X\": {\n \"type\": \"array\",\n \"items\": {\"type\": \"array\", \"items\": {\"type\": \"number\"}},\n \"description\": \"Samples.\",\n }\n },\n}\n\n_output_predict_schema = {\n \"description\": \"Returns predicted values.\",\n \"anyOf\": [\n {\"type\": \"array\", \"items\": {\"type\": \"array\", \"items\": {\"type\": \"number\"}}},\n {\"type\": \"array\", \"items\": {\"type\": \"number\"}},\n ],\n}\n\n_combined_schemas = {\n \"$schema\": \"http://json-schema.org/draft-04/schema#\",\n \"description\": \"\"\"`Linear regression`_ linear model from scikit-learn for classification.\n\n.. 
_`Linear regression`: https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html\n\"\"\",\n \"documentation_url\": \"https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.linear_regression.html\",\n \"import_from\": \"sklearn.linear_model\",\n \"type\": \"object\",\n \"tags\": {\"pre\": [], \"op\": [\"estimator\", \"regressor\"], \"post\": []},\n \"properties\": {\n \"hyperparams\": _hyperparams_schema,\n \"input_fit\": _input_fit_schema,\n \"input_predict\": _input_predict_schema,\n \"output_predict\": _output_predict_schema,\n },\n}\n\nLinearRegression: lale.operators.PlannedIndividualOp\nLinearRegression = lale.operators.make_operator(LinearRegressionImpl, _combined_schemas)\n\nif sklearn.__version__ >= \"0.24\":\n # old: https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.LinearRegression.html\n # new: https://scikit-learn.org/0.24/modules/generated/sklearn.linear_model.LinearRegression.html\n from lale.schemas import JSON, Bool\n\n LinearRegression = LinearRegression.customize_schema(\n positive=Bool(\n desc=\"When set to True, forces the coefficients to be positive.\",\n default=False,\n forOptimizer=False,\n )\n )\n LinearRegression = LinearRegression.customize_schema(\n constraint=JSON(\n {\n \"description\": \"Setting positive=True is only supported for dense arrays.\",\n \"anyOf\": [\n {\"type\": \"object\", \"properties\": {\"positive\": {\"enum\": [False]}}},\n {\"type\": \"object\", \"laleNot\": \"X/isSparse\"},\n ],\n }\n )\n )\n\nlale.docstrings.set_docstrings(LinearRegressionImpl, LinearRegression._schemas)\n", "from numpy import inf, nan\nfrom sklearn.ensemble import RandomForestRegressor as Op\n\nfrom lale.docstrings import set_docstrings\nfrom lale.operators import make_operator\n\n\nclass RandomForestRegressorImpl:\n def __init__(self, **hyperparams):\n self._hyperparams = hyperparams\n self._wrapped_model = Op(**self._hyperparams)\n\n def fit(self, X, y=None):\n if y is not None:\n self._wrapped_model.fit(X, y)\n else:\n self._wrapped_model.fit(X)\n return self\n\n def predict(self, X):\n return self._wrapped_model.predict(X)\n\n\n_hyperparams_schema = {\n \"$schema\": \"http://json-schema.org/draft-04/schema#\",\n \"description\": \"inherited docstring for RandomForestRegressor A random forest regressor.\",\n \"allOf\": [\n {\n \"type\": \"object\",\n \"required\": [\n \"n_estimators\",\n \"criterion\",\n \"max_depth\",\n \"min_samples_split\",\n \"min_samples_leaf\",\n \"min_weight_fraction_leaf\",\n \"max_features\",\n \"max_leaf_nodes\",\n \"min_impurity_decrease\",\n \"min_impurity_split\",\n \"bootstrap\",\n \"oob_score\",\n \"n_jobs\",\n \"random_state\",\n \"verbose\",\n \"warm_start\",\n ],\n \"relevantToOptimizer\": [\n \"n_estimators\",\n \"criterion\",\n \"max_depth\",\n \"min_samples_split\",\n \"min_samples_leaf\",\n \"max_features\",\n \"bootstrap\",\n ],\n \"additionalProperties\": False,\n \"properties\": {\n \"n_estimators\": {\n \"type\": \"integer\",\n \"minimumForOptimizer\": 10,\n \"maximumForOptimizer\": 100,\n \"distribution\": \"uniform\",\n \"default\": 10,\n \"description\": \"The number of trees in the forest\",\n },\n \"criterion\": {\n \"enum\": [\"friedman_mse\", \"mse\"],\n \"default\": \"mse\",\n \"description\": \"The function to measure the quality of a split\",\n },\n \"max_depth\": {\n \"anyOf\": [\n {\n \"type\": \"integer\",\n \"minimumForOptimizer\": 3,\n \"maximumForOptimizer\": 5,\n \"distribution\": \"uniform\",\n },\n {\"enum\": [None]},\n ],\n \"default\": None,\n 
\"description\": \"The maximum depth of the tree\",\n },\n \"min_samples_split\": {\n \"anyOf\": [\n {\n \"type\": \"integer\",\n \"minimumForOptimizer\": 2,\n \"maximumForOptimizer\": 5,\n \"distribution\": \"uniform\",\n },\n {\n \"type\": \"number\",\n \"minimumForOptimizer\": 2,\n \"maximumForOptimizer\": 5,\n \"distribution\": \"uniform\",\n },\n ],\n \"default\": 2,\n \"description\": \"The minimum number of samples required to split an internal node: - If int, then consider `min_samples_split` as the minimum number\",\n },\n \"min_samples_leaf\": {\n \"anyOf\": [\n {\n \"type\": \"integer\",\n \"minimumForOptimizer\": 1,\n \"maximumForOptimizer\": 5,\n \"distribution\": \"uniform\",\n },\n {\n \"type\": \"number\",\n \"minimumForOptimizer\": 1,\n \"maximumForOptimizer\": 5,\n \"distribution\": \"uniform\",\n },\n ],\n \"default\": 1,\n \"description\": \"The minimum number of samples required to be at a leaf node\",\n },\n \"min_weight_fraction_leaf\": {\n \"type\": \"number\",\n \"default\": 0.0,\n \"description\": \"The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node\",\n },\n \"max_features\": {\n \"anyOf\": [\n {\"type\": \"integer\", \"forOptimizer\": False},\n {\n \"type\": \"number\",\n \"minimumForOptimizer\": 0.01,\n \"maximumForOptimizer\": 1.0,\n \"distribution\": \"uniform\",\n },\n {\"type\": \"string\", \"forOptimizer\": False},\n {\"enum\": [None]},\n ],\n \"default\": \"auto\",\n \"description\": \"The number of features to consider when looking for the best split: - If int, then consider `max_features` features at each split\",\n },\n \"max_leaf_nodes\": {\n \"anyOf\": [{\"type\": \"integer\"}, {\"enum\": [None]}],\n \"default\": None,\n \"description\": \"Grow trees with ``max_leaf_nodes`` in best-first fashion\",\n },\n \"min_impurity_decrease\": {\n \"type\": \"number\",\n \"default\": 0.0,\n \"description\": \"A node will be split if this split induces a decrease of the impurity greater than or equal to this value\",\n },\n \"min_impurity_split\": {\n \"anyOf\": [{\"type\": \"number\"}, {\"enum\": [None]}],\n \"default\": None,\n \"description\": \"Threshold for early stopping in tree growth\",\n },\n \"bootstrap\": {\n \"type\": \"boolean\",\n \"default\": True,\n \"description\": \"Whether bootstrap samples are used when building trees\",\n },\n \"oob_score\": {\n \"type\": \"boolean\",\n \"default\": False,\n \"description\": \"whether to use out-of-bag samples to estimate the R^2 on unseen data.\",\n },\n \"n_jobs\": {\n \"anyOf\": [{\"type\": \"integer\"}, {\"enum\": [None]}],\n \"default\": 4,\n \"description\": \"The number of jobs to run in parallel for both `fit` and `predict`\",\n },\n \"random_state\": {\n \"anyOf\": [\n {\"type\": \"integer\"},\n {\"laleType\": \"numpy.random.RandomState\"},\n {\"enum\": [None]},\n ],\n \"default\": None,\n \"description\": \"If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`.\",\n },\n \"verbose\": {\n \"type\": \"integer\",\n \"default\": 0,\n \"description\": \"Controls the verbosity when fitting and predicting.\",\n },\n \"warm_start\": {\n \"type\": \"boolean\",\n \"default\": False,\n \"description\": \"When set to ``True``, reuse the solution of the previous call to fit and add more estimators to the ensemble, otherwise, just fit a whole new forest\",\n },\n },\n },\n {\n \"XXX 
TODO XXX\": \"Parameter: min_samples_leaf > only be considered if it leaves at least min_samples_leaf training samples in each of the left and right branches\"\n },\n ],\n}\n_input_fit_schema = {\n \"$schema\": \"http://json-schema.org/draft-04/schema#\",\n \"description\": \"Build a forest of trees from the training set (X, y).\",\n \"type\": \"object\",\n \"required\": [\"X\", \"y\"],\n \"properties\": {\n \"X\": {\n \"anyOf\": [\n {\n \"type\": \"array\",\n \"items\": {\"laleType\": \"Any\", \"XXX TODO XXX\": \"item type\"},\n \"XXX TODO XXX\": \"array-like or sparse matrix of shape = [n_samples, n_features]\",\n },\n {\n \"type\": \"array\",\n \"items\": {\"type\": \"array\", \"items\": {\"type\": \"number\"}},\n },\n ],\n \"description\": \"The training input samples\",\n },\n \"y\": {\n \"anyOf\": [\n {\"type\": \"array\", \"items\": {\"type\": \"number\"}},\n {\n \"type\": \"array\",\n \"items\": {\"type\": \"array\", \"items\": {\"type\": \"number\"}},\n },\n ],\n \"description\": \"The target values (class labels in classification, real numbers in regression).\",\n },\n \"sample_weight\": {\n \"anyOf\": [{\"type\": \"array\", \"items\": {\"type\": \"number\"}}, {\"enum\": [None]}],\n \"description\": \"Sample weights\",\n },\n },\n}\n_input_predict_schema = {\n \"$schema\": \"http://json-schema.org/draft-04/schema#\",\n \"description\": \"Predict regression target for X.\",\n \"type\": \"object\",\n \"required\": [\"X\"],\n \"properties\": {\n \"X\": {\n \"anyOf\": [\n {\n \"type\": \"array\",\n \"items\": {\"laleType\": \"Any\", \"XXX TODO XXX\": \"item type\"},\n \"XXX TODO XXX\": \"array-like or sparse matrix of shape = [n_samples, n_features]\",\n },\n {\n \"type\": \"array\",\n \"items\": {\"type\": \"array\", \"items\": {\"type\": \"number\"}},\n },\n ],\n \"description\": \"The input samples\",\n }\n },\n}\n_output_predict_schema = {\n \"$schema\": \"http://json-schema.org/draft-04/schema#\",\n \"description\": \"The predicted values.\",\n \"anyOf\": [\n {\"type\": \"array\", \"items\": {\"type\": \"number\"}},\n {\"type\": \"array\", \"items\": {\"type\": \"array\", \"items\": {\"type\": \"number\"}}},\n ],\n}\n_combined_schemas = {\n \"$schema\": \"http://json-schema.org/draft-04/schema#\",\n \"description\": \"Combined schema for expected data and hyperparameters.\",\n \"documentation_url\": \"https://scikit-learn.org/0.20/modules/generated/sklearn.ensemble.RandomForestRegressor#sklearn-ensemble-randomforestregressor\",\n \"import_from\": \"sklearn.ensemble\",\n \"type\": \"object\",\n \"tags\": {\"pre\": [], \"op\": [\"estimator\", \"regressor\"], \"post\": []},\n \"properties\": {\n \"hyperparams\": _hyperparams_schema,\n \"input_fit\": _input_fit_schema,\n \"input_predict\": _input_predict_schema,\n \"output_predict\": _output_predict_schema,\n },\n}\nset_docstrings(RandomForestRegressorImpl, _combined_schemas)\nRandomForestRegressor = make_operator(RandomForestRegressorImpl, _combined_schemas)\n" ]
[ [ "sklearn.preprocessing.MaxAbsScaler" ], [ "sklearn.ensemble.GradientBoostingClassifier" ], [ "pandas.DataFrame.from_records", "sklearn.datasets.load_iris", "sklearn.model_selection.train_test_split" ], [ "sklearn.linear_model.LinearRegression" ], [ "sklearn.ensemble.RandomForestRegressor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
FredrikM97/Medical-ROI
[ "54246341460c04caeced2ef6dcab984f6c260c9d" ]
[ "src/models/resnet_brew2.py" ]
[ "import torch\nimport torch.nn as nn\n\nmodel_urls = {\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\n 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\n 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',\n 'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',\n 'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',\n 'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',\n 'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',\n}\n\n\ndef conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n \"\"\"3x3 convolution with padding\n\n Args:\n in_planes: \n out_planes: \n stride: (Default value = 1)\n groups: (Default value = 1)\n dilation: (Default value = 1)\n\n Returns:\n\n Raises:\n\n \"\"\"\n return nn.Conv3d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, groups=groups, bias=False, dilation=dilation)\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution\n\n Args:\n in_planes: \n out_planes: \n stride: (Default value = 1)\n\n Returns:\n\n Raises:\n\n \"\"\"\n return nn.Conv3d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\n\nclass BasicBlock(nn.Module):\n \"\"\" \"\"\"\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,\n base_width=64, dilation=1, norm_layer=None):\n \"\"\"\n\n Parameters\n ----------\n inplanes :\n \n planes :\n \n stride :\n (Default value = 1)\n downsample :\n (Default value = None)\n groups :\n (Default value = 1)\n base_width :\n (Default value = 64)\n dilation :\n (Default value = 1)\n norm_layer :\n (Default value = None)\n\n Returns\n -------\n\n \n \"\"\"\n super(BasicBlock, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm3d\n if groups != 1 or base_width != 64:\n raise ValueError('BasicBlock only supports groups=1 and base_width=64')\n if dilation > 1:\n raise NotImplementedError(\"Dilation > 1 not supported in BasicBlock\")\n # Both self.conv1 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = norm_layer(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = norm_layer(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n \"\"\"\n\n Args:\n x: \n\n Returns:\n\n Raises:\n\n \"\"\"\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n \"\"\" \"\"\"\n # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)\n # while original implementation places the stride at the first 1x1 convolution(self.conv1)\n # according to \"Deep residual learning for image recognition\"https://arxiv.org/abs/1512.03385.\n # This variant is also known as ResNet V1.5 and improves accuracy according to\n # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.\n\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, 
groups=1,\n base_width=64, dilation=1, norm_layer=None):\n \"\"\"\n\n Parameters\n ----------\n inplanes :\n \n planes :\n \n stride :\n (Default value = 1)\n downsample :\n (Default value = None)\n groups :\n (Default value = 1)\n base_width :\n (Default value = 64)\n dilation :\n (Default value = 1)\n norm_layer :\n (Default value = None)\n\n Returns\n -------\n\n \n \"\"\"\n super(Bottleneck, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm3d\n width = int(planes * (base_width / 64.)) * groups\n # Both self.conv2 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv1x1(inplanes, width)\n self.bn1 = norm_layer(width)\n self.conv2 = conv3x3(width, width, stride, groups, dilation)\n self.bn2 = norm_layer(width)\n self.conv3 = conv1x1(width, planes * self.expansion)\n self.bn3 = norm_layer(planes * self.expansion)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n \"\"\"\n\n Args:\n x: \n\n Returns:\n\n Raises:\n\n \"\"\"\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n \"\"\" \"\"\"\n\n def __init__(self, block, layers, num_channels=1,num_classes=3, zero_init_residual=False,\n groups=1, width_per_group=64, replace_stride_with_dilation=None,\n norm_layer=None, **kwargs): #, **kwargs is just a dummy to take whatever we want!\n super(ResNet, self).__init__()\n if norm_layer is None:\n \n norm_layer = nn.BatchNorm3d\n self._norm_layer = norm_layer\n\n self.inplanes = 32\n self.dilation = 1\n if replace_stride_with_dilation is None:\n # each element in the tuple indicates if we should replace\n # the 2x2 stride with a dilated convolution instead\n replace_stride_with_dilation = [False, False, False]\n if len(replace_stride_with_dilation) != 3:\n raise ValueError(\"replace_stride_with_dilation should be None \"\n \"or a 3-element tuple, got {}\".format(replace_stride_with_dilation))\n self.groups = groups\n self.base_width = width_per_group\n self.conv1 = nn.Conv3d(num_channels, self.inplanes, kernel_size=3, stride=1, padding=1,\n bias=False)\n self.bn1 = norm_layer(self.inplanes)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool3d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 32, layers[0])\n self.layer2 = self._make_layer(block, 64, layers[1], stride=2,\n dilate=replace_stride_with_dilation[0])\n self.layer3 = self._make_layer(block, 128, layers[2], stride=2,\n dilate=replace_stride_with_dilation[1])\n self.layer4 = self._make_layer(block, 256, layers[3])\n self.avgpool = nn.AdaptiveAvgPool3d((1, 1, 1))\n \n self.fc = nn.Linear(256 * block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv3d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm3d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n # Zero-initialize the last BN in each residual branch,\n # so that the residual branch starts with zeros, and each residual block behaves like an identity.\n # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677\n if zero_init_residual:\n for m in self.modules():\n if isinstance(m, 
Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n\n def _make_layer(self, block, planes, blocks, stride=1, dilate=False):\n \"\"\"\n\n Args:\n block: \n planes: \n blocks: \n stride: (Default value = 1)\n dilate: (Default value = False)\n\n Returns:\n\n Raises:\n\n \"\"\"\n norm_layer = self._norm_layer\n downsample = None\n previous_dilation = self.dilation\n if dilate:\n self.dilation *= stride\n stride = 1\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n conv1x1(self.inplanes, planes * block.expansion, stride),\n norm_layer(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, self.groups,\n self.base_width, previous_dilation, norm_layer))\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes, groups=self.groups,\n base_width=self.base_width, dilation=self.dilation,\n norm_layer=norm_layer))\n\n return nn.Sequential(*layers)\n\n def _forward_impl(self, x):\n \"\"\"\n\n Args:\n x: \n\n Returns:\n\n Raises:\n\n \"\"\"\n # See note [TorchScript super()]\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n #print(x.shape)\n\n x = self.avgpool(x)\n x = torch.flatten(x, 1)\n x = self.fc(x)\n\n return x\n\n def forward(self, x):\n \"\"\"\n\n Args:\n x: \n\n Returns:\n\n Raises:\n\n \"\"\"\n return self._forward_impl(x)\n\n\ndef _resnet(arch, block, layers, pretrained, progress, **kwargs):\n \"\"\"\n\n Args:\n arch: \n block: \n layers: \n pretrained: \n progress: \n **kwargs: \n\n Returns:\n\n Raises:\n\n \"\"\"\n model = ResNet(block, layers, **kwargs)\n\n return model\n\n\ndef resnet18_brew2(pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNet-18 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n\n Args:\n pretrained(bool, optional): If True, returns a model pre-trained on ImageNet (Default value = False)\n progress(bool, optional): If True, displays a progress bar of the download to stderr (Default value = True)\n **kwargs: \n\n Returns:\n\n Raises:\n\n \"\"\"\n return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,\n **kwargs)\n\n\n\ndef resnet34_brew2(pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNet-34 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n\n Args:\n pretrained(bool, optional): If True, returns a model pre-trained on ImageNet (Default value = False)\n progress(bool, optional): If True, displays a progress bar of the download to stderr (Default value = True)\n **kwargs: \n\n Returns:\n\n Raises:\n\n \"\"\"\n return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,\n **kwargs)\n\n\n\ndef resnet50_brew2(pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNet-50 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n\n Args:\n pretrained(bool, optional): If True, returns a model pre-trained on ImageNet (Default value = False)\n progress(bool, optional): If True, displays a progress bar of the download to stderr (Default value = True)\n **kwargs: \n\n Returns:\n\n Raises:\n\n \"\"\"\n return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress, #2,4,2\n **kwargs)\n\n\ndef resnet101_brew2(pretrained=False, 
progress=True, **kwargs):\n r\"\"\"ResNet-101 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n\n Args:\n pretrained(bool, optional): If True, returns a model pre-trained on ImageNet (Default value = False)\n progress(bool, optional): If True, displays a progress bar of the download to stderr (Default value = True)\n **kwargs: \n\n Returns:\n\n Raises:\n\n \"\"\"\n return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,\n **kwargs)\n\n\n\ndef resnet152_brew2(pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNet-152 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_\n\n Args:\n pretrained(bool, optional): If True, returns a model pre-trained on ImageNet (Default value = False)\n progress(bool, optional): If True, displays a progress bar of the download to stderr (Default value = True)\n **kwargs: \n\n Returns:\n\n Raises:\n\n \"\"\"\n return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,\n **kwargs)\n\n\n\ndef resnext50_32x4d_brew2(pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNeXt-50 32x4d model from\n `\"Aggregated Residual Transformation for Deep Neural Networks\" <https://arxiv.org/pdf/1611.05431.pdf>`_\n\n Args:\n pretrained(bool, optional): If True, returns a model pre-trained on ImageNet (Default value = False)\n progress(bool, optional): If True, displays a progress bar of the download to stderr (Default value = True)\n **kwargs: \n\n Returns:\n\n Raises:\n\n \"\"\"\n kwargs['groups'] = 32\n kwargs['width_per_group'] = 4\n return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],\n pretrained, progress, **kwargs)\n\n\n\ndef resnext101_32x8d_brew2(pretrained=False, progress=True, **kwargs):\n r\"\"\"ResNeXt-101 32x8d model from\n `\"Aggregated Residual Transformation for Deep Neural Networks\" <https://arxiv.org/pdf/1611.05431.pdf>`_\n\n Args:\n pretrained(bool, optional): If True, returns a model pre-trained on ImageNet (Default value = False)\n progress(bool, optional): If True, displays a progress bar of the download to stderr (Default value = True)\n **kwargs: \n\n Returns:\n\n Raises:\n\n \"\"\"\n kwargs['groups'] = 32\n kwargs['width_per_group'] = 8\n return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],\n pretrained, progress, **kwargs)\n\n\n\ndef wide_resnet50_2_brew2(pretrained=False, progress=True, **kwargs):\n r\"\"\"Wide ResNet-50-2 model from\n `\"Wide Residual Networks\" <https://arxiv.org/pdf/1605.07146.pdf>`_\n \n The model is the same as ResNet except for the bottleneck number of channels\n which is twice larger in every block. The number of channels in outer 1x1\n convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048\n channels, and in Wide ResNet-50-2 has 2048-1024-2048.\n\n Args:\n pretrained(bool, optional): If True, returns a model pre-trained on ImageNet (Default value = False)\n progress(bool, optional): If True, displays a progress bar of the download to stderr (Default value = True)\n **kwargs: \n\n Returns:\n\n Raises:\n\n \"\"\"\n kwargs['width_per_group'] = 64 * 2\n return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],\n pretrained, progress, **kwargs)\n\n\n\ndef wide_resnet101_2_brew2(pretrained=False, progress=True, **kwargs):\n r\"\"\"Wide ResNet-101-2 model from\n `\"Wide Residual Networks\" <https://arxiv.org/pdf/1605.07146.pdf>`_\n \n The model is the same as ResNet except for the bottleneck number of channels\n which is twice larger in every block. 
The number of channels in outer 1x1\n convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048\n channels, and in Wide ResNet-50-2 has 2048-1024-2048.\n\n Args:\n pretrained(bool, optional): If True, returns a model pre-trained on ImageNet (Default value = False)\n progress(bool, optional): If True, displays a progress bar of the download to stderr (Default value = True)\n **kwargs: \n\n Returns:\n\n Raises:\n\n \"\"\"\n kwargs['width_per_group'] = 64 * 2\n return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],\n pretrained, progress, **kwargs)\n" ]
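Despite the `resnet*` names, every factory archived above builds a volumetric network: `conv3x3`/`conv1x1` wrap `nn.Conv3d`, normalization defaults to `nn.BatchNorm3d`, and the stem expects 5-D input of shape `(N, C, D, H, W)` with `num_channels=1` and `num_classes=3` by default. Note also that `_resnet` never consults `model_urls`, so the `pretrained` and `progress` flags are accepted but ignored. A minimal forward-pass smoke test, assuming (hypothetically) that the file above is saved locally as `resnet3d.py`:

# Smoke test for the 3-D ResNet factories above.
# Assumption: the archived module is importable as `resnet3d` (hypothetical name).
import torch
from resnet3d import resnet18_brew2, resnet50_brew2

model = resnet18_brew2(num_channels=1, num_classes=3)
model.eval()

# One single-channel 32x32x32 volume: (batch, channels, depth, height, width).
x = torch.randn(1, 1, 32, 32, 32)
with torch.no_grad():
    logits = model(x)
print(logits.shape)  # torch.Size([1, 3]) -- AdaptiveAvgPool3d makes this size-agnostic

# Bottleneck variant; extra kwargs such as groups/width_per_group flow through **kwargs.
model50 = resnet50_brew2(num_classes=3)
print(sum(p.numel() for p in model50.parameters()))  # rough capacity check

Because `self.avgpool` is an `nn.AdaptiveAvgPool3d((1, 1, 1))`, any input volume large enough to survive the strided stages works; 32 cubed is just a convenient small size for a sanity check.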
[ [ "torch.nn.Sequential", "torch.nn.init.constant_", "torch.nn.AdaptiveAvgPool3d", "torch.nn.MaxPool3d", "torch.nn.Conv3d", "torch.nn.Linear", "torch.flatten", "torch.nn.ReLU", "torch.nn.init.kaiming_normal_" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gepcel/spyder
[ "a7449407a5cac27a24419316b0c42f6737608b16" ]
[ "spyder/plugins/variableexplorer/widgets/collectionseditor.py" ]
[ "# -*- coding: utf-8 -*-\r\n# -----------------------------------------------------------------------------\r\n# Copyright © Spyder Project Contributors\r\n#\r\n# Licensed under the terms of the MIT License\r\n# (see spyder/__init__.py for details)\r\n# ----------------------------------------------------------------------------\r\n\r\n\"\"\"\r\nCollections (i.e. dictionary, list, set and tuple) editor widget and dialog.\r\n\"\"\"\r\n\r\n#TODO: Multiple selection: open as many editors (array/dict/...) as necessary,\r\n# at the same time\r\n\r\n# pylint: disable=C0103\r\n# pylint: disable=R0903\r\n# pylint: disable=R0911\r\n# pylint: disable=R0201\r\n\r\n# Standard library imports\r\nfrom __future__ import print_function\r\nimport datetime\r\nimport gc\r\nimport sys\r\nimport warnings\r\n\r\n# Third party imports\r\nimport cloudpickle\r\nfrom qtpy.compat import getsavefilename, to_qvariant\r\nfrom qtpy.QtCore import (QAbstractTableModel, QDateTime, QModelIndex, Qt,\r\n Signal, Slot)\r\nfrom qtpy.QtGui import QColor, QKeySequence\r\nfrom qtpy.QtWidgets import (QAbstractItemDelegate, QApplication, QDateEdit,\r\n QDateTimeEdit, QDialog, QHBoxLayout, QHeaderView,\r\n QInputDialog, QItemDelegate, QLineEdit, QMenu,\r\n QMessageBox, QPushButton, QTableView,\r\n QVBoxLayout, QWidget)\r\nfrom spyder_kernels.utils.misc import fix_reference_name\r\nfrom spyder_kernels.utils.nsview import (\r\n array, DataFrame, Index, display_to_value, FakeObject,\r\n get_color_name, get_human_readable_type, get_size, Image, is_editable_type,\r\n is_known_type, MaskedArray, ndarray, np_savetxt, Series, sort_against,\r\n try_to_eval, unsorted_unique, value_to_display, get_object_attrs,\r\n get_type_string)\r\n\r\n# Local imports\r\nfrom spyder.config.base import _, PICKLE_PROTOCOL\r\nfrom spyder.config.fonts import DEFAULT_SMALL_DELTA\r\nfrom spyder.config.gui import get_font\r\nfrom spyder.py3compat import (io, is_binary_string, is_text_string,\r\n PY3, to_text_string, is_type_text_string)\r\nfrom spyder.utils import icon_manager as ima\r\nfrom spyder.utils.misc import getcwd_or_home\r\nfrom spyder.utils.qthelpers import (add_actions, create_action,\r\n mimedata2url)\r\nfrom spyder.plugins.variableexplorer.widgets.importwizard import ImportWizard\r\nfrom spyder.plugins.variableexplorer.widgets.texteditor import TextEditor\r\n\r\nif ndarray is not FakeObject:\r\n from spyder.plugins.variableexplorer.widgets.arrayeditor import (\r\n ArrayEditor)\r\n\r\nif DataFrame is not FakeObject:\r\n from spyder.plugins.variableexplorer.widgets.dataframeeditor import (\r\n DataFrameEditor)\r\n\r\n\r\n# Maximum length of a serialized variable to be set in the kernel\r\nMAX_SERIALIZED_LENGHT = 1e6\r\n\r\nLARGE_NROWS = 100\r\nROWS_TO_LOAD = 50\r\n\r\n\r\nclass ProxyObject(object):\r\n \"\"\"Dictionary proxy to an unknown object.\"\"\"\r\n\r\n def __init__(self, obj):\r\n \"\"\"Constructor.\"\"\"\r\n self.__obj__ = obj\r\n\r\n def __len__(self):\r\n \"\"\"Get len according to detected attributes.\"\"\"\r\n return len(get_object_attrs(self.__obj__))\r\n\r\n def __getitem__(self, key):\r\n \"\"\"Get the attribute corresponding to the given key.\"\"\"\r\n # Catch NotImplementedError to fix #6284 in pandas MultiIndex\r\n # due to NA checking not being supported on a multiindex.\r\n # Catch AttributeError to fix #5642 in certain special classes like xml\r\n # when this method is called on certain attributes.\r\n # Catch TypeError to prevent fatal Python crash to desktop after\r\n # modifying certain pandas objects. 
Fix issue #6727 .\r\n # Catch ValueError to allow viewing and editing of pandas offsets.\r\n # Fix issue #6728 .\r\n try:\r\n attribute_toreturn = getattr(self.__obj__, key)\r\n except (NotImplementedError, AttributeError, TypeError, ValueError):\r\n attribute_toreturn = None\r\n return attribute_toreturn\r\n\r\n def __setitem__(self, key, value):\r\n \"\"\"Set attribute corresponding to key with value.\"\"\"\r\n # Catch AttributeError to gracefully handle inability to set an\r\n # attribute due to it not being writeable or set-table.\r\n # Fix issue #6728 . Also, catch NotImplementedError for safety.\r\n try:\r\n setattr(self.__obj__, key, value)\r\n except (TypeError, AttributeError, NotImplementedError):\r\n pass\r\n except Exception as e:\r\n if \"cannot set values for\" not in str(e):\r\n raise\r\n\r\n\r\nclass ReadOnlyCollectionsModel(QAbstractTableModel):\r\n \"\"\"CollectionsEditor Read-Only Table Model\"\"\"\r\n\r\n sig_setting_data = Signal()\r\n\r\n def __init__(self, parent, data, title=\"\", names=False,\r\n minmax=False, dataframe_format=None, remote=False):\r\n QAbstractTableModel.__init__(self, parent)\r\n if data is None:\r\n data = {}\r\n self.names = names\r\n self.minmax = minmax\r\n self.dataframe_format = dataframe_format\r\n self.remote = remote\r\n self.header0 = None\r\n self._data = None\r\n self.total_rows = None\r\n self.showndata = None\r\n self.keys = None\r\n self.title = to_text_string(title) # in case title is not a string\r\n if self.title:\r\n self.title = self.title + ' - '\r\n self.sizes = []\r\n self.types = []\r\n self.set_data(data)\r\n \r\n def get_data(self):\r\n \"\"\"Return model data\"\"\"\r\n return self._data\r\n\r\n def set_data(self, data, coll_filter=None):\r\n \"\"\"Set model data\"\"\"\r\n self._data = data\r\n data_type = get_type_string(data)\r\n\r\n if coll_filter is not None and not self.remote and \\\r\n isinstance(data, (tuple, list, dict, set)):\r\n data = coll_filter(data)\r\n self.showndata = data\r\n\r\n self.header0 = _(\"Index\")\r\n if self.names:\r\n self.header0 = _(\"Name\")\r\n if isinstance(data, tuple):\r\n self.keys = list(range(len(data)))\r\n self.title += _(\"Tuple\")\r\n elif isinstance(data, list):\r\n self.keys = list(range(len(data)))\r\n self.title += _(\"List\")\r\n elif isinstance(data, set):\r\n self.keys = list(range(len(data)))\r\n self.title += _(\"Set\")\r\n self._data = list(data)\r\n elif isinstance(data, dict):\r\n self.keys = list(data.keys())\r\n self.title += _(\"Dictionary\")\r\n if not self.names:\r\n self.header0 = _(\"Key\")\r\n else:\r\n self.keys = get_object_attrs(data)\r\n self._data = data = self.showndata = ProxyObject(data)\r\n if not self.names:\r\n self.header0 = _(\"Attribute\")\r\n\r\n if not isinstance(self._data, ProxyObject):\r\n self.title += (' (' + str(len(self.keys)) + ' ' +\r\n _(\"elements\") + ')')\r\n else:\r\n self.title += data_type\r\n\r\n self.total_rows = len(self.keys)\r\n if self.total_rows > LARGE_NROWS:\r\n self.rows_loaded = ROWS_TO_LOAD\r\n else:\r\n self.rows_loaded = self.total_rows\r\n self.sig_setting_data.emit()\r\n self.set_size_and_type()\r\n self.reset()\r\n\r\n def set_size_and_type(self, start=None, stop=None):\r\n data = self._data\r\n \r\n if start is None and stop is None:\r\n start = 0\r\n stop = self.rows_loaded\r\n fetch_more = False\r\n else:\r\n fetch_more = True\r\n\r\n # Ignore pandas warnings that certain attributes are deprecated\r\n # and will be removed, since they will only be accessed if they exist.\r\n with 
warnings.catch_warnings():\r\n warnings.filterwarnings(\r\n \"ignore\", message=(r\"^\\w+\\.\\w+ is deprecated and \"\r\n \"will be removed in a future version\"))\r\n if self.remote:\r\n sizes = [data[self.keys[index]]['size']\r\n for index in range(start, stop)]\r\n types = [data[self.keys[index]]['type']\r\n for index in range(start, stop)]\r\n else:\r\n sizes = [get_size(data[self.keys[index]])\r\n for index in range(start, stop)]\r\n types = [get_human_readable_type(data[self.keys[index]])\r\n for index in range(start, stop)]\r\n\r\n if fetch_more:\r\n self.sizes = self.sizes + sizes\r\n self.types = self.types + types\r\n else:\r\n self.sizes = sizes\r\n self.types = types\r\n\r\n def sort(self, column, order=Qt.AscendingOrder):\r\n \"\"\"Overriding sort method\"\"\"\r\n reverse = (order==Qt.DescendingOrder)\r\n if column == 0:\r\n self.sizes = sort_against(self.sizes, self.keys, reverse)\r\n self.types = sort_against(self.types, self.keys, reverse)\r\n try:\r\n self.keys.sort(reverse=reverse)\r\n except:\r\n pass\r\n elif column == 1:\r\n self.keys[:self.rows_loaded] = sort_against(self.keys, self.types,\r\n reverse)\r\n self.sizes = sort_against(self.sizes, self.types, reverse)\r\n try:\r\n self.types.sort(reverse=reverse)\r\n except:\r\n pass\r\n elif column == 2:\r\n self.keys[:self.rows_loaded] = sort_against(self.keys, self.sizes,\r\n reverse)\r\n self.types = sort_against(self.types, self.sizes, reverse)\r\n try:\r\n self.sizes.sort(reverse=reverse)\r\n except:\r\n pass\r\n elif column == 3:\r\n values = [self._data[key] for key in self.keys]\r\n self.keys = sort_against(self.keys, values, reverse)\r\n self.sizes = sort_against(self.sizes, values, reverse)\r\n self.types = sort_against(self.types, values, reverse)\r\n self.beginResetModel()\r\n self.endResetModel()\r\n\r\n def columnCount(self, qindex=QModelIndex()):\r\n \"\"\"Array column number\"\"\"\r\n return 4\r\n\r\n def rowCount(self, index=QModelIndex()):\r\n \"\"\"Array row number\"\"\"\r\n if self.total_rows <= self.rows_loaded:\r\n return self.total_rows\r\n else:\r\n return self.rows_loaded\r\n \r\n def canFetchMore(self, index=QModelIndex()):\r\n if self.total_rows > self.rows_loaded:\r\n return True\r\n else:\r\n return False\r\n \r\n def fetchMore(self, index=QModelIndex()):\r\n reminder = self.total_rows - self.rows_loaded\r\n items_to_fetch = min(reminder, ROWS_TO_LOAD)\r\n self.set_size_and_type(self.rows_loaded,\r\n self.rows_loaded + items_to_fetch)\r\n self.beginInsertRows(QModelIndex(), self.rows_loaded,\r\n self.rows_loaded + items_to_fetch - 1)\r\n self.rows_loaded += items_to_fetch\r\n self.endInsertRows()\r\n \r\n def get_index_from_key(self, key):\r\n try:\r\n return self.createIndex(self.keys.index(key), 0)\r\n except (RuntimeError, ValueError):\r\n return QModelIndex()\r\n \r\n def get_key(self, index):\r\n \"\"\"Return current key\"\"\"\r\n return self.keys[index.row()]\r\n \r\n def get_value(self, index):\r\n \"\"\"Return current value\"\"\"\r\n if index.column() == 0:\r\n return self.keys[ index.row() ]\r\n elif index.column() == 1:\r\n return self.types[ index.row() ]\r\n elif index.column() == 2:\r\n return self.sizes[ index.row() ]\r\n else:\r\n return self._data[ self.keys[index.row()] ]\r\n\r\n def get_bgcolor(self, index):\r\n \"\"\"Background color depending on value\"\"\"\r\n if index.column() == 0:\r\n color = QColor(Qt.lightGray)\r\n color.setAlphaF(.05)\r\n elif index.column() < 3:\r\n color = QColor(Qt.lightGray)\r\n color.setAlphaF(.2)\r\n else:\r\n color = QColor(Qt.lightGray)\r\n 
color.setAlphaF(.3)\r\n return color\r\n\r\n def data(self, index, role=Qt.DisplayRole):\r\n \"\"\"Cell content\"\"\"\r\n if not index.isValid():\r\n return to_qvariant()\r\n value = self.get_value(index)\r\n if index.column() == 3 and self.remote:\r\n value = value['view']\r\n if index.column() == 3:\r\n display = value_to_display(value, minmax=self.minmax)\r\n else:\r\n if is_type_text_string(value):\r\n display = to_text_string(value, encoding=\"utf-8\")\r\n else:\r\n display = to_text_string(value)\r\n if role == Qt.DisplayRole:\r\n return to_qvariant(display)\r\n elif role == Qt.EditRole:\r\n return to_qvariant(value_to_display(value))\r\n elif role == Qt.TextAlignmentRole:\r\n if index.column() == 3:\r\n if len(display.splitlines()) < 3:\r\n return to_qvariant(int(Qt.AlignLeft|Qt.AlignVCenter))\r\n else:\r\n return to_qvariant(int(Qt.AlignLeft|Qt.AlignTop))\r\n else:\r\n return to_qvariant(int(Qt.AlignLeft|Qt.AlignVCenter))\r\n elif role == Qt.BackgroundColorRole:\r\n return to_qvariant( self.get_bgcolor(index) )\r\n elif role == Qt.FontRole:\r\n return to_qvariant(get_font(font_size_delta=DEFAULT_SMALL_DELTA))\r\n return to_qvariant()\r\n\r\n def headerData(self, section, orientation, role=Qt.DisplayRole):\r\n \"\"\"Overriding method headerData\"\"\"\r\n if role != Qt.DisplayRole:\r\n return to_qvariant()\r\n i_column = int(section)\r\n if orientation == Qt.Horizontal:\r\n headers = (self.header0, _(\"Type\"), _(\"Size\"), _(\"Value\"))\r\n return to_qvariant( headers[i_column] )\r\n else:\r\n return to_qvariant()\r\n\r\n def flags(self, index):\r\n \"\"\"Overriding method flags\"\"\"\r\n # This method was implemented in CollectionsModel only, but to enable\r\n # tuple exploration (even without editing), this method was moved here\r\n if not index.isValid():\r\n return Qt.ItemIsEnabled\r\n return Qt.ItemFlags(QAbstractTableModel.flags(self, index)|\r\n Qt.ItemIsEditable)\r\n def reset(self):\r\n self.beginResetModel()\r\n self.endResetModel()\r\n\r\n\r\nclass CollectionsModel(ReadOnlyCollectionsModel):\r\n \"\"\"Collections Table Model\"\"\"\r\n\r\n def set_value(self, index, value):\r\n \"\"\"Set value\"\"\"\r\n self._data[ self.keys[index.row()] ] = value\r\n self.showndata[ self.keys[index.row()] ] = value\r\n self.sizes[index.row()] = get_size(value)\r\n self.types[index.row()] = get_human_readable_type(value)\r\n self.sig_setting_data.emit()\r\n\r\n def get_bgcolor(self, index):\r\n \"\"\"Background color depending on value\"\"\"\r\n value = self.get_value(index)\r\n if index.column() < 3:\r\n color = ReadOnlyCollectionsModel.get_bgcolor(self, index)\r\n else:\r\n if self.remote:\r\n color_name = value['color']\r\n else:\r\n color_name = get_color_name(value)\r\n color = QColor(color_name)\r\n color.setAlphaF(.2)\r\n return color\r\n\r\n def setData(self, index, value, role=Qt.EditRole):\r\n \"\"\"Cell content change\"\"\"\r\n if not index.isValid():\r\n return False\r\n if index.column() < 3:\r\n return False\r\n value = display_to_value(value, self.get_value(index),\r\n ignore_errors=True)\r\n self.set_value(index, value)\r\n self.dataChanged.emit(index, index)\r\n return True\r\n\r\n\r\nclass CollectionsDelegate(QItemDelegate):\r\n \"\"\"CollectionsEditor Item Delegate\"\"\"\r\n sig_free_memory = Signal()\r\n\r\n def __init__(self, parent=None):\r\n QItemDelegate.__init__(self, parent)\r\n self._editors = {} # keep references on opened editors\r\n \r\n def get_value(self, index):\r\n if index.isValid():\r\n return index.model().get_value(index)\r\n \r\n def set_value(self, 
index, value):\r\n if index.isValid():\r\n index.model().set_value(index, value)\r\n\r\n def show_warning(self, index):\r\n \"\"\"\r\n Decide if showing a warning when the user is trying to view\r\n a big variable associated to a Tablemodel index\r\n\r\n This avoids getting the variables' value to know its\r\n size and type, using instead those already computed by\r\n the TableModel.\r\n \r\n The problem is when a variable is too big, it can take a\r\n lot of time just to get its value\r\n \"\"\"\r\n try:\r\n val_size = index.model().sizes[index.row()]\r\n val_type = index.model().types[index.row()]\r\n except:\r\n return False\r\n if val_type in ['list', 'set', 'tuple', 'dict'] and \\\r\n int(val_size) > 1e5:\r\n return True\r\n else:\r\n return False\r\n\r\n def createEditor(self, parent, option, index):\r\n \"\"\"Overriding method createEditor\"\"\"\r\n if index.column() < 3:\r\n return None\r\n if self.show_warning(index):\r\n answer = QMessageBox.warning(self.parent(), _(\"Warning\"),\r\n _(\"Opening this variable can be slow\\n\\n\"\r\n \"Do you want to continue anyway?\"),\r\n QMessageBox.Yes | QMessageBox.No)\r\n if answer == QMessageBox.No:\r\n return None\r\n try:\r\n value = self.get_value(index)\r\n if value is None:\r\n return None\r\n except Exception as msg:\r\n QMessageBox.critical(self.parent(), _(\"Error\"),\r\n _(\"Spyder was unable to retrieve the value of \"\r\n \"this variable from the console.<br><br>\"\r\n \"The error mesage was:<br>\"\r\n \"<i>%s</i>\"\r\n ) % to_text_string(msg))\r\n return\r\n key = index.model().get_key(index)\r\n readonly = (isinstance(value, (tuple, set)) or self.parent().readonly\r\n or not is_known_type(value))\r\n # CollectionsEditor for a list, tuple, dict, etc.\r\n if isinstance(value, (list, set, tuple, dict)):\r\n editor = CollectionsEditor(parent=parent)\r\n editor.setup(value, key, icon=self.parent().windowIcon(),\r\n readonly=readonly)\r\n self.create_dialog(editor, dict(model=index.model(), editor=editor,\r\n key=key, readonly=readonly))\r\n return None\r\n # ArrayEditor for a Numpy array\r\n elif isinstance(value, (ndarray, MaskedArray)) \\\r\n and ndarray is not FakeObject:\r\n editor = ArrayEditor(parent=parent)\r\n if not editor.setup_and_check(value, title=key, readonly=readonly):\r\n return\r\n self.create_dialog(editor, dict(model=index.model(), editor=editor,\r\n key=key, readonly=readonly))\r\n return None\r\n # ArrayEditor for an images\r\n elif isinstance(value, Image) and ndarray is not FakeObject \\\r\n and Image is not FakeObject:\r\n arr = array(value)\r\n editor = ArrayEditor(parent=parent)\r\n if not editor.setup_and_check(arr, title=key, readonly=readonly):\r\n return\r\n conv_func = lambda arr: Image.fromarray(arr, mode=value.mode)\r\n self.create_dialog(editor, dict(model=index.model(), editor=editor,\r\n key=key, readonly=readonly,\r\n conv=conv_func))\r\n return None\r\n # DataFrameEditor for a pandas dataframe, series or index\r\n elif isinstance(value, (DataFrame, Index, Series)) \\\r\n and DataFrame is not FakeObject:\r\n editor = DataFrameEditor(parent=parent)\r\n if not editor.setup_and_check(value, title=key):\r\n return\r\n editor.dataModel.set_format(index.model().dataframe_format)\r\n editor.sig_option_changed.connect(self.change_option)\r\n self.create_dialog(editor, dict(model=index.model(), editor=editor,\r\n key=key, readonly=readonly))\r\n return None\r\n # QDateEdit and QDateTimeEdit for a dates or datetime respectively\r\n elif isinstance(value, datetime.date):\r\n if readonly:\r\n return 
None\r\n else:\r\n if isinstance(value, datetime.datetime):\r\n editor = QDateTimeEdit(value, parent=parent)\r\n else:\r\n editor = QDateEdit(value, parent=parent)\r\n editor.setCalendarPopup(True)\r\n editor.setFont(get_font(font_size_delta=DEFAULT_SMALL_DELTA))\r\n return editor\r\n # TextEditor for a long string\r\n elif is_text_string(value) and len(value) > 40:\r\n te = TextEditor(None, parent=parent)\r\n if te.setup_and_check(value):\r\n editor = TextEditor(value, key,\r\n readonly=readonly, parent=parent)\r\n self.create_dialog(editor, dict(model=index.model(),\r\n editor=editor, key=key,\r\n readonly=readonly))\r\n return None\r\n # QLineEdit for an individual value (int, float, short string, etc)\r\n elif is_editable_type(value):\r\n if readonly:\r\n return None\r\n else:\r\n editor = QLineEdit(parent=parent)\r\n editor.setFont(get_font(font_size_delta=DEFAULT_SMALL_DELTA))\r\n editor.setAlignment(Qt.AlignLeft)\r\n # This is making Spyder crash because the QLineEdit that it's\r\n # been modified is removed and a new one is created after\r\n # evaluation. So the object on which this method is trying to\r\n # act doesn't exist anymore.\r\n # editor.returnPressed.connect(self.commitAndCloseEditor)\r\n return editor\r\n # CollectionsEditor for an arbitrary Python object\r\n else:\r\n editor = CollectionsEditor(parent=parent)\r\n editor.setup(value, key, icon=self.parent().windowIcon(),\r\n readonly=readonly)\r\n self.create_dialog(editor, dict(model=index.model(), editor=editor,\r\n key=key, readonly=readonly))\r\n return None\r\n\r\n def create_dialog(self, editor, data):\r\n self._editors[id(editor)] = data\r\n editor.accepted.connect(\r\n lambda eid=id(editor): self.editor_accepted(eid))\r\n editor.rejected.connect(\r\n lambda eid=id(editor): self.editor_rejected(eid))\r\n editor.show()\r\n \r\n @Slot(str, object)\r\n def change_option(self, option_name, new_value):\r\n \"\"\"\r\n Change configuration option.\r\n\r\n This function is called when a `sig_option_changed` signal is received.\r\n At the moment, this signal can only come from a DataFrameEditor.\r\n \"\"\"\r\n if option_name == 'dataframe_format':\r\n self.parent().set_dataframe_format(new_value)\r\n\r\n def editor_accepted(self, editor_id):\r\n data = self._editors[editor_id]\r\n if not data['readonly']:\r\n index = data['model'].get_index_from_key(data['key'])\r\n value = data['editor'].get_value()\r\n conv_func = data.get('conv', lambda v: v)\r\n self.set_value(index, conv_func(value))\r\n # This is needed to avoid the problem reported on\r\n # issue 8557\r\n try:\r\n self._editors.pop(editor_id)\r\n except KeyError:\r\n pass\r\n self.free_memory()\r\n \r\n def editor_rejected(self, editor_id):\r\n # This is needed to avoid the problem reported on\r\n # issue 8557\r\n try:\r\n self._editors.pop(editor_id)\r\n except KeyError:\r\n pass\r\n self.free_memory()\r\n\r\n def free_memory(self):\r\n \"\"\"Free memory after closing an editor.\"\"\"\r\n try:\r\n self.sig_free_memory.emit()\r\n except RuntimeError:\r\n pass\r\n\r\n def commitAndCloseEditor(self):\r\n \"\"\"Overriding method commitAndCloseEditor\"\"\"\r\n editor = self.sender()\r\n # Avoid a segfault with PyQt5. Variable value won't be changed\r\n # but at least Spyder won't crash. 
It seems generated by a bug in sip.\r\n try:\r\n self.commitData.emit(editor)\r\n except AttributeError:\r\n pass\r\n self.closeEditor.emit(editor, QAbstractItemDelegate.NoHint)\r\n\r\n def setEditorData(self, editor, index):\r\n \"\"\"\r\n Overriding method setEditorData\r\n Model --> Editor\r\n \"\"\"\r\n value = self.get_value(index)\r\n if isinstance(editor, QLineEdit):\r\n if is_binary_string(value):\r\n try:\r\n value = to_text_string(value, 'utf8')\r\n except:\r\n pass\r\n if not is_text_string(value):\r\n value = repr(value)\r\n editor.setText(value)\r\n elif isinstance(editor, QDateEdit):\r\n editor.setDate(value)\r\n elif isinstance(editor, QDateTimeEdit):\r\n editor.setDateTime(QDateTime(value.date(), value.time()))\r\n\r\n def setModelData(self, editor, model, index):\r\n \"\"\"\r\n Overriding method setModelData\r\n Editor --> Model\r\n \"\"\"\r\n if not hasattr(model, \"set_value\"):\r\n # Read-only mode\r\n return\r\n \r\n if isinstance(editor, QLineEdit):\r\n value = editor.text()\r\n try:\r\n value = display_to_value(to_qvariant(value),\r\n self.get_value(index),\r\n ignore_errors=False)\r\n except Exception as msg:\r\n raise\r\n QMessageBox.critical(editor, _(\"Edit item\"),\r\n _(\"<b>Unable to assign data to item.</b>\"\r\n \"<br><br>Error message:<br>%s\"\r\n ) % str(msg))\r\n return\r\n elif isinstance(editor, QDateEdit):\r\n qdate = editor.date()\r\n value = datetime.date( qdate.year(), qdate.month(), qdate.day() )\r\n elif isinstance(editor, QDateTimeEdit):\r\n qdatetime = editor.dateTime()\r\n qdate = qdatetime.date()\r\n qtime = qdatetime.time()\r\n value = datetime.datetime( qdate.year(), qdate.month(),\r\n qdate.day(), qtime.hour(),\r\n qtime.minute(), qtime.second() )\r\n else:\r\n # Should not happen...\r\n raise RuntimeError(\"Unsupported editor widget\")\r\n self.set_value(index, value)\r\n\r\n\r\nclass BaseHeaderView(QHeaderView):\r\n \"\"\"\r\n A header view for the BaseTableView that emits a signal when the width of\r\n one of its sections is resized by the user.\r\n \"\"\"\r\n sig_user_resized_section = Signal(int, int, int)\r\n\r\n def __init__(self, parent=None):\r\n super(BaseHeaderView, self).__init__(Qt.Horizontal, parent)\r\n self._handle_section_is_pressed = False\r\n self.sectionResized.connect(self.sectionResizeEvent)\r\n\r\n def mousePressEvent(self, e):\r\n self._handle_section_is_pressed = (self.cursor().shape() ==\r\n Qt.SplitHCursor)\r\n super(BaseHeaderView, self).mousePressEvent(e)\r\n\r\n def mouseReleaseEvent(self, e):\r\n self._handle_section_is_pressed = False\r\n super(BaseHeaderView, self).mouseReleaseEvent(e)\r\n\r\n def sectionResizeEvent(self, logicalIndex, oldSize, newSize):\r\n if self._handle_section_is_pressed:\r\n self.sig_user_resized_section.emit(logicalIndex, oldSize, newSize)\r\n\r\n\r\nclass BaseTableView(QTableView):\r\n \"\"\"Base collection editor table view\"\"\"\r\n sig_option_changed = Signal(str, object)\r\n sig_files_dropped = Signal(list)\r\n redirect_stdio = Signal(bool)\r\n sig_free_memory = Signal()\r\n\r\n def __init__(self, parent):\r\n QTableView.__init__(self, parent)\r\n self.array_filename = None\r\n self.menu = None\r\n self.empty_ws_menu = None\r\n self.paste_action = None\r\n self.copy_action = None\r\n self.edit_action = None\r\n self.plot_action = None\r\n self.hist_action = None\r\n self.imshow_action = None\r\n self.save_array_action = None\r\n self.insert_action = None\r\n self.remove_action = None\r\n self.minmax_action = None\r\n self.rename_action = None\r\n self.duplicate_action = None\r\n 
self.delegate = None\r\n self.setAcceptDrops(True)\r\n self.automatic_column_width = True\r\n self.setHorizontalHeader(BaseHeaderView(parent=self))\r\n self.horizontalHeader().sig_user_resized_section.connect(\r\n self.user_resize_columns)\r\n\r\n def setup_table(self):\r\n \"\"\"Setup table\"\"\"\r\n self.horizontalHeader().setStretchLastSection(True)\r\n self.adjust_columns()\r\n # Sorting columns\r\n self.setSortingEnabled(True)\r\n self.sortByColumn(0, Qt.AscendingOrder)\r\n \r\n def setup_menu(self, minmax):\r\n \"\"\"Setup context menu\"\"\"\r\n if self.minmax_action is not None:\r\n self.minmax_action.setChecked(minmax)\r\n return\r\n \r\n resize_action = create_action(self, _(\"Resize rows to contents\"),\r\n triggered=self.resizeRowsToContents)\r\n resize_columns_action = create_action(\r\n self,\r\n _(\"Resize columns to contents\"),\r\n triggered=self.resize_column_contents)\r\n self.paste_action = create_action(self, _(\"Paste\"),\r\n icon=ima.icon('editpaste'),\r\n triggered=self.paste)\r\n self.copy_action = create_action(self, _(\"Copy\"),\r\n icon=ima.icon('editcopy'),\r\n triggered=self.copy)\r\n self.edit_action = create_action(self, _(\"Edit\"),\r\n icon=ima.icon('edit'),\r\n triggered=self.edit_item)\r\n self.plot_action = create_action(self, _(\"Plot\"),\r\n icon=ima.icon('plot'),\r\n triggered=lambda: self.plot_item('plot'))\r\n self.plot_action.setVisible(False)\r\n self.hist_action = create_action(self, _(\"Histogram\"),\r\n icon=ima.icon('hist'),\r\n triggered=lambda: self.plot_item('hist'))\r\n self.hist_action.setVisible(False)\r\n self.imshow_action = create_action(self, _(\"Show image\"),\r\n icon=ima.icon('imshow'),\r\n triggered=self.imshow_item)\r\n self.imshow_action.setVisible(False)\r\n self.save_array_action = create_action(self, _(\"Save array\"),\r\n icon=ima.icon('filesave'),\r\n triggered=self.save_array)\r\n self.save_array_action.setVisible(False)\r\n self.insert_action = create_action(self, _(\"Insert\"),\r\n icon=ima.icon('insert'),\r\n triggered=self.insert_item)\r\n self.remove_action = create_action(self, _(\"Remove\"),\r\n icon=ima.icon('editdelete'),\r\n triggered=self.remove_item)\r\n self.minmax_action = create_action(self, _(\"Show arrays min/max\"),\r\n toggled=self.toggle_minmax)\r\n self.minmax_action.setChecked(minmax)\r\n self.toggle_minmax(minmax)\r\n self.rename_action = create_action(self, _(\"Rename\"),\r\n icon=ima.icon('rename'),\r\n triggered=self.rename_item)\r\n self.duplicate_action = create_action(self, _(\"Duplicate\"),\r\n icon=ima.icon('edit_add'),\r\n triggered=self.duplicate_item)\r\n menu = QMenu(self)\r\n menu_actions = [self.edit_action, self.plot_action, self.hist_action,\r\n self.imshow_action, self.save_array_action,\r\n self.insert_action, self.remove_action,\r\n self.copy_action, self.paste_action,\r\n None, self.rename_action, self.duplicate_action,\r\n None, resize_action, resize_columns_action]\r\n if ndarray is not FakeObject:\r\n menu_actions.append(self.minmax_action)\r\n add_actions(menu, menu_actions)\r\n self.empty_ws_menu = QMenu(self)\r\n add_actions(self.empty_ws_menu,\r\n [self.insert_action, self.paste_action,\r\n None, resize_action, resize_columns_action])\r\n return menu\r\n \r\n #------ Remote/local API --------------------------------------------------\r\n def remove_values(self, keys):\r\n \"\"\"Remove values from data\"\"\"\r\n raise NotImplementedError\r\n\r\n def copy_value(self, orig_key, new_key):\r\n \"\"\"Copy value\"\"\"\r\n raise NotImplementedError\r\n \r\n def new_value(self, 
key, value):\r\n \"\"\"Create new value in data\"\"\"\r\n raise NotImplementedError\r\n \r\n def is_list(self, key):\r\n \"\"\"Return True if variable is a list, a set or a tuple\"\"\"\r\n raise NotImplementedError\r\n \r\n def get_len(self, key):\r\n \"\"\"Return sequence length\"\"\"\r\n raise NotImplementedError\r\n \r\n def is_array(self, key):\r\n \"\"\"Return True if variable is a numpy array\"\"\"\r\n raise NotImplementedError\r\n\r\n def is_image(self, key):\r\n \"\"\"Return True if variable is a PIL.Image image\"\"\"\r\n raise NotImplementedError\r\n \r\n def is_dict(self, key):\r\n \"\"\"Return True if variable is a dictionary\"\"\"\r\n raise NotImplementedError\r\n \r\n def get_array_shape(self, key):\r\n \"\"\"Return array's shape\"\"\"\r\n raise NotImplementedError\r\n \r\n def get_array_ndim(self, key):\r\n \"\"\"Return array's ndim\"\"\"\r\n raise NotImplementedError\r\n \r\n def oedit(self, key):\r\n \"\"\"Edit item\"\"\"\r\n raise NotImplementedError\r\n \r\n def plot(self, key, funcname):\r\n \"\"\"Plot item\"\"\"\r\n raise NotImplementedError\r\n \r\n def imshow(self, key):\r\n \"\"\"Show item's image\"\"\"\r\n raise NotImplementedError\r\n \r\n def show_image(self, key):\r\n \"\"\"Show image (item is a PIL image)\"\"\"\r\n raise NotImplementedError\r\n #--------------------------------------------------------------------------\r\n \r\n def refresh_menu(self):\r\n \"\"\"Refresh context menu\"\"\"\r\n index = self.currentIndex()\r\n condition = index.isValid()\r\n self.edit_action.setEnabled( condition )\r\n self.remove_action.setEnabled( condition )\r\n self.refresh_plot_entries(index)\r\n \r\n def refresh_plot_entries(self, index):\r\n if index.isValid():\r\n key = self.model.get_key(index)\r\n is_list = self.is_list(key)\r\n is_array = self.is_array(key) and self.get_len(key) != 0\r\n condition_plot = (is_array and len(self.get_array_shape(key)) <= 2)\r\n condition_hist = (is_array and self.get_array_ndim(key) == 1)\r\n condition_imshow = condition_plot and self.get_array_ndim(key) == 2\r\n condition_imshow = condition_imshow or self.is_image(key)\r\n else:\r\n is_array = condition_plot = condition_imshow = is_list \\\r\n = condition_hist = False\r\n self.plot_action.setVisible(condition_plot or is_list)\r\n self.hist_action.setVisible(condition_hist or is_list)\r\n self.imshow_action.setVisible(condition_imshow)\r\n self.save_array_action.setVisible(is_array)\r\n\r\n def resize_column_contents(self):\r\n \"\"\"Resize columns to contents.\"\"\"\r\n self.automatic_column_width = True\r\n self.adjust_columns()\r\n\r\n def user_resize_columns(self, logical_index, old_size, new_size):\r\n \"\"\"Handle the user resize action.\"\"\"\r\n self.automatic_column_width = False\r\n\r\n def adjust_columns(self):\r\n \"\"\"Resize two first columns to contents\"\"\"\r\n if self.automatic_column_width:\r\n for col in range(3):\r\n self.resizeColumnToContents(col)\r\n\r\n def set_data(self, data):\r\n \"\"\"Set table data\"\"\"\r\n if data is not None:\r\n self.model.set_data(data, self.dictfilter)\r\n self.sortByColumn(0, Qt.AscendingOrder)\r\n\r\n def mousePressEvent(self, event):\r\n \"\"\"Reimplement Qt method\"\"\"\r\n if event.button() != Qt.LeftButton:\r\n QTableView.mousePressEvent(self, event)\r\n return\r\n index_clicked = self.indexAt(event.pos())\r\n if index_clicked.isValid():\r\n if index_clicked == self.currentIndex() \\\r\n and index_clicked in self.selectedIndexes():\r\n self.clearSelection()\r\n else:\r\n QTableView.mousePressEvent(self, event)\r\n else:\r\n 
self.clearSelection()\r\n event.accept()\r\n \r\n def mouseDoubleClickEvent(self, event):\r\n \"\"\"Reimplement Qt method\"\"\"\r\n index_clicked = self.indexAt(event.pos())\r\n if index_clicked.isValid():\r\n row = index_clicked.row()\r\n # TODO: Remove hard coded \"Value\" column number (3 here)\r\n index_clicked = index_clicked.child(row, 3)\r\n self.edit(index_clicked)\r\n else:\r\n event.accept()\r\n \r\n def keyPressEvent(self, event):\r\n \"\"\"Reimplement Qt methods\"\"\"\r\n if event.key() == Qt.Key_Delete:\r\n self.remove_item()\r\n elif event.key() == Qt.Key_F2:\r\n self.rename_item()\r\n elif event == QKeySequence.Copy:\r\n self.copy()\r\n elif event == QKeySequence.Paste:\r\n self.paste()\r\n else:\r\n QTableView.keyPressEvent(self, event)\r\n \r\n def contextMenuEvent(self, event):\r\n \"\"\"Reimplement Qt method\"\"\"\r\n if self.model.showndata:\r\n self.refresh_menu()\r\n self.menu.popup(event.globalPos())\r\n event.accept()\r\n else:\r\n self.empty_ws_menu.popup(event.globalPos())\r\n event.accept()\r\n\r\n def dragEnterEvent(self, event):\r\n \"\"\"Allow user to drag files\"\"\"\r\n if mimedata2url(event.mimeData()):\r\n event.accept()\r\n else:\r\n event.ignore()\r\n \r\n def dragMoveEvent(self, event):\r\n \"\"\"Allow user to move files\"\"\"\r\n if mimedata2url(event.mimeData()):\r\n event.setDropAction(Qt.CopyAction)\r\n event.accept()\r\n else:\r\n event.ignore()\r\n\r\n def dropEvent(self, event):\r\n \"\"\"Allow user to drop supported files\"\"\"\r\n urls = mimedata2url(event.mimeData())\r\n if urls:\r\n event.setDropAction(Qt.CopyAction)\r\n event.accept()\r\n self.sig_files_dropped.emit(urls)\r\n else:\r\n event.ignore()\r\n\r\n @Slot(bool)\r\n def toggle_minmax(self, state):\r\n \"\"\"Toggle min/max display for numpy arrays\"\"\"\r\n self.sig_option_changed.emit('minmax', state)\r\n self.model.minmax = state\r\n\r\n @Slot(str)\r\n def set_dataframe_format(self, new_format):\r\n \"\"\"\r\n Set format to use in DataframeEditor.\r\n\r\n Args:\r\n new_format (string): e.g. 
\"%.3f\"\r\n \"\"\"\r\n self.sig_option_changed.emit('dataframe_format', new_format)\r\n self.model.dataframe_format = new_format\r\n\r\n @Slot()\r\n def edit_item(self):\r\n \"\"\"Edit item\"\"\"\r\n index = self.currentIndex()\r\n if not index.isValid():\r\n return\r\n # TODO: Remove hard coded \"Value\" column number (3 here)\r\n self.edit(index.child(index.row(), 3))\r\n\r\n @Slot()\r\n def remove_item(self):\r\n \"\"\"Remove item\"\"\"\r\n indexes = self.selectedIndexes()\r\n if not indexes:\r\n return\r\n for index in indexes:\r\n if not index.isValid():\r\n return\r\n one = _(\"Do you want to remove the selected item?\")\r\n more = _(\"Do you want to remove all selected items?\")\r\n answer = QMessageBox.question(self, _( \"Remove\"),\r\n one if len(indexes) == 1 else more,\r\n QMessageBox.Yes | QMessageBox.No)\r\n if answer == QMessageBox.Yes:\r\n idx_rows = unsorted_unique([idx.row() for idx in indexes])\r\n keys = [ self.model.keys[idx_row] for idx_row in idx_rows ]\r\n self.remove_values(keys)\r\n\r\n def copy_item(self, erase_original=False):\r\n \"\"\"Copy item\"\"\"\r\n indexes = self.selectedIndexes()\r\n if not indexes:\r\n return\r\n idx_rows = unsorted_unique([idx.row() for idx in indexes])\r\n if len(idx_rows) > 1 or not indexes[0].isValid():\r\n return\r\n orig_key = self.model.keys[idx_rows[0]]\r\n if erase_original:\r\n title = _('Rename')\r\n field_text = _('New variable name:')\r\n else:\r\n title = _('Duplicate')\r\n field_text = _('Variable name:')\r\n data = self.model.get_data()\r\n if isinstance(data, (list, set)):\r\n new_key, valid = len(data), True\r\n else:\r\n new_key, valid = QInputDialog.getText(self, title, field_text,\r\n QLineEdit.Normal, orig_key)\r\n if valid and to_text_string(new_key):\r\n new_key = try_to_eval(to_text_string(new_key))\r\n if new_key == orig_key:\r\n return\r\n self.copy_value(orig_key, new_key)\r\n if erase_original:\r\n self.remove_values([orig_key])\r\n\r\n @Slot()\r\n def duplicate_item(self):\r\n \"\"\"Duplicate item\"\"\"\r\n self.copy_item()\r\n\r\n @Slot()\r\n def rename_item(self):\r\n \"\"\"Rename item\"\"\"\r\n self.copy_item(True)\r\n\r\n @Slot()\r\n def insert_item(self):\r\n \"\"\"Insert item\"\"\"\r\n index = self.currentIndex()\r\n if not index.isValid():\r\n row = self.model.rowCount()\r\n else:\r\n row = index.row()\r\n data = self.model.get_data()\r\n if isinstance(data, list):\r\n key = row\r\n data.insert(row, '')\r\n elif isinstance(data, dict):\r\n key, valid = QInputDialog.getText(self, _( 'Insert'), _( 'Key:'),\r\n QLineEdit.Normal)\r\n if valid and to_text_string(key):\r\n key = try_to_eval(to_text_string(key))\r\n else:\r\n return\r\n else:\r\n return\r\n value, valid = QInputDialog.getText(self, _('Insert'), _('Value:'),\r\n QLineEdit.Normal)\r\n if valid and to_text_string(value):\r\n self.new_value(key, try_to_eval(to_text_string(value)))\r\n \r\n def __prepare_plot(self):\r\n try:\r\n import guiqwt.pyplot #analysis:ignore\r\n return True\r\n except:\r\n try:\r\n if 'matplotlib' not in sys.modules:\r\n import matplotlib\r\n matplotlib.use(\"Qt4Agg\")\r\n return True\r\n except:\r\n QMessageBox.warning(self, _(\"Import error\"),\r\n _(\"Please install <b>matplotlib</b>\"\r\n \" or <b>guiqwt</b>.\"))\r\n\r\n def plot_item(self, funcname):\r\n \"\"\"Plot item\"\"\"\r\n index = self.currentIndex()\r\n if self.__prepare_plot():\r\n key = self.model.get_key(index)\r\n try:\r\n self.plot(key, funcname)\r\n except (ValueError, TypeError) as error:\r\n QMessageBox.critical(self, _( \"Plot\"),\r\n _(\"<b>Unable 
to plot data.</b>\"\r\n \"<br><br>Error message:<br>%s\"\r\n ) % str(error))\r\n\r\n @Slot()\r\n def imshow_item(self):\r\n \"\"\"Imshow item\"\"\"\r\n index = self.currentIndex()\r\n if self.__prepare_plot():\r\n key = self.model.get_key(index)\r\n try:\r\n if self.is_image(key):\r\n self.show_image(key)\r\n else:\r\n self.imshow(key)\r\n except (ValueError, TypeError) as error:\r\n QMessageBox.critical(self, _( \"Plot\"),\r\n _(\"<b>Unable to show image.</b>\"\r\n \"<br><br>Error message:<br>%s\"\r\n ) % str(error))\r\n\r\n @Slot()\r\n def save_array(self):\r\n \"\"\"Save array\"\"\"\r\n title = _( \"Save array\")\r\n if self.array_filename is None:\r\n self.array_filename = getcwd_or_home()\r\n self.redirect_stdio.emit(False)\r\n filename, _selfilter = getsavefilename(self, title,\r\n self.array_filename,\r\n _(\"NumPy arrays\")+\" (*.npy)\")\r\n self.redirect_stdio.emit(True)\r\n if filename:\r\n self.array_filename = filename\r\n data = self.delegate.get_value( self.currentIndex() )\r\n try:\r\n import numpy as np\r\n np.save(self.array_filename, data)\r\n except Exception as error:\r\n QMessageBox.critical(self, title,\r\n _(\"<b>Unable to save array</b>\"\r\n \"<br><br>Error message:<br>%s\"\r\n ) % str(error))\r\n \r\n @Slot()\r\n def copy(self):\r\n \"\"\"Copy text to clipboard\"\"\"\r\n clipboard = QApplication.clipboard()\r\n clipl = []\r\n for idx in self.selectedIndexes():\r\n if not idx.isValid():\r\n continue\r\n obj = self.delegate.get_value(idx)\r\n # Check if we are trying to copy a numpy array, and if so make sure\r\n # to copy the whole thing in a tab separated format\r\n if isinstance(obj, (ndarray, MaskedArray)) \\\r\n and ndarray is not FakeObject:\r\n if PY3:\r\n output = io.BytesIO()\r\n else:\r\n output = io.StringIO()\r\n try:\r\n np_savetxt(output, obj, delimiter='\\t')\r\n except:\r\n QMessageBox.warning(self, _(\"Warning\"),\r\n _(\"It was not possible to copy \"\r\n \"this array\"))\r\n return\r\n obj = output.getvalue().decode('utf-8')\r\n output.close()\r\n elif isinstance(obj, (DataFrame, Series)) \\\r\n and DataFrame is not FakeObject:\r\n output = io.StringIO()\r\n try:\r\n obj.to_csv(output, sep='\\t', index=True, header=True)\r\n except Exception:\r\n QMessageBox.warning(self, _(\"Warning\"),\r\n _(\"It was not possible to copy \"\r\n \"this dataframe\"))\r\n return\r\n if PY3:\r\n obj = output.getvalue()\r\n else:\r\n obj = output.getvalue().decode('utf-8')\r\n output.close()\r\n elif is_binary_string(obj):\r\n obj = to_text_string(obj, 'utf8')\r\n else:\r\n obj = to_text_string(obj)\r\n clipl.append(obj)\r\n clipboard.setText('\\n'.join(clipl))\r\n\r\n def import_from_string(self, text, title=None):\r\n \"\"\"Import data from string\"\"\"\r\n data = self.model.get_data()\r\n # Check if data is a dict\r\n if not hasattr(data, \"keys\"):\r\n return\r\n editor = ImportWizard(self, text, title=title,\r\n contents_title=_(\"Clipboard contents\"),\r\n varname=fix_reference_name(\"data\",\r\n blacklist=list(data.keys())))\r\n if editor.exec_():\r\n var_name, clip_data = editor.get_data()\r\n self.new_value(var_name, clip_data)\r\n\r\n @Slot()\r\n def paste(self):\r\n \"\"\"Import text/data/code from clipboard\"\"\"\r\n clipboard = QApplication.clipboard()\r\n cliptext = ''\r\n if clipboard.mimeData().hasText():\r\n cliptext = to_text_string(clipboard.text())\r\n if cliptext.strip():\r\n self.import_from_string(cliptext, title=_(\"Import from clipboard\"))\r\n else:\r\n QMessageBox.warning(self, _( \"Empty clipboard\"),\r\n _(\"Nothing to be imported from 
clipboard.\"))\r\n\r\n\r\nclass CollectionsEditorTableView(BaseTableView):\r\n \"\"\"CollectionsEditor table view\"\"\"\r\n def __init__(self, parent, data, readonly=False, title=\"\",\r\n names=False, minmax=False):\r\n BaseTableView.__init__(self, parent)\r\n self.dictfilter = None\r\n self.readonly = readonly or isinstance(data, (tuple, set))\r\n CollectionsModelClass = ReadOnlyCollectionsModel if self.readonly \\\r\n else CollectionsModel\r\n self.model = CollectionsModelClass(self, data, title, names=names,\r\n minmax=minmax)\r\n self.setModel(self.model)\r\n self.delegate = CollectionsDelegate(self)\r\n self.setItemDelegate(self.delegate)\r\n\r\n self.setup_table()\r\n self.menu = self.setup_menu(minmax)\r\n\r\n if isinstance(data, set):\r\n self.horizontalHeader().hideSection(0)\r\n\r\n #------ Remote/local API --------------------------------------------------\r\n def remove_values(self, keys):\r\n \"\"\"Remove values from data\"\"\"\r\n data = self.model.get_data()\r\n for key in sorted(keys, reverse=True):\r\n data.pop(key)\r\n self.set_data(data)\r\n\r\n def copy_value(self, orig_key, new_key):\r\n \"\"\"Copy value\"\"\"\r\n data = self.model.get_data()\r\n if isinstance(data, list):\r\n data.append(data[orig_key])\r\n if isinstance(data, set):\r\n data.add(data[orig_key])\r\n else:\r\n data[new_key] = data[orig_key]\r\n self.set_data(data)\r\n \r\n def new_value(self, key, value):\r\n \"\"\"Create new value in data\"\"\"\r\n data = self.model.get_data()\r\n data[key] = value\r\n self.set_data(data)\r\n \r\n def is_list(self, key):\r\n \"\"\"Return True if variable is a list or a tuple\"\"\"\r\n data = self.model.get_data()\r\n return isinstance(data[key], (tuple, list))\r\n\r\n def is_set(self, key):\r\n \"\"\"Return True if variable is a set\"\"\"\r\n data = self.model.get_data()\r\n return isinstance(data[key], set)\r\n\r\n def get_len(self, key):\r\n \"\"\"Return sequence length\"\"\"\r\n data = self.model.get_data()\r\n return len(data[key])\r\n \r\n def is_array(self, key):\r\n \"\"\"Return True if variable is a numpy array\"\"\"\r\n data = self.model.get_data()\r\n return isinstance(data[key], (ndarray, MaskedArray))\r\n \r\n def is_image(self, key):\r\n \"\"\"Return True if variable is a PIL.Image image\"\"\"\r\n data = self.model.get_data()\r\n return isinstance(data[key], Image)\r\n \r\n def is_dict(self, key):\r\n \"\"\"Return True if variable is a dictionary\"\"\"\r\n data = self.model.get_data()\r\n return isinstance(data[key], dict)\r\n \r\n def get_array_shape(self, key):\r\n \"\"\"Return array's shape\"\"\"\r\n data = self.model.get_data()\r\n return data[key].shape\r\n \r\n def get_array_ndim(self, key):\r\n \"\"\"Return array's ndim\"\"\"\r\n data = self.model.get_data()\r\n return data[key].ndim\r\n\r\n def oedit(self, key):\r\n \"\"\"Edit item\"\"\"\r\n data = self.model.get_data()\r\n from spyder.plugins.variableexplorer.widgets.objecteditor import (\r\n oedit)\r\n oedit(data[key])\r\n\r\n def plot(self, key, funcname):\r\n \"\"\"Plot item\"\"\"\r\n data = self.model.get_data()\r\n import spyder.pyplot as plt\r\n plt.figure()\r\n getattr(plt, funcname)(data[key])\r\n plt.show()\r\n \r\n def imshow(self, key):\r\n \"\"\"Show item's image\"\"\"\r\n data = self.model.get_data()\r\n import spyder.pyplot as plt\r\n plt.figure()\r\n plt.imshow(data[key])\r\n plt.show()\r\n \r\n def show_image(self, key):\r\n \"\"\"Show image (item is a PIL image)\"\"\"\r\n data = self.model.get_data()\r\n data[key].show()\r\n 
#--------------------------------------------------------------------------\r\n\r\n def refresh_menu(self):\r\n \"\"\"Refresh context menu\"\"\"\r\n data = self.model.get_data()\r\n index = self.currentIndex()\r\n condition = (not isinstance(data, (tuple, set))) and index.isValid() \\\r\n and not self.readonly\r\n self.edit_action.setEnabled( condition )\r\n self.remove_action.setEnabled( condition )\r\n self.insert_action.setEnabled( not self.readonly )\r\n self.duplicate_action.setEnabled(condition)\r\n condition_rename = not isinstance(data, (tuple, list, set))\r\n self.rename_action.setEnabled(condition_rename)\r\n self.refresh_plot_entries(index)\r\n \r\n def set_filter(self, dictfilter=None):\r\n \"\"\"Set table dict filter\"\"\"\r\n self.dictfilter = dictfilter\r\n\r\n\r\nclass CollectionsEditorWidget(QWidget):\r\n \"\"\"Dictionary Editor Widget\"\"\"\r\n def __init__(self, parent, data, readonly=False, title=\"\", remote=False):\r\n QWidget.__init__(self, parent)\r\n if remote:\r\n self.editor = RemoteCollectionsEditorTableView(self, data, readonly)\r\n else:\r\n self.editor = CollectionsEditorTableView(self, data, readonly,\r\n title)\r\n layout = QVBoxLayout()\r\n layout.addWidget(self.editor)\r\n self.setLayout(layout)\r\n \r\n def set_data(self, data):\r\n \"\"\"Set DictEditor data\"\"\"\r\n self.editor.set_data(data)\r\n \r\n def get_title(self):\r\n \"\"\"Get model title\"\"\"\r\n return self.editor.model.title\r\n\r\n\r\nclass CollectionsEditor(QDialog):\r\n \"\"\"Collections Editor Dialog\"\"\"\r\n def __init__(self, parent=None):\r\n QDialog.__init__(self, parent)\r\n\r\n # Destroying the C++ object right after closing the dialog box,\r\n # otherwise it may be garbage-collected in another QThread\r\n # (e.g. the editor's analysis thread in Spyder), thus leading to\r\n # a segmentation fault on UNIX or an application crash on Windows\r\n self.setAttribute(Qt.WA_DeleteOnClose)\r\n\r\n self.data_copy = None\r\n self.widget = None\r\n self.btn_save_and_close = None\r\n self.btn_close = None\r\n\r\n def setup(self, data, title='', readonly=False, width=650, remote=False,\r\n icon=None, parent=None):\r\n \"\"\"Setup editor.\"\"\"\r\n if isinstance(data, (dict, set)):\r\n # dictionnary, set\r\n self.data_copy = data.copy()\r\n datalen = len(data)\r\n elif isinstance(data, (tuple, list)):\r\n # list, tuple\r\n self.data_copy = data[:]\r\n datalen = len(data)\r\n else:\r\n # unknown object\r\n import copy\r\n try:\r\n self.data_copy = copy.deepcopy(data)\r\n except NotImplementedError:\r\n self.data_copy = copy.copy(data)\r\n except (TypeError, AttributeError):\r\n readonly = True\r\n self.data_copy = data\r\n datalen = len(get_object_attrs(data))\r\n\r\n # If the copy has a different type, then do not allow editing, because\r\n # this would change the type after saving; cf. 
issue #6936\r\n if type(self.data_copy) != type(data):\r\n readonly = True\r\n\r\n self.widget = CollectionsEditorWidget(self, self.data_copy,\r\n title=title, readonly=readonly,\r\n remote=remote)\r\n self.widget.editor.model.sig_setting_data.connect(\r\n self.save_and_close_enable)\r\n layout = QVBoxLayout()\r\n layout.addWidget(self.widget)\r\n self.setLayout(layout)\r\n\r\n # Buttons configuration\r\n btn_layout = QHBoxLayout()\r\n btn_layout.addStretch()\r\n\r\n if not readonly:\r\n self.btn_save_and_close = QPushButton(_('Save and Close'))\r\n self.btn_save_and_close.setDisabled(True)\r\n self.btn_save_and_close.clicked.connect(self.accept)\r\n btn_layout.addWidget(self.btn_save_and_close)\r\n\r\n self.btn_close = QPushButton(_('Close'))\r\n self.btn_close.setAutoDefault(True)\r\n self.btn_close.setDefault(True)\r\n self.btn_close.clicked.connect(self.reject)\r\n btn_layout.addWidget(self.btn_close)\r\n\r\n layout.addLayout(btn_layout)\r\n\r\n constant = 121\r\n row_height = 30\r\n error_margin = 10\r\n height = constant + row_height * min([10, datalen]) + error_margin\r\n self.resize(width, height)\r\n\r\n self.setWindowTitle(self.widget.get_title())\r\n if icon is None:\r\n self.setWindowIcon(ima.icon('dictedit'))\r\n # Make the dialog act as a window\r\n self.setWindowFlags(Qt.Window)\r\n\r\n @Slot()\r\n def save_and_close_enable(self):\r\n \"\"\"Handle the data change event to enable the save and close button.\"\"\"\r\n if self.btn_save_and_close:\r\n self.btn_save_and_close.setEnabled(True)\r\n self.btn_save_and_close.setAutoDefault(True)\r\n self.btn_save_and_close.setDefault(True)\r\n\r\n def get_value(self):\r\n \"\"\"Return modified copy of dictionary or list\"\"\"\r\n # It is import to avoid accessing Qt C++ object as it has probably\r\n # already been destroyed, due to the Qt.WA_DeleteOnClose attribute\r\n return self.data_copy\r\n\r\n\r\n#==============================================================================\r\n# Remote versions of CollectionsDelegate and CollectionsEditorTableView\r\n#==============================================================================\r\nclass RemoteCollectionsDelegate(CollectionsDelegate):\r\n \"\"\"CollectionsEditor Item Delegate\"\"\"\r\n def __init__(self, parent=None):\r\n CollectionsDelegate.__init__(self, parent)\r\n\r\n def get_value(self, index):\r\n if index.isValid():\r\n name = index.model().keys[index.row()]\r\n return self.parent().get_value(name)\r\n \r\n def set_value(self, index, value):\r\n if index.isValid():\r\n name = index.model().keys[index.row()]\r\n self.parent().new_value(name, value)\r\n\r\n\r\nclass RemoteCollectionsEditorTableView(BaseTableView):\r\n \"\"\"DictEditor table view\"\"\"\r\n def __init__(self, parent, data, minmax=False, shellwidget=None,\r\n remote_editing=False, dataframe_format=None):\r\n BaseTableView.__init__(self, parent)\r\n\r\n self.shellwidget = shellwidget\r\n self.var_properties = {}\r\n\r\n self.dictfilter = None\r\n self.model = None\r\n self.delegate = None\r\n self.readonly = False\r\n self.model = CollectionsModel(self, data, names=True,\r\n minmax=minmax,\r\n dataframe_format=dataframe_format,\r\n remote=True)\r\n self.setModel(self.model)\r\n\r\n self.delegate = RemoteCollectionsDelegate(self)\r\n self.delegate.sig_free_memory.connect(self.sig_free_memory.emit)\r\n self.setItemDelegate(self.delegate)\r\n\r\n self.setup_table()\r\n self.menu = self.setup_menu(minmax)\r\n\r\n #------ Remote/local API --------------------------------------------------\r\n def get_value(self, 
name):\r\n \"\"\"Get the value of a variable\"\"\"\r\n value = self.shellwidget.get_value(name)\r\n # Reset temporal variable where value is saved to\r\n # save memory\r\n self.shellwidget._kernel_value = None\r\n return value\r\n\r\n def new_value(self, name, value):\r\n \"\"\"Create new value in data\"\"\"\r\n try:\r\n # We need to enclose values in a list to be able to send\r\n # them to the kernel in Python 2\r\n svalue = [cloudpickle.dumps(value, protocol=PICKLE_PROTOCOL)]\r\n\r\n # Needed to prevent memory leaks. See issue 7158\r\n if len(svalue) < MAX_SERIALIZED_LENGHT:\r\n self.shellwidget.set_value(name, svalue)\r\n else:\r\n QMessageBox.warning(self, _(\"Warning\"),\r\n _(\"The object you are trying to modify is \"\r\n \"too big to be sent back to the kernel. \"\r\n \"Therefore, your modifications won't \"\r\n \"take place.\"))\r\n except TypeError as e:\r\n QMessageBox.critical(self, _(\"Error\"),\r\n \"TypeError: %s\" % to_text_string(e))\r\n self.shellwidget.refresh_namespacebrowser()\r\n\r\n def remove_values(self, names):\r\n \"\"\"Remove values from data\"\"\"\r\n for name in names:\r\n self.shellwidget.remove_value(name)\r\n self.shellwidget.refresh_namespacebrowser()\r\n\r\n def copy_value(self, orig_name, new_name):\r\n \"\"\"Copy value\"\"\"\r\n self.shellwidget.copy_value(orig_name, new_name)\r\n self.shellwidget.refresh_namespacebrowser()\r\n\r\n def is_list(self, name):\r\n \"\"\"Return True if variable is a list, a tuple or a set\"\"\"\r\n return self.var_properties[name]['is_list']\r\n\r\n def is_dict(self, name):\r\n \"\"\"Return True if variable is a dictionary\"\"\"\r\n return self.var_properties[name]['is_dict']\r\n\r\n def get_len(self, name):\r\n \"\"\"Return sequence length\"\"\"\r\n return self.var_properties[name]['len']\r\n\r\n def is_array(self, name):\r\n \"\"\"Return True if variable is a NumPy array\"\"\"\r\n return self.var_properties[name]['is_array']\r\n\r\n def is_image(self, name):\r\n \"\"\"Return True if variable is a PIL.Image image\"\"\"\r\n return self.var_properties[name]['is_image']\r\n\r\n def is_data_frame(self, name):\r\n \"\"\"Return True if variable is a DataFrame\"\"\"\r\n return self.var_properties[name]['is_data_frame']\r\n\r\n def is_series(self, name):\r\n \"\"\"Return True if variable is a Series\"\"\"\r\n return self.var_properties[name]['is_series']\r\n\r\n def get_array_shape(self, name):\r\n \"\"\"Return array's shape\"\"\"\r\n return self.var_properties[name]['array_shape']\r\n\r\n def get_array_ndim(self, name):\r\n \"\"\"Return array's ndim\"\"\"\r\n return self.var_properties[name]['array_ndim']\r\n\r\n def plot(self, name, funcname):\r\n \"\"\"Plot item\"\"\"\r\n sw = self.shellwidget\r\n if sw._reading:\r\n sw.dbg_exec_magic('varexp', '--%s %s' % (funcname, name))\r\n else:\r\n sw.execute(\"%%varexp --%s %s\" % (funcname, name))\r\n\r\n def imshow(self, name):\r\n \"\"\"Show item's image\"\"\"\r\n sw = self.shellwidget\r\n if sw._reading:\r\n sw.dbg_exec_magic('varexp', '--imshow %s' % name)\r\n else:\r\n sw.execute(\"%%varexp --imshow %s\" % name)\r\n\r\n def show_image(self, name):\r\n \"\"\"Show image (item is a PIL image)\"\"\"\r\n command = \"%s.show()\" % name\r\n sw = self.shellwidget\r\n if sw._reading:\r\n sw.kernel_client.input(command)\r\n else:\r\n sw.execute(command)\r\n\r\n # -------------------------------------------------------------------------\r\n\r\n def setup_menu(self, minmax):\r\n \"\"\"Setup context menu.\"\"\"\r\n menu = BaseTableView.setup_menu(self, minmax)\r\n return menu\r\n\r\n\r\n# 
=============================================================================\r\n# Tests\r\n# =============================================================================\r\ndef get_test_data():\r\n \"\"\"Create test data.\"\"\"\r\n import numpy as np\r\n from spyder.pil_patch import Image\r\n image = Image.fromarray(np.random.randint(256, size=(100, 100)),\r\n mode='P')\r\n testdict = {'d': 1, 'a': np.random.rand(10, 10), 'b': [1, 2]}\r\n testdate = datetime.date(1945, 5, 8)\r\n test_timedelta = datetime.timedelta(days=-1, minutes=42, seconds=13)\r\n\r\n try:\r\n import pandas as pd\r\n except (ModuleNotFoundError, ImportError):\r\n test_timestamp, test_pd_td, test_dtindex, test_series, test_df = None\r\n else:\r\n test_timestamp = pd.Timestamp(\"1945-05-08T23:01:00.12345\")\r\n test_pd_td = pd.Timedelta(days=2193, hours=12)\r\n test_dtindex = pd.DatetimeIndex(start=\"1939-09-01T\",\r\n end=\"1939-10-06\",\r\n freq=\"12H\")\r\n test_series = pd.Series({\"series_name\": [0, 1, 2, 3, 4, 5]})\r\n test_df = pd.DataFrame({\"string_col\": [\"a\", \"b\", \"c\", \"d\"],\r\n \"int_col\": [0, 1, 2, 3],\r\n \"float_col\": [1.1, 2.2, 3.3, 4.4],\r\n \"bool_col\": [True, False, False, True]})\r\n\r\n class Foobar(object):\r\n\r\n def __init__(self):\r\n self.text = \"toto\"\r\n self.testdict = testdict\r\n self.testdate = testdate\r\n\r\n foobar = Foobar()\r\n return {'object': foobar,\r\n 'module': np,\r\n 'str': 'kjkj kj k j j kj k jkj',\r\n 'unicode': to_text_string('éù', 'utf-8'),\r\n 'list': [1, 3, [sorted, 5, 6], 'kjkj', None],\r\n 'set': {1, 2, 1, 3, None, 'A', 'B', 'C', True, False},\r\n 'tuple': ([1, testdate, testdict, test_timedelta], 'kjkj', None),\r\n 'dict': testdict,\r\n 'float': 1.2233,\r\n 'int': 223,\r\n 'bool': True,\r\n 'array': np.random.rand(10, 10).astype(np.int64),\r\n 'masked_array': np.ma.array([[1, 0], [1, 0]],\r\n mask=[[True, False], [False, False]]),\r\n '1D-array': np.linspace(-10, 10).astype(np.float16),\r\n '3D-array': np.random.randint(2, size=(5, 5, 5)).astype(np.bool_),\r\n 'empty_array': np.array([]),\r\n 'image': image,\r\n 'date': testdate,\r\n 'datetime': datetime.datetime(1945, 5, 8, 23, 1, 0, int(1.5e5)),\r\n 'timedelta': test_timedelta,\r\n 'complex': 2+1j,\r\n 'complex64': np.complex64(2+1j),\r\n 'complex128': np.complex128(9j),\r\n 'int8_scalar': np.int8(8),\r\n 'int16_scalar': np.int16(16),\r\n 'int32_scalar': np.int32(32),\r\n 'int64_scalar': np.int64(64),\r\n 'float16_scalar': np.float16(16),\r\n 'float32_scalar': np.float32(32),\r\n 'float64_scalar': np.float64(64),\r\n 'bool_scalar': np.bool(8),\r\n 'bool__scalar': np.bool_(8),\r\n 'timestamp': test_timestamp,\r\n 'timedelta_pd': test_pd_td,\r\n 'datetimeindex': test_dtindex,\r\n 'series': test_series,\r\n 'ddataframe': test_df,\r\n 'None': None,\r\n 'unsupported1': np.arccos,\r\n 'unsupported2': np.cast,\r\n # Test for Issue #3518\r\n 'big_struct_array': np.zeros(1000, dtype=[('ID', 'f8'),\r\n ('param1', 'f8', 5000)]),\r\n }\r\n\r\n\r\ndef editor_test():\r\n \"\"\"Test Collections editor.\"\"\"\r\n from spyder.utils.qthelpers import qapplication\r\n\r\n app = qapplication() #analysis:ignore\r\n dialog = CollectionsEditor()\r\n dialog.setup(get_test_data())\r\n dialog.show()\r\n app.exec_()\r\n\r\n\r\ndef remote_editor_test():\r\n \"\"\"Test remote collections editor.\"\"\"\r\n from spyder.utils.qthelpers import qapplication\r\n app = qapplication()\r\n\r\n from spyder.config.main import CONF\r\n from spyder_kernels.utils.nsview import (make_remote_view,\r\n REMOTE_SETTINGS)\r\n\r\n settings = {}\r\n 
for name in REMOTE_SETTINGS:\r\n settings[name] = CONF.get('variable_explorer', name)\r\n\r\n remote = make_remote_view(get_test_data(), settings)\r\n dialog = CollectionsEditor()\r\n dialog.setup(remote, remote=True)\r\n dialog.show()\r\n app.exec_()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n editor_test()\r\n remote_editor_test()\r\n" ]
[ [ "numpy.complex128", "pandas.Series", "numpy.linspace", "numpy.bool", "pandas.DataFrame", "numpy.ma.array", "numpy.bool_", "numpy.complex64", "numpy.random.randint", "numpy.float16", "numpy.int8", "pandas.DatetimeIndex", "numpy.save", "numpy.float32", "numpy.zeros", "pandas.Timedelta", "numpy.int64", "numpy.random.rand", "numpy.array", "matplotlib.use", "numpy.int32", "numpy.int16", "numpy.float64", "pandas.Timestamp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
darpanshah-wsu/openAlt_W2021
[ "21926665bcb0ef5b0d6e8f130788bbea6fb3ebb0", "21926665bcb0ef5b0d6e8f130788bbea6fb3ebb0" ]
[ "web/uploadUni.py", "web/downloadResultsJSON.py" ]
[ "# Author: Darpan (Lines 1-236)\n\"\"\"\nMIT License\n\nCopyright (c) 2020 tdbowman-CompSci-F2020\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\nimport os\nimport json\nimport csv\nimport pandas\nimport logging\nimport flask\nimport platform\nimport mysql\nimport shutil\nimport datetime as dt\nimport time\nimport dbQuery\nfrom flask import redirect\nimport emailResults as er\n\n# Importing app config file\npath = os.getcwd() \nparent = os.path.dirname(path) \n#config_path = os.path.join(path, \"config\", \"openAltConfig.json\")\nconfig_path = \"C:\\\\Users\\\\darpa\\\\Desktop\\\\openAlt_W2021\\\\config\\\\openAltConfig.json\"\nf = open(config_path)\n\nAPP_CONFIG = json.load(f)\n\n### SAMPLE AUTHOR API INFO ###\n### https://api.crossref.org/works?query=renear+ontologies ###\n\n\n# Setter for zip directory\ndef setZipUni(path):\n global zipUni\n zipUni = path\n print(\"RESULTS DIRECTORY:\", zipUni)\n\n\n# Getter for zip directory, used to retrieve directory in front end\ndef getZipUni():\n return zipUni\n\n# Setter for stats\ndef setStats(x, y):\n global stats\n stats = 'RESULTS: ' + str(x) + '/' + str(y) + ' FOUND'\n print(stats)\n\n# Getter for stats\ndef getStats():\n return stats\n\n\ndef downloadUni(mysql, dir_csv, type, email):\n\n # time execution of script\n start_time = time.time()\n\n # Directories\n dir_file = str(os.path.dirname(os.path.realpath(__file__)))\n\n # Path of uploaded file\n dir_template = dir_csv\n\n # Path of results folde with current time\n if not os.path.exists(dir_file + '\\\\Results'):\n os.mkdir(dir_file + '\\\\Results')\n\n dir_results = dir_file + '\\\\Results\\\\universityEvents_' + \\\n str(dt.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\"))\n\n # Create folder to hold results\n if not os.path.exists(dir_results):\n os.mkdir(dir_results)\n\n \n # Set the logging parameters\n if not os.path.exists(dir_file + '\\\\Logs'):\n os.mkdir(dir_file + '\\\\Logs')\n logging.basicConfig(filename=dir_file + '\\\\Logs\\\\uploadUniversity.log', filemode='a', level=logging.INFO,\n format='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S')\n\n # Array containing universities listed in uploaded file\n uni_arr = []\n\n\n # Pandas library reads doi list\n uni_list = pandas.read_csv(dir_template, header=None)\n\n\n # Adds doi values into array and prints the array\n for x in range(len(uni_list)):\n uni_arr.append(uni_list.values[x][0].lower())\n\n # Remove duplicates from author array\n uni_arr = list(dict.fromkeys(uni_arr))\n\n # Set up cursor to run SQL query\n db = 
mysql.connection\n cursor = db.cursor()\n\n # Creating text file with API instructions\n f = open(dir_results + '\\\\API_Instructions.txt', 'w+')\n f.write(\"Thank you for using OpenAlt v2.0!\\n\"\n \"We do not provide the complete information listed from the APIs. For more complete and raw information, consider using the CrossRef API with the instructions listed below\\n\\n\"\n \"1) Download Postman from https://www.postman.com/downloads/\\n\"\n \"2) Run a GET Request on Postman, enter a link listed below and hit send\\n\"\n \"3) You will see the output in the body section on the lower third half of the window. Make sure that the *Body* setting is set to *Pretty* and the dropdown to *JSON*\\n\\n\"\n \"You may also use any other API retrieval method, Postman happens to be the method the developers here at OpenAlt use to test APIs\\n\\n\"\n \"For more information about the CrossRef API, checkout the links listed below:\\n\"\n \"https://www.crossref.org/education/retrieve-metadata/rest-api/\\n\"\n \"https://github.com/CrossRef/rest-api-doc\\n\\n\\n\"\n \"YOUR API QUERIES: \\n\")\n\n # Execution of query and output of result + log\n resultSet = []\n count = 0\n\n for uni in uni_arr:\n # Get university Authors\n resultSet = dbQuery.getUniAuthors(uni, cursor)\n logging.info(resultSet)\n\n # Writing API query to API_Instructions.txt\n uni_api = uni.replace(' ','+')\n f.write(APP_CONFIG['Crossref-Metadata-API']['uni_url'] + uni_api + \"\\n\")\n\n # Write result to file.\n df = pandas.DataFrame(resultSet)\n df = df.drop_duplicates()\n\n # If query outputs no results, then author not in database\n if df.empty:\n # CSV containing list of results not found\n emptyResultPath = dir_results + '\\\\NotFound.csv'\n\n with open(emptyResultPath,'a',newline='') as emptyCSV:\n writer = csv.writer(emptyCSV)\n writer.writerow([uni])\n\n logging.info(\"UNIVERSITY NOT FOUND: \" + uni)\n\n else:\n count = count + 1\n # Replace invalid chars for file name\n invalid_chars = ['/','.','(',')',':','<','>','?','|','\\\"','*']\n file_id = uni.replace(' ', '-')\n for char in invalid_chars:\n file_id = file_id.replace(char,'-')\n #print('FILE ID:', file_id)\n\n df.columns = [i[0] for i in cursor.description] ###### CAUSED ISSUE ON SALSBILS MACHINE #######\n \n if type == 'csv':\n resultPath = dir_results + '\\\\' + str(file_id) + '_authorInfo.csv'\n df.to_csv(resultPath,index=False)\n elif type == 'json':\n resultPath = dir_results + '\\\\' + str(file_id) + '_authorInfo.json'\n df.to_json(resultPath, orient='index', indent=2)\n\n\n\n # Author Associated DOIs Query\n resultSet = dbQuery.getUniArticles(uni, cursor)\n logging.info(resultSet)\n\n # Write associated DOI info to file.\n df = pandas.DataFrame(resultSet)\n df = df.drop_duplicates()\n\n if not df.empty:\n df.columns = [i[0] for i in cursor.description]\n\n if type == 'csv':\n resultPath = dir_results + '\\\\' + str(file_id) + '_DOIs.csv'\n df.to_csv(resultPath,index=False)\n elif type == 'json':\n resultPath = dir_results + '\\\\' + str(file_id) + '_DOIs.json'\n df.to_json(resultPath, orient='index', indent=2)\n\n\n # Close API_Instructions.txt\n f.close()\n\n # Stats of query\n print('\\n')\n setStats(count, len(uni_arr))\n\n # Zip the folder\n shutil.make_archive(str(dir_results), 'zip', dir_results)\n\n # Delete unzipped folder\n if os.path.exists(dir_results):\n shutil.rmtree(dir_results)\n\n # Path of zip folder\n zipUni = dir_results + '.zip'\n setZipUni(zipUni)\n \n # Send Results via email\n er.emailResults(zipUni, email, 'uni')\n\n # Insert User to Table\n 
dbQuery.bulkSearchUserInsert(email, 'uni', cursor, db)\n\n dbQuery.checkUser(email, 'uni', cursor)\n\n # Time taken to execute script\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n\n return zipUni\n\n\n###### Darpan End ######\n\n# ====================== Beginning of Salsabil's Code ======================\n\ndef searchByUni(mysql, fileName, type, email):\n\n # Directory of doi list\n dir = '../web/uploadFiles/' + fileName\n\n downloadUni(mysql, dir, type, email)\n\n # Delete uploaded file\n if os.path.exists(dir):\n os.remove(dir)\n\n return flask.render_template('downloadUni.html', results=getStats())\n\n# ========================= End of Salsabil's Code =========================\n", "# Tabish Shaikh's work\n\nimport zipfile\nimport zlib\nimport json\nimport os\nimport pandas\n\n# directories\ndir_file = str(os.path.dirname(os.path.realpath(__file__)))\ndir_results = dir_file + '\\\\Results\\\\'\n\ndef downloadResultsAsJSON(csvDir,zipName,jsonName):\n # Check for any zip file with the same name\n if os.path.exists(zipName):\n # If necessary, delete it to prevent any errors\n os.remove(zipName)\n # Folder where zip file will be stored\n zipPath = dir_results + str(zipName)\n # Convert file from DataFrame to json\n tempFile = pandas.read_csv(csvDir)\n tempFile.to_json(jsonName)\n # Zip downloaded json file\n zipfile.ZipFile(zipPath, mode = 'w', compression = zipfile.ZIP_DEFLATED).write(jsonName)\n\nif __name__ == '__main__':\n # Filler top level code\n downloadResultsAsJSON('uploadDOI_Results.json','uploadDOI_ResultsJSON.zip','uploadDOI_Results.json')\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ], [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
CGruich/ocp
[ "dd97972b39d4a05e37f745e393a5245657ef5f9e" ]
[ "ocpmodels/trainers/forces_trainer.py" ]
[ "\"\"\"\nCopyright (c) Facebook, Inc. and its affiliates.\n\nThis source code is licensed under the MIT license found in the\nLICENSE file in the root directory of this source tree.\n\"\"\"\n\nimport os\nfrom collections import defaultdict\n\nimport numpy as np\nimport torch\nimport torch_geometric\nfrom torch.utils.data import DataLoader, DistributedSampler\nfrom tqdm import tqdm\n\nfrom ocpmodels.common import distutils\nfrom ocpmodels.common.data_parallel import ParallelCollater\nfrom ocpmodels.common.registry import registry\nfrom ocpmodels.common.relaxation.ml_relaxation import ml_relax\nfrom ocpmodels.common.utils import plot_histogram\nfrom ocpmodels.modules.evaluator import Evaluator\nfrom ocpmodels.modules.normalizer import Normalizer\nfrom ocpmodels.trainers.base_trainer import BaseTrainer\n\n\[email protected]_trainer(\"forces\")\nclass ForcesTrainer(BaseTrainer):\n \"\"\"\n Trainer class for the Structure to Energy & Force (S2EF) and Initial State to\n Relaxed State (IS2RS) tasks.\n\n .. note::\n\n Examples of configurations for task, model, dataset and optimizer\n can be found in `configs/ocp_s2ef <https://github.com/Open-Catalyst-Project/baselines/tree/master/configs/ocp_is2re/>`_\n and `configs/ocp_is2rs <https://github.com/Open-Catalyst-Project/baselines/tree/master/configs/ocp_is2rs/>`_.\n\n Args:\n task (dict): Task configuration.\n model (dict): Model configuration.\n dataset (dict): Dataset configuration. The dataset needs to be a SinglePointLMDB dataset.\n optimizer (dict): Optimizer configuration.\n identifier (str): Experiment identifier that is appended to log directory.\n run_dir (str, optional): Path to the run directory where logs are to be saved.\n (default: :obj:`None`)\n is_debug (bool, optional): Run in debug mode.\n (default: :obj:`False`)\n is_vis (bool, optional): Run in debug mode.\n (default: :obj:`False`)\n is_hpo (bool, optional): Run hyperparameter optimization with Ray Tune.\n (default: :obj:`False`)\n print_every (int, optional): Frequency of printing logs.\n (default: :obj:`100`)\n seed (int, optional): Random number seed.\n (default: :obj:`None`)\n logger (str, optional): Type of logger to be used.\n (default: :obj:`tensorboard`)\n local_rank (int, optional): Local rank of the process, only applicable for distributed training.\n (default: :obj:`0`)\n amp (bool, optional): Run using automatic mixed precision.\n (default: :obj:`False`)\n \"\"\"\n\n def __init__(\n self,\n task,\n model,\n dataset,\n optimizer,\n identifier,\n run_dir=None,\n is_debug=False,\n is_vis=False,\n is_hpo=False,\n print_every=100,\n seed=None,\n logger=\"tensorboard\",\n local_rank=0,\n amp=False,\n cpu=False,\n ):\n super().__init__(\n task=task,\n model=model,\n dataset=dataset,\n optimizer=optimizer,\n identifier=identifier,\n run_dir=run_dir,\n is_debug=is_debug,\n is_vis=is_vis,\n is_hpo=is_hpo,\n print_every=print_every,\n seed=seed,\n logger=logger,\n local_rank=local_rank,\n amp=amp,\n cpu=cpu,\n name=\"s2ef\",\n )\n\n def load_task(self):\n print(\"### Loading dataset: {}\".format(self.config[\"task\"][\"dataset\"]))\n\n self.parallel_collater = ParallelCollater(\n 1 if not self.cpu else 0,\n self.config[\"model_attributes\"].get(\"otf_graph\", False),\n )\n if self.config[\"task\"][\"dataset\"] == \"trajectory_lmdb\":\n self.train_dataset = registry.get_dataset_class(\n self.config[\"task\"][\"dataset\"]\n )(self.config[\"dataset\"])\n\n self.train_sampler = DistributedSampler(\n self.train_dataset,\n num_replicas=distutils.get_world_size(),\n 
rank=distutils.get_rank(),\n shuffle=True,\n )\n\n self.train_loader = DataLoader(\n self.train_dataset,\n batch_size=self.config[\"optim\"][\"batch_size\"],\n collate_fn=self.parallel_collater,\n num_workers=self.config[\"optim\"][\"num_workers\"],\n pin_memory=True,\n sampler=self.train_sampler,\n )\n\n self.val_loader = self.test_loader = None\n self.val_sampler = self.test_sampler = None\n\n if \"val_dataset\" in self.config:\n self.val_dataset = registry.get_dataset_class(\n self.config[\"task\"][\"dataset\"]\n )(self.config[\"val_dataset\"])\n self.val_sampler = DistributedSampler(\n self.val_dataset,\n num_replicas=distutils.get_world_size(),\n rank=distutils.get_rank(),\n shuffle=False,\n )\n self.val_loader = DataLoader(\n self.val_dataset,\n self.config[\"optim\"].get(\"eval_batch_size\", 64),\n collate_fn=self.parallel_collater,\n num_workers=self.config[\"optim\"][\"num_workers\"],\n pin_memory=True,\n sampler=self.val_sampler,\n )\n if \"test_dataset\" in self.config:\n self.test_dataset = registry.get_dataset_class(\n self.config[\"task\"][\"dataset\"]\n )(self.config[\"test_dataset\"])\n self.test_sampler = DistributedSampler(\n self.test_dataset,\n num_replicas=distutils.get_world_size(),\n rank=distutils.get_rank(),\n shuffle=False,\n )\n self.test_loader = DataLoader(\n self.test_dataset,\n self.config[\"optim\"].get(\"eval_batch_size\", 64),\n collate_fn=self.parallel_collater,\n num_workers=self.config[\"optim\"][\"num_workers\"],\n pin_memory=True,\n sampler=self.test_sampler,\n )\n\n if \"relax_dataset\" in self.config[\"task\"]:\n assert os.path.isfile(self.config[\"task\"][\"relax_dataset\"][\"src\"])\n\n self.relax_dataset = registry.get_dataset_class(\n \"single_point_lmdb\"\n )(self.config[\"task\"][\"relax_dataset\"])\n\n self.relax_sampler = DistributedSampler(\n self.relax_dataset,\n num_replicas=distutils.get_world_size(),\n rank=distutils.get_rank(),\n shuffle=False,\n )\n self.relax_loader = DataLoader(\n self.relax_dataset,\n batch_size=self.config[\"optim\"].get(\"eval_batch_size\", 64),\n collate_fn=self.parallel_collater,\n num_workers=self.config[\"optim\"][\"num_workers\"],\n pin_memory=True,\n sampler=self.relax_sampler,\n )\n\n self.num_targets = 1\n\n # Normalizer for the dataset.\n # Compute mean, std of training set labels.\n self.normalizers = {}\n if self.config[\"dataset\"].get(\"normalize_labels\", False):\n if \"target_mean\" in self.config[\"dataset\"]:\n self.normalizers[\"target\"] = Normalizer(\n mean=self.config[\"dataset\"][\"target_mean\"],\n std=self.config[\"dataset\"][\"target_std\"],\n device=self.device,\n )\n else:\n self.normalizers[\"target\"] = Normalizer(\n tensor=self.train_loader.dataset.data.y[\n self.train_loader.dataset.__indices__\n ],\n device=self.device,\n )\n\n # If we're computing gradients wrt input, set mean of normalizer to 0 --\n # since it is lost when compute dy / dx -- and std to forward target std\n if self.config[\"model_attributes\"].get(\"regress_forces\", True):\n if self.config[\"dataset\"].get(\"normalize_labels\", False):\n if \"grad_target_mean\" in self.config[\"dataset\"]:\n self.normalizers[\"grad_target\"] = Normalizer(\n mean=self.config[\"dataset\"][\"grad_target_mean\"],\n std=self.config[\"dataset\"][\"grad_target_std\"],\n device=self.device,\n )\n else:\n self.normalizers[\"grad_target\"] = Normalizer(\n tensor=self.train_loader.dataset.data.y[\n self.train_loader.dataset.__indices__\n ],\n device=self.device,\n )\n self.normalizers[\"grad_target\"].mean.fill_(0)\n\n if (\n self.is_vis\n 
and self.config[\"task\"][\"dataset\"] != \"qm9\"\n and distutils.is_master()\n ):\n # Plot label distribution.\n plots = [\n plot_histogram(\n self.train_loader.dataset.data.y.tolist(),\n xlabel=\"{}/raw\".format(self.config[\"task\"][\"labels\"][0]),\n ylabel=\"# Examples\",\n title=\"Split: train\",\n ),\n plot_histogram(\n self.val_loader.dataset.data.y.tolist(),\n xlabel=\"{}/raw\".format(self.config[\"task\"][\"labels\"][0]),\n ylabel=\"# Examples\",\n title=\"Split: val\",\n ),\n plot_histogram(\n self.test_loader.dataset.data.y.tolist(),\n xlabel=\"{}/raw\".format(self.config[\"task\"][\"labels\"][0]),\n ylabel=\"# Examples\",\n title=\"Split: test\",\n ),\n ]\n self.logger.log_plots(plots)\n\n # Takes in a new data source and generates predictions on it.\n @torch.no_grad()\n def predict(\n self, data_loader, per_image=True, results_file=None, disable_tqdm=True\n ):\n if distutils.is_master() and not disable_tqdm:\n print(\"### Predicting on test.\")\n assert isinstance(\n data_loader,\n (\n torch.utils.data.dataloader.DataLoader,\n torch_geometric.data.Batch,\n ),\n )\n rank = distutils.get_rank()\n\n if isinstance(data_loader, torch_geometric.data.Batch):\n data_loader = [[data_loader]]\n\n # Set model to evaluation mode.\n self.model.eval()\n\n # If dropout_on_inference = True, turn the dropout layers on to use dropout on prediction.\n # dropout_on_inference is defined in the model section of config.yml files. Otherwise it defaults to False.\n # For every layer in the PyTorch model\n if self.config[\"model_attributes\"].get(\"dropout_on_inference\", False):\n for module in self.model.modules():\n # Layers are custom classes. If the custom class has the name \"Dropout\",\n if module.__class__.__name__.startswith(\"Dropout\"):\n print(\"DROPOUT LAYER ENABLED IN PREDICTION\")\n # Set that layer to training mode.\n # Note that dropout layers in PyTorch (nn.Dropout()) are only activated in training mode.\n # Otherwise they are disabled in evaluation mode (model.eval())\n # So if we wanted to use the dropout layers outside of training, we just manually reset them to training mode again.\n module.train()\n\n if self.normalizers is not None and \"target\" in self.normalizers:\n self.normalizers[\"target\"].to(self.device)\n self.normalizers[\"grad_target\"].to(self.device)\n\n predictions = {\"id\": [], \"energy\": [], \"forces\": [], \"chunk_idx\": []}\n\n for i, batch_list in tqdm(\n enumerate(data_loader),\n total=len(data_loader),\n position=rank,\n desc=\"device {}\".format(rank),\n disable=disable_tqdm,\n ):\n with torch.cuda.amp.autocast(enabled=self.scaler is not None):\n out = self._forward(batch_list)\n\n if self.normalizers is not None and \"target\" in self.normalizers:\n out[\"energy\"] = self.normalizers[\"target\"].denorm(\n out[\"energy\"]\n )\n out[\"forces\"] = self.normalizers[\"grad_target\"].denorm(\n out[\"forces\"]\n )\n if per_image:\n systemids = [\n str(i) + \"_\" + str(j)\n for i, j in zip(\n batch_list[0].sid.tolist(), batch_list[0].fid.tolist()\n )\n ]\n predictions[\"id\"].extend(systemids)\n predictions[\"energy\"].extend(\n out[\"energy\"].to(torch.float16).tolist()\n )\n batch_natoms = torch.cat(\n [batch.natoms for batch in batch_list]\n )\n batch_fixed = torch.cat([batch.fixed for batch in batch_list])\n forces = out[\"forces\"].cpu().detach().to(torch.float16)\n per_image_forces = torch.split(forces, batch_natoms.tolist())\n per_image_forces = [\n force.numpy() for force in per_image_forces\n ]\n # evalAI only requires forces on free atoms\n if 
results_file is not None:\n _per_image_fixed = torch.split(\n batch_fixed, batch_natoms.tolist()\n )\n _per_image_free_forces = [\n force[(fixed == 0).tolist()]\n for force, fixed in zip(\n per_image_forces, _per_image_fixed\n )\n ]\n _chunk_idx = np.array(\n [\n free_force.shape[0]\n for free_force in _per_image_free_forces\n ]\n )\n per_image_forces = _per_image_free_forces\n predictions[\"chunk_idx\"].extend(_chunk_idx)\n predictions[\"forces\"].extend(per_image_forces)\n else:\n predictions[\"energy\"] = out[\"energy\"].detach()\n predictions[\"forces\"] = out[\"forces\"].detach()\n return predictions\n\n predictions[\"forces\"] = np.array(predictions[\"forces\"])\n predictions[\"chunk_idx\"] = np.array(predictions[\"chunk_idx\"])\n predictions[\"energy\"] = np.array(predictions[\"energy\"])\n predictions[\"id\"] = np.array(predictions[\"id\"])\n self.save_results(\n predictions, results_file, keys=[\"energy\", \"forces\", \"chunk_idx\"]\n )\n return predictions\n\n def train(self):\n eval_every = self.config[\"optim\"].get(\n \"eval_every\", len(self.train_loader)\n )\n primary_metric = self.config[\"task\"].get(\n \"primary_metric\", self.evaluator.task_primary_metric[self.name]\n )\n self.best_val_metric = 1e9 if \"mae\" in primary_metric else -1.0\n iters = 0\n self.metrics = {}\n\n start_epoch = self.start_step // len(self.train_loader)\n for epoch in range(start_epoch, self.config[\"optim\"][\"max_epochs\"]):\n self.train_sampler.set_epoch(epoch)\n skip_steps = 0\n if epoch == start_epoch and start_epoch > 0:\n skip_steps = start_epoch % len(self.train_loader)\n train_loader_iter = iter(self.train_loader)\n\n for i in range(skip_steps, len(self.train_loader)):\n self.model.train()\n current_epoch = epoch + (i + 1) / len(self.train_loader)\n current_step = epoch * len(self.train_loader) + (i + 1)\n\n # Get a batch.\n batch = next(train_loader_iter)\n\n # Forward, loss, backward.\n with torch.cuda.amp.autocast(enabled=self.scaler is not None):\n out = self._forward(batch)\n loss = self._compute_loss(out, batch)\n loss = self.scaler.scale(loss) if self.scaler else loss\n self._backward(loss)\n scale = self.scaler.get_scale() if self.scaler else 1.0\n\n # Compute metrics.\n self.metrics = self._compute_metrics(\n out,\n batch,\n self.evaluator,\n self.metrics,\n )\n self.metrics = self.evaluator.update(\n \"loss\", loss.item() / scale, self.metrics\n )\n\n # Log metrics.\n log_dict = {k: self.metrics[k][\"metric\"] for k in self.metrics}\n log_dict.update(\n {\n \"lr\": self.scheduler.get_lr(),\n \"epoch\": current_epoch,\n \"step\": current_step,\n }\n )\n if (\n current_step % self.config[\"cmd\"][\"print_every\"] == 0\n and distutils.is_master()\n and not self.is_hpo\n ):\n log_str = [\n \"{}: {:.2e}\".format(k, v) for k, v in log_dict.items()\n ]\n print(\", \".join(log_str))\n self.metrics = {}\n\n if self.logger is not None:\n self.logger.log(\n log_dict,\n step=current_step,\n split=\"train\",\n )\n\n iters += 1\n\n # Evaluate on val set every `eval_every` iterations.\n if iters % eval_every == 0:\n if self.val_loader is not None:\n val_metrics = self.validate(\n split=\"val\",\n epoch=epoch - 1 + (i + 1) / len(self.train_loader),\n )\n if (\n \"mae\" in primary_metric\n and val_metrics[primary_metric][\"metric\"]\n < self.best_val_metric\n ) or (\n val_metrics[primary_metric][\"metric\"]\n > self.best_val_metric\n ):\n self.best_val_metric = val_metrics[primary_metric][\n \"metric\"\n ]\n self.save(current_epoch, current_step, val_metrics)\n if self.test_loader is not None:\n 
self.predict(\n self.test_loader,\n results_file=\"predictions\",\n disable_tqdm=False,\n )\n\n if self.is_hpo:\n self.hpo_update(\n current_epoch,\n current_step,\n self.metrics,\n val_metrics,\n )\n\n else:\n self.save(current_epoch, current_step, self.metrics)\n\n if self.scheduler.scheduler_type == \"ReduceLROnPlateau\":\n if iters % eval_every == 0:\n self.scheduler.step(\n metrics=val_metrics[primary_metric][\"metric\"],\n )\n else:\n self.scheduler.step()\n\n torch.cuda.empty_cache()\n\n self.train_dataset.close_db()\n if \"val_dataset\" in self.config:\n self.val_dataset.close_db()\n if \"test_dataset\" in self.config:\n self.test_dataset.close_db()\n\n def _forward(self, batch_list):\n # forward pass.\n if self.config[\"model_attributes\"].get(\"regress_forces\", True):\n out_energy, out_forces = self.model(batch_list)\n else:\n out_energy = self.model(batch_list)\n\n if out_energy.shape[-1] == 1:\n out_energy = out_energy.view(-1)\n\n out = {\n \"energy\": out_energy,\n }\n\n if self.config[\"model_attributes\"].get(\"regress_forces\", True):\n out[\"forces\"] = out_forces\n\n return out\n\n def _compute_loss(self, out, batch_list):\n loss = []\n\n # Energy loss.\n energy_target = torch.cat(\n [batch.y.to(self.device) for batch in batch_list], dim=0\n )\n if self.config[\"dataset\"].get(\"normalize_labels\", False):\n energy_target = self.normalizers[\"target\"].norm(energy_target)\n energy_mult = self.config[\"optim\"].get(\"energy_coefficient\", 1)\n loss.append(energy_mult * self.criterion(out[\"energy\"], energy_target))\n\n # Force loss.\n if self.config[\"model_attributes\"].get(\"regress_forces\", True):\n force_target = torch.cat(\n [batch.force.to(self.device) for batch in batch_list], dim=0\n )\n if self.config[\"dataset\"].get(\"normalize_labels\", False):\n force_target = self.normalizers[\"grad_target\"].norm(\n force_target\n )\n\n tag_specific_weights = self.config[\"task\"].get(\n \"tag_specific_weights\", []\n )\n if tag_specific_weights != []:\n # handle tag specific weights as introduced in forcenet\n assert len(tag_specific_weights) == 3\n\n batch_tags = torch.cat(\n [\n batch.tags.float().to(self.device)\n for batch in batch_list\n ],\n dim=0,\n )\n weight = torch.zeros_like(batch_tags)\n weight[batch_tags == 0] = tag_specific_weights[0]\n weight[batch_tags == 1] = tag_specific_weights[1]\n weight[batch_tags == 2] = tag_specific_weights[2]\n\n loss_force_list = torch.abs(out[\"forces\"] - force_target)\n train_loss_force_unnormalized = torch.sum(\n loss_force_list * weight.view(-1, 1)\n )\n train_loss_force_normalizer = 3.0 * weight.sum()\n\n # add up normalizer to obtain global normalizer\n distutils.all_reduce(train_loss_force_normalizer)\n\n # perform loss normalization before backprop\n train_loss_force_normalized = train_loss_force_unnormalized * (\n distutils.get_world_size() / train_loss_force_normalizer\n )\n loss.append(train_loss_force_normalized)\n\n else:\n # Force coefficient = 30 has been working well for us.\n force_mult = self.config[\"optim\"].get(\"force_coefficient\", 30)\n if self.config[\"task\"].get(\"train_on_free_atoms\", False):\n fixed = torch.cat(\n [batch.fixed.to(self.device) for batch in batch_list]\n )\n mask = fixed == 0\n loss.append(\n force_mult\n * self.criterion(\n out[\"forces\"][mask], force_target[mask]\n )\n )\n else:\n loss.append(\n force_mult\n * self.criterion(out[\"forces\"], force_target)\n )\n # Sanity check to make sure the compute graph is correct.\n for lc in loss:\n assert hasattr(lc, \"grad_fn\")\n\n loss = 
sum(loss)\n return loss\n\n def _compute_metrics(self, out, batch_list, evaluator, metrics={}):\n natoms = torch.cat(\n [batch.natoms.to(self.device) for batch in batch_list], dim=0\n )\n\n target = {\n \"energy\": torch.cat(\n [batch.y.to(self.device) for batch in batch_list], dim=0\n ),\n \"forces\": torch.cat(\n [batch.force.to(self.device) for batch in batch_list], dim=0\n ),\n \"natoms\": natoms,\n }\n\n out[\"natoms\"] = natoms\n\n if self.config[\"task\"].get(\"eval_on_free_atoms\", True):\n fixed = torch.cat(\n [batch.fixed.to(self.device) for batch in batch_list]\n )\n mask = fixed == 0\n out[\"forces\"] = out[\"forces\"][mask]\n target[\"forces\"] = target[\"forces\"][mask]\n\n s_idx = 0\n natoms_free = []\n for natoms in target[\"natoms\"]:\n natoms_free.append(\n torch.sum(mask[s_idx : s_idx + natoms]).item()\n )\n s_idx += natoms\n target[\"natoms\"] = torch.LongTensor(natoms_free).to(self.device)\n out[\"natoms\"] = torch.LongTensor(natoms_free).to(self.device)\n\n if self.config[\"dataset\"].get(\"normalize_labels\", False):\n out[\"energy\"] = self.normalizers[\"target\"].denorm(out[\"energy\"])\n out[\"forces\"] = self.normalizers[\"grad_target\"].denorm(\n out[\"forces\"]\n )\n\n metrics = evaluator.eval(out, target, prev_metrics=metrics)\n return metrics\n\n def run_relaxations(self, split=\"val\", epoch=None):\n print(\"### Running ML-relaxations\")\n self.model.eval()\n\n evaluator, metrics = Evaluator(task=\"is2rs\"), {}\n\n if hasattr(self.relax_dataset[0], \"pos_relaxed\") and hasattr(\n self.relax_dataset[0], \"y_relaxed\"\n ):\n split = \"val\"\n else:\n split = \"test\"\n\n ids = []\n relaxed_positions = []\n chunk_idx = []\n for i, batch in tqdm(\n enumerate(self.relax_loader), total=len(self.relax_loader)\n ):\n relaxed_batch = ml_relax(\n batch=batch,\n model=self,\n steps=self.config[\"task\"].get(\"relaxation_steps\", 200),\n fmax=self.config[\"task\"].get(\"relaxation_fmax\", 0.0),\n relax_opt=self.config[\"task\"][\"relax_opt\"],\n device=self.device,\n transform=None,\n )\n\n if self.config[\"task\"].get(\"write_pos\", False):\n systemids = [str(i) for i in relaxed_batch.sid.tolist()]\n natoms = relaxed_batch.natoms.tolist()\n positions = torch.split(relaxed_batch.pos, natoms)\n batch_relaxed_positions = [pos.tolist() for pos in positions]\n\n relaxed_positions += batch_relaxed_positions\n chunk_idx += natoms\n ids += systemids\n\n if split == \"val\":\n mask = relaxed_batch.fixed == 0\n s_idx = 0\n natoms_free = []\n for natoms in relaxed_batch.natoms:\n natoms_free.append(\n torch.sum(mask[s_idx : s_idx + natoms]).item()\n )\n s_idx += natoms\n\n target = {\n \"energy\": relaxed_batch.y_relaxed,\n \"positions\": relaxed_batch.pos_relaxed[mask],\n \"cell\": relaxed_batch.cell,\n \"pbc\": torch.tensor([True, True, True]),\n \"natoms\": torch.LongTensor(natoms_free),\n }\n\n prediction = {\n \"energy\": relaxed_batch.y,\n \"positions\": relaxed_batch.pos[mask],\n \"cell\": relaxed_batch.cell,\n \"pbc\": torch.tensor([True, True, True]),\n \"natoms\": torch.LongTensor(natoms_free),\n }\n\n metrics = evaluator.eval(prediction, target, metrics)\n\n if self.config[\"task\"].get(\"write_pos\", False):\n rank = distutils.get_rank()\n pos_filename = os.path.join(\n self.config[\"cmd\"][\"results_dir\"], f\"relaxed_pos_{rank}.npz\"\n )\n np.savez_compressed(\n pos_filename,\n ids=ids,\n pos=np.array(relaxed_positions, dtype=object),\n chunk_idx=chunk_idx,\n )\n\n distutils.synchronize()\n if distutils.is_master():\n gather_results = defaultdict(list)\n full_path = 
os.path.join(\n self.config[\"cmd\"][\"results_dir\"],\n \"relaxed_positions.npz\",\n )\n\n for i in range(distutils.get_world_size()):\n rank_path = os.path.join(\n self.config[\"cmd\"][\"results_dir\"],\n f\"relaxed_pos_{i}.npz\",\n )\n rank_results = np.load(rank_path, allow_pickle=True)\n gather_results[\"ids\"].extend(rank_results[\"ids\"])\n gather_results[\"pos\"].extend(rank_results[\"pos\"])\n gather_results[\"chunk_idx\"].extend(\n rank_results[\"chunk_idx\"]\n )\n os.remove(rank_path)\n\n # Because of how distributed sampler works, some system ids\n # might be repeated to make no. of samples even across GPUs.\n _, idx = np.unique(gather_results[\"ids\"], return_index=True)\n gather_results[\"ids\"] = np.array(gather_results[\"ids\"])[idx]\n gather_results[\"pos\"] = np.concatenate(\n np.array(gather_results[\"pos\"])[idx]\n )\n gather_results[\"chunk_idx\"] = np.cumsum(\n np.array(gather_results[\"chunk_idx\"])[idx]\n )[\n :-1\n ] # np.split does not need last idx, assumes n-1:end\n\n print(f\"Writing results to {full_path}\")\n np.savez_compressed(full_path, **gather_results)\n\n if split == \"val\":\n aggregated_metrics = {}\n for k in metrics:\n aggregated_metrics[k] = {\n \"total\": distutils.all_reduce(\n metrics[k][\"total\"], average=False, device=self.device\n ),\n \"numel\": distutils.all_reduce(\n metrics[k][\"numel\"], average=False, device=self.device\n ),\n }\n aggregated_metrics[k][\"metric\"] = (\n aggregated_metrics[k][\"total\"]\n / aggregated_metrics[k][\"numel\"]\n )\n metrics = aggregated_metrics\n\n # Make plots.\n log_dict = {k: metrics[k][\"metric\"] for k in metrics}\n if self.logger is not None and epoch is not None:\n self.logger.log(\n log_dict,\n step=(epoch + 1) * len(self.train_loader),\n split=split,\n )\n\n if distutils.is_master():\n print(metrics)\n" ]
[ [ "torch.abs", "torch.LongTensor", "torch.cat", "numpy.unique", "torch.utils.data.DataLoader", "torch.cuda.empty_cache", "torch.zeros_like", "torch.cuda.amp.autocast", "torch.tensor", "torch.sum", "numpy.savez_compressed", "torch.no_grad", "torch.split", "numpy.load", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
thatguuyG/ivy
[ "09447a9670d440a309b62cfb468c1036e3a4f5ed" ]
[ "ivy_tests/test_ivy/test_functional/test_nn/test_losses.py" ]
[ "# global\nimport pytest\nimport numpy as np\nfrom hypothesis import given, assume, strategies as st\n\n# local\nimport ivy\nimport ivy.functional.backends.numpy as ivy_np\nimport ivy_tests.test_ivy.helpers as helpers\n\n\n# cross_entropy\n@given(\n dtype_and_x=helpers.dtype_and_values(ivy_np.valid_numeric_dtype_strs, 2),\n as_variable=helpers.list_of_length(st.booleans(), 2),\n num_positional_args=st.integers(0, 2),\n native_array=helpers.list_of_length(st.booleans(), 2),\n container=helpers.list_of_length(st.booleans(), 2),\n instance_method=st.booleans(),\n)\ndef test_cross_entropy(\n dtype_and_x,\n as_variable,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n):\n dtype, x = dtype_and_x\n assume(not any(d in ivy.invalid_dtype_strs for d in dtype))\n if fw == \"torch\" and dtype == \"float16\":\n return\n helpers.test_array_function(\n dtype,\n as_variable,\n False,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n \"cross_entropy\",\n true=np.asarray(x[0], dtype=dtype[0]),\n pred=np.asarray(x[1], dtype=dtype[1]),\n )\n\n\n# binary_cross_entropy\n@given(\n dtype_and_x=helpers.dtype_and_values(ivy_np.valid_numeric_dtype_strs, 2),\n as_variable=helpers.list_of_length(st.booleans(), 2),\n num_positional_args=st.integers(0, 2),\n native_array=helpers.list_of_length(st.booleans(), 2),\n container=helpers.list_of_length(st.booleans(), 2),\n instance_method=st.booleans(),\n)\ndef test_binary_cross_entropy(\n dtype_and_x,\n as_variable,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n):\n dtype, x = dtype_and_x\n assume(not any(d in ivy.invalid_dtype_strs for d in dtype))\n if fw == \"torch\" and dtype == \"float16\":\n return\n helpers.test_array_function(\n dtype,\n as_variable,\n False,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n \"binary_cross_entropy\",\n true=np.asarray(x[0], dtype=dtype[0]),\n pred=np.asarray(x[1], dtype=dtype[1]),\n )\n\n\n# sparse_cross_entropy\n@given(\n dtype_and_x=helpers.dtype_and_values(ivy_np.valid_numeric_dtype_strs, 2),\n as_variable=helpers.list_of_length(st.booleans(), 2),\n num_positional_args=st.integers(0, 2),\n native_array=helpers.list_of_length(st.booleans(), 2),\n container=helpers.list_of_length(st.booleans(), 2),\n instance_method=st.booleans(),\n)\ndef test_sparse_cross_entropy(\n dtype_and_x,\n as_variable,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n):\n dtype, x = dtype_and_x\n assume(not any(d in ivy.invalid_dtype_strs for d in dtype))\n if fw == \"torch\" and dtype == \"float16\":\n return\n helpers.test_array_function(\n dtype,\n as_variable,\n False,\n num_positional_args,\n native_array,\n container,\n instance_method,\n fw,\n \"sparse_cross_entropy\",\n true=np.asarray(x[0], dtype=dtype[0]),\n pred=np.asarray(x[1], dtype=dtype[1]),\n )\n" ]
[ [ "numpy.asarray" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
KonradKarimi/K9UVR_ML
[ "866d25c29e13e1abe676bfcd3e0fff454f22a5f3" ]
[ "generate_tfrecords.py" ]
[ "\"\"\"\nUsage:\n # From tensorflow/models/\n # Create train data:\n python generate_tfrecords.py --csv_input=data/train_labels.csv --output_path=data/records/train.record --image_dir=/Datasets/\n # Create test data:\n python generate_tfrecords.py --csv_input=data/test_labels.csv --output_path=data/records/test.record --image_dir=/Datasets/\n\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nimport os\nimport io\nimport pandas as pd\nimport tensorflow as tf\nimport dataset_util\n\nfrom absl import app, flags, logging\nfrom PIL import Image\nfrom collections import namedtuple, OrderedDict\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string('csv_input', None, 'Path to the CSV input')\nflags.DEFINE_string('output_path', None, 'Path to output TFRecord')\nflags.DEFINE_string('image_dir', None, 'Path to images')\n\n# Required flags.\nflags.mark_flag_as_required(\"csv_input\")\nflags.mark_flag_as_required(\"output_path\")\nflags.mark_flag_as_required(\"image_dir\")\n\n# TO-DO replace this with label map\ndef class_text_to_int(row_label):\n if row_label == 'license_plate':\n return 1\n else:\n None\n\n\ndef split(df, group):\n data = namedtuple('data', ['filename', 'object'])\n gb = df.groupby(group)\n return [data(filename, gb.get_group(x)) for filename, x in zip(gb.groups.keys(), gb.groups)]\n\n\ndef create_tf_example(group, path):\n with tf.io.gfile.GFile(os.path.join(path, '{}'.format(group.filename)), 'rb') as fid:\n encoded_jpg = fid.read()\n encoded_jpg_io = io.BytesIO(encoded_jpg)\n image = Image.open(encoded_jpg_io)\n width, height = image.size\n\n filename = group.filename.encode('utf8')\n image_format = b'jpg'\n xmins = []\n xmaxs = []\n ymins = []\n ymaxs = []\n classes_text = []\n classes = []\n\n for index, row in group.object.iterrows():\n xmins.append(row['xmin'] / width)\n xmaxs.append(row['xmax'] / width)\n ymins.append(row['ymin'] / height)\n ymaxs.append(row['ymax'] / height)\n classes_text.append(row['class'].encode('utf8'))\n classes.append(class_text_to_int(row['class']))\n\n tf_example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': dataset_util.int64_feature(height),\n 'image/width': dataset_util.int64_feature(width),\n 'image/filename': dataset_util.bytes_feature(filename),\n 'image/source_id': dataset_util.bytes_feature(filename),\n 'image/encoded': dataset_util.bytes_feature(encoded_jpg),\n 'image/format': dataset_util.bytes_feature(image_format),\n 'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),\n 'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),\n 'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),\n 'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),\n 'image/object/class/text': dataset_util.bytes_list_feature(classes_text),\n 'image/object/class/label': dataset_util.int64_list_feature(classes),\n }))\n return tf_example\n\n\ndef main(argv):\n del argv # Unused.\n\n writer = tf.io.TFRecordWriter(FLAGS.output_path)\n path = os.path.join(FLAGS.image_dir)\n examples = pd.read_csv(FLAGS.csv_input)\n grouped = split(examples, 'filename')\n for group in grouped:\n tf_example = create_tf_example(group, path)\n writer.write(tf_example.SerializeToString())\n\n writer.close()\n output_path = os.path.join(os.getcwd(), FLAGS.output_path)\n print('Successfully created the TFRecords: {}'.format(output_path))\n\n\nif __name__ == '__main__':\n app.run(main)\n" ]
[ [ "tensorflow.io.TFRecordWriter", "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
NielsRogge/flax
[ "f23cb3823fe27e60ecf3be4fc345e2f8593fde18" ]
[ "examples/wmt/bleu.py" ]
[ "# Copyright 2021 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Copyright 2020 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Parallel BLEU score calculation.\n\nThis version of BLEU calculation is derived from the MLPerf transformer reference.\nTries to match SacreBLEU metric reasonably well, but is not identical.\n\nRefs:\n tokenizer at:\n https://github.com/tensorflow/models/blob/master/official/transformer/utils/tokenizer.py\n original preprocessing tokenizer:\n https://github.com/moses-smt/mosesdecoder/blob/master/scripts/generic/mteval-v14.pl#L954-L983\n original t2t code:\n https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/bleu_hook.py\n\nUsage:\n refs = '''food bar brown cow\n blee bloo dog sat\n or please take me out\n '''\n hyps = '''foo bar brown cow\n blee bloo dog sit\n please do take me out\n '''\n bleu_local(refs.split(\"\\n\"), hyps.split(\"\\n\")) # 39.65\n\"\"\"\n\nimport collections\nimport math\nimport re\nimport sys\nimport unicodedata\nimport numpy as np\nimport six\n\n\nclass UnicodeRegex(object):\n \"\"\"Ad-hoc hack to recognize all punctuation and symbols.\"\"\"\n\n def __init__(self):\n punctuation = self.property_chars(\"P\")\n self.nondigit_punct_re = re.compile(r\"([^\\d])([\" + punctuation + r\"])\")\n self.punct_nondigit_re = re.compile(r\"([\" + punctuation + r\"])([^\\d])\")\n self.symbol_re = re.compile(\"([\" + self.property_chars(\"S\") + \"])\")\n\n def property_chars(self, prefix):\n return \"\".join(\n six.unichr(x)\n for x in range(sys.maxunicode)\n if unicodedata.category(six.unichr(x)).startswith(prefix))\n\n\nuregex = UnicodeRegex()\n\n\ndef bleu_tokenize(string):\n r\"\"\"Tokenize a string following the official BLEU implementation.\n\n See https://github.com/moses-smt/mosesdecoder/'\n 'blob/master/scripts/generic/mteval-v14.pl#L954-L983\n In our case, the input string is expected to be just one line\n and no HTML entities de-escaping is needed.\n So we just tokenize on punctuation and symbols,\n except when a punctuation is preceded and followed by a digit\n (e.g. a comma/dot as a thousand/decimal separator).\n\n Note that a number (e.g. a year) followed by a dot at the end of sentence\n is NOT tokenized, i.e. the dot stays with the number because\n `s/(\\p{P})(\\P{N})/ $1 $2/g` does not match this case (unless we add a\n space after each sentence). 
However, this error is already in the\n original mteval-v14.pl and we want to be consistent with it.\n\n Args:\n string: the input string\n\n Returns:\n a list of tokens\n \"\"\"\n string = uregex.nondigit_punct_re.sub(r\"\\1 \\2 \", string)\n string = uregex.punct_nondigit_re.sub(r\" \\1 \\2\", string)\n string = uregex.symbol_re.sub(r\" \\1 \", string)\n return string.split()\n\n\ndef _get_ngrams(segment, max_order):\n \"\"\"Extracts all n-grams up to a given maximum order from an input segment.\n\n Args:\n segment: text segment from which n-grams will be extracted.\n max_order: maximum length in tokens of the n-grams returned by this methods.\n\n Returns:\n The Counter containing all n-grams up to max_order in segment\n with a count of how many times each n-gram occurred.\n \"\"\"\n ngram_counts = collections.Counter()\n for order in range(1, max_order + 1):\n for i in range(0, len(segment) - order + 1):\n ngram = tuple(segment[i:i + order])\n ngram_counts[ngram] += 1\n return ngram_counts\n\n\ndef compute_bleu_matches(reference_corpus,\n translation_corpus,\n max_order=4):\n \"\"\"Computes BLEU match stats of translations against one or more references.\n\n Args:\n reference_corpus: list of references for each translation. Each reference\n should be tokenized into a list of tokens.\n translation_corpus: list of translations to score. Each translation should\n be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n\n Returns:\n Aggregated n-gram stats for BLEU calculation.\n \"\"\"\n reference_length = 0\n translation_length = 0\n bp = 1.0\n geo_mean = 0\n\n matches_by_order = [0] * max_order\n possible_matches_by_order = [0] * max_order\n precisions = []\n\n for (references, translations) in zip(reference_corpus, translation_corpus):\n reference_length += len(references)\n translation_length += len(translations)\n ref_ngram_counts = _get_ngrams(references, max_order)\n translation_ngram_counts = _get_ngrams(translations, max_order)\n\n overlap = dict((ngram, min(count, translation_ngram_counts[ngram]))\n for ngram, count in ref_ngram_counts.items())\n\n for ngram in overlap:\n matches_by_order[len(ngram) - 1] += overlap[ngram]\n for ngram in translation_ngram_counts:\n possible_matches_by_order[len(ngram) -\n 1] += translation_ngram_counts[ngram]\n\n return (np.array(matches_by_order),\n np.array(possible_matches_by_order),\n np.array(reference_length),\n np.array(translation_length))\n\n\ndef bleu_partial(ref_lines, hyp_lines, case_sensitive=False):\n \"\"\"Compute n-gram statistics for two lists of references and translations.\"\"\"\n if len(ref_lines) != len(hyp_lines):\n raise ValueError(\"Reference and translation lists have different \"\n \"numbers of lines.\")\n if not case_sensitive:\n ref_lines = [x.lower() for x in ref_lines]\n hyp_lines = [x.lower() for x in hyp_lines]\n ref_tokens = [bleu_tokenize(x) for x in ref_lines]\n hyp_tokens = [bleu_tokenize(x) for x in hyp_lines]\n return compute_bleu_matches(ref_tokens, hyp_tokens)\n\n\ndef complete_bleu(matches_by_order,\n possible_matches_by_order,\n reference_length,\n translation_length,\n max_order=4,\n use_bp=True):\n \"\"\"Compute BLEU score from aggregated n-gram statistics.\"\"\"\n precisions = [0] * max_order\n smooth = 1.0\n for i in range(0, max_order):\n if possible_matches_by_order[i] > 0:\n precisions[i] = matches_by_order[i] / possible_matches_by_order[i]\n if matches_by_order[i] > 0:\n precisions[i] = matches_by_order[i] / possible_matches_by_order[i]\n else:\n 
smooth *= 2\n precisions[i] = 1.0 / (smooth * possible_matches_by_order[i])\n else:\n precisions[i] = 0.0\n\n if max(precisions) > 0:\n p_log_sum = sum(math.log(p) for p in precisions if p)\n geo_mean = math.exp(p_log_sum / max_order)\n\n if use_bp:\n if not reference_length:\n bp = 1.0\n else:\n ratio = translation_length / reference_length\n if ratio <= 0.0:\n bp = 0.0\n elif ratio >= 1.0:\n bp = 1.0\n else:\n bp = math.exp(1 - 1. / ratio)\n bleu = geo_mean * bp\n return float(bleu) * 100.0\n\n\ndef bleu_local(ref_lines, hyp_lines, case_sensitive=False):\n \"\"\"Compute BLEU for two lists of reference and hypothesis translations.\"\"\"\n stats = bleu_partial(ref_lines, hyp_lines, case_sensitive=case_sensitive)\n return complete_bleu(*stats) * 100\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
aminatadjer/CodeXGLUE
[ "7d3787979e8e0b504768a16607fe39ee7f1502f3" ]
[ "Code-Text/code-to-text/code/run.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nFine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).\nGPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned\nusing a masked language modeling (MLM) loss.\n\"\"\"\n\nfrom __future__ import absolute_import\nimport os\nimport sys\nfrom bleu import _bleu\nimport pickle\nimport torch\nimport json\nimport random\nimport logging\nimport argparse\nimport numpy as np\nfrom io import open\nfrom itertools import cycle\nimport torch.nn as nn\nfrom model import Seq2Seq\nfrom tqdm import tqdm, trange\nfrom torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset\nfrom torch.utils.data.distributed import DistributedSampler\nfrom transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,\n RobertaConfig, RobertaModel, RobertaTokenizer)\nMODEL_CLASSES = {'roberta': (RobertaConfig, RobertaModel, RobertaTokenizer)}\n\nlogging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt = '%m/%d/%Y %H:%M:%S',\n level = logging.INFO)\nlogger = logging.getLogger(__name__)\n\nclass Example(object):\n \"\"\"A single training/test example.\"\"\"\n def __init__(self,\n idx,\n source,\n target,\n ):\n self.idx = idx\n self.source = source\n self.target = target\n\ndef read_examples(filename):\n \"\"\"Read examples from filename.\"\"\"\n examples=[]\n with open(filename,encoding=\"utf-8\") as f:\n for idx, line in enumerate(f):\n line=line.strip()\n js=json.loads(line)\n if 'idx' not in js:\n js['idx']=idx\n code=' '.join(js['code_tokens']).replace('\\n',' ')\n code=' '.join(code.strip().split())\n nl=' '.join(js['docstring_tokens']).replace('\\n','')\n nl=' '.join(nl.strip().split()) \n examples.append(\n Example(\n idx = idx,\n source=nl,\n target = code,\n ) \n )\n return examples\n\n\nclass InputFeatures(object):\n \"\"\"A single training/test features for a example.\"\"\"\n def __init__(self,\n example_id,\n source_ids,\n target_ids,\n source_mask,\n target_mask,\n\n ):\n self.example_id = example_id\n self.source_ids = source_ids\n self.target_ids = target_ids\n self.source_mask = source_mask\n self.target_mask = target_mask \n \n\n\ndef convert_examples_to_features(examples, tokenizer, args,stage=None):\n features = []\n for example_index, example in enumerate(examples):\n #source\n source_tokens = tokenizer.tokenize(example.source)[:args.max_source_length-2]\n source_tokens =[tokenizer.cls_token]+source_tokens+[tokenizer.sep_token]\n source_ids = tokenizer.convert_tokens_to_ids(source_tokens) \n source_mask = [1] * (len(source_tokens))\n padding_length = args.max_source_length - len(source_ids)\n source_ids+=[tokenizer.pad_token_id]*padding_length\n source_mask+=[0]*padding_length\n \n #target\n if 
stage==\"test\":\n target_tokens = tokenizer.tokenize(\"None\")\n else:\n target_tokens = tokenizer.tokenize(example.target)[:args.max_target_length-2]\n target_tokens = [tokenizer.cls_token]+target_tokens+[tokenizer.sep_token] \n target_ids = tokenizer.convert_tokens_to_ids(target_tokens)\n target_mask = [1] *len(target_ids)\n padding_length = args.max_target_length - len(target_ids)\n target_ids+=[tokenizer.pad_token_id]*padding_length\n target_mask+=[0]*padding_length \n \n if example_index < 5:\n if stage=='train':\n logger.info(\"*** Example ***\")\n logger.info(\"idx: {}\".format(example.idx))\n\n logger.info(\"source_tokens: {}\".format([x.replace('\\u0120','_') for x in source_tokens]))\n logger.info(\"source_ids: {}\".format(' '.join(map(str, source_ids))))\n logger.info(\"source_mask: {}\".format(' '.join(map(str, source_mask))))\n \n logger.info(\"target_tokens: {}\".format([x.replace('\\u0120','_') for x in target_tokens]))\n logger.info(\"target_ids: {}\".format(' '.join(map(str, target_ids))))\n logger.info(\"target_mask: {}\".format(' '.join(map(str, target_mask))))\n \n features.append(\n InputFeatures(\n example_index,\n source_ids,\n target_ids,\n source_mask,\n target_mask,\n )\n )\n return features\n\n\n\ndef set_seed(seed=42):\n random.seed(seed)\n os.environ['PYHTONHASHSEED'] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n \ndef main():\n parser = argparse.ArgumentParser()\n\n ## Required parameters \n parser.add_argument(\"--model_type\", default=None, type=str, required=True,\n help=\"Model type: e.g. roberta\")\n parser.add_argument(\"--model_name_or_path\", default=None, type=str, required=True,\n help=\"Path to pre-trained model: e.g. roberta-base\" ) \n parser.add_argument(\"--output_dir\", default=None, type=str, required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\")\n parser.add_argument(\"--load_model_path\", default=None, type=str, \n help=\"Path to trained model: Should contain the .bin files\" ) \n ## Other parameters\n parser.add_argument(\"--train_filename\", default=None, type=str, \n help=\"The train filename. Should contain the .jsonl files for this task.\")\n parser.add_argument(\"--dev_filename\", default=None, type=str, \n help=\"The dev filename. Should contain the .jsonl files for this task.\")\n parser.add_argument(\"--test_filename\", default=None, type=str, \n help=\"The test filename. Should contain the .jsonl files for this task.\") \n \n parser.add_argument(\"--config_name\", default=\"\", type=str,\n help=\"Pretrained config name or path if not the same as model_name\")\n parser.add_argument(\"--tokenizer_name\", default=\"\", type=str,\n help=\"Pretrained tokenizer name or path if not the same as model_name\") \n parser.add_argument(\"--max_source_length\", default=64, type=int,\n help=\"The maximum total source sequence length after tokenization. Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\")\n parser.add_argument(\"--max_target_length\", default=32, type=int,\n help=\"The maximum total target sequence length after tokenization. 
Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\")\n \n parser.add_argument(\"--do_train\", action='store_true',\n help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\", action='store_true',\n help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\"--do_test\", action='store_true',\n help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\"--do_lower_case\", action='store_true',\n help=\"Set this flag if you are using an uncased model.\")\n parser.add_argument(\"--no_cuda\", action='store_true',\n help=\"Avoid using CUDA when available\") \n \n parser.add_argument(\"--train_batch_size\", default=8, type=int,\n help=\"Batch size per GPU/CPU for training.\")\n parser.add_argument(\"--eval_batch_size\", default=8, type=int,\n help=\"Batch size per GPU/CPU for evaluation.\")\n parser.add_argument('--gradient_accumulation_steps', type=int, default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\n parser.add_argument(\"--learning_rate\", default=5e-5, type=float,\n help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--beam_size\", default=10, type=int,\n help=\"beam size for beam search\") \n parser.add_argument(\"--weight_decay\", default=0.0, type=float,\n help=\"Weight deay if we apply some.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-8, type=float,\n help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float,\n help=\"Max gradient norm.\")\n parser.add_argument(\"--num_train_epochs\", default=3, type=int,\n help=\"Total number of training epochs to perform.\")\n parser.add_argument(\"--max_steps\", default=-1, type=int,\n help=\"If > 0: set total number of training steps to perform. 
Override num_train_epochs.\")\n    parser.add_argument(\"--eval_steps\", default=-1, type=int,\n                        help=\"\")\n    parser.add_argument(\"--train_steps\", default=-1, type=int,\n                        help=\"\")\n    parser.add_argument(\"--warmup_steps\", default=0, type=int,\n                        help=\"Linear warmup over warmup_steps.\")\n    parser.add_argument(\"--local_rank\", type=int, default=-1,\n                        help=\"For distributed training: local_rank\")   \n    parser.add_argument('--seed', type=int, default=42,\n                        help=\"random seed for initialization\")\n    # print arguments\n    args = parser.parse_args()\n    logger.info(args)\n\n    # Setup CUDA, GPU & distributed training\n    if args.local_rank == -1 or args.no_cuda:\n        device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n        args.n_gpu = torch.cuda.device_count()\n    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs\n        torch.cuda.set_device(args.local_rank)\n        device = torch.device(\"cuda\", args.local_rank)\n        torch.distributed.init_process_group(backend='nccl')\n        args.n_gpu = 1\n    logger.warning(\"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s\",\n                   args.local_rank, device, args.n_gpu, bool(args.local_rank != -1))\n    args.device = device\n    # Set seed\n    set_seed(args.seed)\n    # make dir if output_dir not exist\n    if os.path.exists(args.output_dir) is False:\n        os.makedirs(args.output_dir)\n        \n    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]\n    config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path)\n    tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,do_lower_case=args.do_lower_case)\n    \n    # build model\n    encoder = model_class.from_pretrained(args.model_name_or_path,config=config)    \n    decoder_layer = nn.TransformerDecoderLayer(d_model=config.hidden_size, nhead=config.num_attention_heads)\n    decoder = nn.TransformerDecoder(decoder_layer, num_layers=6)\n    model=Seq2Seq(encoder=encoder,decoder=decoder,config=config,\n                  beam_size=args.beam_size,max_length=args.max_target_length,\n                  sos_id=tokenizer.cls_token_id,eos_id=tokenizer.sep_token_id)\n    if args.load_model_path is not None:\n        logger.info(\"reload model from {}\".format(args.load_model_path))\n        model.load_state_dict(torch.load(args.load_model_path))\n        \n    model.to(device)\n    if args.local_rank != -1:\n        # Distributed training\n        try:\n            from apex.parallel import DistributedDataParallel as DDP\n        except ImportError:\n            raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.\")\n\n        model = DDP(model)\n    elif args.n_gpu > 1:\n        # multi-gpu training\n        model = torch.nn.DataParallel(model)\n\n    if args.do_train:\n        # Prepare training data loader\n        train_examples = read_examples(args.train_filename)\n        train_features = convert_examples_to_features(train_examples, tokenizer,args,stage='train')\n        all_source_ids = torch.tensor([f.source_ids for f in train_features], dtype=torch.long)\n        all_source_mask = torch.tensor([f.source_mask for f in train_features], dtype=torch.long)\n        all_target_ids = torch.tensor([f.target_ids for f in train_features], dtype=torch.long)\n        all_target_mask = torch.tensor([f.target_mask for f in train_features], dtype=torch.long)    \n        train_data = TensorDataset(all_source_ids,all_source_mask,all_target_ids,all_target_mask)\n        \n        if args.local_rank == -1:\n            train_sampler = RandomSampler(train_data)\n        else:\n            train_sampler = DistributedSampler(train_data)\n        train_dataloader = 
DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size//args.gradient_accumulation_steps)\n\n num_train_optimization_steps = args.train_steps\n\n # Prepare optimizer and schedule (linear warmup and decay)\n no_decay = ['bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n 'weight_decay': args.weight_decay},\n {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n scheduler = get_linear_schedule_with_warmup(optimizer,\n num_warmup_steps=int(t_total*0.1),\n num_training_steps=t_total)\n \n #Start training\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_examples))\n logger.info(\" Batch size = %d\", args.train_batch_size)\n logger.info(\" Num epoch = %d\", args.num_train_epochs)\n \n\n model.train()\n dev_dataset={}\n nb_tr_examples, nb_tr_steps,tr_loss,global_step,best_bleu,best_loss = 0, 0,0,0,0,1e6 \n for epoch in range(args.num_train_epochs):\n bar = tqdm(train_dataloader,total=len(train_dataloader))\n for batch in bar:\n batch = tuple(t.to(device) for t in batch)\n source_ids,source_mask,target_ids,target_mask = batch\n loss,_,_ = model(source_ids=source_ids,source_mask=source_mask,target_ids=target_ids,target_mask=target_mask)\n\n if args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu.\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n tr_loss += loss.item()\n train_loss=round(tr_loss*args.gradient_accumulation_steps/(nb_tr_steps+1),4)\n bar.set_description(\"epoch {} loss {}\".format(epoch,train_loss))\n nb_tr_examples += source_ids.size(0)\n nb_tr_steps += 1\n loss.backward()\n\n if (nb_tr_steps + 1) % args.gradient_accumulation_steps == 0:\n #Update parameters\n optimizer.step()\n optimizer.zero_grad()\n scheduler.step()\n global_step += 1\n\n if args.do_eval:\n #Eval model with dev dataset\n tr_loss = 0\n nb_tr_examples, nb_tr_steps = 0, 0 \n eval_flag=False \n if 'dev_loss' in dev_dataset:\n eval_examples,eval_data=dev_dataset['dev_loss']\n else:\n eval_examples = read_examples(args.dev_filename)\n eval_features = convert_examples_to_features(eval_examples, tokenizer, args,stage='dev')\n all_source_ids = torch.tensor([f.source_ids for f in eval_features], dtype=torch.long)\n all_source_mask = torch.tensor([f.source_mask for f in eval_features], dtype=torch.long)\n all_target_ids = torch.tensor([f.target_ids for f in eval_features], dtype=torch.long)\n all_target_mask = torch.tensor([f.target_mask for f in eval_features], dtype=torch.long) \n eval_data = TensorDataset(all_source_ids,all_source_mask,all_target_ids,all_target_mask) \n dev_dataset['dev_loss']=eval_examples,eval_data\n eval_sampler = SequentialSampler(eval_data)\n eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)\n\n logger.info(\"\\n***** Running evaluation *****\")\n logger.info(\" Num examples = %d\", len(eval_examples))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n\n #Start Evaling model\n model.eval()\n eval_loss,tokens_num = 0,0\n for batch in eval_dataloader:\n batch = tuple(t.to(device) for t in batch)\n source_ids,source_mask,target_ids,target_mask = batch \n\n with 
torch.no_grad():\n                        _,loss,num = model(source_ids=source_ids,source_mask=source_mask,\n                                           target_ids=target_ids,target_mask=target_mask)     \n                    eval_loss += loss.sum().item()\n                    tokens_num += num.sum().item()\n                # Print loss of dev dataset\n                model.train()\n                eval_loss = eval_loss / tokens_num\n                result = {'eval_ppl': round(np.exp(eval_loss),5),\n                          'global_step': global_step+1,\n                          'train_loss': round(train_loss,5)}\n                for key in sorted(result.keys()):\n                    logger.info(\"  %s = %s\", key, str(result[key]))\n                logger.info(\"  \"+\"*\"*20)   \n\n                #save last checkpoint\n                last_output_dir = os.path.join(args.output_dir, 'checkpoint-last')\n                if not os.path.exists(last_output_dir):\n                    os.makedirs(last_output_dir)\n                model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself\n                output_model_file = os.path.join(last_output_dir, \"pytorch_model.bin\")\n                torch.save(model_to_save.state_dict(), output_model_file)                    \n                if eval_loss<best_loss:\n                    logger.info(\"  Best ppl:%s\",round(np.exp(eval_loss),5))\n                    logger.info(\"  \"+\"*\"*20)\n                    best_loss=eval_loss\n                    # Save best checkpoint for best ppl\n                    output_dir = os.path.join(args.output_dir, 'checkpoint-best-ppl')\n                    if not os.path.exists(output_dir):\n                        os.makedirs(output_dir)\n                    model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself\n                    output_model_file = os.path.join(output_dir, \"pytorch_model.bin\")\n                    torch.save(model_to_save.state_dict(), output_model_file)  \n\n\n                #Calculate bleu  \n                if 'dev_bleu' in dev_dataset:\n                    eval_examples,eval_data=dev_dataset['dev_bleu']\n                else:\n                    eval_examples = read_examples(args.dev_filename)\n                    eval_examples = random.sample(eval_examples,min(1000,len(eval_examples)))\n                    eval_features = convert_examples_to_features(eval_examples, tokenizer, args,stage='test')\n                    all_source_ids = torch.tensor([f.source_ids for f in eval_features], dtype=torch.long)\n                    all_source_mask = torch.tensor([f.source_mask for f in eval_features], dtype=torch.long)    \n                    eval_data = TensorDataset(all_source_ids,all_source_mask)   \n                    dev_dataset['dev_bleu']=eval_examples,eval_data\n\n\n\n                eval_sampler = SequentialSampler(eval_data)\n                eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)\n\n                model.eval() \n                p=[]\n                for batch in eval_dataloader:\n                    batch = tuple(t.to(device) for t in batch)\n                    source_ids,source_mask= batch                  \n                    with torch.no_grad():\n                        preds = model(source_ids=source_ids,source_mask=source_mask)  \n                        for pred in preds:\n                            t=pred[0].cpu().numpy()\n                            t=list(t)\n                            if 0 in t:\n                                t=t[:t.index(0)]\n                            text = tokenizer.decode(t,clean_up_tokenization_spaces=False)\n                            p.append(text)\n                model.train()\n                predictions=[]\n                with open(os.path.join(args.output_dir,\"dev.output\"),'w') as f, open(os.path.join(args.output_dir,\"dev.gold\"),'w') as f1:\n                    for ref,gold in zip(p,eval_examples):\n                        predictions.append(str(gold.idx)+'\\t'+ref)\n                        f.write(str(gold.idx)+'\\t'+ref+'\\n')\n                        f1.write(str(gold.idx)+'\\t'+gold.target+'\\n')     \n\n                dev_bleu=round(_bleu(predictions, os.path.join(args.output_dir, \"dev.gold\")),2)\n                logger.info(\"  %s = %s \"%(\"bleu-4\",str(dev_bleu)))\n                logger.info(\"  \"+\"*\"*20)    \n                if dev_bleu>best_bleu:\n                    logger.info(\"  Best bleu:%s\",dev_bleu)\n                    logger.info(\"  \"+\"*\"*20)\n                    best_bleu=dev_bleu\n                    # Save best checkpoint for best bleu\n                    output_dir = os.path.join(args.output_dir, 'checkpoint-best-bleu')\n                    if not os.path.exists(output_dir):\n                        os.makedirs(output_dir)\n                    model_to_save 
= model.module if hasattr(model, 'module') else model  # Only save the model itself\n                    output_model_file = os.path.join(output_dir, \"pytorch_model.bin\")\n                    torch.save(model_to_save.state_dict(), output_model_file)\n               \n    if args.do_test:\n        files=[]\n        if args.dev_filename is not None:\n            files.append(args.dev_filename)\n        if args.test_filename is not None:\n            files.append(args.test_filename)\n        for idx,file in enumerate(files):   \n            logger.info(\"Test file: {}\".format(file))\n            eval_examples = read_examples(file)\n            eval_features = convert_examples_to_features(eval_examples, tokenizer, args,stage='test')\n            all_source_ids = torch.tensor([f.source_ids for f in eval_features], dtype=torch.long)\n            all_source_mask = torch.tensor([f.source_mask for f in eval_features], dtype=torch.long)    \n            eval_data = TensorDataset(all_source_ids,all_source_mask)   \n\n            # Calculate bleu\n            eval_sampler = SequentialSampler(eval_data)\n            eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)\n\n            model.eval() \n            p=[]\n            for batch in tqdm(eval_dataloader,total=len(eval_dataloader)):\n                batch = tuple(t.to(device) for t in batch)\n                source_ids,source_mask= batch                  \n                with torch.no_grad():\n                    preds = model(source_ids=source_ids,source_mask=source_mask)  \n                    for pred in preds:\n                        t=pred[0].cpu().numpy()\n                        t=list(t)\n                        if 0 in t:\n                            t=t[:t.index(0)]\n                        text = tokenizer.decode(t,clean_up_tokenization_spaces=False)\n                        p.append(text)\n            model.train()\n            predictions=[]\n            with open(os.path.join(args.output_dir,\"test_{}.output\".format(str(idx))),'w') as f, open(os.path.join(args.output_dir,\"test_{}.gold\".format(str(idx))),'w') as f1:\n                for ref,gold in zip(p,eval_examples):\n                    predictions.append(str(gold.idx)+'\\t'+ref)\n                    f.write(str(gold.idx)+'\\t'+ref+'\\n')\n                    f1.write(str(gold.idx)+'\\t'+gold.target+'\\n')     \n\n            dev_bleu=round(_bleu(predictions, os.path.join(args.output_dir, \"test_{}.gold\".format(idx))),2)\n            logger.info(\"  %s = %s \"%(\"bleu-4\",str(dev_bleu)))\n            logger.info(\"  \"+\"*\"*20)    \n\n\n\n                \n\n                \n                \nif __name__ == \"__main__\":\n    main()\n\n\n" ]
[ [ "torch.load", "torch.utils.data.DataLoader", "torch.no_grad", "torch.cuda.is_available", "torch.device", "numpy.exp", "torch.distributed.init_process_group", "torch.nn.TransformerDecoderLayer", "torch.utils.data.distributed.DistributedSampler", "torch.utils.data.TensorDataset", "torch.nn.TransformerDecoder", "torch.tensor", "torch.cuda.device_count", "torch.cuda.manual_seed", "numpy.random.seed", "torch.cuda.set_device", "torch.manual_seed", "torch.utils.data.SequentialSampler", "torch.utils.data.RandomSampler", "torch.nn.DataParallel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bycn/dm_control
[ "cb4f4e78fe2963502447a4fa224ac84522e1e408" ]
[ "dm_control/locomotion/tasks/go_to_target.py" ]
[ "# Copyright 2019 The dm_control Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"Task for a walker to move to a target.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom dm_control import composer\nfrom dm_control.composer import variation\nfrom dm_control.composer.observation import observable\nfrom dm_control.composer.variation import distributions\nimport numpy as np\n\nDEFAULT_DISTANCE_TOLERANCE_TO_TARGET = 1.0\n\n\nclass GoToTarget(composer.Task):\n \"\"\"A task that requires a walker to move towards a target.\"\"\"\n\n def __init__(self,\n walker,\n arena,\n moving_target=False,\n steps_before_moving_target=10,\n distance_tolerance=DEFAULT_DISTANCE_TOLERANCE_TO_TARGET,\n target_spawn_position=None,\n walker_spawn_position=None,\n walker_spawn_rotation=None,\n physics_timestep=0.005,\n control_timestep=0.025):\n \"\"\"Initializes this task.\n\n Args:\n walker: an instance of `locomotion.walkers.base.Walker`.\n arena: an instance of `locomotion.arenas.floors.Floor`.\n moving_target: bool, Whether the target should move after receiving the\n walker reaches it.\n steps_before_moving_target: int, the number of steps before the target\n moves, if moving_target==True.\n distance_tolerance: Accepted to distance to the target position before\n providing reward.\n target_spawn_position: a sequence of 2 numbers, or a `composer.Variation`\n instance that generates such sequences, specifying the position at\n which the target is spawned at the beginning of an episode.\n If None, the entire arena is used to generate random target positions.\n walker_spawn_position: a sequence of 2 numbers, or a `composer.Variation`\n instance that generates such sequences, specifying the position at\n which the walker is spawned at the beginning of an episode.\n If None, the entire arena is used to generate random spawn positions.\n walker_spawn_rotation: a number, or a `composer.Variation` instance that\n generates a number, specifying the yaw angle offset (in radians) that is\n applied to the walker at the beginning of an episode.\n physics_timestep: a number specifying the timestep (in seconds) of the\n physics simulation.\n control_timestep: a number specifying the timestep (in seconds) at which\n the agent applies its control inputs (in seconds).\n \"\"\"\n\n self._arena = arena\n self._walker = walker\n self._walker.create_root_joints(self._arena.attach(self._walker))\n\n arena_position = distributions.Uniform(\n low=-np.array(arena.size) / 2, high=np.array(arena.size) / 2)\n if target_spawn_position is not None:\n self._target_spawn_position = target_spawn_position\n else:\n self._target_spawn_position = arena_position\n\n if walker_spawn_position is not None:\n self._walker_spawn_position = walker_spawn_position\n else:\n self._walker_spawn_position = arena_position\n\n self._walker_spawn_rotation = walker_spawn_rotation\n\n 
self._distance_tolerance = distance_tolerance\n self._moving_target = moving_target\n self._steps_before_moving_target = steps_before_moving_target\n self._reward_step_counter = 0\n\n self._target = self.root_entity.mjcf_model.worldbody.add(\n 'site',\n name='target',\n type='sphere',\n pos=(0., 0., 0.),\n size=(0.1,),\n rgba=(0.9, 0.6, 0.6, 1.0))\n\n enabled_observables = []\n enabled_observables += self._walker.observables.proprioception\n enabled_observables += self._walker.observables.kinematic_sensors\n enabled_observables += self._walker.observables.dynamic_sensors\n enabled_observables.append(self._walker.observables.sensors_touch)\n for obs in enabled_observables:\n obs.enabled = True\n\n walker.observables.add_egocentric_vector(\n 'target',\n observable.MJCFFeature('pos', self._target),\n origin_callable=lambda physics: physics.bind(walker.root_body).xpos)\n\n self.set_timesteps(\n physics_timestep=physics_timestep, control_timestep=control_timestep)\n\n @property\n def root_entity(self):\n return self._arena\n\n def target_position(self, physics):\n return np.array(physics.bind(self._target).pos)\n\n def initialize_episode_mjcf(self, random_state):\n self._arena.regenerate(random_state=random_state)\n\n target_x, target_y = variation.evaluate(\n self._target_spawn_position, random_state=random_state)\n self._target.pos = [target_x, target_y, 0.]\n\n def initialize_episode(self, physics, random_state):\n self._walker.reinitialize_pose(physics, random_state)\n if self._walker_spawn_rotation:\n rotation = variation.evaluate(\n self._walker_spawn_rotation, random_state=random_state)\n quat = [np.cos(rotation / 2), 0, 0, np.sin(rotation / 2)]\n else:\n quat = None\n walker_x, walker_y = variation.evaluate(\n self._walker_spawn_position, random_state=random_state)\n self._walker.shift_pose(\n physics,\n position=[walker_x, walker_y, 0.],\n quaternion=quat,\n rotate_velocity=True)\n\n self._failure_termination = False\n walker_foot_geoms = set(self._walker.ground_contact_geoms)\n walker_nonfoot_geoms = [\n geom for geom in self._walker.mjcf_model.find_all('geom')\n if geom not in walker_foot_geoms]\n self._walker_nonfoot_geomids = set(\n physics.bind(walker_nonfoot_geoms).element_id)\n self._ground_geomids = set(\n physics.bind(self._arena.ground_geoms).element_id)\n self._ground_geomids.add(physics.bind(self._target).element_id)\n\n def _is_disallowed_contact(self, contact):\n set1, set2 = self._walker_nonfoot_geomids, self._ground_geomids\n return ((contact.geom1 in set1 and contact.geom2 in set2) or\n (contact.geom1 in set2 and contact.geom2 in set1))\n\n def should_terminate_episode(self, physics):\n return self._failure_termination\n\n def get_discount(self, physics):\n if self._failure_termination:\n return 0.\n else:\n return 1.\n\n def get_reward(self, physics):\n reward = 0.\n distance = np.linalg.norm(\n physics.bind(self._target).pos[:2] -\n physics.bind(self._walker.root_body).xpos[:2])\n if distance < self._distance_tolerance:\n reward = 1.\n if self._moving_target:\n self._reward_step_counter += 1\n return reward\n\n def before_step(self, physics, action, random_state):\n self._walker.apply_action(physics, action, random_state)\n\n def after_step(self, physics, random_state):\n self._failure_termination = False\n for contact in physics.data.contact:\n if self._is_disallowed_contact(contact):\n self._failure_termination = True\n break\n if (self._moving_target and\n self._reward_step_counter >= self._steps_before_moving_target):\n\n # Reset the target position.\n target_x, 
target_y = variation.evaluate(\n self._target_spawn_position, random_state=random_state)\n physics.bind(self._target).pos = [target_x, target_y, 0.]\n\n # Reset the number of steps at the target for the moving target.\n self._reward_step_counter = 0\n" ]
[ [ "numpy.array", "numpy.cos", "numpy.sin" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
UESTC-Liuxin/CVMI_Sementic_Segmentation
[ "dc5bf6e940cf6961ef65abb6e7ec372f29d55249" ]
[ "model/decode_heads/danet/danet.py" ]
[ "'''\nAuthor: Liu Xin\nDate: 2021-11-30 16:50:20\nLastEditors: Liu Xin\nLastEditTime: 2021-11-30 16:53:51\nDescription: file content\nFilePath: /CVMI_Sementic_Segmentation/model/decode_heads/danet/danet.py\n'''\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom model.builder import DECODE_HEAD\n\n\nclass DANet(nn.Module):\n def __init__(self, in_channels, num_classes, criterion, match_block, *args, **kwargs):\n super(DANet, self).__init__()\n self.num_classes = num_classes\n self.criterion = criterion\n self.match_block = match_block\n \n \nclass DANetHead(nn.Module):\n def __init__(self, in_channels, out_channels, norm_layer):\n super(DANetHead, self).__init__()\n inter_channels = in_channels // 4\n self.conv5a = nn.Sequential(nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),\n norm_layer(inter_channels),\n nn.ReLU())\n \n self.conv5c = nn.Sequential(nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),\n norm_layer(inter_channels),\n nn.ReLU())\n\n self.sa = PAM_Module(inter_channels)\n self.sc = CAM_Module(inter_channels)\n self.conv51 = nn.Sequential(nn.Conv2d(inter_channels, inter_channels, 3, padding=1, bias=False),\n norm_layer(inter_channels),\n nn.ReLU())\n self.conv52 = nn.Sequential(nn.Conv2d(inter_channels, inter_channels, 3, padding=1, bias=False),\n norm_layer(inter_channels),\n nn.ReLU())\n\n self.conv6 = nn.Sequential(nn.Dropout2d(0.1, False), nn.Conv2d(inter_channels, out_channels, 1))\n self.conv7 = nn.Sequential(nn.Dropout2d(0.1, False), nn.Conv2d(inter_channels, out_channels, 1))\n\n self.conv8 = nn.Sequential(nn.Dropout2d(0.1, False), nn.Conv2d(inter_channels, out_channels, 1))\n\n def forward(self, x):\n feat1 = self.conv5a(x)\n sa_feat = self.sa(feat1)\n sa_conv = self.conv51(sa_feat)\n sa_output = self.conv6(sa_conv)\n\n feat2 = self.conv5c(x)\n sc_feat = self.sc(feat2)\n sc_conv = self.conv52(sc_feat)\n sc_output = self.conv7(sc_conv)\n\n feat_sum = sa_conv+sc_conv\n \n sasc_output = self.conv8(feat_sum)\n\n output = [sasc_output]\n output.append(sa_output)\n output.append(sc_output)\n return tuple(output)" ]
[ [ "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.Dropout2d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tvlearn/tvo
[ "5a94f78781abc56446b87e74d8447ee73b74dd5b" ]
[ "tvo/utils/data.py" ]
[ "# -*- coding: utf-8 -*-\n# Copyright (C) 2019 Machine Learning Group of the University of Oldenburg.\n# Licensed under the Academic Free License version 3.0\n\nimport torch as to\nimport torch.distributed as dist\nfrom torch.utils.data import TensorDataset, DataLoader, Dataset, Sampler\nimport numpy as np\nimport tvo\nfrom tvo.utils.parallel import broadcast\n\n\nclass TVODataLoader(DataLoader):\n def __init__(self, *data: to.Tensor, **kwargs):\n \"\"\"TVO DataLoader class. Derived from torch.utils.data.DataLoader.\n\n :param data: Tensor containing the input dataset. Must have exactly two dimensions (N,D).\n :param kwargs: forwarded to pytorch's DataLoader.\n\n TVODataLoader is constructed exactly the same way as pytorch's DataLoader,\n but it restricts datasets to TensorDataset constructed from the *data passed\n as parameter. All other arguments are forwarded to pytorch's DataLoader.\n\n When iterated over, TVODataLoader yields a tuple containing the indeces of\n the datapoints in each batch as well as the actual datapoints for each\n tensor in the input Tensor.\n\n TVODataLoader instances optionally expose the attribute `precision`, which is set to the\n dtype of the first dataset in *data if it is a floating point dtype.\n \"\"\"\n N = data[0].shape[0]\n assert all(d.shape[0] == N for d in data), \"Dimension mismatch in data sets.\"\n\n if data[0].dtype is not to.uint8:\n self.precision = data[0].dtype\n\n dataset = TensorDataset(to.arange(N), *data)\n\n if tvo.get_run_policy() == \"mpi\" and \"sampler\" not in kwargs:\n # Number of _desired_ datapoints per worker: the last worker might have less actual\n # datapoints, but we want it to sample as many as the other workers so that all\n # processes can loop over batches in sync.\n # NOTE: this means that the E-step will sometimes write over a certain K[idx] and\n # lpj[idx] twice over the course of an epoch, even in the same batch (although that\n # will happen rarely). This double writing is not a race condition: the last write wins.\n n_samples = to.tensor(N)\n assert dist.is_initialized()\n comm_size = dist.get_world_size()\n # Ranks ..., (comm_size-2), (comm_size-1) are\n # assigned one data point more than ranks\n # 0, 1, ... if the dataset cannot be evenly\n # distributed across MPI processes. The split\n # point depends on the total number of data\n # points and number of MPI processes (see\n # scatter_to_processes, gather_from_processes)\n broadcast(n_samples, src=comm_size - 1)\n kwargs[\"sampler\"] = ShufflingSampler(dataset, int(n_samples))\n kwargs[\"shuffle\"] = None\n\n super().__init__(dataset, **kwargs)\n\n\nclass ShufflingSampler(Sampler):\n def __init__(self, dataset: Dataset, n_samples: int = None):\n \"\"\"A torch sampler that shuffles datapoints.\n\n :param dataset: The torch dataset for this sampler.\n :param n_samples: Number of desired samples. Defaults to len(dataset). 
If larger than\n len(dataset), some datapoints will be sampled multiple times.\n \"\"\"\n self._ds_len = len(dataset)\n self.n_samples = n_samples if n_samples is not None else self._ds_len\n\n def __iter__(self):\n idxs = np.arange(self._ds_len)\n np.random.shuffle(idxs)\n\n if self.n_samples > self._ds_len:\n n_extra_samples = self.n_samples - self._ds_len\n replace = True if n_extra_samples > idxs.size else False\n extra_samples = np.random.choice(idxs, size=n_extra_samples, replace=replace)\n idxs = np.concatenate((idxs, extra_samples))\n else:\n idxs = idxs[: self.n_samples]\n\n return iter(idxs)\n\n def __len__(self):\n return self.n_samples\n" ]
[ [ "numpy.random.choice", "numpy.arange", "torch.distributed.is_initialized", "numpy.random.shuffle", "torch.tensor", "numpy.concatenate", "torch.arange", "torch.distributed.get_world_size" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ergs/transmutagen-paper
[ "0ca7100d5b3021599558b6025c928e2bb8f88ae3" ]
[ "decay_compare.py" ]
[ "#!/usr/bin/env python\nfrom pprint import pprint\nfrom collections import defaultdict\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sympy import exp\n\nfrom pyne import nucname\nfrom pyne.material import Material\nfrom pyne import cram\n\nfrom transmutagen.origen_all import TIME_STEPS\n\n\nnp.set_printoptions(precision=18)\n\n#TIMES = [0.0] + sorted(TIME_STEPS.keys())\nTIMES = [0.0] + np.logspace(1, 20, 39).tolist()\nNTIMES = len(TIMES)\nDECAY_MATS = {t: (-cram.DECAY_MATRIX*t) for t in TIMES}\n\nemptytime = lambda: np.zeros(NTIMES, dtype=float)\n\n\ndef run_nuclide(nuc):\n bateman = defaultdict(emptytime)\n bateman[nuc][0] = 1\n crammed = defaultdict(emptytime)\n crammed[nuc][0] = 1\n diagexp = defaultdict(emptytime)\n diagexp[nuc][0] = 1\n n0 = Material({nuc: 1.0}, mass=1.0, atoms_per_molecule=1.0)\n for i, t in enumerate(TIMES[1:], 1):\n # compute Bateman\n try:\n b1 = n0.decay(t).to_atom_frac()\n except RuntimeError:\n # decay can't handle all of the same nuclides CRAM can\n b1 = {}\n for key, val in b1.items():\n n = nucname.name(key)\n bateman[n][i] = val\n # compute CRAM\n c1 = n0.cram(DECAY_MATS[t], order=16).to_atom_frac()\n for key, val in c1.items():\n n = nucname.name(key)\n crammed[n][i] = val\n # compute e^x of the diagonal of the decay matrix, ie the nuc itself\n nuc_idx = cram.NUCS_IDX[nuc]\n mat_idx = cram.IJ[nuc_idx, nuc_idx]\n diagexp[nuc][i] = exp(-DECAY_MATS[t][mat_idx]).evalf(n=30)\n return bateman, crammed, diagexp\n\n\ndef diff_nuclide(a, b, abs=False, include_missing=True):\n d = defaultdict(emptytime)\n for nuc in a:\n if nuc in b or include_missing:\n d[nuc] = a[nuc] - b[nuc]\n if include_missing:\n for nuc in b:\n if nuc not in a:\n d[nuc] = -b[nuc]\n if abs:\n for nuc in d:\n d[nuc] = np.abs(d[nuc])\n return d\n\n\ndef run_nuclides(nucs=None, verbose=True):\n batemans = {}\n crammeds = {}\n diagexps = {}\n nucs = cram.NUCS if nucs is None else nucs\n for nuc in nucs:\n print('Running nuc ' + nuc)\n b, c, d = run_nuclide(nuc)\n batemans[nuc] = b\n crammeds[nuc] = c\n diagexps[nuc] = d\n return batemans, crammeds, diagexps\n\n\nif __name__ == '__main__':\n print(TIMES)\n nuc = 'H3'\n b, c, d = run_nuclide('H3')\n print('Bateman:')\n pprint(b[nuc])\n print('Decay Exponentional:')\n pprint(d[nuc])\n print('CRAM')\n pprint(c[nuc])\n print('Diff')\n pprint(diff_nuclide(d,c)[nuc])\n\n" ]
[ [ "numpy.logspace", "numpy.set_printoptions", "numpy.zeros", "numpy.abs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
feloundou/research-project
[ "fe7f5414901f02ae24ef33af31e65782d8511da1" ]
[ "algos/train_expert_cpo.py" ]
[ "from datetime import datetime as dt, timedelta\nimport numpy as np\nimport os\nimport torch\nfrom torch.nn import MSELoss\n\nfrom torch.optim import LBFGS, Adam\nfrom adabelief_pytorch import AdaBelief\n\nfrom torch_cpo_utils import *\n# from cpo_torch import CPO\nfrom buffer_torch import *\nfrom models_torch import MLP_DiagGaussianPolicy, MLP\n\nfrom utils import *\nfrom neural_nets import *\n\nimport wandb\nwandb.login()\nPROJECT_NAME = 'cpo_500e_8hz_cost1_rew1_lim25'\nwandb.init(project=\"cpo-agent-test\", name= PROJECT_NAME )\n\n# recommend a protocol for evaluating constrained RL\n# algorithms on Safety Gym environments based on three metrics:\n# 1. task performance of the final policy,\n# 2. constraint satisfaction of the final policy, and\n# 3. average regret with respect to safety costs throughout training.\n\n# In all Safety Gym benchmark environments, the layout of environment elements is randomized at the start of each episode. Each distribution over layouts is continuous and minimally\n# restricted, allowing for essentially infinite variations within each environment. This prevents\n# RL algorithms from learning trivial solutions that memorize\n\ndef discount(vals, discount_term):\n n = vals.size(0)\n disc_pows = torch.pow(discount_term, torch.arange(n).float())\n # Reverse indexes\n reverse_ix = torch.arange(n - 1, -1, -1)\n discounted = torch.cumsum((vals * disc_pows)[reverse_ix], dim=-1)[reverse_ix] / disc_pows\n\n return discounted\n\n\ndef compute_advs(actual_vals, exp_vals, discount_term):\n # Advantage calculation: discount(predicted - actual)\n exp_vals_next = torch.cat([exp_vals[1:], torch.tensor([0.0])])\n td_res = actual_vals + discount_term * exp_vals_next - exp_vals\n advs = discount(td_res, discount_term)\n\n return advs\n\n\nclass CPO:\n @autoassign\n def __init__(self,\n policy,\n value_fun,\n cost_fun,\n simulator,\n target_kl=1e-2,\n vf_lr=1e-2,\n cf_lr=1e-2,\n cost_lim=0.1,\n train_v_iters=5,\n train_c_iters=5,\n val_l2_reg=1e-3,\n cost_l2_reg=1e-3,\n gamma=0.995,\n cost_gamma=0.995,\n cg_damping=1e-3,\n cg_max_iters=10,\n line_search_coef=0.9,\n line_search_max_iter=10,\n line_search_accept_ratio=0.1,\n optim_mode = \"adam\",\n optim_max_iter=25,\n model_name=None,\n continue_from_file=False,\n save_every=10,\n save_dir='trained-models-dir',\n print_updates=True):\n\n # Special function to avoid certain slowdowns from PyTorch + MPI combo.\n setup_pytorch_for_mpi()\n\n self.save_dir = save_dir\n self.mse_loss = MSELoss(reduction='mean')\n\n # Set policy and functions if starting from scratch\n # if continue_from_file == False:\n\n\n # Different Optimizer Modes (Think LBFGS, Adam and AdaBelief)\n\n if optim_mode == \"adam\":\n self.value_fun_optimizer = Adam(self.value_fun.parameters(), lr=vf_lr)\n self.cost_fun_optimizer = Adam(self.cost_fun.parameters(), lr=vf_lr)\n\n elif optim_mode == \"adabelief\":\n self.value_fun_optimizer = AdaBelief(self.value_fun.parameters(), betas=(0.9, 0.999), eps=1e-8)\n self.cost_fun_optimizer = AdaBelief(self.cost_fun.parameters(), betas=(0.9, 0.999), eps=1e-8)\n\n else:\n self.value_fun_optimizer = LBFGS(self.value_fun.parameters(), lr=vf_lr, max_iter=optim_max_iter)\n self.cost_fun_optimizer = LBFGS(self.cost_fun.parameters(), lr=cf_lr, max_iter=optim_max_iter)\n\n self.epoch_num = 0\n self.elapsed_time = timedelta(0)\n self.device = get_device()\n self.mean_rewards = []\n self.mean_costs = []\n self.session_cum_avg_rewards = 0\n self.session_cum_avg_costs = 0\n\n\n if not model_name and continue_from_file:\n raise 
Exception('Argument continue_from_file to __init__ method of ' \\\n                            'CPO class was set to True but model_name was not ' \\\n                            'specified.')\n\n        if not model_name and save_every:\n            raise Exception('Argument save_every to __init__ method of CPO ' \\\n                            'was set to a value greater than 0 but model_name ' \\\n                            'was not specified.')\n\n        if continue_from_file:\n            print(\"about to continue\")\n            self.load_session()\n\n    def train(self, n_epochs, logger_kwargs):\n\n        # Set up logger and save configuration\n        logger = EpochLogger(**logger_kwargs)\n        logger.save_config(locals())\n\n        # Set up model saving\n        logger.setup_pytorch_saver(policy)\n\n        states_w_time_old = None\n        disc_rewards_old = None\n        disc_costs_old = None\n\n        # Main loop: collect experience in env and update/log each epoch\n        for epoch in range(n_epochs):\n\n            start_time = dt.now()\n            self.epoch_num += 1\n\n            # Run the simulator and collect experiences in the buffer\n            buffer = self.simulator.run_sim()\n\n            # Sample buffer experiences\n            observations, actions, rewards, costs = buffer.sample()\n            # print(\"reward sample:\", rewards)\n\n            episode_lengths = torch.tensor([len(episode) for episode in buffer])\n            print(\"episode lengths: \", episode_lengths)\n            episode_limits = torch.cat([torch.tensor([0]), torch.cumsum(episode_lengths, dim=-1)])\n\n            N = np.sum([len(episode) for episode in buffer])\n            T = self.simulator.max_ep_len\n            time = torch.cat([torch.arange(size).float() for size in episode_lengths])\n            time = torch.unsqueeze(time, dim=1) / T\n            states_w_time = torch.cat([observations, time], dim=1)\n            # print(\"states with time: \", states_w_time)\n\n            disc_rewards = torch.zeros(N)\n            disc_costs = torch.zeros(N)\n            reward_advs = torch.zeros(N)\n            cost_advs = torch.zeros(N)\n\n            with torch.no_grad():\n\n                state_vals = self.value_fun(states_w_time).view(-1)\n                state_costs = self.cost_fun(states_w_time).view(-1)\n                print(\"state vals: \", state_vals)\n                print(\"state costs: \", state_costs)\n\n            for start, end in zip(episode_limits[:-1], episode_limits[1:]):\n                disc_rewards[start:end] = discount(rewards[start:end], self.gamma)\n                disc_costs[start:end] = discount(costs[start:end], self.cost_gamma)\n                reward_advs[start:end] = compute_advs(rewards[start:end],\n                                                      state_vals[start:end],\n                                                      self.gamma)\n                cost_advs[start:end] = compute_advs(costs[start:end],\n                                                    state_costs[start:end],\n                                                    self.cost_gamma)\n\n            # Tyna note: think about bias reduction\n\n            # Advantage normalizing trick for policy gradient\n            reward_advs -= reward_advs.mean()\n            reward_advs /= reward_advs.std()\n\n            # Center, but do NOT rescale advantages for cost gradient # Tyna to ask Josh about this\n            cost_advs -= cost_advs.mean()\n            # cost_advs /= cost_advs.std()\n\n            if states_w_time_old is not None:\n                states_w_time_train = torch.cat([states_w_time, states_w_time_old])\n                disc_rewards_train = torch.cat([disc_rewards, disc_rewards_old])\n                disc_costs_train = torch.cat([disc_costs, disc_costs_old])\n            else:\n                states_w_time_train = states_w_time\n                disc_rewards_train = disc_rewards\n                disc_costs_train = disc_costs\n\n            states_w_time_old = states_w_time\n            disc_rewards_old = disc_rewards\n            disc_costs_old = disc_costs\n\n#             constraint_cost = torch.mean(torch.tensor([disc_costs[start] for start in episode_limits[:-1]]))\n            constraint_cost = torch.mean(torch.tensor([torch.sum(torch.tensor(episode.costs))\n                                                       for episode in buffer]))\n\n            self.update_policy(observations, actions, reward_advs, cost_advs, constraint_cost)\n            self.update_nn_regressor(self.value_fun, self.value_fun_optimizer, states_w_time_train,\n                                     disc_rewards_train, 
self.val_l2_reg, self.train_v_iters)\n            self.update_nn_regressor(self.cost_fun, self.cost_fun_optimizer, states_w_time_train,\n                                     disc_costs_train, self.cost_l2_reg, self.train_c_iters)\n\n            reward_sums = [np.sum(episode.rewards) for episode in buffer]\n            cost_sums = [np.sum(episode.costs) for episode in buffer]\n            # print(\"all episode rewards for each episode: \", [episode.rewards for episode in buffer])\n            print(\"sum episode rewards: \", reward_sums)\n            print(\"mean of sum episode rewards: \", np.mean(reward_sums))\n            self.mean_rewards.append(np.mean(reward_sums))\n            self.mean_costs.append(np.mean(cost_sums))\n            self.elapsed_time += dt.now() - start_time\n\n            if self.print_updates:\n                self.print_update(logger)\n\n            # Save model and save last trajectory\n            if (epoch % self.save_every == 0) or (epoch == n_epochs - 1):\n                logger.save_state({'env': env}, None)\n\n            if self.save_every and not self.epoch_num % self.save_every:\n                self.save_session(logger)\n\n    def update_policy(self, observations, actions, reward_advs, constraint_advs, J_c):\n        # J_c is constraint cost\n        self.policy.train()\n\n        action_dists = self.policy(observations)\n        log_action_probs = action_dists.log_prob(actions)\n\n        imp_sampling = torch.exp(log_action_probs - log_action_probs.detach())\n\n        # Change to torch.matmul\n        reward_loss = -torch.mean(imp_sampling * reward_advs)\n        reward_grad = flat_grad(reward_loss, self.policy.parameters(), retain_graph=True)\n        # Change to torch.matmul\n        constraint_loss = torch.sum(imp_sampling * constraint_advs) / self.simulator.n_episodes\n        constraint_grad = flat_grad(constraint_loss, self.policy.parameters(), retain_graph=True)\n\n        loss_metrics = {'reward loss': reward_loss,\n                        'constraint loss': constraint_loss\n                        }\n\n        wandb.log(loss_metrics)\n\n        mean_kl = mean_kl_first_fixed(action_dists, action_dists)\n        Fvp_fun = get_Hvp_fun(mean_kl, self.policy.parameters())\n\n        F_inv_g = cg_solver(Fvp_fun, reward_grad)\n        F_inv_b = cg_solver(Fvp_fun, constraint_grad)\n\n        q = torch.matmul(reward_grad, F_inv_g)\n        r = torch.matmul(reward_grad, F_inv_b)\n        s = torch.matmul(constraint_grad, F_inv_b)\n        c = (J_c - self.cost_lim)\n        # .to(self.device)\n\n        # Is the policy feasible (within the kl constraints?)\n        is_feasible = False if c > 0 and c ** 2 / s - 2 * self.target_kl > 0 else True\n\n        if is_feasible:\n            lam, nu = self.calc_dual_vars(q, r, s, c)\n            cur_penalty = nu\n            search_dir = -lam ** -1 * (F_inv_g + nu * F_inv_b)\n        # if not feasible, perform infeasible recovery: step to purely decrease cost\n        else:\n\n            search_dir = -torch.sqrt(2 * self.target_kl / s) * F_inv_b\n\n        # Should be positive, calculate improvement over loss\n        exp_loss_improv = torch.matmul(reward_grad, search_dir)\n        current_policy = get_flat_params(self.policy)\n\n        def line_search_criterion(search_dir, step_len):\n            test_policy = current_policy + step_len * search_dir\n            set_params(self.policy, test_policy)\n\n            with torch.no_grad():\n                # Test if conditions are satisfied\n                test_dists = self.policy(observations)\n                test_probs = test_dists.log_prob(actions)\n\n                imp_sampling = torch.exp(test_probs - log_action_probs.detach())\n\n                test_loss = -torch.mean(imp_sampling * reward_advs)\n                test_cost = torch.sum(imp_sampling * constraint_advs) / self.simulator.n_episodes\n                test_kl = mean_kl_first_fixed(action_dists, test_dists)\n\n                loss_improv_cond = (test_loss - reward_loss) / (step_len * exp_loss_improv) >= self.line_search_accept_ratio\n                cost_cond = step_len * torch.matmul(constraint_grad, search_dir) <= max(-c, 0.0)\n                kl_cond = test_kl <= self.target_kl\n\n            
set_params(self.policy, current_policy)\n\n            if is_feasible:\n                return loss_improv_cond and cost_cond and kl_cond\n\n            return cost_cond and kl_cond\n\n        step_len = line_search(search_dir, 1.0, line_search_criterion, self.line_search_coef)\n        # print('Step Len.:', step_len, '\\n')\n\n        step_metrics = {'step length': step_len}\n\n        wandb.log(step_metrics)\n\n        # improved policy\n        new_policy = current_policy + step_len * search_dir\n        set_params(self.policy, new_policy)\n\n    def update_nn_regressor(self, nn_regressor, optimizer, states, targets, l2_reg_coef, n_iters=1):\n        nn_regressor.train()\n\n        # states = states.to(self.device)\n        # targets = targets.to(self.device)\n\n        for _ in range(n_iters):\n            def mse():\n                optimizer.zero_grad()\n\n                predictions = nn_regressor(states).view(-1)\n                loss = self.mse_loss(predictions, targets)\n\n                flat_params = get_flat_params(nn_regressor)\n                l2_loss = l2_reg_coef * torch.sum(torch.pow(flat_params, 2))\n                loss += l2_loss\n\n                loss.backward()\n\n                return loss\n\n            optimizer.step(mse)\n\n    def calc_dual_vars(self, q, r, s, c):\n\n        A = q - r ** 2 / s  # should be always positive (Cauchy-Schwarz)\n        B = 2 * self.target_kl - c ** 2 / s  # does safety boundary intersect trust region? (positive = yes)\n\n        # optim_case in [3,4]\n        if c < 0.0 and c ** 2 / s - 2 * self.target_kl > 0.0:\n            lam = torch.sqrt(q / (2 * self.target_kl))\n            nu = 0.0\n\n            return lam, nu\n\n        # w = tro.cg(Hx, b)\n        # r = np.dot(w, approx_g)  # b^T H^{-1} g\n        # s = np.dot(w, Hx(w))  # b^T H^{-1} b\n\n\n        lam_mid = r / c\n        lam_a = torch.sqrt(A / B)\n        lam_b = torch.sqrt(q / (2 * self.target_kl))\n\n        f_mid = -0.5 * (q / lam_mid + 2 * lam_mid * self.target_kl)\n        f_a = -torch.sqrt(A * B) - r * c / s\n        f_b = -torch.sqrt(2 * q * self.target_kl)\n\n        if lam_mid > 0:\n            if c < 0:\n                if lam_a > lam_mid:\n                    lam_a = lam_mid\n                    f_a = f_mid\n                if lam_b < lam_mid:\n                    lam_b = lam_mid\n                    f_b = f_mid\n            else:\n                if lam_a < lam_mid:\n                    lam_a = lam_mid\n                    f_a = f_mid\n                if lam_b > lam_mid:\n                    lam_b = lam_mid\n                    f_b = f_mid\n        else:\n            if c < 0:\n                lam = lam_b\n            else:\n                lam = lam_a\n\n        lam = lam_a if f_a >= f_b else lam_b\n        nu = max(0.0, (lam * c - r) / s)\n\n        return lam, nu\n\n    def save_session(self, logger):\n        # Where experiment outputs are saved by default:\n        DEFAULT_DATA_DIR = osp.join(osp.abspath(osp.dirname(osp.dirname(__file__))), 'data')\n        self.output_dir = DEFAULT_DATA_DIR\n\n        fpath = 'pyt_save'\n        fpath = osp.join(self.output_dir, self.model_name , fpath)\n        itr = None\n        fname = 'model' + ('%d' % itr if itr is not None else '') + '.pt'\n        fname = osp.join(fpath, fname)\n        os.makedirs(fpath, exist_ok=True)\n\n        with warnings.catch_warnings():\n            warnings.simplefilter(\"ignore\")\n            # We are using a non-recommended way of saving PyTorch models,\n            # by pickling whole objects (which are dependent on the exact\n            # directory structure at the time of saving) as opposed to\n            # just saving network weights. 
This works sufficiently well\n # for the purposes of Spinning Up, but you may want to do\n # something different for your personal PyTorch project.\n # We use a catch_warnings() context to avoid the warnings about\n # not being able to save the source code.\n\n torch.save(logger.pytorch_saver_elements, fname)\n\n save_path = os.path.join(fpath, self.model_name + '.pt')\n\n ckpt = dict(policy_state_dict=self.policy.state_dict(),\n value_state_dict=self.value_fun.state_dict(),\n cost_state_dict=self.cost_fun.state_dict(),\n mean_rewards=self.mean_rewards,\n mean_costs=self.mean_costs,\n epoch_num=self.epoch_num,\n elapsed_time=self.elapsed_time)\n\n if self.simulator.obs_filter:\n ckpt['obs_filter'] = self.simulator.obs_filter\n\n torch.save(ckpt, save_path)\n\n def load_session(self, load_path=None):\n if load_path is None:\n load_path = os.path.join(self.save_dir, self.model_name + '.pt')\n print(\"load path:\", load_path)\n ckpt = torch.load(load_path)\n\n self.policy.load_state_dict(ckpt['policy_state_dict'])\n self.value_fun.load_state_dict(ckpt['value_state_dict'])\n self.cost_fun.load_state_dict(ckpt['cost_state_dict'])\n self.mean_rewards = ckpt['mean_rewards']\n self.mean_costs = ckpt['mean_costs']\n self.epoch_num = ckpt['epoch_num']\n self.elapsed_time = ckpt['elapsed_time']\n\n try:\n self.simulator.obs_filter = ckpt['obs_filter']\n except KeyError:\n pass\n\n def print_update(self, logger):\n update_message = '[Epoch]: {0} | [Avg. Reward]: {1} | [Avg. Cost]: {2} | [Elapsed Time]: {3}'\n\n elapsed_time_str = ''.join(str(self.elapsed_time)).split('.')[0]\n format_args = (self.epoch_num, self.mean_rewards[-1], self.mean_costs[-1], elapsed_time_str)\n self.session_cum_avg_rewards += (self.mean_rewards[-1]/(self.epoch_num+1))\n self.session_cum_avg_costs += (self.mean_costs[-1]/(self.epoch_num+1))\n\n logger.store(EpRet=self.mean_rewards[-1],\n EpCost=self.mean_costs[-1])\n # logger.store()\n\n logger.log_tabular('Epoch', self.epoch_num)\n logger.log_tabular('EpRet', with_min_and_max=False)\n logger.log_tabular('EpCost', with_min_and_max=False)\n logger.dump_tabular()\n\n update_metrics = {'mean rewards': self.mean_rewards[-1],\n 'mean costs': self.mean_costs[-1],\n 'cum average rewards': self.session_cum_avg_rewards,\n 'cum average costs': self.session_cum_avg_costs\n }\n\n wandb.log(update_metrics)\n\n print(update_message.format(*format_args))\n\n\nif __name__ == '__main__':\n import argparse\n from utils import setup_logger_kwargs\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--env_name', type=str, default='Safexp-PointGoal1-v0')\n # parser.add_argument('--env_name', type=str, default='Safexp-PointGoal0-v0')\n parser.add_argument('--target_kl', type=float, default=0.01)\n parser.add_argument('--vf_lr', type=float, default=0.01)\n parser.add_argument('--cf_lr', type=float, default=0.01)\n parser.add_argument('--cost_lim', type=int, default=10)\n\n parser.add_argument('--train_v_iters', type=int, default=5)\n parser.add_argument('--train_c_iters', type=int, default=5)\n parser.add_argument('--val_l2_reg', type=float, default=0.001)\n parser.add_argument('--cost_l2_reg', type=float, default=0.001)\n parser.add_argument('--gamma', type=float, default=0.995)\n parser.add_argument('--cost_gamma', type=float, default=0.995)\n\n parser.add_argument('--cg_damping', type=float, default=0.001)\n parser.add_argument('--cg_max_iters', type=int, default=5)\n\n parser.add_argument('--line_search_coef', type=float, default=0.9)\n parser.add_argument('--line_search_max_iter', 
type=int, default=10)\n    parser.add_argument('--line_search_accept_ratio', type=float, default=0.1)\n\n    parser.add_argument('--optim_max_iter', type=int, default=25)\n    parser.add_argument('--model-name', type=str, dest='model_name', default='Safe-model',\n                        # required=True,\n                        help='The entry in config.yaml from which settings ' \\\n                             'should be loaded.')\n    parser.add_argument('--continue_from_file', action='store_true')\n    parser.add_argument('--save_every', type=int, default=5)\n    parser.add_argument('--print_updates', action='store_false')\n    parser.add_argument('--cpu', type=int, default=1)\n    parser.add_argument('--seed', type=int, default=0)\n\n    args = parser.parse_args()\n\n    DEFAULT_DATA_DIR = osp.join(osp.abspath(osp.dirname(osp.dirname(__file__))), 'data')\n    logger_kwargs = setup_logger_kwargs(PROJECT_NAME, args.seed, data_dir = DEFAULT_DATA_DIR)\n\n    # mpi_fork(args.cpu)  # run parallel code with mpi\n\n# Set environment and arguments\n    env = gym.make(args.env_name)\n    state_dim = env.observation_space.shape[0]\n    action_dim = env.action_space.shape[0]\n\n    epochs = 500\n    n_episodes = 5\n    # n_episodes = 10000\n    max_ep_len = 16\n    policy_dims = [64, 64]\n    vf_dims = [64, 64]\n    cf_dims = [64, 64]\n    cost_lim = 10\n\n    # Gaussian policy\n    policy = MLP_DiagGaussianPolicy(state_dim, policy_dims, action_dim)\n    value_fun = MLP(state_dim + 1, vf_dims, 1)\n    cost_fun = MLP(state_dim + 1, cf_dims, 1)\n\n    simulator = SinglePathSimulator(args.env_name, policy, n_episodes, max_ep_len)\n    cpo = CPO(policy,\n              value_fun,\n              cost_fun,\n              simulator,\n              model_name='cpo-run-500e',\n              cost_lim=args.cost_lim)\n\n    model_name = 'cpo'\n\n    print(f'Training policy {model_name} on {args.env_name} environment...\\n')\n\n    cpo.train(epochs, logger_kwargs)\n\n    wandb.config.update(args)\n\n    wandb.finish()\n\n\n" ]
[ [ "torch.mean", "numpy.sum", "torch.load", "torch.sqrt", "torch.cat", "torch.zeros", "torch.sum", "torch.unsqueeze", "torch.tensor", "torch.matmul", "torch.pow", "torch.no_grad", "numpy.mean", "torch.arange", "torch.cumsum", "torch.nn.MSELoss", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
FFI-Vietnam/CameraTraps-FFIVietnamAdaptation
[ "308107436332aa07a73bf75b124d11947fde557c" ]
[ "api/batch_processing/postprocessing/postprocess_batch_results.py" ]
[ "\"\"\"\n\npostprocess_batch_results.py\n\nGiven a .json or .csv file representing the output from the batch detection API,\ndo one or more of the following:\n\n* Evaluate detector precision/recall, optionally rendering results (requires\n ground truth)\n* Sample true/false positives/negatives and render to HTML (requires ground\n truth)\n* Sample detections/non-detections and render to HTML (when ground truth isn't\n available)\n\nGround truth, if available, must be in the COCO Camera Traps format.\n\n\"\"\"\n\n\n#%% Constants and imports\n\nimport argparse\nimport collections\nimport copy\nfrom enum import IntEnum\nimport errno\nimport io\nimport itertools\nfrom multiprocessing.pool import ThreadPool\nimport os\nimport sys\nimport time\nfrom typing import Any, Dict, Iterable, Optional, Tuple\nimport uuid\nimport warnings\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport humanfriendly\nimport pandas as pd\nfrom sklearn.metrics import precision_recall_curve, confusion_matrix, average_precision_score\nfrom tqdm import tqdm\n\n# Assumes ai4eutils is on the python path\n# https://github.com/Microsoft/ai4eutils\nfrom write_html_image_list import write_html_image_list\nimport path_utils\n\n# Assumes the cameratraps repo root is on the path\nimport visualization.visualization_utils as vis_utils\nimport visualization.plot_utils as plot_utils\nfrom data_management.cct_json_utils import (CameraTrapJsonUtils, IndexedJsonDb)\nfrom api.batch_processing.postprocessing.load_api_results import load_api_results\nfrom ct_utils import args_to_object\n\nmatplotlib.use('agg')\nwarnings.filterwarnings('ignore', '(Possibly )?corrupt EXIF data', UserWarning)\n\n\n#%% Options\n\nDEFAULT_NEGATIVE_CLASSES = ['empty']\nDEFAULT_UNKNOWN_CLASSES = ['unknown', 'unlabeled', 'ambiguous']\n\n\ndef has_overlap(set1: Iterable, set2: Iterable) -> bool:\n \"\"\"Check whether 2 sets overlap.\"\"\"\n return not set(set1).isdisjoint(set(set2))\n\n\n# Make sure there is no overlap between the two sets, because this will cause\n# issues in the code\nassert not has_overlap(DEFAULT_NEGATIVE_CLASSES, DEFAULT_UNKNOWN_CLASSES), (\n 'Default negative and unknown classes cannot overlap.')\n\n\nclass PostProcessingOptions:\n\n ### Required inputs\n\n api_output_file = ''\n output_dir = ''\n\n ### Options\n\n # Can be a folder or a SAS URL\n image_base_dir = '.'\n\n ground_truth_json_file = ''\n\n # These apply only when we're doing ground-truth comparisons\n negative_classes = DEFAULT_NEGATIVE_CLASSES\n unlabeled_classes = DEFAULT_UNKNOWN_CLASSES\n\n # A list of output sets that we should count, but not render images for.\n #\n # Typically used to preview sets with lots of empties, where you don't want to\n # subset but also don't want to render 100,000 empty images.\n #\n # detections, non_detections\n # detections_animal, detections_person, detections_vehicle\n rendering_bypass_sets = []\n\n confidence_threshold = 0.85\n classification_confidence_threshold = 0.5\n\n # Used for summary statistics only\n target_recall = 0.9\n\n # Number of images to sample, -1 for \"all images\"\n num_images_to_sample = 500\n\n # Random seed for sampling, or None\n sample_seed: Optional[int] = 0 # None\n\n viz_target_width = 800\n\n line_thickness = 4\n box_expansion = 0\n\n sort_html_by_filename = True\n\n # Optionally separate detections into categories (animal/vehicle/human)\n separate_detections_by_category = True\n\n # Optionally replace one or more strings in filenames with other strings;\n # useful for taking a set 
of results generated for one folder structure\n # and applying them to a slightly different folder structure.\n api_output_filename_replacements = {}\n ground_truth_filename_replacements = {}\n\n # Allow bypassing API output loading when operating on previously-loaded\n # results\n api_detection_results: Optional[pd.DataFrame] = None\n api_other_fields: Optional[Dict[str, Any]] = None\n\n # Should we also split out a separate report about the detections that were\n # just below our main confidence threshold?\n #\n # Currently only supported when ground truth is unavailable\n include_almost_detections = False\n almost_detection_confidence_threshold = 0.75\n\n # Control rendering parallelization\n parallelize_rendering_n_cores: Optional[int] = 100\n parallelize_rendering = False\n\n # Determines whether missing images force an error\n allow_missing_images = False\n\n# ...PostProcessingOptions\n\n\nclass PostProcessingResults:\n\n output_html_file = ''\n api_detection_results: Optional[pd.DataFrame] = None\n api_other_fields: Optional[Dict[str, Any]] = None\n\n\n##%% Helper classes and functions\n\nclass DetectionStatus(IntEnum):\n \"\"\"\n Flags used to mark images as positive or negative for P/R analysis\n (according to ground truth and/or detector output)\n \"\"\"\n \n DS_NEGATIVE = 0\n DS_POSITIVE = 1\n\n # Anything greater than this isn't clearly positive or negative\n DS_MAX_DEFINITIVE_VALUE = DS_POSITIVE\n\n # image has annotations suggesting both negative and positive\n DS_AMBIGUOUS = 2\n\n # image is not annotated or is annotated with 'unknown', 'unlabeled', ETC.\n DS_UNKNOWN = 3\n\n # image has not yet been assigned a state\n DS_UNASSIGNED = 4\n\n # In some analyses, we add an additional class that lets us look at\n # detections just below our main confidence threshold\n DS_ALMOST = 5\n\n\ndef mark_detection_status(\n indexed_db: IndexedJsonDb,\n negative_classes: Iterable[str] = DEFAULT_NEGATIVE_CLASSES,\n unknown_classes: Iterable[str] = DEFAULT_UNKNOWN_CLASSES\n ) -> Tuple[int, int, int, int]:\n \"\"\"\n For each image in indexed_db.db['images'], add a '_detection_status' field\n to indicate whether to treat this image as positive, negative, ambiguous,\n or unknown.\n\n Makes modifications in-place.\n\n returns (n_negative, n_positive, n_unknown, n_ambiguous)\n \"\"\"\n \n negative_classes = set(negative_classes)\n unknown_classes = set(unknown_classes)\n\n # count the # of images with each type of DetectionStatus\n n_unknown = 0\n n_ambiguous = 0\n n_positive = 0\n n_negative = 0\n\n print('Preparing ground-truth annotations')\n for im in tqdm(indexed_db.db['images']):\n\n image_id = im['id']\n annotations = indexed_db.image_id_to_annotations[image_id]\n categories = [ann['category_id'] for ann in annotations]\n category_names = set(indexed_db.cat_id_to_name[cat] for cat in categories)\n\n # Check whether this image has:\n # - unknown / unassigned-type labels\n # - negative-type labels\n # - positive labels (i.e., labels that are neither unknown nor negative)\n has_unknown_labels = has_overlap(category_names, unknown_classes)\n has_negative_labels = has_overlap(category_names, negative_classes)\n has_positive_labels = 0 < len(category_names - (unknown_classes | negative_classes))\n # assert has_unknown_labels is False, '{} has unknown labels'.format(annotations)\n\n # If there are no image annotations, treat this as unknown\n if len(categories) == 0:\n n_unknown += 1\n im['_detection_status'] = DetectionStatus.DS_UNKNOWN\n\n # n_negative += 1\n # im['_detection_status'] = 
DetectionStatus.DS_NEGATIVE\n\n        # If the image has more than one type of label, it's ambiguous\n        # note: bools are automatically converted to 0/1, so we can sum\n        elif (has_unknown_labels + has_negative_labels + has_positive_labels) > 1:\n            n_ambiguous += 1\n            im['_detection_status'] = DetectionStatus.DS_AMBIGUOUS\n\n        # After the check above, we can be sure it's only one of positive,\n        # negative, or unknown.\n        #\n        # Important: do not merge the following 'unknown' branch with the first\n        # 'unknown' branch above, where we tested 'if len(categories) == 0'\n        #\n        # If the image has only unknown labels\n        elif has_unknown_labels:\n            n_unknown += 1\n            im['_detection_status'] = DetectionStatus.DS_UNKNOWN\n\n        # If the image has only negative labels\n        elif has_negative_labels:\n            n_negative += 1\n            im['_detection_status'] = DetectionStatus.DS_NEGATIVE\n\n        # If the image has only positive labels\n        elif has_positive_labels:\n            n_positive += 1\n            im['_detection_status'] = DetectionStatus.DS_POSITIVE\n\n            # Annotate the category, if it is unambiguous\n            if len(category_names) == 1:\n                im['_unambiguous_category'] = list(category_names)[0]\n\n        else:\n            raise Exception('Invalid detection state')\n\n    # ...for each image\n\n    return n_negative, n_positive, n_unknown, n_ambiguous\n\n# ...mark_detection_status()\n\n\ndef is_sas_url(s: str) -> bool:\n    \"\"\"\n    Placeholder for a more robust way to verify that a link is a SAS URL.\n    99.999% of the time this will suffice for what we're using it for right now.\n    \"\"\"\n    \n    return (s.startswith(('http://', 'https://')) and ('core.windows.net' in s)\n            and ('?' in s))\n\n\ndef relative_sas_url(folder_url: str, relative_path: str) -> Optional[str]:\n    \"\"\"\n    Given a container-level or folder-level SAS URL, create a SAS URL to the\n    specified relative path.\n    \"\"\"\n    \n    relative_path = relative_path.replace('%','%25')\n    relative_path = relative_path.replace('#','%23')\n    relative_path = relative_path.replace(' ','%20')\n\n    if not is_sas_url(folder_url):\n        return None\n    tokens = folder_url.split('?')\n    assert len(tokens) == 2\n    if not tokens[0].endswith('/'):\n        tokens[0] = tokens[0] + '/'\n    if relative_path.startswith('/'):\n        relative_path = relative_path[1:]\n    return tokens[0] + relative_path + '?' 
+ tokens[1]\n\n\ndef render_bounding_boxes(\n image_base_dir,\n image_relative_path,\n display_name,\n detections,\n res,\n detection_categories=None,\n classification_categories=None,\n options=None):\n \"\"\"\n Renders detection bounding boxes on a single image.\n\n The source image is:\n\n image_base_dir / image_relative_path\n\n The target image is, for example:\n\n [options.output_dir] / ['detections' or 'non_detections'] / [filename with slashes turned into tildes]\n\n Returns the html info struct for this image in the form that's used for\n write_html_image_list.\n \"\"\"\n\n if options is None:\n options = PostProcessingOptions()\n\n # Leaving code in place for reading from blob storage, may support this\n # in the future.\n \"\"\"\n stream = io.BytesIO()\n _ = blob_service.get_blob_to_stream(container_name, image_id, stream)\n # resize is to display them in this notebook or in the HTML more quickly\n image = Image.open(stream).resize(viz_size)\n \"\"\"\n\n if res in options.rendering_bypass_sets:\n\n sample_name = res + '_' + path_utils.flatten_path(image_relative_path)\n\n else:\n\n if is_sas_url(image_base_dir):\n image_full_path = relative_sas_url(image_base_dir, image_relative_path)\n else:\n image_full_path = os.path.join(image_base_dir, image_relative_path)\n\n # os.path.isfile() is slow when mounting remote directories; much faster\n # to just try/except on the image open.\n try:\n image = vis_utils.open_image(image_full_path)\n except:\n print('Warning: could not open image file {}'.format(image_full_path))\n return ''\n\n if options.viz_target_width is not None:\n image = vis_utils.resize_image(image, options.viz_target_width)\n\n vis_utils.render_detection_bounding_boxes(\n detections, image,\n label_map=detection_categories,\n classification_label_map=classification_categories,\n confidence_threshold=options.confidence_threshold,\n thickness=options.line_thickness,\n expansion=options.box_expansion)\n\n # Render images to a flat folder... we can use os.sep here because we've\n # already normalized paths\n sample_name = res + '_' + path_utils.flatten_path(image_relative_path)\n fullpath = os.path.join(options.output_dir, res, sample_name)\n try:\n image.save(fullpath)\n except OSError as e:\n # errno.ENAMETOOLONG doesn't get thrown properly on Windows, so\n # we awkwardly check against a hard-coded limit\n if (e.errno == errno.ENAMETOOLONG) or (len(fullpath) >= 259):\n extension = os.path.splitext(sample_name)[1]\n sample_name = res + '_' + str(uuid.uuid4()) + extension\n image.save(os.path.join(options.output_dir, res, sample_name))\n else:\n raise\n\n # Use slashes regardless of os\n file_name = '{}/{}'.format(res,sample_name)\n\n return {\n 'filename': file_name,\n 'title': display_name,\n 'textStyle': 'font-family:verdana,arial,calibri;font-size:80%;text-align:left;margin-top:20;margin-bottom:5'\n }\n\n# ...render_bounding_boxes\n\n\ndef prepare_html_subpages(images_html, output_dir, options=None):\n \"\"\"\n Write out a series of html image lists, e.g. the fp/tp/fn/tn pages.\n\n image_html is a dictionary mapping an html page name (e.g. 
\"fp\") to a list\n of image structs friendly to write_html_image_list\n \"\"\"\n \n if options is None:\n options = PostProcessingOptions()\n\n # Count items in each category\n image_counts = {}\n for res, array in images_html.items():\n image_counts[res] = len(array)\n\n # Optionally sort by filename before writing to html\n if options.sort_html_by_filename:\n images_html_sorted = {}\n for res, array in images_html.items():\n sorted_array = sorted(array, key=lambda x: x['filename'])\n images_html_sorted[res] = sorted_array\n images_html = images_html_sorted\n\n # Write the individual HTML files\n for res, array in images_html.items():\n write_html_image_list(\n filename=os.path.join(output_dir, '{}.html'.format(res)),\n images=array,\n options={\n 'headerHtml': '<h1>{}</h1>'.format(res.upper())\n })\n\n return image_counts\n\n# ...prepare_html_subpages()\n\n\n#%% Main function\n\ndef process_batch_results(options: PostProcessingOptions\n ) -> PostProcessingResults:\n\n ppresults = PostProcessingResults()\n\n ##%% Expand some options for convenience\n\n output_dir = options.output_dir\n\n\n ##%% Prepare output dir\n\n os.makedirs(output_dir, exist_ok=True)\n\n\n ##%% Load ground truth if available\n\n ground_truth_indexed_db = None\n\n if (options.ground_truth_json_file is not None) and (len(options.ground_truth_json_file) > 0):\n\n if options.separate_detections_by_category:\n print(\"Warning: I don't know how to separate categories yet when doing a P/R analysis, disabling category separation\")\n options.separate_detections_by_category = False\n\n ground_truth_indexed_db = IndexedJsonDb(\n options.ground_truth_json_file, b_normalize_paths=True,\n filename_replacements=options.ground_truth_filename_replacements)\n\n # Mark images in the ground truth as positive or negative\n n_negative, n_positive, n_unknown, n_ambiguous = mark_detection_status(\n ground_truth_indexed_db, negative_classes=options.negative_classes,\n unknown_classes=options.unlabeled_classes)\n print(f'Finished loading and indexing ground truth: {n_negative} '\n f'negative, {n_positive} positive, {n_unknown} unknown, '\n f'{n_ambiguous} ambiguous')\n\n\n ##%% Load detection (and possibly classification) results\n\n if options.api_detection_results is None:\n detections_df, other_fields = load_api_results(\n options.api_output_file, normalize_paths=True,\n filename_replacements=options.api_output_filename_replacements)\n ppresults.api_detection_results = detections_df\n ppresults.api_other_fields = other_fields\n\n else:\n print('Bypassing detection results loading...')\n assert options.api_other_fields is not None\n detections_df = options.api_detection_results\n other_fields = options.api_other_fields\n\n # Remove failed rows\n n_failures = 0\n if 'failure' in detections_df.columns:\n n_failures = detections_df['failure'].count()\n print('Warning: {} failed images'.format(n_failures))\n detections_df = detections_df[detections_df['failure'].isna()]\n \n assert other_fields is not None\n\n detection_categories = other_fields['detection_categories']\n\n # Convert keys and values to lowercase\n classification_categories = other_fields.get('classification_categories', {})\n classification_categories = {\n k.lower(): v.lower()\n for k, v in classification_categories.items()\n }\n\n # Add column 'pred_detection_label' to indicate predicted detection status,\n # not separating out the classes\n det_status = 'pred_detection_label'\n if options.include_almost_detections:\n detections_df[det_status] = DetectionStatus.DS_ALMOST\n 
confidences = detections_df['max_detection_conf']\n\n pos_mask = (confidences >= options.confidence_threshold)\n detections_df.loc[pos_mask, det_status] = DetectionStatus.DS_POSITIVE\n\n neg_mask = (confidences < options.almost_detection_confidence_threshold)\n detections_df.loc[neg_mask, det_status] = DetectionStatus.DS_NEGATIVE\n else:\n detections_df[det_status] = np.where(\n detections_df['max_detection_conf'] >= options.confidence_threshold,\n DetectionStatus.DS_POSITIVE, DetectionStatus.DS_NEGATIVE)\n\n n_positives = sum(detections_df[det_status] == DetectionStatus.DS_POSITIVE)\n print(f'Finished loading and preprocessing {len(detections_df)} rows '\n f'from detector output, predicted {n_positives} positives.')\n\n if options.include_almost_detections:\n n_almosts = sum(detections_df[det_status] == DetectionStatus.DS_ALMOST)\n print('...and {} almost-positives'.format(n_almosts))\n\n\n ##%% If we have ground truth, remove images we can't match to ground truth\n\n if ground_truth_indexed_db is not None:\n\n b_match = detections_df['file'].isin(\n ground_truth_indexed_db.filename_to_id)\n print(f'Confirmed filename matches to ground truth for {sum(b_match)} '\n f'of {len(detections_df)} files')\n\n detections_df = detections_df[b_match]\n detector_files = detections_df['file'].tolist()\n\n assert len(detector_files) > 0, (\n 'No detection files available, possible path issue?')\n\n print('Trimmed detection results to {} files'.format(len(detector_files)))\n\n\n ##%% Sample images for visualization\n\n images_to_visualize = detections_df\n\n if options.num_images_to_sample is not None and options.num_images_to_sample > 0:\n images_to_visualize = images_to_visualize.sample(\n n=min(options.num_images_to_sample, len(images_to_visualize)),\n random_state=options.sample_seed)\n\n output_html_file = ''\n\n style_header = \"\"\"<head>\n <style type=\"text/css\">\n a { text-decoration: none; }\n body { font-family: segoe ui, calibri, \"trebuchet ms\", verdana, arial, sans-serif; }\n div.contentdiv { margin-left: 20px; }\n </style>\n </head>\"\"\"\n\n\n ##%% Fork here depending on whether or not ground truth is available\n\n # If we have ground truth, we'll compute precision/recall and sample tp/fp/tn/fn.\n #\n # Otherwise we'll just visualize detections/non-detections.\n\n if ground_truth_indexed_db is not None:\n\n ##%% Detection evaluation: compute precision/recall\n\n # numpy array of detection probabilities\n p_detection = detections_df['max_detection_conf'].values\n n_detections = len(p_detection)\n\n # numpy array of bools (0.0/1.0), and -1 as null value\n gt_detections = np.zeros(n_detections, dtype=float)\n\n for i_detection, fn in enumerate(detector_files):\n image_id = ground_truth_indexed_db.filename_to_id[fn]\n image = ground_truth_indexed_db.image_id_to_image[image_id]\n detection_status = image['_detection_status']\n\n if detection_status == DetectionStatus.DS_NEGATIVE:\n gt_detections[i_detection] = 0.0\n elif detection_status == DetectionStatus.DS_POSITIVE:\n gt_detections[i_detection] = 1.0\n else:\n gt_detections[i_detection] = -1.0\n\n # Don't include ambiguous/unknown ground truth in precision/recall analysis\n b_valid_ground_truth = gt_detections >= 0.0\n\n p_detection_pr = p_detection[b_valid_ground_truth]\n gt_detections_pr = gt_detections[b_valid_ground_truth]\n\n print('Including {} of {} values in p/r analysis'.format(np.sum(b_valid_ground_truth),\n len(b_valid_ground_truth)))\n\n precisions, recalls, thresholds = precision_recall_curve(gt_detections_pr, 
p_detection_pr)\n\n        # For completeness, include the result at a confidence threshold of 1.0\n        thresholds = np.append(thresholds, [1.0])\n\n        precisions_recalls = pd.DataFrame(data={\n            'confidence_threshold': thresholds,\n            'precision': precisions,\n            'recall': recalls\n        })\n\n        # Compute and print summary statistics\n        average_precision = average_precision_score(gt_detections_pr, p_detection_pr)\n        print('Average precision: {:.1%}'.format(average_precision))\n\n        # Thresholds go up throughout precisions/recalls/thresholds; find the last\n        # value where recall is at or above target. That's our precision @ target recall.\n        target_recall = options.target_recall\n        b_above_target_recall = np.where(recalls >= target_recall)\n        if not np.any(b_above_target_recall):\n            precision_at_target_recall = 0.0\n        else:\n            i_target_recall = np.argmax(b_above_target_recall)\n            precision_at_target_recall = precisions[i_target_recall]\n        print('Precision at {:.1%} recall: {:.1%}'.format(target_recall, precision_at_target_recall))\n\n        cm = confusion_matrix(gt_detections_pr, np.array(p_detection_pr) > options.confidence_threshold)\n\n        # Flatten the confusion matrix\n        tn, fp, fn, tp = cm.ravel()\n\n        precision_at_confidence_threshold = tp / (tp + fp)\n        recall_at_confidence_threshold = tp / (tp + fn)\n        f1 = 2.0 * (precision_at_confidence_threshold * recall_at_confidence_threshold) / \\\n            (precision_at_confidence_threshold + recall_at_confidence_threshold)\n\n        print('At a confidence threshold of {:.1%}, precision={:.1%}, recall={:.1%}, f1={:.1%}'.format(\n            options.confidence_threshold, precision_at_confidence_threshold, recall_at_confidence_threshold, f1))\n\n        ##%% Collect classification results, if they exist\n\n        classifier_accuracies = []\n\n        # Mapping of classnames to idx for the confusion matrix.\n        #\n        # The lambda is actually kind of a hack, because we assume that\n        # the following code does not reassign classname_to_idx\n        classname_to_idx = collections.defaultdict(lambda: len(classname_to_idx))\n\n        # Confusion matrix as defaultdict of defaultdict\n        #\n        # Rows / first index is ground truth, columns / second index is predicted category\n        classifier_cm = collections.defaultdict(lambda: collections.defaultdict(lambda: 0))\n\n        # iDetection = 0; fn = detector_files[iDetection]; print(fn)\n        assert len(detector_files) == len(detections_df)\n        for iDetection, fn in enumerate(detector_files):\n\n            image_id = ground_truth_indexed_db.filename_to_id[fn]\n            image = ground_truth_indexed_db.image_id_to_image[image_id]\n            detections = detections_df['detections'].iloc[iDetection]\n            pred_class_ids = [det['classifications'][0][0] \\\n                for det in detections if 'classifications' in det.keys()]\n            pred_classnames = [classification_categories[pd] for pd in pred_class_ids]\n\n            # If this image has classification predictions, and an unambiguous class\n            # annotated, and is a positive image...\n            if len(pred_classnames) > 0 \\\n                    and '_unambiguous_category' in image.keys() \\\n                    and image['_detection_status'] == DetectionStatus.DS_POSITIVE:\n\n                # The unambiguous category, we make this a set for easier handling afterward\n                gt_categories = set([image['_unambiguous_category']])\n                pred_categories = set(pred_classnames)\n\n                # Compute the accuracy as intersection over union,\n                # i.e. 
(# of categories in both prediction and GT)\n                # divided by (# of categories in either prediction or GT)\n                #\n                # In case of only one GT category, the result will be 1.0, if\n                # prediction is one category and this category matches GT\n                #\n                # It is 1.0/(# of predicted top-1 categories), if the GT is\n                # one of the predicted top-1 categories.\n                #\n                # It is 0.0, if none of the predicted categories is correct\n\n                classifier_accuracies.append(\n                    len(gt_categories & pred_categories)\n                    / len(gt_categories | pred_categories)\n                )\n                image['_classification_accuracy'] = classifier_accuracies[-1]\n\n                # Distribute this accuracy across all predicted categories in the\n                # confusion matrix\n                assert len(gt_categories) == 1\n                gt_class_idx = classname_to_idx[list(gt_categories)[0]]\n                for pred_category in pred_categories:\n                    pred_class_idx = classname_to_idx[pred_category]\n                    classifier_cm[gt_class_idx][pred_class_idx] += 1\n\n        # ...for each file in the detection results\n\n        # If we have classification results\n        if len(classifier_accuracies) > 0:\n\n            # Build confusion matrix as array from classifier_cm\n            all_class_ids = sorted(classname_to_idx.values())\n            classifier_cm_array = np.array(\n                [[classifier_cm[r_idx][c_idx] for c_idx in all_class_ids] for r_idx in all_class_ids], dtype=float)\n            classifier_cm_array /= (classifier_cm_array.sum(axis=1, keepdims=True) + 1e-7)\n\n            # Print some statistics\n            print('Finished computation of {} classification results'.format(len(classifier_accuracies)))\n            print('Mean accuracy: {}'.format(np.mean(classifier_accuracies)))\n\n            # Prepare confusion matrix output\n\n            # Get confusion matrix as string\n            sio = io.StringIO()\n            np.savetxt(sio, classifier_cm_array * 100, fmt='%5.1f')\n            cm_str = sio.getvalue()\n            # Get fixed-size classname for each idx\n            idx_to_classname = {v:k for k,v in classname_to_idx.items()}\n            classname_list = [idx_to_classname[idx] for idx in sorted(classname_to_idx.values())]\n            classname_headers = ['{:<5}'.format(cname[:5]) for cname in classname_list]\n\n            # Prepend class name on each line and add to the top\n            cm_str_lines = [' ' * 16 + ' '.join(classname_headers)]\n            cm_str_lines += ['{:>15}'.format(cn[:15]) + ' ' + cm_line for cn, cm_line in zip(classname_list, cm_str.splitlines())]\n\n            # Print formatted confusion matrix\n            print('Confusion matrix: ')\n            print(*cm_str_lines, sep='\\n')\n\n            # Plot confusion matrix\n\n            # To manually add more space at bottom: plt.rcParams['figure.subplot.bottom'] = 0.1\n            #\n            # Add 0.5 to figsize for every class. 
For two classes, this will result in\n            # fig = plt.figure(figsize=[4,4])\n            fig = plot_utils.plot_confusion_matrix(\n                classifier_cm_array,\n                classname_list,\n                normalize=False,\n                title='Confusion matrix',\n                cmap=plt.cm.Blues,\n                vmax=1.0,\n                use_colorbar=True,\n                y_label=True)\n            cm_figure_relative_filename = 'confusion_matrix.png'\n            cm_figure_filename = os.path.join(output_dir, cm_figure_relative_filename)\n            plt.savefig(cm_figure_filename)\n            plt.close(fig)\n\n        # ...if we have classification results\n\n\n        ##%% Render output\n\n        # Write p/r table to .csv file in output directory\n        pr_table_filename = os.path.join(output_dir, 'prec_recall.csv')\n        precisions_recalls.to_csv(pr_table_filename, index=False)\n\n        # Write precision/recall plot to .png file in output directory\n        t = 'Precision-Recall curve: AP={:0.1%}, P@{:0.1%}={:0.1%}'.format(\n            average_precision, target_recall, precision_at_target_recall)\n        fig = plot_utils.plot_precision_recall_curve(precisions, recalls, t)\n        pr_figure_relative_filename = 'prec_recall.png'\n        pr_figure_filename = os.path.join(output_dir, pr_figure_relative_filename)\n        plt.savefig(pr_figure_filename)\n        # plt.show(block=False)\n        # plt.close(fig)\n\n\n        ##%% Sampling\n\n        # Sample true/false positives/negatives with correct/incorrect top-1\n        # classification and render to html\n\n        # Accumulate html image structs (in the format expected by write_html_image_lists)\n        # for each category, e.g. 'tp', 'fp', ..., 'class_bird', ...\n        images_html = collections.defaultdict(list)\n        # Add default entries by accessing them for the first time; accessing a\n        # missing key creates it in a defaultdict, so this is not a no-op\n        [images_html[res] for res in ['tp', 'tpc', 'tpi', 'fp', 'tn', 'fn']]\n        for res in images_html.keys():\n            os.makedirs(os.path.join(output_dir, res), exist_ok=True)\n\n        image_count = len(images_to_visualize)\n\n        # Each element will be a list of 2-tuples, with elements [collection name,html info struct]\n        rendering_results = []\n\n        # Each element will be a three-tuple with elements file,max_conf,detections\n        files_to_render = []\n\n        # Assemble the information we need for rendering, so we can parallelize without\n        # dealing with Pandas\n        # i_row = 0; row = images_to_visualize.iloc[0]\n        for _, row in images_to_visualize.iterrows():\n\n            # Filenames should already have been normalized to either '/' or '\\'\n            files_to_render.append([row['file'], row['max_detection_conf'], row['detections']])\n\n        def render_image_with_gt(file_info):\n\n            image_relative_path = file_info[0]\n            max_conf = file_info[1]\n            detections = file_info[2]\n\n            # This should already have been normalized to either '/' or '\\'\n\n            image_id = ground_truth_indexed_db.filename_to_id.get(image_relative_path, None)\n            if image_id is None:\n                print('Warning: could not find ground truth for image {}'.format(image_relative_path))\n                return None\n\n            image = ground_truth_indexed_db.image_id_to_image[image_id]\n            annotations = ground_truth_indexed_db.image_id_to_annotations[image_id]\n\n            gt_status = image['_detection_status']\n\n            gt_presence = bool(gt_status)\n\n            gt_classes = CameraTrapJsonUtils.annotations_to_classnames(\n                annotations, ground_truth_indexed_db.cat_id_to_name)\n            gt_class_summary = ','.join(gt_classes)\n\n            if gt_status > DetectionStatus.DS_MAX_DEFINITIVE_VALUE:\n                print(f'Skipping image {image_id}, does not have a definitive '\n                      f'ground truth status (status: {gt_status}, classes: {gt_class_summary})')\n                return None\n\n            detected = max_conf > options.confidence_threshold\n\n            if gt_presence and detected:\n                if '_classification_accuracy' 
not in image.keys():\n                    res = 'tp'\n                elif np.isclose(1, image['_classification_accuracy']):\n                    res = 'tpc'\n                else:\n                    res = 'tpi'\n            elif not gt_presence and detected:\n                res = 'fp'\n            elif gt_presence and not detected:\n                res = 'fn'\n            else:\n                res = 'tn'\n\n            display_name = '<b>Result type</b>: {}, <b>Presence</b>: {}, <b>Class</b>: {}, <b>Max conf</b>: {:0.3f}%, <b>Image</b>: {}'.format(\n                res.upper(), str(gt_presence), gt_class_summary,\n                max_conf * 100, image_relative_path)\n\n            rendered_image_html_info = render_bounding_boxes(\n                options.image_base_dir,\n                image_relative_path,\n                display_name,\n                detections,\n                res,\n                detection_categories,\n                classification_categories,\n                options)\n\n            image_result = None\n            if len(rendered_image_html_info) > 0:\n                image_result = [[res, rendered_image_html_info]]\n                for gt_class in gt_classes:\n                    image_result.append(['class_{}'.format(gt_class), rendered_image_html_info])\n\n            return image_result\n\n        # ...def render_image_with_gt(file_info)\n\n        start_time = time.time()\n        if options.parallelize_rendering:\n            if options.parallelize_rendering_n_cores is None:\n                pool = ThreadPool()\n            else:\n                print('Rendering images with {} workers'.format(options.parallelize_rendering_n_cores))\n                pool = ThreadPool(options.parallelize_rendering_n_cores)\n            rendering_results = list(tqdm(pool.imap(render_image_with_gt, files_to_render), total=len(files_to_render)))\n        else:\n            # file_info = files_to_render[0]\n            for file_info in tqdm(files_to_render):\n                rendering_results.append(render_image_with_gt(file_info))\n        elapsed = time.time() - start_time\n\n        # Map all the rendering results in the list rendering_results into the\n        # dictionary images_html\n        image_rendered_count = 0\n        for rendering_result in rendering_results:\n            if rendering_result is None:\n                continue\n            image_rendered_count += 1\n            for assignment in rendering_result:\n                images_html[assignment[0]].append(assignment[1])\n\n        # Prepare the individual html image files\n        image_counts = prepare_html_subpages(images_html, output_dir)\n\n        print('{} images rendered (of {})'.format(image_rendered_count,image_count))\n\n        # Write index.html\n        all_tp_count = image_counts['tp'] + image_counts['tpc'] + image_counts['tpi']\n        total_count = all_tp_count + image_counts['tn'] + image_counts['fp'] + image_counts['fn']\n\n        classification_detection_results = \"\"\"&nbsp;&nbsp;&nbsp;&nbsp;<a href=\"tpc.html\">with all correct top-1 predictions (TPC)</a> ({})<br/>\n        &nbsp;&nbsp;&nbsp;&nbsp;<a href=\"tpi.html\">with one or more incorrect top-1 prediction (TPI)</a> ({})<br/>\n        &nbsp;&nbsp;&nbsp;&nbsp;<a href=\"tp.html\">without classification evaluation</a><sup>*</sup> ({})<br/>\"\"\".format(\n            image_counts['tpc'],\n            image_counts['tpi'],\n            image_counts['tp']\n        )\n\n        index_page = \"\"\"<html>\n    {}\n    <body>\n    <h2>Evaluation</h2>\n\n    <h3>Sample images</h3>\n    <div class=\"contentdiv\">\n    <p>A sample of {} images, annotated with detections above {:.1%} confidence.</p>\n    <a href=\"tp.html\">True positives (TP)</a> ({}) ({:0.1%})<br/>\n    CLASSIFICATION_PLACEHOLDER_1\n    <a href=\"tn.html\">True negatives (TN)</a> ({}) ({:0.1%})<br/>\n    <a href=\"fp.html\">False positives (FP)</a> ({}) ({:0.1%})<br/>\n    <a href=\"fn.html\">False negatives (FN)</a> ({}) ({:0.1%})<br/>\n    CLASSIFICATION_PLACEHOLDER_2\n    </div>\n    \"\"\".format(\n        style_header,\n        image_count, options.confidence_threshold,\n        all_tp_count, all_tp_count/total_count,\n        image_counts['tn'], image_counts['tn']/total_count,\n        image_counts['fp'], image_counts['fp']/total_count,\n        image_counts['fn'], 
image_counts['fn']/total_count\n )\n\n index_page += \"\"\"\n <h3>Detection results</h3>\n <div class=\"contentdiv\">\n <p>At a confidence threshold of {:0.1%}, precision={:0.1%}, recall={:0.1%}</p>\n <p><strong>Precision/recall summary for all {} images</strong></p><img src=\"{}\"><br/>\n </div>\n \"\"\".format(\n options.confidence_threshold, precision_at_confidence_threshold, recall_at_confidence_threshold,\n len(detections_df), pr_figure_relative_filename\n )\n\n if len(classifier_accuracies) > 0:\n index_page = index_page.replace('CLASSIFICATION_PLACEHOLDER_1',classification_detection_results)\n index_page = index_page.replace('CLASSIFICATION_PLACEHOLDER_2',\"\"\"<p><sup>*</sup>We do not evaluate the classification result of images\n if the classification information is missing, if the image contains\n categories like &lsquo;empty&rsquo; or &lsquo;human&rsquo;, or if the image has multiple\n classification labels.</p>\"\"\")\n else:\n index_page = index_page.replace('CLASSIFICATION_PLACEHOLDER_1','')\n index_page = index_page.replace('CLASSIFICATION_PLACEHOLDER_2','')\n\n if len(classifier_accuracies) > 0:\n index_page += \"\"\"\n <h3>Classification results</h3>\n <div class=\"contentdiv\">\n <p>Classification accuracy: {:.2%}<br>\n The accuracy is computed only for images with exactly one classification label.\n The accuracy of an image is computed as 1/(number of unique detected top-1 classes),\n i.e. if the model detects multiple boxes with different top-1 classes, then the accuracy\n decreases and the image is put into 'TPI'.</p>\n <p>Confusion matrix:</p>\n <p><img src=\"{}\"></p>\n <div style='font-family:monospace;display:block;'>{}</div>\n </div>\n \"\"\".format(\n np.mean(classifier_accuracies),\n cm_figure_relative_filename,\n \"<br>\".join(cm_str_lines).replace(' ', '&nbsp;')\n )\n\n # Show links to each GT class\n #\n # We could do this without classification results; currently we don't.\n if len(classname_to_idx) > 0:\n\n index_page += '<h3>Images of specific classes</h3><br/><div class=\"contentdiv\">'\n # Add links to all available classes\n for cname in sorted(classname_to_idx.keys()):\n index_page += '<a href=\"class_{0}.html\">{0}</a> ({1})<br>'.format(\n cname,\n len(images_html['class_{}'.format(cname)]))\n index_page += '</div>'\n\n # Close body and html tags\n index_page += '</body></html>'\n output_html_file = os.path.join(output_dir, 'index.html')\n with open(output_html_file, 'w') as f:\n f.write(index_page)\n\n print('Finished writing html to {}'.format(output_html_file))\n\n # ...for each image\n\n\n ##%% Otherwise, if we don't have ground truth...\n\n else:\n\n ##%% Sample detections/non-detections\n\n # Accumulate html image structs (in the format expected by write_html_image_list)\n # for each category\n images_html = collections.defaultdict(list)\n images_html['non_detections']\n\n # Add default entries by accessing them for the first time\n\n # Maps detection categories - e.g. 
\"human\" - to result set names, e.g.\n # \"detections_human\"\n detection_categories_to_results_name = {}\n\n if not options.separate_detections_by_category:\n images_html['detections']\n else:\n # Add a set of results for each category and combination of categories\n keys = detection_categories.keys()\n subsets = []\n for L in range(1, len(keys)+1):\n for subset in itertools.combinations(keys, L):\n subsets.append(subset)\n for subset in subsets:\n sorted_subset = tuple(sorted(subset))\n results_name = 'detections'\n for category_id in sorted_subset:\n results_name = results_name + '_' + detection_categories[category_id]\n images_html[results_name]\n detection_categories_to_results_name[sorted_subset] = results_name\n\n if options.include_almost_detections:\n images_html['almost_detections']\n\n # Create output directories\n for res in images_html.keys():\n os.makedirs(os.path.join(output_dir, res), exist_ok=True)\n\n image_count = len(images_to_visualize)\n has_classification_info = False\n\n # Each element will be a list of 2-tuples, with elements [collection name,html info struct]\n rendering_results = []\n\n # list of 3-tuples with elements (file, max_conf, detections)\n files_to_render = []\n\n # Assemble the information we need for rendering, so we can parallelize without\n # dealing with Pandas\n # i_row = 0; row = images_to_visualize.iloc[0]\n for _, row in images_to_visualize.iterrows():\n\n assert isinstance(row['detections'],list)\n \n # Filenames should already have been normalized to either '/' or '\\'\n files_to_render.append([row['file'],\n row['max_detection_conf'],\n row['detections']])\n\n # Get unique categories above the threshold for this image\n def get_positive_categories(detections):\n positive_categories = set()\n for d in detections:\n if d['conf'] >= options.confidence_threshold:\n positive_categories.add(d['category'])\n return sorted(positive_categories)\n\n # Local function for parallelization\n def render_image_no_gt(file_info):\n\n image_relative_path = file_info[0]\n max_conf = file_info[1]\n detections = file_info[2]\n\n detection_status = DetectionStatus.DS_UNASSIGNED\n if max_conf >= options.confidence_threshold:\n detection_status = DetectionStatus.DS_POSITIVE\n else:\n if options.include_almost_detections:\n if max_conf >= options.almost_detection_confidence_threshold:\n detection_status = DetectionStatus.DS_ALMOST\n else:\n detection_status = DetectionStatus.DS_NEGATIVE\n else:\n detection_status = DetectionStatus.DS_NEGATIVE\n\n if detection_status == DetectionStatus.DS_POSITIVE:\n if options.separate_detections_by_category:\n positive_categories = tuple(get_positive_categories(detections))\n res = detection_categories_to_results_name[positive_categories]\n else:\n res = 'detections'\n\n elif detection_status == DetectionStatus.DS_NEGATIVE:\n res = 'non_detections'\n else:\n assert detection_status == DetectionStatus.DS_ALMOST\n res = 'almost_detections'\n\n display_name = '<b>Result type</b>: {}, <b>Image</b>: {}, <b>Max conf</b>: {:0.3f}'.format(\n res, image_relative_path, max_conf)\n\n rendering_options = copy.copy(options)\n if detection_status == DetectionStatus.DS_ALMOST:\n rendering_options.confidence_threshold = rendering_options.almost_detection_confidence_threshold\n rendered_image_html_info = render_bounding_boxes(\n options.image_base_dir,\n image_relative_path,\n display_name,\n detections,\n res,\n detection_categories,\n classification_categories,\n rendering_options)\n\n image_result = None\n\n if len(rendered_image_html_info) > 
0:\n\n image_result = [[res, rendered_image_html_info]]\n\n for det in detections:\n\n if 'classifications' in det:\n\n # This is a list of [class,confidence] pairs, sorted by confidence\n classifications = det['classifications']\n top1_class_id = classifications[0][0]\n top1_class_name = classification_categories[top1_class_id]\n top1_class_score = classifications[0][1]\n\n # If we either don't have a confidence threshold, or we've met our\n # confidence threshold\n if (options.classification_confidence_threshold < 0) or \\\n (top1_class_score >= options.classification_confidence_threshold):\n image_result.append(['class_{}'.format(top1_class_name),\n rendered_image_html_info])\n else:\n image_result.append(['class_unreliable',\n rendered_image_html_info])\n\n # ...if this detection has classification info\n\n # ...for each detection\n\n return image_result\n\n # ...def render_image_no_gt(file_info):\n\n start_time = time.time()\n if options.parallelize_rendering:\n if options.parallelize_rendering_n_cores is None:\n pool = ThreadPool()\n else:\n print('Rendering images with {} workers'.format(options.parallelize_rendering_n_cores))\n pool = ThreadPool(options.parallelize_rendering_n_cores)\n rendering_results = list(tqdm(pool.imap(render_image_no_gt, files_to_render), total=len(files_to_render)))\n else:\n for file_info in tqdm(files_to_render):\n rendering_results.append(render_image_no_gt(file_info))\n elapsed = time.time() - start_time\n\n # Map all the rendering results in the list rendering_results into the\n # dictionary images_html\n image_rendered_count = 0\n for rendering_result in rendering_results:\n if rendering_result is None:\n continue\n image_rendered_count += 1\n for assignment in rendering_result:\n if 'class' in assignment[0]:\n has_classification_info = True\n images_html[assignment[0]].append(assignment[1])\n\n # Prepare the individual html image files\n image_counts = prepare_html_subpages(images_html, output_dir)\n\n if image_rendered_count == 0:\n seconds_per_image = 0.0\n else:\n seconds_per_image = elapsed/image_rendered_count\n\n print('Rendered {} images (of {}) in {} ({} per image)'.format(image_rendered_count,\n image_count,humanfriendly.format_timespan(elapsed),\n humanfriendly.format_timespan(seconds_per_image)))\n\n # Write index.html\n\n # We can't just sum these, because image_counts includes images in both their\n # detection and classification classes\n # total_images = sum(image_counts.values())\n total_images = 0\n for k in image_counts.keys():\n v = image_counts[k]\n if has_classification_info and k.startswith('class_'):\n continue\n total_images += v\n\n if options.allow_missing_images:\n if total_images != image_count:\n print('Warning: image_count is {}, total_images is {}'.format(total_images,image_count))\n else:\n assert total_images == image_count, \\\n 'Error: image_count is {}, total_images is {}'.format(total_images,image_count)\n\n almost_detection_string = ''\n if options.include_almost_detections:\n almost_detection_string = ' (&ldquo;almost detection&rdquo; threshold at {:.1%})'.format(\n options.almost_detection_confidence_threshold)\n\n index_page = \"\"\"<html>\\n{}\\n<body>\\n\n <h2>Visualization of results</h2>\\n\n <p>A sample of {} images (of {} total)FAILURE_PLACEHOLDER, annotated with detections above {:.1%} confidence{}.</p>\\n\n <h3>Sample images</h3>\\n\n <div class=\"contentdiv\">\\n\"\"\".format(\n style_header, image_count, len(detections_df), options.confidence_threshold,\n almost_detection_string)\n\n failure_string = 
''\n        if n_failures > 0:\n            failure_string = ' ({} failures)'.format(n_failures)\n        index_page = index_page.replace('FAILURE_PLACEHOLDER',failure_string)\n\n        def result_set_name_to_friendly_name(result_set_name):\n            friendly_name = ''\n            friendly_name = result_set_name.replace('_','-')\n            if friendly_name.startswith('detections-'):\n                friendly_name = friendly_name.replace('detections-', 'detections: ')\n            friendly_name = friendly_name.capitalize()\n            return friendly_name\n\n        for result_set_name in images_html.keys():\n\n            # Don't print classification classes here; we'll do that later with a slightly\n            # different structure\n            if has_classification_info and result_set_name.lower().startswith('class_'):\n                continue\n\n            filename = result_set_name + '.html'\n            label = result_set_name_to_friendly_name(result_set_name)\n            image_count = image_counts[result_set_name]\n            if total_images == 0:\n                image_fraction = -1\n            else:\n                image_fraction = image_count / total_images\n            index_page += '<a href=\"{}\">{}</a> ({}, {:.1%})<br/>\\n'.format(\n                filename,label,image_count,image_fraction)\n\n        index_page += '</div>\\n'\n\n        if has_classification_info:\n            index_page += '<h3>Images of detected classes</h3>'\n            index_page += '<p>The same image might appear under multiple classes if multiple species were detected.</p>\\n'\n            index_page += '<p>Classifications with confidence less than {:.1%} confidence are considered \"unreliable\".</p>\\n'.format(\n                options.classification_confidence_threshold)\n            index_page += '<div class=\"contentdiv\">\\n'\n\n            # Add links to all available classes\n            class_names = sorted(classification_categories.values())\n            if 'class_unreliable' in images_html.keys():\n                class_names.append('unreliable')\n\n            for cname in class_names:\n                ccount = len(images_html['class_{}'.format(cname)])\n                if ccount > 0:\n                    index_page += '<a href=\"class_{}.html\">{}</a> ({})<br/>\\n'.format(\n                        cname, cname.lower(), ccount)\n            index_page += '</div>\\n'\n\n        index_page += '</body></html>'\n        output_html_file = os.path.join(output_dir, 'index.html')\n        with open(output_html_file, 'w') as f:\n            f.write(index_page)\n\n        print('Finished writing html to {}'.format(output_html_file))\n\n        # os.startfile(output_html_file)\n\n    # ...if we do/don't have ground truth\n\n    ppresults.output_html_file = output_html_file\n    return ppresults\n\n# ...process_batch_results\n\n\n#%% Interactive driver(s)\n\nif False:\n\n    #%%\n\n    base_dir = r'D:\\wildlife_data\\bh'\n    options = PostProcessingOptions()\n    options.image_base_dir = base_dir\n    options.output_dir = os.path.join(base_dir, 'postprocessing_filtered')\n    options.api_output_filename_replacements = {} # {'20190430cameratraps\\\\':''}\n    options.ground_truth_filename_replacements = {'\\\\data\\\\blob\\\\':''}\n    options.api_output_file = os.path.join(base_dir, 'bh_5570_detections.filtered.csv')\n    options.ground_truth_json_file = os.path.join(base_dir, 'bh.json')\n    options.unlabeled_classes = ['human']\n\n    ppresults = process_batch_results(options)\n    # os.start(ppresults.output_html_file)\n\n\n#%% Command-line driver\n\ndef main():\n\n    options = PostProcessingOptions()\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\n        'api_output_file',\n        help='path to .json file produced by the batch inference API')\n    parser.add_argument(\n        'output_dir',\n        help='base directory for output')\n    parser.add_argument(\n        '--image_base_dir', default=options.image_base_dir,\n        help='base directory for images (optional, can compute statistics '\n             'without images)')\n    parser.add_argument(\n        '--ground_truth_json_file', 
default=options.ground_truth_json_file,\n help='ground truth labels (optional, can render detections without '\n 'ground truth), in the COCO Camera Traps format')\n parser.add_argument(\n '--confidence_threshold', type=float,\n default=options.confidence_threshold,\n help='Confidence threshold for statistics and visualization')\n parser.add_argument(\n '--target_recall', type=float, default=options.target_recall,\n help='Target recall (for statistics only)')\n parser.add_argument(\n '--num_images_to_sample', type=int,\n default=options.num_images_to_sample,\n help='number of images to visualize, -1 for all images (default: 500)')\n parser.add_argument(\n '--viz_target_width', type=int, default=options.viz_target_width,\n help='Output image width')\n parser.add_argument(\n '--random_output_sort', action='store_true',\n help='Sort output randomly (defaults to sorting by filename)')\n parser.add_argument(\n '--n_cores', type=int, default=1,\n help='Number of threads to use for rendering (default: 1)')\n\n if len(sys.argv[1:]) == 0:\n parser.print_help()\n parser.exit()\n\n args = parser.parse_args()\n args.sort_html_by_filename = (not args.random_output_sort)\n if args.n_cores != 1:\n assert (args.n_cores > 1), 'Illegal number of cores: {}'.format(args.n_cores)\n args.parallelize_rendering = True\n args.parallelize_rendering_n_cores = args.n_cores \n\n args_to_object(args, options)\n \n process_batch_results(options)\n\n\nif __name__ == '__main__':\n main()\n" ]
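The precision-at-target-recall logic in the file above (thresholds ascending, recalls non-increasing, take the last point whose recall still meets the target) can be exercised in isolation. A minimal, self-contained sketch with made-up labels and scores — not data or code from the repository above — using the same sklearn call:

import numpy as np
from sklearn.metrics import precision_recall_curve

# Toy ground truth and detector confidences (hypothetical values)
y_true = np.array([1, 0, 1, 1, 0, 1, 0, 0, 1, 0])
y_score = np.array([0.95, 0.9, 0.85, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2])

precisions, recalls, thresholds = precision_recall_curve(y_true, y_score)

# recalls are non-increasing, so the last index meeting the target recall is
# the operating point with the highest usable confidence threshold
target_recall = 0.9
above = np.where(recalls >= target_recall)[0]
precision_at_target = precisions[above[-1]] if above.size > 0 else 0.0
print('Precision at {:.0%} recall: {:.1%}'.format(target_recall, precision_at_target))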
[ [ "matplotlib.use", "sklearn.metrics.precision_recall_curve", "pandas.DataFrame", "matplotlib.pyplot.savefig", "numpy.append", "numpy.argmax", "numpy.mean", "sklearn.metrics.average_precision_score", "numpy.any", "matplotlib.pyplot.close", "numpy.savetxt", "numpy.array", "numpy.where", "numpy.sum", "numpy.zeros", "numpy.isclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
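Similarly, the SAS-URL handling in is_sas_url()/relative_sas_url() above boils down to percent-escaping the relative path and splicing it in ahead of the query token. A standalone sketch with a hypothetical folder-level URL; note that '%' must be escaped first, so the escapes it introduces are not re-escaped:

def relative_url(folder_url, relative_path):
    # Percent-escape the path; '%' is handled before '#' and ' '
    for src, dst in (('%', '%25'), ('#', '%23'), (' ', '%20')):
        relative_path = relative_path.replace(src, dst)
    base, token = folder_url.split('?', 1)
    return '{}/{}?{}'.format(base.rstrip('/'), relative_path.lstrip('/'), token)

url = 'https://account.blob.core.windows.net/container?sv=2019&sig=abc'
print(relative_url(url, 'site a/img #1.jpg'))
# -> https://account.blob.core.windows.net/container/site%20a/img%20%231.jpg?sv=2019&sig=abc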
FlyingOE/zipline
[ "220ffc54a8f4d4f5afdbab86db39f8f512083e53" ]
[ "tests/data/bundles/test_core.py" ]
[ "import os\n\nfrom nose_parameterized import parameterized\nimport pandas as pd\nfrom toolz import valmap\nimport toolz.curried.operator as op\n\nfrom zipline.assets.synthetic import make_simple_equity_info\nfrom zipline.data.bundles import UnknownBundle, from_bundle_ingest_dirname\nfrom zipline.data.bundles.core import _make_bundle_core\nfrom zipline.lib.adjustment import Float64Multiply\nfrom zipline.pipeline.loaders.synthetic import (\n make_bar_data,\n expected_bar_values_2d,\n)\nfrom zipline.testing import (\n subtest,\n str_to_seconds,\n)\nfrom zipline.testing.fixtures import WithInstanceTmpDir, ZiplineTestCase\nfrom zipline.testing.predicates import (\n assert_equal,\n assert_false,\n assert_in,\n assert_is,\n assert_is_instance,\n assert_is_none,\n assert_raises,\n assert_true,\n)\nfrom zipline.utils.cache import dataframe_cache\nfrom zipline.utils.functional import apply\nfrom zipline.utils.calendars import get_calendar\nimport zipline.utils.paths as pth\n\n\n_1_ns = pd.Timedelta(1, unit='ns')\n\n\nclass BundleCoreTestCase(WithInstanceTmpDir, ZiplineTestCase):\n def init_instance_fixtures(self):\n super(BundleCoreTestCase, self).init_instance_fixtures()\n (self.bundles,\n self.register,\n self.unregister,\n self.ingest,\n self.load,\n self.clean) = _make_bundle_core()\n self.environ = {'ZIPLINE_ROOT': self.instance_tmpdir.path}\n\n def test_register_decorator(self):\n @apply\n @subtest(((c,) for c in 'abcde'), 'name')\n def _(name):\n @self.register(name)\n def ingest(*args):\n pass\n\n assert_in(name, self.bundles)\n assert_is(self.bundles[name].ingest, ingest)\n\n self._check_bundles(set('abcde'))\n\n def test_register_call(self):\n def ingest(*args):\n pass\n\n @apply\n @subtest(((c,) for c in 'abcde'), 'name')\n def _(name):\n self.register(name, ingest)\n assert_in(name, self.bundles)\n assert_is(self.bundles[name].ingest, ingest)\n\n assert_equal(\n valmap(op.attrgetter('ingest'), self.bundles),\n {k: ingest for k in 'abcde'},\n )\n self._check_bundles(set('abcde'))\n\n def _check_bundles(self, names):\n assert_equal(set(self.bundles.keys()), names)\n\n for name in names:\n self.unregister(name)\n\n assert_false(self.bundles)\n\n def test_register_no_create(self):\n called = [False]\n\n @self.register('bundle', create_writers=False)\n def bundle_ingest(environ,\n asset_db_writer,\n minute_bar_writer,\n daily_bar_writer,\n adjustment_writer,\n calendar,\n cache,\n show_progress,\n output_dir):\n assert_is_none(asset_db_writer)\n assert_is_none(minute_bar_writer)\n assert_is_none(daily_bar_writer)\n assert_is_none(adjustment_writer)\n called[0] = True\n\n self.ingest('bundle', self.environ)\n assert_true(called[0])\n\n def test_ingest(self):\n start = pd.Timestamp('2014-01-06', tz='utc')\n end = pd.Timestamp('2014-01-10', tz='utc')\n trading_days = get_calendar('NYSE').all_sessions\n calendar = trading_days[trading_days.slice_indexer(start, end)]\n minutes = get_calendar('NYSE').minutes_for_sessions_in_range(\n calendar[0], calendar[-1]\n )\n\n sids = tuple(range(3))\n equities = make_simple_equity_info(\n sids,\n calendar[0],\n calendar[-1],\n )\n\n daily_bar_data = make_bar_data(equities, calendar)\n minute_bar_data = make_bar_data(equities, minutes)\n first_split_ratio = 0.5\n second_split_ratio = 0.1\n splits = pd.DataFrame.from_records([\n {\n 'effective_date': str_to_seconds('2014-01-08'),\n 'ratio': first_split_ratio,\n 'sid': 0,\n },\n {\n 'effective_date': str_to_seconds('2014-01-09'),\n 'ratio': second_split_ratio,\n 'sid': 1,\n },\n ])\n\n schedule = 
get_calendar('NYSE').schedule\n\n @self.register(\n 'bundle',\n calendar=calendar,\n opens=schedule.market_open[calendar[0]:calendar[-1]],\n closes=schedule.market_close[calendar[0]: calendar[-1]],\n )\n def bundle_ingest(environ,\n asset_db_writer,\n minute_bar_writer,\n daily_bar_writer,\n adjustment_writer,\n calendar,\n cache,\n show_progress,\n output_dir):\n assert_is(environ, self.environ)\n\n asset_db_writer.write(equities=equities)\n minute_bar_writer.write(minute_bar_data)\n daily_bar_writer.write(daily_bar_data)\n adjustment_writer.write(splits=splits)\n\n assert_is_instance(calendar, pd.DatetimeIndex)\n assert_is_instance(cache, dataframe_cache)\n assert_is_instance(show_progress, bool)\n\n self.ingest('bundle', environ=self.environ)\n bundle = self.load('bundle', environ=self.environ)\n\n assert_equal(set(bundle.asset_finder.sids), set(sids))\n\n columns = 'open', 'high', 'low', 'close', 'volume'\n\n actual = bundle.equity_minute_bar_reader.load_raw_arrays(\n columns,\n minutes[0],\n minutes[-1],\n sids,\n )\n\n for actual_column, colname in zip(actual, columns):\n assert_equal(\n actual_column,\n expected_bar_values_2d(minutes, equities, colname),\n msg=colname,\n )\n\n actual = bundle.equity_daily_bar_reader.load_raw_arrays(\n columns,\n calendar[0],\n calendar[-1],\n sids,\n )\n for actual_column, colname in zip(actual, columns):\n assert_equal(\n actual_column,\n expected_bar_values_2d(calendar, equities, colname),\n msg=colname,\n )\n adjustments_for_cols = bundle.adjustment_reader.load_adjustments(\n columns,\n calendar,\n pd.Index(sids),\n )\n for column, adjustments in zip(columns, adjustments_for_cols[:-1]):\n # iterate over all the adjustments but `volume`\n assert_equal(\n adjustments,\n {\n 2: [Float64Multiply(\n first_row=0,\n last_row=2,\n first_col=0,\n last_col=0,\n value=first_split_ratio,\n )],\n 3: [Float64Multiply(\n first_row=0,\n last_row=3,\n first_col=1,\n last_col=1,\n value=second_split_ratio,\n )],\n },\n msg=column,\n )\n\n # check the volume, the value should be 1/ratio\n assert_equal(\n adjustments_for_cols[-1],\n {\n 2: [Float64Multiply(\n first_row=0,\n last_row=2,\n first_col=0,\n last_col=0,\n value=1 / first_split_ratio,\n )],\n 3: [Float64Multiply(\n first_row=0,\n last_row=3,\n first_col=1,\n last_col=1,\n value=1 / second_split_ratio,\n )],\n },\n msg='volume',\n )\n\n @parameterized.expand([('clean',), ('load',)])\n def test_bundle_doesnt_exist(self, fnname):\n with assert_raises(UnknownBundle) as e:\n getattr(self, fnname)('ayy', environ=self.environ)\n\n assert_equal(e.exception.name, 'ayy')\n\n def test_load_no_data(self):\n # register but do not ingest data\n self.register('bundle', lambda *args: None)\n\n ts = pd.Timestamp('2014')\n\n with assert_raises(ValueError) as e:\n self.load('bundle', timestamp=ts, environ=self.environ)\n\n assert_in(\n \"no data for bundle 'bundle' on or before %s\" % ts,\n str(e.exception),\n )\n\n def _list_bundle(self):\n return {\n os.path.join(pth.data_path(['bundle', d], environ=self.environ))\n for d in os.listdir(\n pth.data_path(['bundle'], environ=self.environ),\n )\n }\n\n def _empty_ingest(self, _wrote_to=[]):\n \"\"\"Run the nth empty ingest.\n\n Returns\n -------\n wrote_to : str\n The timestr of the bundle written.\n \"\"\"\n if not self.bundles:\n @self.register('bundle',\n calendar=pd.DatetimeIndex([pd.Timestamp('2014')]))\n def _(environ,\n asset_db_writer,\n minute_bar_writer,\n daily_bar_writer,\n adjustment_writer,\n calendar,\n cache,\n show_progress,\n output_dir):\n 
_wrote_to.append(output_dir)\n\n        _wrote_to[:] = []\n        self.ingest('bundle', environ=self.environ)\n        assert_equal(len(_wrote_to), 1, msg='ingest was called more than once')\n        ingestions = self._list_bundle()\n        assert_in(\n            _wrote_to[0],\n            ingestions,\n            msg='output_dir was not in the bundle directory',\n        )\n        return _wrote_to[0]\n\n    def test_clean_keep_last(self):\n        first = self._empty_ingest()\n\n        assert_equal(\n            self.clean('bundle', keep_last=1, environ=self.environ),\n            set(),\n        )\n        assert_equal(\n            self._list_bundle(),\n            {first},\n            msg='directory should not have changed',\n        )\n\n        second = self._empty_ingest()\n        assert_equal(\n            self._list_bundle(),\n            {first, second},\n            msg='two ingestions are not present',\n        )\n        assert_equal(\n            self.clean('bundle', keep_last=1, environ=self.environ),\n            {first},\n        )\n        assert_equal(\n            self._list_bundle(),\n            {second},\n            msg='first ingestion was not removed with keep_last=1',\n        )\n\n        third = self._empty_ingest()\n        fourth = self._empty_ingest()\n        fifth = self._empty_ingest()\n\n        assert_equal(\n            self._list_bundle(),\n            {second, third, fourth, fifth},\n            msg='larger set of ingestions did not happen correctly',\n        )\n\n        assert_equal(\n            self.clean('bundle', keep_last=2, environ=self.environ),\n            {second, third},\n        )\n\n        assert_equal(\n            self._list_bundle(),\n            {fourth, fifth},\n            msg='keep_last=2 did not remove the correct number of ingestions',\n        )\n\n    @staticmethod\n    def _ts_of_run(run):\n        return from_bundle_ingest_dirname(run.rsplit(os.path.sep, 1)[-1])\n\n    def test_clean_before_after(self):\n        first = self._empty_ingest()\n        assert_equal(\n            self.clean(\n                'bundle',\n                before=self._ts_of_run(first),\n                environ=self.environ,\n            ),\n            set(),\n        )\n        assert_equal(\n            self._list_bundle(),\n            {first},\n            msg='directory should not have changed (before)',\n        )\n\n        assert_equal(\n            self.clean(\n                'bundle',\n                after=self._ts_of_run(first),\n                environ=self.environ,\n            ),\n            set(),\n        )\n        assert_equal(\n            self._list_bundle(),\n            {first},\n            msg='directory should not have changed (after)',\n        )\n\n        assert_equal(\n            self.clean(\n                'bundle',\n                before=self._ts_of_run(first) + _1_ns,\n                environ=self.environ,\n            ),\n            {first},\n        )\n        assert_equal(\n            self._list_bundle(),\n            set(),\n            msg='directory should now be empty (before)',\n        )\n\n        second = self._empty_ingest()\n        assert_equal(\n            self.clean(\n                'bundle',\n                after=self._ts_of_run(second) - _1_ns,\n                environ=self.environ,\n            ),\n            {second},\n        )\n        assert_equal(\n            self._list_bundle(),\n            set(),\n            msg='directory should now be empty (after)',\n        )\n\n        third = self._empty_ingest()\n        fourth = self._empty_ingest()\n        fifth = self._empty_ingest()\n        sixth = self._empty_ingest()\n\n        assert_equal(\n            self._list_bundle(),\n            {third, fourth, fifth, sixth},\n            msg='larger set of ingestions did not happen correctly',\n        )\n\n        assert_equal(\n            self.clean(\n                'bundle',\n                before=self._ts_of_run(fourth),\n                after=self._ts_of_run(fifth),\n                environ=self.environ,\n            ),\n            {third, sixth},\n        )\n\n        assert_equal(\n            self._list_bundle(),\n            {fourth, fifth},\n            msg='did not strip first and last directories',\n        )\n" ]
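The keep_last semantics that test_clean_keep_last() asserts — ingestion directories are timestamped, and clean(keep_last=n) removes everything but the n newest — can be summarized in a few lines. This is a sketch of the contract the test checks, not zipline's implementation:

def clean_keep_last(ingestions, keep_last):
    # Ingestion dirs are named by timestamp, so lexicographic order is
    # chronological; keep the newest `keep_last`, remove the rest
    ordered = sorted(ingestions)
    kept = set(ordered[-keep_last:]) if keep_last > 0 else set()
    removed = set(ordered) - kept
    return removed, kept

removed, kept = clean_keep_last(['2014-01-01', '2014-01-02', '2014-01-03'], 2)
assert removed == {'2014-01-01'}
assert kept == {'2014-01-02', '2014-01-03'}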
[ [ "pandas.Timestamp", "pandas.Index", "pandas.Timedelta" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wonambi-python/wonambi
[ "4e2834cdd799576d1a231ecb48dfe4da1364fe3a" ]
[ "wonambi/ioeeg/bci2000.py" ]
[ "from os import SEEK_CUR, SEEK_SET, SEEK_END\nfrom re import search, finditer, match\nfrom datetime import datetime\n\nfrom numpy import (fromfile,\n                   frombuffer,\n                   asmatrix,\n                   array,\n                   arange,\n                   c_,\n                   diff,\n                   empty,\n                   hstack,\n                   ndarray,\n                   NaN,\n                   vstack,\n                   where,\n                   dtype,\n                   float64,\n                   int32,\n                   uint8,\n                   )\n\nSTATEVECTOR = ['Name', 'Length', 'Value', 'ByteLocation', 'BitLocation']\n\n\nclass BCI2000:\n    \"\"\"Basic class to read the data.\n\n    Parameters\n    ----------\n    filename : path to file\n        the name of the filename or directory\n    \"\"\"\n    def __init__(self, filename):\n        self.filename = filename\n\n    def return_hdr(self):\n        \"\"\"Return the header for further use.\n\n        Returns\n        -------\n        subj_id : str\n            subject identification code\n        start_time : datetime\n            start time of the dataset\n        s_freq : float\n            sampling frequency\n        chan_name : list of str\n            list of all the channels\n        n_samples : int\n            number of samples in the dataset\n        orig : dict\n            additional information taken directly from the header\n\n        Notes\n        -----\n        As far as I can tell, BCI2000 doesn't have channel labels, so we use\n        dummies starting at chan001 (more consistent with Matlab 1-based\n        indexing...)\n        \"\"\"\n        orig = {}\n        orig = _read_header(self.filename)\n\n        nchan = int(orig['SourceCh'])\n        chan_name = ['ch{:03d}'.format(i + 1) for i in range(nchan)]\n        chan_dtype = dtype(orig['DataFormat'])\n        self.statevector_len = int(orig['StatevectorLen'])\n\n        s_freq = orig['Parameter']['SamplingRate']\n        if s_freq.endswith('Hz'):\n            s_freq = s_freq.replace('Hz', '')\n        s_freq = int(s_freq.strip())\n        self.s_freq = s_freq\n\n        storagetime = orig['Parameter']['StorageTime'].replace('%20', ' ')\n        try: # newer version\n            start_time = datetime.strptime(storagetime, '%a %b %d %H:%M:%S %Y')\n        except ValueError:\n            try:\n                start_time = datetime.strptime(storagetime, '%Y-%m-%dT%H:%M:%S')\n            except ValueError:\n                start_time = None\n\n        subj_id = orig['Parameter']['SubjectName']\n\n        self.dtype = dtype([(chan, chan_dtype) for chan in chan_name] +\n                           [('statevector', 'S', self.statevector_len)])\n\n        # compute n_samples based on file size - header\n        with open(self.filename, 'rb') as f:\n            f.seek(0, SEEK_END)\n            EOData = f.tell()\n        n_samples = int((EOData - int(orig['HeaderLen'])) / self.dtype.itemsize)\n\n        self.s_freq = s_freq\n        self.header_len = int(orig['HeaderLen'])\n        self.n_samples = n_samples\n        self.statevectors = _prepare_statevectors(orig['StateVector'])\n        # TODO: a better way to parse header\n        self.gain = array([float(x) for x in orig['Parameter']['SourceChGain'].split(' ')[1:]])\n\n        return subj_id, start_time, s_freq, chan_name, n_samples, orig\n\n    def return_dat(self, chan, begsam, endsam):\n        \"\"\"Return the data as 2D numpy.ndarray.\n\n        Parameters\n        ----------\n        chan : int or list\n            index (indices) of the channels to read\n        begsam : int\n            index of the first sample\n        endsam : int\n            index of the last sample\n\n        Returns\n        -------\n        numpy.ndarray\n            A 2d matrix, with dimension chan X samples\n        \"\"\"\n        dat_begsam = max(begsam, 0)\n        dat_endsam = min(endsam, self.n_samples)\n        dur = dat_endsam - dat_begsam\n\n        dtype_onlychan = dtype({k: v for k, v in self.dtype.fields.items() if v[0].kind != 'S'})\n\n        # make sure we read some data at least, otherwise segfault\n        if dat_begsam < self.n_samples and dat_endsam > 0:\n\n            with open(self.filename,'rb') as f:\n                f.seek(self.header_len, SEEK_SET) # skip header\n\n                f.seek(self.dtype.itemsize * dat_begsam, SEEK_CUR)\n                dat = fromfile(f, dtype=self.dtype, count=dur)\n\n            dat = ndarray(dat.shape, dtype_onlychan, dat, 0, 
dat.strides).view((dtype_onlychan[0], len(dtype_onlychan.names))).T\n\n else:\n n_chan = len(dtype_onlychan.names)\n dat = empty((n_chan, 0))\n\n if begsam < 0:\n\n pad = empty((dat.shape[0], 0 - begsam))\n pad.fill(NaN)\n dat = c_[pad, dat]\n\n if endsam >= self.n_samples:\n\n pad = empty((dat.shape[0], endsam - self.n_samples))\n pad.fill(NaN)\n dat = c_[dat, pad]\n\n return dat[chan, :] * self.gain[chan][:, None] # apply gain\n\n def return_markers(self, state='MicromedCode'):\n \"\"\"Return all the markers (also called triggers or events).\n\n Returns\n -------\n list of dict\n where each dict contains 'name' as str, 'start' and 'end' as float\n in seconds from the start of the recordings, and 'chan' as list of\n str with the channels involved (if not of relevance, it's None).\n\n Raises\n ------\n FileNotFoundError\n when it cannot read the events for some reason (don't use other\n exceptions).\n \"\"\"\n markers = []\n try:\n all_states = self._read_states()\n except ValueError: # cryptic error when reading states\n return markers\n\n try:\n x = all_states[state]\n except KeyError:\n return markers\n\n markers = []\n i_mrk = hstack((0, where(diff(x))[0] + 1, len(x)))\n for i0, i1 in zip(i_mrk[:-1], i_mrk[1:]):\n marker = {'name': str(x[i0]),\n 'start': (i0) / self.s_freq,\n 'end': i1 / self.s_freq,\n }\n markers.append(marker)\n\n return markers\n\n def _read_states(self):\n\n all_states = []\n with open(self.filename,'rb') as f:\n f.seek(self.header_len, SEEK_SET) # skip header\n StatevectorOffset = self.dtype.itemsize - self.statevector_len\n\n for i in range(self.n_samples):\n f.seek(StatevectorOffset, SEEK_CUR)\n raw_statevector = f.read(self.statevector_len)\n all_states.append(frombuffer(raw_statevector, dtype='<u1'))\n\n all_states = vstack(all_states).T\n\n states = {}\n for statename, statedef in self.statevectors.items():\n states[statename] = array(statedef['mult'] * asmatrix(all_states[statedef['slice'], :] & statedef['mask']), dtype=int32).squeeze()\n\n return states\n\n\ndef _read_header(filename):\n \"\"\"It's a pain to parse the header. It might be better to use the cpp code\n but I would need to include it here.\n \"\"\"\n header = _read_header_text(filename)\n first_row = header[0]\n EXTRA_ROWS = 3 # drop DefaultValue1 LowRange1 HighRange1\n\n hdr = {}\n for group in finditer('(\\w*)= ([\\w.]*)', first_row):\n hdr[group.group(1)] = group.group(2)\n\n if first_row.startswith('BCI2000V'):\n VERSION = hdr['BCI2000V']\n\n else:\n VERSION = '1'\n hdr['DataFormat'] = 'int16'\n\n for row in header[1:]:\n if row.startswith('['): # remove '[ ... 
Definition ]'\n section = row[2:-14].replace(' ', '')\n\n if section == 'StateVector':\n hdr[section] = []\n else:\n hdr[section] = {} # defaultdict(dict)\n continue\n\n if row.strip() == '':\n continue\n\n elif section == 'StateVector':\n statevector = {key: value for key, value in list(zip(STATEVECTOR, row.split(' ')))}\n hdr[section].append(statevector)\n\n else:\n group = match('(?P<subsection>[\\w:%]*) (?P<format>\\w*) (?P<key>\\w*)= (?P<value>.*) // ', row)\n\n if group is None:\n group = match('(?P<subsection>[\\w:%]*) (?P<format>\\w*) (?P<key>\\w*)= (?P<value>.*)', row) # For Group without comment\n if group is None:\n print(\"Cannot parse row:\",row)\n continue\n\n onerow = group.groupdict()\n\n values = onerow['value'].split(' ')\n if len(values) > EXTRA_ROWS:\n value = ' '.join(onerow['value'].split(' ')[:-EXTRA_ROWS])\n else:\n value = ' '.join(values)\n\n hdr[section][onerow['key']] = value # similar to matlab's output\n\n return hdr\n\n\ndef _read_header_length(filename):\n with open(filename,'rb') as f:\n firstchar = f.read(100) # should be enough to read the HeaderLen\n found = search('HeaderLen= (\\d*) ', firstchar.decode())\n HeaderLen = int(found.group(1))\n\n return HeaderLen\n\n\ndef _read_header_text(filename):\n HeaderLen = _read_header_length(filename)\n with open(filename,'rb') as f:\n header = f.read(HeaderLen).decode().split('\\r\\n')\n\n return header\n\n\ndef _prepare_statevectors(sv):\n\n statedefs = {}\n\n for v in sv:\n startbyte = int(v['ByteLocation'])\n startbit = int(v['BitLocation'])\n nbits = int(v['Length'])\n nbytes = (startbit + nbits) // 8\n if (startbit + nbits) % 8:\n nbytes += 1\n extrabits = int(nbytes * 8) - nbits - startbit;\n startmask = 255 & (255 << startbit)\n endmask = 255 & (255 >> extrabits)\n div = (1 << startbit);\n v['slice'] = slice(startbyte, startbyte + nbytes)\n v['mask'] = array([255] * nbytes, dtype=uint8)\n v['mask'][0] &= startmask\n v['mask'][-1] &= endmask\n v['mask'].shape = (nbytes, 1)\n v['mult'] = asmatrix(256.0 ** arange(nbytes, dtype=float64) / float(div))\n statedefs[v['Name']] = v\n\n return statedefs\n" ]
[ [ "numpy.fromfile", "numpy.arange", "numpy.ndarray", "numpy.empty", "numpy.dtype", "numpy.asmatrix", "numpy.frombuffer", "numpy.diff", "numpy.array", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
aganostosrage/AI-based-stock-prediction
[ "e8d5599be97d3a05fbee1d727a6aba774b8a1534" ]
[ "User/views.py" ]
[ "from django.shortcuts import render, redirect\n#from .models import DoctorReg, predictions, Regdb\nfrom django.contrib import messages\nfrom django.contrib.auth.models import User, auth\nimport pandas as pd\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeRegressor\n\n\n# Create your views here.\n\ndef index(request):\n return render(request, 'index.html')\n\n\ndef register(request):\n if request.method == 'POST':\n first_name = request.POST['first_name']\n last_name = request.POST['last_name']\n username = request.POST['username']\n password1 = request.POST['password1']\n password2 = request.POST['password2']\n email = request.POST['email']\n\n if password1 == password2:\n if User.objects.filter(username=username).exists():\n messages.info(request, 'Username Taken')\n return redirect('register')\n elif User.objects.filter(email=email).exists():\n messages.info(request, 'Email Taken')\n return redirect('register')\n else:\n user = User.objects.create_user(username=username, password=password1, email=email,\n first_name=first_name, last_name=last_name)\n user.save();\n print('user created')\n return redirect('login')\n\n else:\n messages.info(request, 'password not matching')\n return redirect('register')\n return redirect('/')\n else:\n return render(request, 'register.html')\n\n\n\ndef login(request):\n if request.method == 'POST':\n #v = DoctorReg.objects.all()\n username = request.POST['username']\n password = request.POST['password']\n\n user = auth.authenticate(username=username, password=password)\n\n if user is not None:\n auth.login(request, user)\n return render(request, 'data.html')\n else:\n messages.info(request, 'invalid credentials')\n return redirect('login')\n else:\n return render(request, 'login.html')\n\ndef data(request):\n return render(request,\"data.html\")\n\n\ndef predict(request):\n if (request.method == 'POST'):\n open = request.POST['open']\n high = request.POST['high']\n low= request.POST['low']\n last = request.POST['last']\n close = request.POST['close']\n trade=request.POST['trade']\n\n df = pd.read_csv(r\"static/datasets/Stock.csv\")\n df.dropna(inplace=True)\n df.isnull().sum()\n X_train = df[['Open','High','Low','Last','Close','Total Trade Quantity']]\n\n Y_train = df[['Turnover (Lacs)']]\n tree = DecisionTreeRegressor()\n tree.fit(X_train, Y_train)\n\n prediction = tree.predict([[open,high,low,last,close,trade]])\n\n return render(request, 'predict.html',\n {\"data\": prediction, 'open': open, 'high': high,\n 'close': close, 'last': last,\"low\":low,'trade':trade\n })\n\n\n else:\n return render(request, 'predict.html')\n\ndef logout(request):\n return render(request,\"logout.html\")" ]
[ [ "pandas.read_csv", "sklearn.tree.DecisionTreeRegressor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
mortonne/PyMVPA
[ "98644c5cd9733edd39fac746ea7cf67398674645", "98644c5cd9733edd39fac746ea7cf67398674645", "98644c5cd9733edd39fac746ea7cf67398674645", "98644c5cd9733edd39fac746ea7cf67398674645", "98644c5cd9733edd39fac746ea7cf67398674645", "98644c5cd9733edd39fac746ea7cf67398674645", "98644c5cd9733edd39fac746ea7cf67398674645" ]
[ "mvpa2/misc/data_generators.py", "mvpa2/misc/plot/topo.py", "mvpa2/tests/test_misc_plot.py", "mvpa2/clfs/mass.py", "mvpa2/testing/datasets.py", "mvpa2/tests/test_perturbsensana.py", "mvpa2/support/scipy/stats.py" ]
[ "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the PyMVPA package for the\n# copyright and license terms.\n#\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Miscellaneous data generators for unittests and demos\"\"\"\n\n__docformat__ = \"restructuredtext\"\n\nimport numpy as np\n\nfrom mvpa2.base.dataset import vstack\nfrom mvpa2.datasets.base import dataset_wizard, Dataset\nfrom mvpa2.misc.fx import double_gamma_hrf, single_gamma_hrf\nfrom mvpa2.misc.fx import get_random_rotation\nfrom mvpa2.misc.neighborhood import IndexQueryEngine\nfrom mvpa2.misc.support import Event\n\nif __debug__:\n from mvpa2.base import debug\n\n\ndef multiple_chunks(func, n_chunks, *args, **kwargs):\n \"\"\"Replicate datasets multiple times raising different chunks\n\n Given some randomized (noisy) generator of a dataset with a single\n chunk call generator multiple times and place results into a\n distinct chunks.\n\n Returns\n -------\n ds : `mvpa2.datasets.base.Dataset`\n \"\"\"\n dss = []\n for chunk in range(n_chunks):\n ds_ = func(*args, **kwargs)\n # might not have chunks at all\n if not \"chunks\" in ds_.sa:\n ds_.sa[\"chunks\"] = np.repeat(chunk + 1, ds_.nsamples)\n else:\n ds_.sa.chunks[:] = chunk + 1\n dss.append(ds_)\n\n return vstack(dss)\n\n\ndef dumb_feature_dataset():\n \"\"\"Create a very simple dataset with 2 features and 3 labels\"\"\"\n data = [\n [1, 0],\n [1, 1],\n [2, 0],\n [2, 1],\n [3, 0],\n [3, 1],\n [4, 0],\n [4, 1],\n [5, 0],\n [5, 1],\n [6, 0],\n [6, 1],\n [7, 0],\n [7, 1],\n [8, 0],\n [8, 1],\n [9, 0],\n [9, 1],\n [10, 0],\n [10, 1],\n [11, 0],\n [11, 1],\n [12, 0],\n [12, 1],\n ]\n regs = ([1] * 8) + ([2] * 8) + ([3] * 8)\n\n return dataset_wizard(\n samples=np.array(data), targets=regs, chunks=list(range(len(regs)))\n )\n\n\ndef dumb_feature_binary_dataset():\n \"\"\"Very simple binary (2 labels) dataset\"\"\"\n data = [\n [1, 0],\n [1, 1],\n [2, 0],\n [2, 1],\n [3, 0],\n [3, 1],\n [4, 0],\n [4, 1],\n [5, 0],\n [5, 1],\n [6, 0],\n [6, 1],\n [7, 0],\n [7, 1],\n [8, 0],\n [8, 1],\n [9, 0],\n [9, 1],\n [10, 0],\n [10, 1],\n [11, 0],\n [11, 1],\n [12, 0],\n [12, 1],\n ]\n regs = ([0] * 12) + ([1] * 12)\n\n return dataset_wizard(\n samples=np.array(data), targets=regs, chunks=list(range(len(regs)))\n )\n\n\ndef normal_feature_dataset(\n perlabel=50,\n nlabels=2,\n nfeatures=4,\n nchunks=5,\n means=None,\n nonbogus_features=None,\n snr=3.0,\n normalize=True,\n):\n \"\"\"Generate a univariate dataset with normal noise and specified means.\n\n Could be considered to be a generalization of\n `pure_multivariate_signal` where means=[ [0,1], [1,0] ].\n\n Specify either means or `nonbogus_features` so means get assigned\n accordingly. 
If neither `means` nor `nonbogus_features` are\n provided, data will be pure noise and no per-label information.\n\n Parameters\n ----------\n perlabel : int, optional\n Number of samples per each label\n nlabels : int, optional\n Number of labels in the dataset\n nfeatures : int, optional\n Total number of features (including bogus features which carry\n no label-related signal)\n nchunks : int, optional\n Number of chunks (perlabel should be multiple of nchunks)\n means : None or ndarray of (nlabels, nfeatures) shape\n Specified means for each of features (columns) for all labels (rows).\n nonbogus_features : None or list of int\n Indexes of non-bogus features (1 per label).\n snr : float, optional\n Signal-to-noise ration assuming that signal has std 1.0 so we\n just divide random normal noise by snr\n normalize : bool, optional\n Divide by max(abs()) value to bring data into [-1, 1] range.\n \"\"\"\n\n data = np.random.standard_normal((perlabel * nlabels, nfeatures))\n if snr != 0:\n data /= np.sqrt(snr)\n if means is None and nonbogus_features is not None:\n if len(nonbogus_features) != nlabels:\n raise ValueError(\n \"Provide as many nonbogus features as many labels you have\"\n )\n means = np.zeros((len(nonbogus_features), nfeatures))\n # pure multivariate -- single bit per feature\n for i, nbf in enumerate(nonbogus_features):\n means[i, nbf] = 1.0\n if means is not None and snr != 0:\n # add mean\n data += np.repeat(np.array(means, ndmin=2), perlabel, axis=0)\n if normalize:\n # bring it 'under 1', since otherwise some classifiers have difficulties\n # during optimization\n data = 1.0 / (np.max(np.abs(data))) * data\n labels = np.concatenate([np.repeat(\"L%d\" % i, perlabel) for i in range(nlabels)])\n chunks = np.concatenate(\n [np.repeat(list(range(nchunks)), perlabel // nchunks) for i in range(nlabels)]\n )\n ds = dataset_wizard(data, targets=labels, chunks=chunks)\n\n # If nonbogus was provided -- assign .a and .fa accordingly\n if nonbogus_features is not None:\n ds.fa[\"nonbogus_targets\"] = np.array([None] * nfeatures)\n ds.fa.nonbogus_targets[nonbogus_features] = [\"L%d\" % i for i in range(nlabels)]\n ds.a[\"nonbogus_features\"] = nonbogus_features\n ds.a[\"bogus_features\"] = [\n x for x in range(nfeatures) if not x in nonbogus_features\n ]\n return ds\n\n\ndef pure_multivariate_signal(patterns, signal2noise=1.5, chunks=None, targets=None):\n \"\"\"Create a 2d dataset with a clear purely multivariate signal.\n\n This is known is the XOR problem.\n\n ::\n\n %%%%%%%%%\n % O % X %\n %%%%%%%%%\n % X % O %\n %%%%%%%%%\n\n Parameters\n ----------\n patterns: int\n Number of data points in each of the four dot clouds\n signal2noise: float, optional\n Univariate signal pedestal.\n chunks: array, optional\n Vector for chunk labels for all generated samples.\n targets: list, optional\n Length-2 sequence of target values for both classes. 
If None,\n [0, 1] is used.\n \"\"\"\n if targets is None:\n targets = [0, 1]\n\n # start with noise\n data = np.random.normal(size=(4 * patterns, 2))\n\n # add signal\n data[: 2 * patterns, 1] += signal2noise\n\n data[2 * patterns : 4 * patterns, 1] -= signal2noise\n data[:patterns, 0] -= signal2noise\n data[2 * patterns : 3 * patterns, 0] -= signal2noise\n data[patterns : 2 * patterns, 0] += signal2noise\n data[3 * patterns : 4 * patterns, 0] += signal2noise\n\n # two conditions\n regs = np.array(\n (targets[0:1] * patterns)\n + (targets[1:2] * 2 * patterns)\n + (targets[0:1] * patterns)\n )\n\n if chunks is None:\n chunks = list(range(len(data)))\n return dataset_wizard(samples=data, targets=regs, chunks=chunks)\n\n\ndef get_mv_pattern(s2n):\n \"\"\"Simple multivariate dataset\"\"\"\n return multiple_chunks(pure_multivariate_signal, 6, 5, s2n, 1)\n\n\ndef wr1996(size=200):\n \"\"\"Generate '6d robot arm' dataset (Williams and Rasmussen 1996)\n\n Was originally created in order to test the correctness of the\n implementation of kernel ARD. For full details see:\n http://www.gaussianprocess.org/gpml/code/matlab/doc/regression.html#ard\n\n x_1 picked randomly in [-1.932, -0.453]\n x_2 picked randomly in [0.534, 3.142]\n r_1 = 2.0\n r_2 = 1.3\n f(x_1,x_2) = r_1 cos (x_1) + r_2 cos(x_1 + x_2) + N(0,0.0025)\n etc.\n\n Expected relevances:\n ell_1 1.804377\n ell_2 1.963956\n ell_3 8.884361\n ell_4 34.417657\n ell_5 1081.610451\n ell_6 375.445823\n sigma_f 2.379139\n sigma_n 0.050835\n \"\"\"\n intervals = np.array([[-1.932, -0.453], [0.534, 3.142]])\n r = np.array([2.0, 1.3])\n x = np.random.rand(size, 2)\n x *= np.array(intervals[:, 1] - intervals[:, 0])\n x += np.array(intervals[:, 0])\n if __debug__:\n for i in range(2):\n debug(\n \"DG\", \"%d columnt Min: %g Max: %g\" % (i, x[:, i].min(), x[:, i].max())\n )\n y = r[0] * np.cos(x[:, 0] + r[1] * np.cos(x.sum(1))) + np.random.randn(\n size\n ) * np.sqrt(0.0025)\n y -= y.mean()\n x34 = x + np.random.randn(size, 2) * 0.02\n x56 = np.random.randn(size, 2)\n x = np.hstack([x, x34, x56])\n return dataset_wizard(samples=x, targets=y)\n\n\ndef sin_modulated(n_instances, n_features, flat=False, noise=0.4):\n \"\"\"Generate a (quite) complex multidimensional non-linear dataset\n\n Used for regression testing. 
In the data label is a sin of a x^2 +\n uniform noise\n \"\"\"\n if flat:\n data = np.arange(0.0, 1.0, 1.0 / n_instances) * np.pi\n data.resize(n_instances, n_features)\n else:\n data = np.random.rand(n_instances, n_features) * np.pi\n label = np.sin((data ** 2).sum(1)).round()\n label += np.random.rand(label.size) * noise\n return dataset_wizard(samples=data, targets=label)\n\n\ndef chirp_linear(\n n_instances, n_features=4, n_nonbogus_features=2, data_noise=0.4, noise=0.1\n):\n \"\"\"Generates simple dataset for linear regressions\n\n Generates chirp signal, populates n_nonbogus_features out of\n n_features with it with different noise level and then provides\n signal itself with additional noise as labels\n \"\"\"\n x = np.linspace(0, 1, n_instances)\n y = np.sin((10 * np.pi * x ** 2))\n\n data = np.random.normal(size=(n_instances, n_features)) * data_noise\n for i in range(n_nonbogus_features):\n data[:, i] += y[:]\n\n labels = y + np.random.normal(size=(n_instances,)) * noise\n\n return dataset_wizard(samples=data, targets=labels)\n\n\ndef linear_awgn(size=10, intercept=0.0, slope=0.4, noise_std=0.01, flat=False):\n \"\"\"Generate a dataset from a linear function with AWGN\n (Added White Gaussian Noise).\n\n It can be multidimensional if 'slope' is a vector. If flat is True\n (in 1 dimesion) generate equally spaces samples instead of random\n ones. This is useful for the test phase.\n \"\"\"\n dimensions = 1\n if isinstance(slope, np.ndarray):\n dimensions = slope.size\n\n if flat and dimensions == 1:\n x = np.linspace(0, 1, size)[:, np.newaxis]\n else:\n x = np.random.rand(size, dimensions)\n\n y = (\n np.dot(x, slope)[:, np.newaxis]\n + (np.random.randn(*(x.shape[0], 1)) * noise_std)\n + intercept\n )\n\n return dataset_wizard(samples=x, targets=y)\n\n\ndef noisy_2d_fx(size_per_fx, dfx, sfx, center, noise_std=1):\n \"\"\"Yet another generator of random dataset\"\"\"\n # used in projection example\n x = []\n y = []\n labels = []\n for fx in sfx:\n nx = np.random.normal(size=size_per_fx)\n ny = fx(nx) + np.random.normal(size=nx.shape, scale=noise_std)\n x.append(nx)\n y.append(ny)\n\n # whenever larger than first function value\n labels.append(np.array(ny < dfx(nx), dtype=\"int\"))\n\n samples = np.array((np.hstack(x), np.hstack(y))).squeeze().T\n labels = np.hstack(labels).squeeze().T\n\n samples += np.array(center)\n\n return dataset_wizard(samples=samples, targets=labels)\n\n\ndef linear1d_gaussian_noise(\n size=100, slope=0.5, intercept=1.0, x_min=-2.0, x_max=3.0, sigma=0.2\n):\n \"\"\"A straight line with some Gaussian noise.\"\"\"\n x = np.linspace(start=x_min, stop=x_max, num=size)\n noise = np.random.randn(size) * sigma\n y = x * slope + intercept + noise\n return dataset_wizard(samples=x[:, None], targets=y)\n\n\ndef autocorrelated_noise(\n ds, sr, cutoff, lfnl=3.0, bord=10, hfnl=None, add_baseline=True\n):\n \"\"\"Generate a dataset with samples being temporally autocorrelated noise.\n\n Parameters\n ----------\n ds : Dataset\n Source dataset whose mean samples serves as the pedestal of the new noise\n samples. All attributes of this dataset will also go into the generated\n one.\n sr : float\n Sampling rate (in Hz) of the samples in the dataset.\n cutoff : float\n Cutoff frequency of the low-pass butterworth filter.\n bord : int\n Order of the butterworth filter that is applied for low-pass\n filtering.\n lfnl : float\n Low frequency noise level in percent signal (per feature).\n hfnl : float or None\n High frequency noise level in percent signal (per feature). 
If None, no\n HF noise is added.\n \"\"\"\n from scipy.signal import butter, lfilter\n\n # something to play with\n fds = ds.copy(deep=False)\n\n # compute the pedestal\n msample = fds.samples.mean(axis=0)\n\n # noise/signal amplitude relative to each feature mean signal\n noise_amps = msample * (lfnl / 100.0)\n\n # generate gaussian noise for the full dataset\n nsamples = np.random.standard_normal(fds.samples.shape)\n # scale per each feature\n nsamples *= noise_amps\n\n # nyquist frequency\n nf = sr / 2.0\n\n # along samples low-pass filtering\n fb, fa = butter(bord, cutoff / nf)\n nsamples = lfilter(fb, fa, nsamples, axis=0)\n\n # add the pedestal\n if add_baseline:\n nsamples += msample\n\n # HF noise\n if hfnl is not None:\n noise_amps = msample * (hfnl / 100.0)\n nsamples += np.random.standard_normal(nsamples.shape) * noise_amps\n\n fds.samples = nsamples\n return fds\n\n\ndef random_affine_transformation(ds, scale_fac=100.0, shift_fac=10.0):\n \"\"\"Distort a dataset by random scale, shift, and rotation.\n\n The original data samples are transformed by applying a random rotation,\n shifting by a random vector (randomly selected, scaled input sample), and\n scaled by a random factor (randomly selected input feature values, scaled\n by an additional factor). The effective transformation values are stored in\n the output dataset's attribute collection as 'random_rotation',\n 'random_shift', and 'random_scale' respectively.\n\n Parameters\n ----------\n ds : Dataset\n Input dataset. Its sample and features attributes will be assigned to the\n output dataset.\n scale_fac : float\n Factor by which the randomly selected value for data scaling is scaled\n itself.\n shift_fac : float\n Factor by which the randomly selected shift vector is scaled.\n \"\"\"\n rndidx = np.random.randint\n R = get_random_rotation(ds.nfeatures)\n samples = ds.samples\n # reusing random data from dataset itself\n random_scale = samples[rndidx(len(ds)), rndidx(ds.nfeatures)] * scale_fac\n random_shift = samples[rndidx(len(ds))] * shift_fac\n samples = np.dot(samples, R) * random_scale + random_shift\n return Dataset(\n samples,\n sa=ds.sa,\n fa=ds.fa,\n a={\n \"random_rotation\": R,\n \"random_scale\": random_scale,\n \"random_shift\": random_shift,\n },\n )\n\n\ndef simple_hrf_dataset(\n events=None,\n hrf_gen=lambda t: double_gamma_hrf(t) - single_gamma_hrf(t, 0.8, 1, 0.05),\n fir_length=15,\n nsamples=None,\n tr=2.0,\n tres=1,\n baseline=800.0,\n signal_level=1,\n noise=\"normal\",\n noise_level=1,\n resampling=\"scipy\",\n):\n \"\"\"\n events: list of Events or ndarray of onsets for simple(r) designs\n \"\"\"\n if events is None:\n events = [1, 20, 25, 50, 60, 90, 92, 140]\n if isinstance(events, np.ndarray) or not isinstance(events[0], dict):\n events = [Event(onset=o) for o in events]\n else:\n assert isinstance(events, list)\n for e in events:\n assert isinstance(e, dict)\n\n # play fmri\n # full-blown HRF with initial dip and undershoot ;-)\n hrf_x = np.arange(0, float(fir_length) * tres, tres)\n if isinstance(hrf_gen, np.ndarray):\n # just accept provided HRF and only verify size match\n assert len(hrf_x) == len(hrf_gen)\n hrf = hrf_gen\n else:\n # actually generate it\n hrf = hrf_gen(hrf_x)\n if not nsamples:\n # estimate number of samples needed if not provided\n max_onset = max([e[\"onset\"] for e in events])\n nsamples = int(max_onset / tres + len(hrf_x) * 1.5)\n\n # come up with an experimental design\n fast_er = np.zeros(nsamples)\n for e in events:\n on = int(e[\"onset\"] / float(tres))\n off = 
int((e[\"onset\"] + e.get(\"duration\", 1.0)) / float(tres))\n if off == on:\n off += 1 # so we have at least 1 point\n assert list(range(on, off))\n fast_er[on:off] = e.get(\"intensity\", 1)\n # high resolution model of the convolved regressor\n model_hr = np.convolve(fast_er, hrf)[:nsamples]\n\n # downsample the regressor to fMRI resolution\n if resampling == \"scipy\":\n from scipy import signal\n\n model_lr = signal.resample(model_hr, int(tres * nsamples / tr), window=\"ham\")\n elif resampling == \"naive\":\n if tr % tres != 0.0:\n raise ValueError(\n \"You must use resample='scipy' since your TR=%.2g\"\n \" is not multiple of tres=%.2g\" % (tr, tres)\n )\n if tr < tres:\n raise ValueError(\n \"You must use resample='scipy' since your TR=%.2g\"\n \" is less than tres=%.2g\" % (tr, tres)\n )\n step = int(tr // tres)\n model_lr = model_hr[::step]\n else:\n raise ValueError(\n \"resampling can only be 'scipy' or 'naive'. Got %r\" % resampling\n )\n\n # generate artifical fMRI data: two voxels one is noise, one has\n # something\n wsignal = baseline + model_lr * signal_level\n nsignal = np.ones(wsignal.shape) * baseline\n\n # build design matrix: bold-regressor and constant\n design = np.array([model_lr, np.repeat(1, len(model_lr))]).T\n\n # two 'voxel' dataset\n ds = dataset_wizard(samples=np.array((wsignal, nsignal)).T, targets=1)\n ds.a[\"baseline\"] = baseline\n ds.a[\"tr\"] = tr\n ds.sa[\"design\"] = design\n\n ds.fa[\"signal_level\"] = [signal_level, False]\n\n if noise == \"autocorrelated\":\n # this one seems to be quite unstable and can provide really\n # funky noise at times\n noise = autocorrelated_noise(\n ds,\n 1 / tr,\n 1 / (2 * tr),\n lfnl=noise_level,\n hfnl=noise_level,\n add_baseline=False,\n )\n elif noise == \"normal\":\n noise = np.random.randn(*ds.shape) * noise_level\n else:\n raise ValueError(noise)\n ds.sa[\"noise\"] = noise\n ds.samples += noise\n return ds\n\n\ndef local_random_affine_transformations(\n ds, distort_seeds, distort_neighbor, space, scale_fac=100, shift_fac=10\n):\n \"\"\"Distort a dataset in the local neighborhood of selected features.\n\n This function is similar to ``random_affine_transformation()``, but applies\n multiple random affine transformations to a spatially constraint local\n neighborhood.\n\n Parameters\n ----------\n ds : Dataset\n The to be transformed/distorted dataset.\n distort_seeds : list(int)\n This a sequence of feature ids (corresponding to the input dataset) that\n serve as anchor to determine the local neighborhood for a distortion. The\n number of seeds also determines the number of different local distortions\n that are going to be applied.\n distort_neighbor : callable\n And object that when called with a coordinate generates a sequence of\n coordinates that comprise its neighborhood (see e.g. ``Sphere()``).\n space : str\n Name of the feature attribute of the input dataset that contains the\n relevant feature coordinates (e.g. 
'voxel_indices').\n scale_fac : float\n See ``random_affine_transformation()``\n shift_fac : float\n See ``random_affine_transformation()``\n\n Returns\n -------\n Dataset\n A dataset derived from the input dataset with added local distortions.\n \"\"\"\n # which dataset attributes to aggregate\n random_stats = [\"random_rotation\", \"random_scale\", \"random_shift\"]\n kwa = {space: distort_neighbor}\n qe = IndexQueryEngine(**kwa)\n qe.train(ds)\n ds_distorted = ds.copy()\n for stat in random_stats:\n ds_distorted.a[stat + \"s\"] = {}\n # for each seed region\n for seed in distort_seeds:\n # select the neighborhood for this seed\n # take data from the distorted dataset to avoid\n # 'loosing' previous distortions\n distort_ids = qe[seed]\n ds_d = random_affine_transformation(\n ds_distorted[:, distort_ids], scale_fac=scale_fac, shift_fac=shift_fac\n )\n # recover the distortions stats for this seed\n for stat in random_stats:\n ds_distorted.a[stat + \"s\"].value[seed] = ds_d.a[stat].value\n # put the freshly distorted data back\n ds_distorted.samples[:, distort_ids] = ds_d.samples\n return ds_distorted\n", "# emacs: -*- coding: utf-8; mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the PyMVPA package for the\n# copyright and license terms.\n#\n# The initial version of the code was contributed by Ingo Fründ and is\n# Coypright (c) 2008 by Ingo Fründ [email protected]\n#\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Plot parameter distributions on a head surface (topography plots).\"\"\"\n\n__docformat__ = \"restructuredtext\"\n\nimport numpy as np\n\nfrom mvpa2.base import externals\n\nif externals.exists(\"pylab\", raise_=True):\n import pylab as pl\n\nif externals.exists(\"griddata\", raise_=True):\n from mvpa2.support.griddata import griddata\n\nif externals.exists(\"scipy\", raise_=True):\n from scipy.optimize import leastsq\n\nif externals.versions[\"numpy\"] > \"1.1.0\":\n from numpy import ma\nelse:\n from matplotlib.numerix import ma\n\n# TODO : add optional plotting labels for the sensors\n##REF: Name was automagically refactored\ndef plot_head_topography(\n topography,\n sensorlocations,\n plotsensors=False,\n resolution=51,\n masked=True,\n plothead=True,\n plothead_kwargs=None,\n **kwargs\n):\n \"\"\"Plot distribution to a head surface, derived from some sensor locations.\n\n The sensor locations are first projected onto the best fitting sphere and\n finally projected onto a circle (by simply ignoring the z-axis).\n\n Parameters\n ----------\n topography : array\n A vector of some values corresponding to each sensor.\n sensorlocations : (nsensors x 3) array\n 3D coordinates of each sensor. 
The order of the sensors has to match\n with the `topography` vector.\n plotsensors : bool\n If True, sensor will be plotted on their projected coordinates.\n No sensor are shown otherwise.\n plothead : bool\n If True, a head outline is plotted.\n plothead_kwargs : dict\n Additional keyword arguments passed to `plot_head_outline()`.\n resolution : int\n Number of surface samples along both x and y-axis.\n masked : bool\n If True, all surface sample extending to head outline will be\n masked.\n **kwargs\n All additional arguments will be passed to `pylab.imshow()`.\n\n Returns\n -------\n (map, head, sensors)\n The corresponding matplotlib objects are returned if plotted, ie.\n if plothead is set to `False`, `head` will be `None`.\n\n map\n The colormap that makes the actual plot, a\n matplotlib.image.AxesImage instance.\n head\n What is returned by `plot_head_outline()`.\n sensors\n The dots marking the electrodes, a matplotlib.lines.Line2d\n instance.\n \"\"\"\n # give sane defaults\n if plothead_kwargs is None:\n plothead_kwargs = {}\n\n # error function to fit the sensor locations to a sphere\n def err(params):\n r, cx, cy, cz = params\n return (\n (sensorlocations[:, 0] - cx) ** 2\n + (sensorlocations[:, 1] - cy) ** 2\n + (sensorlocations[:, 2] - cz) ** 2\n - r ** 2\n )\n\n # initial guess of sphere parameters (radius and center)\n params = (1, 0, 0, 0)\n\n # do fit\n (r, cx, cy, cz), stuff = leastsq(err, params)\n\n # size of each square\n ssh = float(r) / resolution # half-size\n ss = ssh * 2.0 # full-size\n\n # Generate a grid and interpolate using the griddata module\n x = np.arange(cx - r, cx + r, ss) + ssh\n y = np.arange(cy - r, cy + r, ss) + ssh\n x, y = pl.meshgrid(x, y)\n\n # project the sensor locations onto the sphere\n sphere_center = np.array((cx, cy, cz))\n sproj = sensorlocations - sphere_center\n sproj = r * sproj / np.c_[np.sqrt(np.sum(sproj ** 2, axis=1))]\n sproj += sphere_center\n\n # fit topology onto xy projection of sphere\n topo = griddata(\n sproj[:, 0],\n sproj[:, 1],\n np.ravel(np.array(topography)),\n x,\n y,\n interp=\"nn\" if externals.versions[\"matplotlib\"] < \"1.4.0\" else \"linear\",\n )\n\n # mask values outside the head\n if masked:\n notinhead = np.greater_equal((x - cx) ** 2 + (y - cy) ** 2, (1.0 * r) ** 2)\n topo = ma.masked_where(notinhead, topo)\n\n # show surface\n map = pl.imshow(topo, origin=\"lower\", extent=(-r, r, -r, r), **kwargs)\n pl.axis(\"off\")\n\n if plothead:\n # plot scaled head outline\n head = plot_head_outline(scale=r, shift=(cx / 2.0, cy / 2.0), **plothead_kwargs)\n else:\n head = None\n\n if plotsensors:\n # plot projected sensor locations\n\n # reorder sensors so the ones below plotted first\n # TODO: please fix with more elegant solution\n zenum = [x[::-1] for x in enumerate(sproj[:, 2].tolist())]\n zenum.sort()\n indx = [x[1] for x in zenum]\n sensors = pl.plot(sproj[indx, 0] - cx / 2.0, sproj[indx, 1] - cy / 2.0, \"wo\")\n else:\n sensors = None\n\n return map, head, sensors\n\n\n##REF: Name was automagically refactored\ndef plot_head_outline(scale=1, shift=(0, 0), color=\"k\", linewidth=\"5\", **kwargs):\n \"\"\"Plots a simple outline of a head viewed from the top.\n\n The plot contains schematic representations of the nose and ears. 
The\n size of the head is basically a unit circle for nose and ears attached\n to it.\n\n Parameters\n ----------\n scale : float\n Factor to scale the size of the head.\n shift : 2-tuple of floats\n Shift the center of the head circle by these values.\n color : matplotlib color spec\n The color the outline should be plotted in.\n linewidth : int\n Linewidth of the head outline.\n **kwargs\n All additional arguments are passed to `pylab.plot()`.\n\n Returns\n -------\n Matplotlib lines2D object\n can be used to tweak the look of the head outline.\n \"\"\"\n\n rmax = 0.5\n # factor used all the time\n fac = 2 * np.pi * 0.01\n\n # Koordinates for the ears\n EarX1 = -1 * np.array(\n [\n 0.497,\n 0.510,\n 0.518,\n 0.5299,\n 0.5419,\n 0.54,\n 0.547,\n 0.532,\n 0.510,\n rmax * np.cos(fac * (54 + 42)),\n ]\n )\n EarY1 = np.array(\n [\n 0.0655,\n 0.0775,\n 0.0783,\n 0.0746,\n 0.0555,\n -0.0055,\n -0.0932,\n -0.1313,\n -0.1384,\n rmax * np.sin(fac * (54 + 42)),\n ]\n )\n EarX2 = np.array(\n [\n rmax * np.cos(fac * (54 + 42)),\n 0.510,\n 0.532,\n 0.547,\n 0.54,\n 0.5419,\n 0.5299,\n 0.518,\n 0.510,\n 0.497,\n ]\n )\n EarY2 = np.array(\n [\n rmax * np.sin(fac * (54 + 42)),\n -0.1384,\n -0.1313,\n -0.0932,\n -0.0055,\n 0.0555,\n 0.0746,\n 0.0783,\n 0.0775,\n 0.0655,\n ]\n )\n\n # Coordinates for the Head\n HeadX1 = np.fromfunction(lambda x: rmax * np.cos(fac * (x + 2)), (21,))\n HeadY1 = np.fromfunction(lambda y: rmax * np.sin(fac * (y + 2)), (21,))\n HeadX2 = np.fromfunction(lambda x: rmax * np.cos(fac * (x + 28)), (21,))\n HeadY2 = np.fromfunction(lambda y: rmax * np.sin(fac * (y + 28)), (21,))\n HeadX3 = np.fromfunction(lambda x: rmax * np.cos(fac * (x + 54)), (43,))\n HeadY3 = np.fromfunction(lambda y: rmax * np.sin(fac * (y + 54)), (43,))\n\n # Coordinates for the Nose\n NoseX = np.array([0.18 * rmax, 0, -0.18 * rmax])\n NoseY = np.array([rmax - 0.004, rmax * 1.15, rmax - 0.004])\n\n # Combine to one\n X = np.concatenate((EarX2, HeadX1, NoseX, HeadX2, EarX1, HeadX3))\n Y = np.concatenate((EarY2, HeadY1, NoseY, HeadY2, EarY1, HeadY3))\n\n X *= 2 * scale\n Y *= 2 * scale\n X += shift[0]\n Y += shift[1]\n\n return pl.plot(X, Y, color=color, linewidth=linewidth)\n", "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the PyMVPA package for the\n# copyright and license terms.\n#\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Unit tests for PyMVPA misc.plot\"\"\"\n\nfrom mvpa2.testing import *\n\nskip_if_no_external(\"pylab\")\n\nimport pylab as pl\nfrom matplotlib.figure import Figure\nfrom mvpa2.misc.plot.base import plot_dataset_chunks\nimport numpy as np\n\nfrom glob import glob\nfrom mock import patch\nfrom os.path import join as pjoin\n\ndata2d = np.random.randn(2, 4, 4)\ndata3d = np.random.randn(3, 4, 4)\n\ndata2d_3d = np.random.randn(2, 4, 4, 4)\ndata2d_4d = np.random.randn(2, 4, 4, 4, 2)\ndata2d_5d = np.random.randn(2, 4, 4, 4, 2, 3)\n\nfrom mvpa2.testing.datasets import datasets\n\n\n@sweepargs(dsp=list(datasets.items()))\ndef test_plot_dataset_chunks(dsp):\n dsname, ds = dsp\n if ds.targets.dtype.kind == \"f\":\n return\n # smoke test for now\n if \"chunks\" not in ds.sa:\n return # nothing to plot in this one\n print(dsname)\n plot_dataset_chunks(ds[:, :2]) # could only plot two\n pl.close(pl.gcf())\n if ds.nfeatures > 2:\n assert_raises(ValueError, 
plot_dataset_chunks, ds)\n", "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the PyMVPA package for the\n# copyright and license terms.\n#\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Generic wrappers for learners (classifiers) provided by R's MASS\n\nHighly experimental and ad-hoc -- primary use was to verify LDA/QDA\nresults, thus not included in the mvpa2.suite ATM.\n\"\"\"\n\n__docformat__ = \"restructuredtext\"\n\nimport numpy as np\n\nfrom mvpa2.base import externals\nfrom mvpa2.base.learner import FailedToTrainError, FailedToPredictError\nfrom mvpa2.base.state import ConditionalAttribute\nfrom mvpa2.clfs.base import Classifier, accepts_dataset_as_samples\n\n# do conditional to be able to build module reference\nif externals.exists(\"mass\", raise_=True):\n import rpy2.robjects\n import rpy2.robjects.numpy2ri\n\n if hasattr(rpy2.robjects.numpy2ri, \"activate\"):\n rpy2.robjects.numpy2ri.activate()\n RRuntimeError = rpy2.robjects.rinterface.RRuntimeError\n r = rpy2.robjects.r\n r.library(\"MASS\")\n from mvpa2.support.rpy2_addons import Rrx2\n\n\nclass MASSLearnerAdapter(Classifier):\n \"\"\"Generic adapter for instances of learners provided by R's MASS\n\n Provides basic adaptation of interface for classifiers from MASS\n library (e.g. QDA, LDA), by adapting interface.\n\n Examples\n --------\n >>> if externals.exists('mass'):\n ... from mvpa2.testing.datasets import datasets\n ... mass_qda = MASSLearnerAdapter('qda', tags=['non-linear', 'multiclass'], enable_ca=['posterior'])\n ... mass_qda.train(datasets['uni2large'])\n ... mass_qda.predict(datasets['uni2large']) # doctest: +SKIP\n \"\"\"\n\n __tags__ = [\"mass\", \"rpy2\"]\n\n posterior = ConditionalAttribute(\n enabled=False, doc=\"Posterior probabilities if provided by classifier\"\n )\n\n def __init__(self, learner, kwargs=None, kwargs_predict=None, tags=None, **kwargs_):\n \"\"\"\n Parameters\n ----------\n learner : string\n kwargs : dict, optional\n kwargs_predict : dict, optional\n tags : list of string\n What additional tags to attach to this classifier. Tags are\n used in the queries to classifier or regression warehouses.\n \"\"\"\n\n self._learner = learner\n\n self._kwargs = kwargs or {}\n self._kwargs_predict = kwargs_predict or {}\n\n if tags:\n # So we make a per-instance copy\n self.__tags__ = self.__tags__ + tags\n\n Classifier.__init__(self, **kwargs_)\n\n def __repr__(self):\n \"\"\"String representation of `SKLLearnerWrapper`\"\"\"\n return Classifier.__repr__(\n self, prefixes=[repr(self._learner), \"kwargs=%r\" % (self._kwargs,)]\n )\n\n def _train(self, dataset):\n \"\"\"Train the skl learner using `dataset` (`Dataset`).\"\"\"\n targets_sa = dataset.sa[self.get_space()]\n targets = targets_sa.value\n if not \"regression\" in self.__tags__:\n targets = self._attrmap.to_numeric(targets)\n\n try:\n self._R_model = r[self._learner](dataset.samples, targets, **self._kwargs)\n except RRuntimeError as e:\n raise FailedToTrainError(\n \"Failed to train %s on %s. 
Got '%s' during call to fit().\"\n % (self, dataset, e)\n )\n\n @accepts_dataset_as_samples\n def _predict(self, data):\n \"\"\"Predict using the trained MASS learner\"\"\"\n try:\n output = r.predict(self._R_model, data, **self._kwargs_predict)\n # TODO: access everything computed, and assign to\n # ca's: res.names\n classes = Rrx2(output, \"class\")\n # TODO: move to helper function to be used generically\n if classes.rclass[0] == \"factor\":\n classes = [int(classes.levels[i - 1]) for i in classes]\n if \"posterior\" in output.names:\n self.ca.posterior = np.asarray(Rrx2(output, \"posterior\"))\n res = np.asarray(classes)\n except Exception as e:\n raise FailedToPredictError(\n \"Failed to predict %s on data of shape %s. Got '%s' during\"\n \" call to predict().\" % (self, data.shape, e)\n )\n\n return res\n", "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the PyMVPA package for the\n# copyright and license terms.\n#\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Provides convenience datasets for unittesting.\n\nAlso performs testing of storing/reloading datasets into hdf5 file if\ncfg.getboolean('tests', 'use hdf datasets'\n\"\"\"\n\n__docformat__ = \"restructuredtext\"\n\nimport tempfile\nimport shutil\nimport traceback as tbm\nimport sys\nimport numpy as np\nfrom os.path import join as pathjoin\n\nfrom mvpa2 import cfg, externals\nfrom mvpa2.datasets.base import Dataset, HollowSamples\nfrom mvpa2.generators.partition import OddEvenPartitioner\nfrom mvpa2.misc.data_generators import *\nfrom mvpa2.testing.tools import reseed_rng\n\n__all__ = [\n \"datasets\",\n \"get_random_rotation\",\n \"saveload_warehouse\",\n \"pure_multivariate_signal\",\n]\n\n# Define datasets to be used all over. 
Split-half later on is used to\n# split into training/testing\n#\nsnr_scale = cfg.get_as_dtype(\"tests\", \"snr scale\", float, default=1.0)\n\nspecs = {\n \"large\": {\"perlabel\": 99, \"nchunks\": 11, \"nfeatures\": 20, \"snr\": 8 * snr_scale},\n \"medium\": {\"perlabel\": 24, \"nchunks\": 6, \"nfeatures\": 14, \"snr\": 8 * snr_scale},\n \"small\": {\"perlabel\": 12, \"nchunks\": 4, \"nfeatures\": 6, \"snr\": 14 * snr_scale},\n}\n\n\n# to assure reproducibility -- lets reseed the RNG at this point\n@reseed_rng()\ndef generate_testing_datasets(specs):\n # Lets permute upon each invocation of test, so we could possibly\n # trigger some funny cases\n nonbogus_pool = np.random.permutation([0, 1, 3, 5])\n\n datasets = {}\n\n # use a partitioner to flag odd/even samples as training and test\n ttp = OddEvenPartitioner(space=\"train\", count=1)\n\n for kind, spec in specs.items():\n # set of univariate datasets\n for nlabels in [2, 3, 4]:\n basename = \"uni%d%s\" % (nlabels, kind)\n nonbogus_features = nonbogus_pool[:nlabels]\n\n dataset = normal_feature_dataset(\n nlabels=nlabels, nonbogus_features=nonbogus_features, **spec\n )\n\n # full dataset\n datasets[basename] = list(ttp.generate(dataset))[0]\n\n # sample 3D\n total = 2 * spec[\"perlabel\"]\n nchunks = spec[\"nchunks\"]\n data = np.random.standard_normal((total, 3, 6, 6))\n labels = np.concatenate(\n (np.repeat(0, spec[\"perlabel\"]), np.repeat(1, spec[\"perlabel\"]))\n )\n data[:, 1, 0, 0] += 2 * labels # add some signal\n chunks = np.asarray(list(range(nchunks)) * (total // nchunks))\n mask = np.ones((3, 6, 6), dtype=\"bool\")\n mask[0, 0, 0] = 0\n mask[1, 3, 2] = 0\n ds = Dataset.from_wizard(\n samples=data, targets=labels, chunks=chunks, mask=mask, space=\"myspace\"\n )\n # and to stress tests on manipulating sa/fa possibly containing\n # attributes of dtype object\n ds.sa[\"test_object\"] = [[\"a\"], [1, 2]] * (ds.nsamples // 2)\n datasets[\"3d%s\" % kind] = ds\n\n # some additional datasets\n datasets[\"dumb2\"] = dumb_feature_binary_dataset()\n datasets[\"dumb\"] = dumb_feature_dataset()\n # dataset with few invariant features\n _dsinv = dumb_feature_dataset()\n _dsinv.samples = np.hstack(\n (_dsinv.samples, np.zeros((_dsinv.nsamples, 1)), np.ones((_dsinv.nsamples, 1)))\n )\n datasets[\"dumbinv\"] = _dsinv\n\n # Datasets for regressions testing\n datasets[\"sin_modulated\"] = list(\n ttp.generate(multiple_chunks(sin_modulated, 4, 30, 1))\n )[0]\n # use the same full for training\n datasets[\"sin_modulated_train\"] = datasets[\"sin_modulated\"]\n datasets[\"sin_modulated_test\"] = sin_modulated(30, 1, flat=True)\n\n # simple signal for linear regressors\n datasets[\"chirp_linear\"] = multiple_chunks(chirp_linear, 6, 50, 10, 2, 0.3, 0.1)\n datasets[\"chirp_linear_test\"] = chirp_linear(20, 5, 2, 0.4, 0.1)\n\n datasets[\"wr1996\"] = multiple_chunks(wr1996, 4, 50)\n datasets[\"wr1996_test\"] = wr1996(50)\n\n datasets[\"hollow\"] = Dataset(\n HollowSamples((40, 20)), sa={\"targets\": np.tile([\"one\", \"two\"], 20)}\n )\n\n return datasets\n\n\n# avoid treating it as a test by nose\ngenerate_testing_datasets.__test__ = False\n\n\ndef saveload_warehouse():\n \"\"\"Store all warehouse datasets into HDF5 and reload them.\"\"\"\n import h5py\n from mvpa2.base.hdf5 import obj2hdf, hdf2obj\n\n tempdir = tempfile.mkdtemp()\n\n # store the whole datasets warehouse in one hdf5 file\n hdf = h5py.File(pathjoin(tempdir, \"myhdf5.hdf5\"), \"w\")\n for d in datasets:\n obj2hdf(hdf, datasets[d], d)\n hdf.close()\n\n hdf = h5py.File(pathjoin(tempdir, 
\"myhdf5.hdf5\"), \"r\")\n rc_ds = {}\n for d in hdf:\n rc_ds[d] = hdf2obj(hdf[d])\n hdf.close()\n\n # cleanup temp dir\n shutil.rmtree(tempdir, ignore_errors=True)\n\n # return the reconstructed datasets (for use in datasets warehouse)\n return rc_ds\n\n\ndatasets = generate_testing_datasets(specs)\n\nif cfg.getboolean(\"tests\", \"use hdf datasets\", False):\n if not externals.exists(\"h5py\"):\n raise RuntimeError(\n \"Cannot perform HDF5 dump of all datasets in the warehouse, \"\n \"because 'h5py' is not available\"\n )\n\n datasets = saveload_warehouse()\n print(\"Replaced all dataset warehouse for HDF5 loaded alternative.\")\n", "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the PyMVPA package for the\n# copyright and license terms.\n#\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Unit tests for PyMVPA perturbation sensitivity analyzer.\"\"\"\n\nimport numpy as np\nfrom mvpa2.testing import *\nfrom mvpa2.testing.clfs import *\n\nfrom mvpa2.datasets.base import Dataset\nfrom mvpa2.measures.noiseperturbation import NoisePerturbationSensitivity\nfrom mvpa2.generators.partition import NFoldPartitioner\nfrom mvpa2.measures.base import CrossValidation\n\n\nclass PerturbationSensitivityAnalyzerTests(unittest.TestCase):\n @reseed_rng()\n def setUp(self):\n data = np.random.standard_normal((100, 3, 4, 2))\n labels = np.concatenate((np.repeat(0, 50), np.repeat(1, 50)))\n chunks = np.repeat(list(range(5)), 10)\n chunks = np.concatenate((chunks, chunks))\n mask = np.ones((3, 4, 2), dtype=\"bool\")\n mask[0, 0, 0] = 0\n mask[1, 3, 1] = 0\n self.dataset = Dataset.from_wizard(\n samples=data, targets=labels, chunks=chunks, mask=mask\n )\n\n def test_perturbation_sensitivity_analyzer(self):\n # compute N-1 cross-validation as datameasure\n cv = CrossValidation(sample_clf_lin, NFoldPartitioner())\n # do perturbation analysis using gaussian noise\n pa = NoisePerturbationSensitivity(cv, noise=np.random.normal)\n\n # run analysis\n map = pa(self.dataset)\n\n # check for correct size of map\n self.assertTrue(map.nfeatures == self.dataset.nfeatures)\n\n # dataset is noise -> mean sensitivity should be zero\n self.assertTrue(-0.2 < np.mean(map) < 0.2)\n\n\ndef suite(): # pragma: no cover\n return unittest.makeSuite(PerturbationSensitivityAnalyzerTests)\n\n\nif __name__ == \"__main__\": # pragma: no cover\n from . 
import runner\n\n runner.run()\n", "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the PyMVPA package for the\n# copyright and license terms.\n#\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Fixer for rdist in scipy\n\"\"\"\n# For scipy import\n\n\n__docformat__ = \"restructuredtext\"\n\nfrom mvpa2.base import externals, warning, cfg\n\nif __debug__:\n from mvpa2.base import debug\n\nif externals.exists(\"scipy\", raise_=True):\n import scipy\n import scipy.stats\n import scipy.stats as stats\n\nif (\n not externals.exists(\"good scipy.stats.rdist\")\n and externals.versions[\"scipy\"] < \"1.18.0\"\n):\n if __debug__:\n debug(\"EXT\", \"Fixing up scipy.stats.rdist\")\n # Lets fix it up, future imports of scipy.stats should carry fixed\n # version, isn't python is \\emph{evil} ;-)\n import numpy as np\n\n from scipy.stats.distributions import rv_continuous\n from scipy import special\n import scipy.integrate\n\n # NB: Following function is copied from scipy SVN rev.5236\n # and fixed with pow -> np.power (thanks Josef!)\n # FIXME: PPF does not work.\n class rdist_gen(rv_continuous):\n def _pdf(self, x, c):\n return np.power((1.0 - x * x), c / 2.0 - 1) / special.beta(0.5, c / 2.0)\n\n def _cdf_skip(self, x, c):\n # error inspecial.hyp2f1 for some values see tickets 758, 759\n return 0.5 + x / special.beta(0.5, c / 2.0) * special.hyp2f1(\n 0.5, 1.0 - c / 2.0, 1.5, x * x\n )\n\n def _munp(self, n, c):\n return (1 - (n % 2)) * special.beta((n + 1.0) / 2, c / 2.0)\n\n # Lets try to avoid at least some of the numerical problems by removing points\n # around edges\n rdist = rdist_gen(\n a=-1.0,\n b=1.0,\n name=\"rdist\",\n longname=\"An R-distributed\",\n shapes=\"c\",\n extradoc=\"\"\"\n\n R-distribution\n\n rdist.pdf(x,c) = (1-x**2)**(c/2-1) / B(1/2, c/2)\n for -1 <= x <= 1, c > 0.\n \"\"\",\n )\n # Fix up number of arguments for veccdf's vectorize\n # Sicne scipy 0.18.0 there is veccdf in rdist_gen\n if hasattr(rdist, \"veccdf\") and (rdist.veccdf.nin == 1):\n if __debug__:\n debug(\"EXT\", \"Fixing up veccdf.nin to make 2 for rdist\")\n rdist.veccdf.nin = 2\n\n scipy.stats.distributions.rdist_gen = scipy.stats.rdist_gen = rdist_gen\n scipy.stats.distributions.rdist = scipy.stats.rdist = rdist\n\n try: # Retest\n externals.exists(\"good scipy.stats.rdist\", force=True, raise_=True)\n except RuntimeError:\n warning(\n \"scipy.stats.rdist was not fixed with a monkey-patch. \" \"It remains broken\"\n )\n # Revert so if configuration stored, we know the true flow of things ;)\n cfg.set(\"externals\", \"have good scipy.stats.rdist\", \"no\")\n\n\nif not externals.exists(\"good scipy.stats.rv_discrete.ppf\"):\n # Local rebindings for ppf7 (7 is for the scipy version from\n # which code was borrowed)\n arr = np.asarray\n from scipy.stats.distributions import valarray, argsreduce\n from numpy import shape, place, any\n\n def ppf7(self, q, *args, **kwds):\n \"\"\"\n Percent point function (inverse of cdf) at q of the given RV\n\n Parameters\n ----------\n q : array-like\n lower tail probability\n arg1, arg2, arg3,... 
: array-like\n The shape parameter(s) for the distribution (see docstring of the\n instance object for more information)\n loc : array-like, optional\n location parameter (default=0)\n\n Returns\n -------\n k : array-like\n quantile corresponding to the lower tail probability, q.\n\n \"\"\"\n loc = kwds.get(\"loc\")\n args, loc = self._rv_discrete__fix_loc(args, loc)\n q, loc = list(map(arr, (q, loc)))\n args = tuple(map(arr, args))\n cond0 = self._argcheck(*args) & (loc == loc)\n cond1 = (q > 0) & (q < 1)\n cond2 = (q == 1) & cond0\n cond = cond0 & cond1\n output = valarray(shape(cond), value=self.badvalue, typecode=\"d\")\n # output type 'd' to handle nin and inf\n place(output, (q == 0) * (cond == cond), self.a - 1)\n place(output, cond2, self.b)\n if any(cond):\n goodargs = argsreduce(cond, *((q,) + args + (loc,)))\n loc, goodargs = goodargs[-1], goodargs[:-1]\n place(output, cond, self._ppf(*goodargs) + loc)\n\n if output.ndim == 0:\n return output[()]\n return output\n\n scipy.stats.distributions.rv_discrete.ppf = ppf7\n try:\n externals.exists(\"good scipy.stats.rv_discrete.ppf\", force=True, raise_=True)\n except RuntimeError:\n warning(\n \"rv_discrete.ppf was not fixed with a monkey-patch. \" \"It remains broken\"\n )\n cfg.set(\"externals\", \"have good scipy.stats.rv_discrete.ppf\", \"no\")\n\nif externals.versions[\"scipy\"] >= \"0.8.0\" and not externals.exists(\n \"good scipy.stats.rv_continuous._reduce_func(floc,fscale)\"\n):\n if __debug__:\n debug(\"EXT\", \"Fixing up scipy.stats.rv_continuous._reduce_func\")\n\n # Borrowed from scipy v0.4.3-5978-gce90df2\n # Copyright: 2001, 2002 Enthought, Inc.; 2003-2012 SciPy developers\n # License: BSD-3\n def _reduce_func_fixed(self, args, kwds):\n args = list(args)\n Nargs = len(args)\n fixedn = []\n index = list(range(Nargs))\n names = [\"f%d\" % n for n in range(Nargs - 2)] + [\"floc\", \"fscale\"]\n x0 = []\n for n, key in zip(index, names):\n if key in kwds:\n fixedn.append(n)\n args[n] = kwds[key]\n else:\n x0.append(args[n])\n\n if len(fixedn) == 0:\n func = self.nnlf\n restore = None\n else:\n if len(fixedn) == len(index):\n raise ValueError(\"All parameters fixed. There is nothing to optimize.\")\n\n def restore(args, theta):\n # Replace with theta for all numbers not in fixedn\n # This allows the non-fixed values to vary, but\n # we still call self.nnlf with all parameters.\n i = 0\n for n in range(Nargs):\n if n not in fixedn:\n args[n] = theta[i]\n i += 1\n return args\n\n def func(theta, x):\n newtheta = restore(args[:], theta)\n return self.nnlf(newtheta, x)\n\n return x0, func, restore, args\n\n stats.rv_continuous._reduce_func = _reduce_func_fixed\n" ]
[ [ "numpy.convolve", "numpy.hstack", "numpy.dot", "numpy.sqrt", "numpy.linspace", "numpy.abs", "numpy.random.standard_normal", "numpy.arange", "numpy.sin", "numpy.ones", "numpy.random.normal", "scipy.signal.butter", "numpy.random.randn", "numpy.random.rand", "scipy.signal.lfilter", "numpy.repeat", "numpy.array", "numpy.zeros" ], [ "numpy.arange", "numpy.cos", "numpy.sin", "numpy.concatenate", "numpy.greater_equal", "scipy.optimize.leastsq", "matplotlib.numerix.ma.masked_where", "numpy.array", "numpy.sum" ], [ "numpy.random.randn" ], [ "numpy.asarray" ], [ "numpy.random.standard_normal", "numpy.tile", "numpy.ones", "numpy.random.permutation", "numpy.repeat", "numpy.zeros" ], [ "numpy.random.standard_normal", "numpy.ones", "numpy.concatenate", "numpy.mean", "numpy.repeat" ], [ "numpy.power", "numpy.shape", "numpy.any", "scipy.special.hyp2f1", "scipy.stats.distributions.argsreduce", "numpy.place", "scipy.special.beta" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tantao258/tensorflow-yolov3
[ "6f6a1c92a58e019af5fc7ffaecf96e6f249355c4" ]
[ "quick_train.py" ]
[ "#! /usr/bin/env python\n# coding=utf-8\n#================================================================\n# Copyright (C) 2018 * Ltd. All rights reserved.\n#\n# Editor : VIM\n# File name : quick_train.py\n# Author : YunYang1994\n# Created date: 2018-12-07 17:58:58\n# Description :\n#\n#================================================================\n\nimport numpy as np\nimport tensorflow as tf\nfrom PIL import Image\nfrom core import utils, yolov3\n\nsess = tf.Session()\nclasses = utils.read_coco_names('./data/coco.names')\nnum_classes = len(classes)\ninput_shape = [416, 416]\ndataset = utils.read_image_box_from_text('./data/train_data/quick_train_data.txt')\nanchors = utils.get_anchors('./data/yolo_anchors.txt')\n\ninputs = tf.placeholder(tf.float32, shape=[1, 416, 416, 3])\ny_true_13 = tf.placeholder(tf.float32, shape=[1,13,13,3,85])\ny_true_26 = tf.placeholder(tf.float32, shape=[1,26,26,3,85])\ny_true_52 = tf.placeholder(tf.float32, shape=[1,52,52,3,85])\n\nmodel = yolov3.yolov3(80)\nwith tf.variable_scope('yolov3'):\n feature_maps = model.forward(inputs, is_training=True)\n load_ops = utils.load_weights(tf.global_variables(scope='yolov3'), \"./checkpoint/yolov3.weights\")\n sess.run(load_ops)\n loss = model.compute_loss(feature_maps, [y_true_13, y_true_26, y_true_52])\n\noptimizer = tf.train.AdamOptimizer(0.001)\ntrain_op = optimizer.minimize(loss)\nsess.run(tf.global_variables_initializer())\n\nfor image_path in dataset.keys():\n image = Image.open(image_path)\n true_boxes, true_labels = dataset[image_path]\n image, true_boxes = utils.resize_image_correct_bbox(image, true_boxes, input_shape)\n scores = np.ones(len(true_boxes))\n # utils.draw_boxes(image, boxes, scores, labels, classes)\n true_boxes = np.expand_dims(true_boxes, 0)\n true_labels = np.expand_dims(true_labels, 0)\n y_true = utils.preprocess_true_boxes(true_boxes, true_labels, input_shape, anchors, num_classes)\n\n image_data = np.expand_dims(np.array(image, dtype=np.float32) / 255., axis=0)\n\n _, result = sess.run([train_op,loss], feed_dict={inputs:image_data,\n y_true_13:y_true[0],\n y_true_26:y_true[1],\n y_true_52:y_true[2],})\n # print(result)\n\n\n\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "numpy.expand_dims", "tensorflow.global_variables", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.train.AdamOptimizer", "tensorflow.Session", "tensorflow.variable_scope", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
dongxulee/lifeCycle
[ "2b4a74dbd64357d00b29f7d946a66afcba747cc6", "2b4a74dbd64357d00b29f7d946a66afcba747cc6", "2b4a74dbd64357d00b29f7d946a66afcba747cc6" ]
[ "20200616/functions/header.py", "20210528/.ipynb_checkpoints/constant-checkpoint.py", "20210528/.ipynb_checkpoints/constantHighSkill2-checkpoint.py" ]
[ "# header files and constant variables\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\nfrom scipy.interpolate import RectBivariateSpline as RS\nfrom multiprocessing import Pool\nfrom functools import partial\nfrom pyswarm import pso\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nnp.printoptions(precision=2)\n\n# time line\nT_min = 0\nT_max = 70\nT_R = 45\n# discounting factor\nbeta = 1/(1+0.02)\n# All the money amount are denoted in thousand dollars\nearningShock = [0.8,1.2]\n# Define transition matrix of economical states\n# GOOD -> GOOD 0.8, BAD -> BAD 0.6\nPs = np.array([[0.6, 0.4],[0.2, 0.8]])\n# current risk free interest rate\nr_f = np.array([0.01 ,0.03])\n# stock return depends on current and future econ states\n# r_m = np.array([[-0.2, 0.15],[-0.15, 0.2]])\nr_m = np.array([[-0.15, 0.20],[-0.15, 0.20]])\n# expected return on stock market\n# r_bar = 0.0667\nr_bar = 0.02\n# probability of survival\nPa = np.load(\"prob.npy\")\n# deterministic income\ndetEarning = np.load(\"detEarning.npy\")\n# probability of employment transition Pe[s, s_next, e, e_next]\nPe = np.array([[[[0.3, 0.7], [0.1, 0.9]], [[0.25, 0.75], [0.05, 0.95]]],\n [[[0.25, 0.75], [0.05, 0.95]], [[0.2, 0.8], [0.01, 0.99]]]])\n# tax rate before and after retirement\ntau_L = 0.2\ntau_R = 0.1\n# minimum consumption\nc_bar = 3\n\n#Define the utility function\ndef u(c):\n gamma = 2\n return (np.float_power(max(c-c_bar,0),1-gamma) - 1)/(1 - gamma)\n\n#Define the bequeath function, which is a function of wealth\ndef uB(w):\n B = 2\n return B*u(w+c_bar+1)\n\n#Define the earning function, which applies for both employment and unemployment\ndef y(t, x):\n w, n, s, e, A = x\n if A == 0:\n return 0\n else:\n if t < T_R:\n return detEarning[t] * earningShock[int(s)] * e + (1-e) * 5\n else:\n return detEarning[t]\n\n# Define the reward funtion depends on both state and action.\ndef R(x, a):\n c, b, k = a\n w, n, s, e, A = x\n if A == 0:\n return uB(w+(1+r_bar)*n)\n else:\n return u(c)\n", "import numpy as np\nimport jax.numpy as jnp\nfrom jax.scipy.ndimage import map_coordinates\nimport warnings\nfrom jax import jit, partial, random, vmap\nfrom tqdm import tqdm\nwarnings.filterwarnings(\"ignore\")\nnp.printoptions(precision=2)\n\n# time line\nT_min = 0\nT_max = 60\nT_R = 45\n# discounting factor\nbeta = 1/(1+0.02)\n# utility function parameter \ngamma = 3\n# relative importance of housing consumption and non durable consumption \nalpha = 0.7\n# parameter used to calculate the housing consumption \nkappa = 0.3\n# uB associated parameter\nB = 2\n# social welfare after the unemployment\nwelfare = 20\n# tax rate before and after retirement\ntau_L = 0.2\ntau_R = 0.1\n# number of states S\nnS = 8\n# number of states e\nnE = 2\n# housing state\nnO = 2\n\n\n'''\n Economic state calibration \n'''\n\n# probability of survival\nPa = jnp.array(np.load(\"constant/prob.npy\"))\n# deterministic income\ndetEarning = jnp.array(np.load(\"constant/detEarningHigh.npy\"))\n# rescale the deterministic income\ndetEarning = detEarning \ndetEarning = jnp.concatenate([detEarning[:46], detEarning[46:]-30])\n# Define transition matrix of economical states S\nPs = np.genfromtxt('constant/Ps.csv',delimiter=',')\nfix = (np.sum(Ps, axis = 1) - 1)\nfor i in range(nS):\n for j in range(nS):\n if Ps[i,j] - fix[i] > 0:\n Ps[i,j] = Ps[i,j] - fix[i]\n break\nPs = jnp.array(Ps)\n# The possible GDP growth, stock return, bond return\ngkfe = np.genfromtxt('constant/gkfe.csv',delimiter=',')\ngkfe = jnp.array(gkfe)\n# GDP growth depending on current S 
state\ngGDP = gkfe[:,0]/100\n# risk free interest rate depending on current S state \nr_b = gkfe[:,1]/100\n# stock return depending on current S state\nr_k = gkfe[:,2]/100\n# unemployment rate depending on current S state \nPe = gkfe[:,7:]/100\nPe = Pe[:,::-1]\n\n'''\n 401k related constants\n'''\n# some variables associated with 401k amount\nr_bar = 0.02\nPa = Pa[:T_max]\nNt = [np.sum(Pa[t:]) for t in range(T_min,T_max)]\n#Factor used to calculate the withdraw amount \nDn = [(r_bar*(1+r_bar)**N)/((1+r_bar)**N - 1) for N in Nt]\nDn[-1] = 1\nDn = jnp.array(Dn)\n# income fraction goes into 401k \nyi = 0.04\n\n\n\n'''\n housing related constants\n'''\n# variable associated with housing and mortgage \n# mortgage rate \nrh = 0.045\n# housing unit\nH = 1000\n# rent unit\nRl = 500\n# housing price constant \npt = 2*250/1000\n# 30k rent 1000 sf\npr = 2*10/1000 * 2 \n# constant cost \nc_h = 5\nc_s = H*pt*0.4\n# Dm is used to update the mortgage payment\nDm = [(1+rh) - rh*(1+rh)**(T_max - t)/((1+rh)**(T_max-t)-1) for t in range(T_min, T_max)]\nDm[-1] = 0\nDm = jnp.array(Dm)\n\n# 30 year mortgage\nMs = []\nM = H*pt*0.8\nm = M*(1+rh) - Dm[30]*M\nfor i in range(30, T_max):\n M = M*(1+rh) - m\n Ms.append(M)\nMs[-1] = 0\nMs = jnp.array(Ms)\n\n\n# stock transaction fee\nKc = 0.001\n\n\n'''\n Discretize the state space\n Discretize the action space \n'''\n# actions dicretization(hp, cp, kp)\nnumGrid = 20\nAs = np.array(np.meshgrid(np.linspace(0.001,0.999,numGrid), np.linspace(0,1,numGrid), [0,1])).T.reshape(-1,3)\nAs = jnp.array(As)\n# wealth discretization \nws = np.linspace(0, 400, 20)\nns = np.linspace(0, 300, 10)\nms = np.linspace(0, 0.8*H*pt, 10)\n# scales associated with discretization\nscaleW = ws.max()/ws.size\nscaleN = ns.max()/ns.size\nscaleM = ms.max()/ms.size\n\n# dimentions of the state\ndim = (ws.size, ns.size, ms.size, nS, nE, nO)\ndimSize = len(dim)\n\nxgrid = np.array([[w,n,m,s,e,o] for w in ws\n for n in ns\n for m in ms\n for s in range(nS)\n for e in range(nE)\n for o in range(nO)]).reshape(dim + (dimSize,))\n\nXs = xgrid.reshape((np.prod(dim),dimSize))\nXs = jnp.array(Xs)\n\nVgrid = np.zeros(dim + (T_max,))\ncgrid = np.zeros(dim + (T_max,))\nbgrid = np.zeros(dim + (T_max,))\nkgrid = np.zeros(dim + (T_max,))\nhgrid = np.zeros(dim + (T_max,))\nagrid = np.zeros(dim + (T_max,))\n\n# start of function definitions\nnX = Xs.shape[0]\nnA = As.shape[0]\n\n#Define the earning function, which applies for both employment status and 8 econ states\n@partial(jit, static_argnums=(0,))\ndef y(t, x):\n '''\n x = [w,n,m,s,e,o]\n x = [0,1,2,3,4,5]\n '''\n if t <= T_R:\n return detEarning[t] * (1+gGDP[jnp.array(x[3], dtype = jnp.int8)]) * x[4] + (1-x[4]) * welfare\n else:\n return detEarning[-1]\n \n#Earning after tax and fixed by transaction in and out from 401k account \n@partial(jit, static_argnums=(0,))\ndef yAT(t,x):\n yt = y(t, x)\n if t <= T_R:\n # yi portion of the income will be put into the 401k if employed\n return (1-tau_L)*(yt * (1-yi))*x[4] + (1-x[4])*yt\n else:\n # t > T_R, n/discounting amount will be withdraw from the 401k \n return (1-tau_R)*yt + x[1]*Dn[t]\n \n#Define the evolution of the amount in 401k account \n@partial(jit, static_argnums=(0,))\ndef gn(t, x, r = r_bar):\n if t <= T_R:\n # if the person is employed, then yi portion of his income goes into 401k \n n_cur = x[1] + y(t, x) * yi * x[4]\n else:\n # t > T_R, n*Dn amount will be withdraw from the 401k \n n_cur = x[1] - x[1]*Dn[t]\n # the 401 grow with the rate r \n return (1+r)*n_cur\n\n#Define the utility function\n@jit\ndef 
u(c):\n return (jnp.log(c)/jnp.log(c))*(jnp.power(c, 1-gamma) - 1)/(1 - gamma)\n\n#Define the bequeath function, which is a function of bequeath wealth\n@jit\ndef uB(tb):\n return B*u(tb)\n\n#Reward function depends on the housing and non-housing consumption\n@jit\ndef R(x,a):\n '''\n Input:\n x = [w,n,m,s,e,o]\n x = [0,1,2,3,4,5]\n a = [c,b,k,h,action]\n a = [0,1,2,3,4]\n '''\n c = a[:,0]\n h = a[:,3]\n C = jnp.power(c, alpha) * jnp.power(h, 1-alpha)\n return u(C)\n\n\n# pc*qc / (ph*qh) = alpha/(1-alpha)\n@partial(jit, static_argnums=(0,))\ndef feasibleActions(t, x):\n # owner\n sell = As[:,2]\n payment = (x[2] > 0)*(((t<=T_R)*tau_L + (t>T_R)*tau_R)*x[2]*rh - m)\n \n# # if the agent is able to pay\n# if yAT(t,x) + x[0] + payment > 0:\n# sell = jnp.zeros(nA)\n# budget1 = yAT(t,x) + x[0] + (1-sell)*payment\n# # if the agent is not able to pay (force sell)\n# else:\n# sell = jnp.ones(nA)\n# budget1 = yAT(t,x) + x[0] + sell*(H*pt - x[2] - c_s)\n \n sell = (yAT(t,x) + x[0] + payment > 0)*jnp.zeros(nA) + (yAT(t,x) + x[0] + payment <= 0)*jnp.ones(nA)\n budget1 = yAT(t,x) + x[0] + (1-sell)*payment + sell*(H*pt - x[2] - c_s)\n \n # last term is the tax deduction of the interest portion of mortgage payment \n h = jnp.ones(nA)*H*(1+kappa)*(1-sell) + sell*jnp.clip(budget1*As[:,0]*(1-alpha)/pr, a_max = Rl)\n c = budget1*As[:,0]*(1-sell) + sell*(budget1*As[:,0] - h*pr)\n budget2 = budget1*(1-As[:,0])\n k = budget2*As[:,1]*(1-Kc)\n b = budget2*(1-As[:,1])\n owner_action = jnp.column_stack((c,b,k,h,sell)) \n \n \n # renter\n buy = As[:,2]*(t < 30)\n budget1 = yAT(t,x) + x[0] - buy*(H*pt*0.2 + c_h)\n h = jnp.clip(budget1*As[:,0]*(1-alpha)/pr, a_max = Rl)*(1-buy) + buy*jnp.ones(nA)*H*(1+kappa)\n c = (budget1*As[:,0] - h*pr)*(1-buy) + buy*budget1*As[:,0]\n budget2 = budget1*(1-As[:,0])\n k = budget2*As[:,1]*(1-Kc)\n b = budget2*(1-As[:,1])\n renter_action = jnp.column_stack((c,b,k,h,buy))\n \n actions = x[5]*owner_action + (1-x[5])*renter_action\n return actions\n\n@partial(jit, static_argnums=(0,))\ndef transition(t,a,x):\n '''\n Input:\n x = [w,n,m,s,e,o]\n x = [0,1,2,3,4,5]\n a = [c,b,k,h,action]\n a = [0,1,2,3,4]\n Output:\n w_next\n n_next\n m_next\n s_next\n e_next\n o_next\n \n prob_next\n '''\n nA = a.shape[0]\n s = jnp.array(x[3], dtype = jnp.int8)\n e = jnp.array(x[4], dtype = jnp.int8)\n # actions taken\n b = a[:,1]\n k = a[:,2]\n action = a[:,4]\n w_next = ((1+r_b[s])*b + jnp.outer(k,(1+r_k)).T).T.flatten().repeat(nE)\n n_next = gn(t, x)*jnp.ones(w_next.size)\n s_next = jnp.tile(jnp.arange(nS),nA).repeat(nE)\n e_next = jnp.column_stack((e.repeat(nA*nS),(1-e).repeat(nA*nS))).flatten()\n # job status changing probability and econ state transition probability\n pe = Pe[s, e]\n ps = jnp.tile(Ps[s], nA)\n prob_next = jnp.column_stack(((1-pe)*ps,pe*ps)).flatten()\n \n # owner\n m_next_own = ((1-action)*jnp.clip(x[2]*(1+rh) - m, a_min = 0)).repeat(nS*nE)\n o_next_own = (x[5] - action).repeat(nS*nE)\n # renter\n m_next_rent = (action*H*pt*0.8).repeat(nS*nE)\n o_next_rent = action.repeat(nS*nE)\n \n m_next = x[5] * m_next_own + (1-x[5]) * m_next_rent\n o_next = x[5] * o_next_own + (1-x[5]) * o_next_rent \n return jnp.column_stack((w_next,n_next,m_next,s_next,e_next,o_next,prob_next))\n\n# used to calculate dot product\n@jit\ndef dotProduct(p_next, uBTB):\n return (p_next*uBTB).reshape((p_next.shape[0]//(nS*nE), (nS*nE))).sum(axis = 1)\n\n# define approximation of fit\n@jit\ndef fit(v, xp):\n return map_coordinates(v,jnp.vstack((xp[:,0]/scaleW,\n xp[:,1]/scaleN,\n xp[:,2]/scaleM,\n xp[:,3],\n xp[:,4],\n 
xp[:,5])),\n order = 1, mode = 'nearest')\n\n@partial(jit, static_argnums=(0,))\ndef V(t,V_next,x):\n '''\n x = [w,n,m,s,e,o]\n x = [0,1,2,3,4,5]\n xp:\n w_next 0\n n_next 1\n m_next 2\n s_next 3\n e_next 4\n o_next 5\n prob_next 6\n '''\n actions = feasibleActions(t,x)\n xp = transition(t,actions,x)\n # bequeath utility\n TB = xp[:,0]+x[1]*(1+r_bar)+xp[:,5]*(H*pt-x[2]*(1+rh)-25)\n bequeathU = uB(TB)\n if t == T_max-1:\n Q = R(x,actions) + beta * dotProduct(xp[:,6], bequeathU)\n else:\n Q = R(x,actions) + beta * dotProduct(xp[:,6], Pa[t]*fit(V_next, xp) + (1-Pa[t])*bequeathU)\n Q = jnp.nan_to_num(Q, nan = -jnp.inf)\n v = Q.max()\n cbkha = actions[Q.argmax()]\n return v, cbkha\n\n\n# calculate the stationary distribution of econ state and employment state\nS_distribution = jnp.ones(nS)/nS\nfor _ in range(100):\n S_distribution = jnp.matmul(S_distribution, Ps)\n \n#P(0,1)\nP01 = jnp.dot(Pe[:,0],S_distribution)\n#P(1,0)\nP10 = jnp.dot(Pe[:,1],S_distribution)\njnp.array([[1-P01, P01],[P10, 1-P10]])\n\nE_distribution = jnp.ones(2)/2\nfor _ in range(100):\n E_distribution = jnp.matmul(E_distribution, jnp.array([[1-P01, P01],[P10, 1-P10]]))", "import numpy as np\nimport jax.numpy as jnp\nfrom jax.scipy.ndimage import map_coordinates\nimport warnings\nfrom jax import jit, partial, random, vmap\nfrom tqdm import tqdm\nwarnings.filterwarnings(\"ignore\")\nnp.printoptions(precision=2)\n\n# time line\nT_min = 0\nT_max = 60\nT_R = 45\n# discounting factor\nbeta = 1/(1+0.02)\n# utility function parameter \ngamma = 2\n# relative importance of housing consumption and non durable consumption \nalpha = 0.7\n# parameter used to calculate the housing consumption \nkappa = 0.3\n# uB associated parameter\nB = 2\n# social welfare after the unemployment\nwelfare = 20\n# tax rate before and after retirement\ntau_L = 0.2\ntau_R = 0.1\n# number of states S\nnS = 8\n# number of states e\nnE = 2\n# housing state\nnO = 2\n\n\n'''\n Economic state calibration \n'''\n\n# probability of survival\nPa = jnp.array(np.load(\"constant/prob.npy\"))\n# deterministic income\ndetEarning = jnp.array(np.load(\"constant/detEarningHigh.npy\"))\n# rescale the deterministic income\ndetEarning = detEarning \n####################################################################################### high skill feature\ndetEarning = jnp.concatenate([detEarning[:46]*1.2, detEarning[46:]-30])\n# Define transition matrix of economical states S\nPs = np.genfromtxt('constant/Ps.csv',delimiter=',')\nfix = (np.sum(Ps, axis = 1) - 1)\nfor i in range(nS):\n for j in range(nS):\n if Ps[i,j] - fix[i] > 0:\n Ps[i,j] = Ps[i,j] - fix[i]\n break\nPs = jnp.array(Ps)\n# The possible GDP growth, stock return, bond return\ngkfe = np.genfromtxt('constant/gkfe.csv',delimiter=',')\ngkfe = jnp.array(gkfe)\n# GDP growth depending on current S state\ngGDP = gkfe[:,0]/100\n# risk free interest rate depending on current S state \nr_b = gkfe[:,1]/100\n# stock return depending on current S state\nr_k = gkfe[:,2]/100\n# unemployment rate depending on current S state \nPe = gkfe[:,7:]/100\nPe = Pe[:,::-1]\n\n'''\n 401k related constants\n'''\n# some variables associated with 401k amount\nr_bar = 0.02\nPa = Pa[:T_max]\nNt = [np.sum(Pa[t:]) for t in range(T_min,T_max)]\n#Factor used to calculate the withdraw amount \nDn = [(r_bar*(1+r_bar)**N)/((1+r_bar)**N - 1) for N in Nt]\nDn[-1] = 1\nDn = jnp.array(Dn)\n# income fraction goes into 401k \nyi = 0.04\n\n\n\n'''\n housing related constants\n'''\n# variable associated with housing and mortgage \n# mortgage rate \nrh = 0.045\n# 
housing unit\nH = 1000\n# rent unit\nRl = 500\n# housing price constant \npt = 2*250/1000\n# 30k rent 1000 sf\npr = 2*10/1000 * 2 \n# constant cost \nc_h = 5\nc_s = H*pt*0.4\n# Dm is used to update the mortgage payment\nDm = [(1+rh) - rh*(1+rh)**(T_max - t)/((1+rh)**(T_max-t)-1) for t in range(T_min, T_max)]\nDm[-1] = 0\nDm = jnp.array(Dm)\n\n# 30 year mortgage\nMs = []\nM = H*pt*0.8\nm = M*(1+rh) - Dm[30]*M\nfor i in range(30, T_max):\n M = M*(1+rh) - m\n Ms.append(M)\nMs[-1] = 0\nMs = jnp.array(Ms)\n\n############################################################################################################ high skill feature \n# stock transaction fee\nKc = 0\n\n\n'''\n Discretize the state space\n Discretize the action space \n'''\n# actions dicretization(hp, cp, kp)\nnumGrid = 20\nAs = np.array(np.meshgrid(np.linspace(0.001,0.999,numGrid), np.linspace(0,1,numGrid), [0,1])).T.reshape(-1,3)\nAs = jnp.array(As)\n# wealth discretization \nws = np.linspace(0, 400, 20)\nns = np.linspace(0, 300, 10)\nms = np.linspace(0, 0.8*H*pt, 10)\n# scales associated with discretization\nscaleW = ws.max()/ws.size\nscaleN = ns.max()/ns.size\nscaleM = ms.max()/ms.size\n\n# dimentions of the state\ndim = (ws.size, ns.size, ms.size, nS, nE, nO)\ndimSize = len(dim)\n\nxgrid = np.array([[w,n,m,s,e,o] for w in ws\n for n in ns\n for m in ms\n for s in range(nS)\n for e in range(nE)\n for o in range(nO)]).reshape(dim + (dimSize,))\n\nXs = xgrid.reshape((np.prod(dim),dimSize))\nXs = jnp.array(Xs)\n\nVgrid = np.zeros(dim + (T_max,))\ncgrid = np.zeros(dim + (T_max,))\nbgrid = np.zeros(dim + (T_max,))\nkgrid = np.zeros(dim + (T_max,))\nhgrid = np.zeros(dim + (T_max,))\nagrid = np.zeros(dim + (T_max,))\n\n# start of function definitions\nnX = Xs.shape[0]\nnA = As.shape[0]\n\n#Define the earning function, which applies for both employment status and 8 econ states\n@partial(jit, static_argnums=(0,))\ndef y(t, x):\n '''\n x = [w,n,m,s,e,o]\n x = [0,1,2,3,4,5]\n '''\n if t <= T_R:\n return detEarning[t] * (1+gGDP[jnp.array(x[3], dtype = jnp.int8)]) * x[4] + (1-x[4]) * welfare\n else:\n return detEarning[-1]\n \n#Earning after tax and fixed by transaction in and out from 401k account \n@partial(jit, static_argnums=(0,))\ndef yAT(t,x):\n yt = y(t, x)\n if t <= T_R:\n # yi portion of the income will be put into the 401k if employed\n return (1-tau_L)*(yt * (1-yi))*x[4] + (1-x[4])*yt\n else:\n # t > T_R, n/discounting amount will be withdraw from the 401k \n return (1-tau_R)*yt + x[1]*Dn[t]\n \n#Define the evolution of the amount in 401k account \n@partial(jit, static_argnums=(0,))\ndef gn(t, x, r = r_bar):\n if t <= T_R:\n # if the person is employed, then yi portion of his income goes into 401k \n n_cur = x[1] + y(t, x) * yi * x[4]\n else:\n # t > T_R, n*Dn amount will be withdraw from the 401k \n n_cur = x[1] - x[1]*Dn[t]\n # the 401 grow with the rate r \n return (1+r)*n_cur\n\n#Define the utility function\n@jit\ndef u(c):\n return (jnp.log(c)/jnp.log(c))*(jnp.power(c, 1-gamma) - 1)/(1 - gamma)\n\n#Define the bequeath function, which is a function of bequeath wealth\n@jit\ndef uB(tb):\n return B*u(tb)\n\n#Reward function depends on the housing and non-housing consumption\n@jit\ndef R(x,a):\n '''\n Input:\n x = [w,n,m,s,e,o]\n x = [0,1,2,3,4,5]\n a = [c,b,k,h,action]\n a = [0,1,2,3,4]\n '''\n c = a[:,0]\n h = a[:,3]\n C = jnp.power(c, alpha) * jnp.power(h, 1-alpha)\n return u(C)\n\n\n# pc*qc / (ph*qh) = alpha/(1-alpha)\n@partial(jit, static_argnums=(0,))\ndef feasibleActions(t, x):\n # owner\n sell = As[:,2]\n payment = 
(x[2] > 0)*(((t<=T_R)*tau_L + (t>T_R)*tau_R)*x[2]*rh - m)\n \n# # if the agent is able to pay\n# if yAT(t,x) + x[0] + payment > 0:\n# sell = jnp.zeros(nA)\n# budget1 = yAT(t,x) + x[0] + (1-sell)*payment\n# # if the agent is not able to pay (force sell)\n# else:\n# sell = jnp.ones(nA)\n# budget1 = yAT(t,x) + x[0] + sell*(H*pt - x[2] - c_s)\n \n sell = (yAT(t,x) + x[0] + payment > 0)*jnp.zeros(nA) + (yAT(t,x) + x[0] + payment <= 0)*jnp.ones(nA)\n budget1 = yAT(t,x) + x[0] + (1-sell)*payment + sell*(H*pt - x[2] - c_s)\n \n # last term is the tax deduction of the interest portion of mortgage payment \n h = jnp.ones(nA)*H*(1+kappa)*(1-sell) + sell*jnp.clip(budget1*As[:,0]*(1-alpha)/pr, a_max = Rl)\n c = budget1*As[:,0]*(1-sell) + sell*(budget1*As[:,0] - h*pr)\n budget2 = budget1*(1-As[:,0])\n k = budget2*As[:,1]*(1-Kc)\n b = budget2*(1-As[:,1])\n owner_action = jnp.column_stack((c,b,k,h,sell)) \n \n \n # renter\n buy = As[:,2]*(t < 30)\n budget1 = yAT(t,x) + x[0] - buy*(H*pt*0.2 + c_h)\n h = jnp.clip(budget1*As[:,0]*(1-alpha)/pr, a_max = Rl)*(1-buy) + buy*jnp.ones(nA)*H*(1+kappa)\n c = (budget1*As[:,0] - h*pr)*(1-buy) + buy*budget1*As[:,0]\n budget2 = budget1*(1-As[:,0])\n k = budget2*As[:,1]*(1-Kc)\n b = budget2*(1-As[:,1])\n renter_action = jnp.column_stack((c,b,k,h,buy))\n \n actions = x[5]*owner_action + (1-x[5])*renter_action\n return actions\n\n@partial(jit, static_argnums=(0,))\ndef transition(t,a,x):\n '''\n Input:\n x = [w,n,m,s,e,o]\n x = [0,1,2,3,4,5]\n a = [c,b,k,h,action]\n a = [0,1,2,3,4]\n Output:\n w_next\n n_next\n m_next\n s_next\n e_next\n o_next\n \n prob_next\n '''\n nA = a.shape[0]\n s = jnp.array(x[3], dtype = jnp.int8)\n e = jnp.array(x[4], dtype = jnp.int8)\n # actions taken\n b = a[:,1]\n k = a[:,2]\n action = a[:,4]\n w_next = ((1+r_b[s])*b + jnp.outer(k,(1+r_k)).T).T.flatten().repeat(nE)\n n_next = gn(t, x)*jnp.ones(w_next.size)\n s_next = jnp.tile(jnp.arange(nS),nA).repeat(nE)\n e_next = jnp.column_stack((e.repeat(nA*nS),(1-e).repeat(nA*nS))).flatten()\n # job status changing probability and econ state transition probability\n pe = Pe[s, e]\n ps = jnp.tile(Ps[s], nA)\n prob_next = jnp.column_stack(((1-pe)*ps,pe*ps)).flatten()\n \n # owner\n m_next_own = ((1-action)*jnp.clip(x[2]*(1+rh) - m, a_min = 0)).repeat(nS*nE)\n o_next_own = (x[5] - action).repeat(nS*nE)\n # renter\n m_next_rent = (action*H*pt*0.8).repeat(nS*nE)\n o_next_rent = action.repeat(nS*nE)\n \n m_next = x[5] * m_next_own + (1-x[5]) * m_next_rent\n o_next = x[5] * o_next_own + (1-x[5]) * o_next_rent \n return jnp.column_stack((w_next,n_next,m_next,s_next,e_next,o_next,prob_next))\n\n# used to calculate dot product\n@jit\ndef dotProduct(p_next, uBTB):\n return (p_next*uBTB).reshape((p_next.shape[0]//(nS*nE), (nS*nE))).sum(axis = 1)\n\n# define approximation of fit\n@jit\ndef fit(v, xp):\n return map_coordinates(v,jnp.vstack((xp[:,0]/scaleW,\n xp[:,1]/scaleN,\n xp[:,2]/scaleM,\n xp[:,3],\n xp[:,4],\n xp[:,5])),\n order = 1, mode = 'nearest')\n\n@partial(jit, static_argnums=(0,))\ndef V(t,V_next,x):\n '''\n x = [w,n,m,s,e,o]\n x = [0,1,2,3,4,5]\n xp:\n w_next 0\n n_next 1\n m_next 2\n s_next 3\n e_next 4\n o_next 5\n prob_next 6\n '''\n actions = feasibleActions(t,x)\n xp = transition(t,actions,x)\n # bequeath utility\n TB = xp[:,0]+x[1]*(1+r_bar)+xp[:,5]*(H*pt-x[2]*(1+rh)-25)\n bequeathU = uB(TB)\n if t == T_max-1:\n Q = R(x,actions) + beta * dotProduct(xp[:,6], bequeathU)\n else:\n Q = R(x,actions) + beta * dotProduct(xp[:,6], Pa[t]*fit(V_next, xp) + (1-Pa[t])*bequeathU)\n Q = jnp.nan_to_num(Q, nan = 
-jnp.inf)\n v = Q.max()\n cbkha = actions[Q.argmax()]\n return v, cbkha\n\n\n# calculate the stationary distribution of econ state and employment state\nS_distribution = jnp.ones(nS)/nS\nfor _ in range(100):\n S_distribution = jnp.matmul(S_distribution, Ps)\n \n#P(0,1)\nP01 = jnp.dot(Pe[:,0],S_distribution)\n#P(1,0)\nP10 = jnp.dot(Pe[:,1],S_distribution)\njnp.array([[1-P01, P01],[P10, 1-P10]])\n\nE_distribution = jnp.ones(2)/2\nfor _ in range(100):\n E_distribution = jnp.matmul(E_distribution, jnp.array([[1-P01, P01],[P10, 1-P10]]))\n \n \n# ############################################################################################# solving the model\n# for t in tqdm(range(T_max-1,T_min-1, -1)):\n# if t == T_max-1:\n# v,cbkha = vmap(partial(V,t,Vgrid[:,:,:,:,:,:,t]))(Xs)\n# else:\n# v,cbkha = vmap(partial(V,t,Vgrid[:,:,:,:,:,:,t+1]))(Xs)\n# Vgrid[:,:,:,:,:,:,t] = v.reshape(dim)\n# cgrid[:,:,:,:,:,:,t] = cbkha[:,0].reshape(dim)\n# bgrid[:,:,:,:,:,:,t] = cbkha[:,1].reshape(dim)\n# kgrid[:,:,:,:,:,:,t] = cbkha[:,2].reshape(dim)\n# hgrid[:,:,:,:,:,:,t] = cbkha[:,3].reshape(dim)\n# agrid[:,:,:,:,:,:,t] = cbkha[:,4].reshape(dim)\n \n# np.save(\"HighSkillWorker2\",Vgrid)" ]
[ [ "numpy.load", "numpy.array", "numpy.printoptions" ], [ "numpy.printoptions", "numpy.linspace", "numpy.genfromtxt", "numpy.prod", "numpy.load", "numpy.zeros", "numpy.sum" ], [ "numpy.printoptions", "numpy.linspace", "numpy.genfromtxt", "numpy.prod", "numpy.load", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rift-labs-developer/colour
[ "15112dbe824aab0f21447e0db4a046a28a06f43a", "15112dbe824aab0f21447e0db4a046a28a06f43a", "15112dbe824aab0f21447e0db4a046a28a06f43a", "15112dbe824aab0f21447e0db4a046a28a06f43a", "15112dbe824aab0f21447e0db4a046a28a06f43a", "15112dbe824aab0f21447e0db4a046a28a06f43a", "15112dbe824aab0f21447e0db4a046a28a06f43a" ]
[ "colour/quality/tm3018.py", "colour/colorimetry/tests/test_luminance.py", "colour/io/luts/tests/test__init__.py", "colour/contrast/barten1999.py", "colour/appearance/hunt.py", "colour/models/rgb/transfer_functions/rimm_romm_rgb.py", "colour/temperature/tests/test_krystek1985.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nANSI/IES TM-30-18 Colour Fidelity Index\n=======================================\n\nDefines the *ANSI/IES TM-30-18 Colour Fidelity Index* (CFI) computation\nobjects:\n\n- :class:`colour.quality.ColourQuality_Specification_ANSIIESTM3018`\n- :func:`colour.quality.colour_fidelity_index_ANSIIESTM3018`\n\nReferences\n----------\n- :cite:`ANSI2018` : ANSI, & IES Color Committee. (2018). ANSI/IES TM-30-18 -\n IES Method for Evaluating Light Source Color Rendition.\n ISBN:978-0-87995-379-9\n\"\"\"\n\nimport numpy as np\nfrom collections import namedtuple\n\nfrom colour.quality import colour_fidelity_index_CIE2017\nfrom colour.quality.cfi2017 import delta_E_to_R_f\nfrom colour.utilities import as_float_array, as_int\n\n\nclass ColourQuality_Specification_ANSIIESTM3018(\n namedtuple(\n 'ColourQuality_Specification_ANSIIESTM3018',\n ('name', 'sd_test', 'sd_reference', 'R_f', 'R_s', 'CCT', 'D_uv',\n 'colorimetry_data', 'R_g', 'bins', 'averages_test',\n 'averages_reference', 'average_norms', 'R_fs', 'R_cs', 'R_hs'))):\n \"\"\"\n Defines the *ANSI/IES TM-30-18 Colour Fidelity Index* (CFI) colour quality\n specification.\n\n Parameters\n ----------\n name : unicode\n Name of the test spectral distribution.\n sd_test : SpectralDistribution\n Spectral distribution of the tested illuminant.\n sd_reference : SpectralDistribution\n Spectral distribution of the reference illuminant.\n R_f : numeric\n *Colour Fidelity Index* (CFI) :math:`R_f`.\n R_s : list\n Individual *colour fidelity indexes* data for each sample.\n CCT : numeric\n Correlated colour temperature :math:`T_{cp}`.\n D_uv : numeric\n Distance from the Planckian locus :math:`\\\\Delta_{uv}`.\n colorimetry_data : tuple\n Colorimetry data for the test and reference computations.\n bins : list of list of int\n List of 16 lists, each containing the indexes of colour samples that\n lie in the respective hue bin.\n averages_test : ndarray, (16, 2)\n Averages of *CAM02-UCS* a', b' coordinates for each hue bin for test\n samples.\n averages_reference : ndarray, (16, 2)\n Averages for reference samples.\n average_norms : ndarray, (16,)\n Distance of averages for reference samples from the origin.\n R_fs : ndarray, (16,)\n Local colour fidelities for each hue bin.\n R_cs : ndarray, (16,)\n Local chromaticity shifts for each hue bin, in percents.\n R_hs : ndarray, (16,)\n Local hue shifts for each hue bin.\n \"\"\"\n\n\ndef colour_fidelity_index_ANSIIESTM3018(sd_test, additional_data=False):\n \"\"\"\n Returns the *ANSI/IES TM-30-18 Colour Fidelity Index* (CFI) :math:`R_f`\n of given spectral distribution.\n\n Parameters\n ----------\n sd_test : SpectralDistribution\n Test spectral distribution.\n additional_data : bool, optional\n Whether to output additional data.\n\n Returns\n -------\n numeric or ColourQuality_Specification_ANSIIESTM3018\n *ANSI/IES TM-30-18 Colour Fidelity Index* (CFI).\n\n References\n ----------\n :cite:`ANSI2018`\n\n Examples\n --------\n >>> from colour import SDS_ILLUMINANTS\n >>> sd = SDS_ILLUMINANTS['FL2']\n >>> colour_fidelity_index_ANSIIESTM3018(sd) # doctest: +ELLIPSIS\n 70.1208254...\n \"\"\"\n\n if not additional_data:\n return colour_fidelity_index_CIE2017(sd_test, False)\n\n specification = colour_fidelity_index_CIE2017(sd_test, True)\n\n # Setup bins based on where the reference a'b' points are located.\n bins = [[] for _i in range(16)]\n for i, sample in enumerate(specification.colorimetry_data[1]):\n bin_index = as_int(np.floor(sample.CAM.h / 22.5))\n bins[bin_index].append(i)\n\n # 
Per-bin a'b' averages.\n averages_test = np.empty([16, 2])\n averages_reference = np.empty([16, 2])\n for i in range(16):\n apbp_s = [\n specification.colorimetry_data[0][j].Jpapbp[[1, 2]]\n for j in bins[i]\n ]\n averages_test[i, :] = np.mean(apbp_s, axis=0)\n apbp_s = [\n specification.colorimetry_data[1][j].Jpapbp[[1, 2]]\n for j in bins[i]\n ]\n averages_reference[i, :] = np.mean(apbp_s, axis=0)\n\n # Gamut Index.\n R_g = 100 * (\n averages_area(averages_test) / averages_area(averages_reference))\n\n # Local colour fidelity indexes, i.e. 16 CFIs for each bin.\n bin_delta_E_s = [\n np.mean([specification.delta_E_s[bins[i]]]) for i in range(16)\n ]\n R_fs = delta_E_to_R_f(as_float_array(bin_delta_E_s))\n\n # Angles bisecting the hue bins.\n angles = (22.5 * np.arange(16) + 11.25) / 180 * np.pi\n cosines = np.cos(angles)\n sines = np.sin(angles)\n\n average_norms = np.linalg.norm(averages_reference, axis=1)\n a_deltas = averages_test[:, 0] - averages_reference[:, 0]\n b_deltas = averages_test[:, 1] - averages_reference[:, 1]\n\n # Local chromaticity shifts, multiplied by 100 to obtain percentages.\n R_cs = 100 * (a_deltas * cosines + b_deltas * sines) / average_norms\n\n # Local hue shifts.\n R_hs = (-a_deltas * sines + b_deltas * cosines) / average_norms\n\n return ColourQuality_Specification_ANSIIESTM3018(\n specification.name, sd_test, specification.sd_reference,\n specification.R_f, specification.R_s, specification.CCT,\n specification.D_uv, specification.colorimetry_data, R_g, bins,\n averages_test, averages_reference, average_norms, R_fs, R_cs, R_hs)\n\n\ndef averages_area(averages):\n \"\"\"\n Computes the area of the polygon formed by the hue bin averages.\n\n Parameters\n ----------\n averages : array_like, (n, 2)\n Hue bin averages.\n\n Returns\n -------\n float\n Area of the polygon.\n \"\"\"\n\n N = averages.shape[0]\n\n triangle_areas = np.empty(N)\n for i in range(N):\n u = averages[i, :]\n v = averages[(i + 1) % N, :]\n triangle_areas[i] = (u[0] * v[1] - u[1] * v[0]) / 2\n\n return np.sum(triangle_areas)\n", "# -*- coding: utf-8 -*-\n\"\"\"\nDefines the unit tests for the :mod:`colour.colorimetry.luminance` module.\n\"\"\"\n\nimport numpy as np\nimport unittest\n\nfrom colour.colorimetry import (\n luminance_Newhall1943, intermediate_luminance_function_CIE1976,\n luminance_CIE1976, luminance_ASTMD1535, luminance_Fairchild2010,\n luminance_Fairchild2011, luminance_Abebe2017)\nfrom colour.colorimetry.luminance import luminance\nfrom colour.utilities import domain_range_scale, ignore_numpy_errors\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = '[email protected]'\n__status__ = 'Production'\n\n__all__ = [\n 'TestLuminanceNewhall1943', 'TestLuminanceASTMD1535',\n 'TestIntermediateLuminanceFunctionCIE1976', 'TestLuminanceCIE1976',\n 'TestLuminanceFairchild2010', 'TestLuminanceFairchild2011',\n 'TestLuminanceAbebe2017', 'TestLuminance'\n]\n\n\nclass TestLuminanceNewhall1943(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.colorimetry.luminance.luminance_Newhall1943`\n definition unit tests methods.\n \"\"\"\n\n def test_luminance_Newhall1943(self):\n \"\"\"\n Tests :func:`colour.colorimetry.luminance.luminance_Newhall1943`\n definition.\n \"\"\"\n\n self.assertAlmostEqual(\n luminance_Newhall1943(4.08244375), 12.550078816731881, places=7)\n\n self.assertAlmostEqual(\n 
luminance_Newhall1943(5.39132685), 23.481252371310738, places=7)\n\n self.assertAlmostEqual(\n luminance_Newhall1943(2.97619312), 6.4514266875601924, places=7)\n\n def test_n_dimensional_luminance_Newhall1943(self):\n \"\"\"\n Tests :func:`colour.colorimetry.luminance.luminance_Newhall1943`\n definition n-dimensional arrays support.\n \"\"\"\n\n V = 4.08244375\n Y = luminance_Newhall1943(V)\n\n V = np.tile(V, 6)\n Y = np.tile(Y, 6)\n np.testing.assert_almost_equal(luminance_Newhall1943(V), Y, decimal=7)\n\n V = np.reshape(V, (2, 3))\n Y = np.reshape(Y, (2, 3))\n np.testing.assert_almost_equal(luminance_Newhall1943(V), Y, decimal=7)\n\n V = np.reshape(V, (2, 3, 1))\n Y = np.reshape(Y, (2, 3, 1))\n np.testing.assert_almost_equal(luminance_Newhall1943(V), Y, decimal=7)\n\n def test_domain_range_scale_luminance_Newhall1943(self):\n \"\"\"\n Tests :func:`colour.colorimetry.luminance.luminance_Newhall1943`\n definition domain and range scale support.\n \"\"\"\n\n Y = luminance_Newhall1943(4.08244375)\n\n d_r = (('reference', 1, 1), (1, 0.1, 0.01), (100, 10, 1))\n for scale, factor_a, factor_b in d_r:\n with domain_range_scale(scale):\n np.testing.assert_almost_equal(\n luminance_Newhall1943(4.08244375 * factor_a),\n Y * factor_b,\n decimal=7)\n\n @ignore_numpy_errors\n def test_nan_luminance_Newhall1943(self):\n \"\"\"\n Tests :func:`colour.colorimetry.luminance.luminance_Newhall1943`\n definition nan support.\n \"\"\"\n\n luminance_Newhall1943(\n np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))\n\n\nclass TestLuminanceASTMD1535(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.colorimetry.luminance.luminance_ASTMD1535`\n definition unit tests methods.\n \"\"\"\n\n def test_luminance_ASTMD1535(self):\n \"\"\"\n Tests :func:`colour.colorimetry.luminance.luminance_ASTMD1535`\n definition.\n \"\"\"\n\n self.assertAlmostEqual(\n luminance_ASTMD1535(4.08244375), 12.236342675366036, places=7)\n\n self.assertAlmostEqual(\n luminance_ASTMD1535(5.39132685), 22.893999867280378, places=7)\n\n self.assertAlmostEqual(\n luminance_ASTMD1535(2.97619312), 6.2902253509053132, places=7)\n\n def test_n_dimensional_luminance_ASTMD1535(self):\n \"\"\"\n Tests :func:`colour.colorimetry.luminance.luminance_ASTMD1535`\n definition n-dimensional arrays support.\n \"\"\"\n\n V = 4.08244375\n Y = luminance_ASTMD1535(V)\n\n V = np.tile(V, 6)\n Y = np.tile(Y, 6)\n np.testing.assert_almost_equal(luminance_ASTMD1535(V), Y, decimal=7)\n\n V = np.reshape(V, (2, 3))\n Y = np.reshape(Y, (2, 3))\n np.testing.assert_almost_equal(luminance_ASTMD1535(V), Y, decimal=7)\n\n V = np.reshape(V, (2, 3, 1))\n Y = np.reshape(Y, (2, 3, 1))\n np.testing.assert_almost_equal(luminance_ASTMD1535(V), Y, decimal=7)\n\n def test_domain_range_scale_luminance_ASTMD1535(self):\n \"\"\"\n Tests :func:`colour.colorimetry.luminance.luminance_ASTMD1535`\n definition domain and range scale support.\n \"\"\"\n\n Y = luminance_ASTMD1535(4.08244375)\n\n d_r = (('reference', 1, 1), (1, 0.1, 0.01), (100, 10, 1))\n for scale, factor_a, factor_b in d_r:\n with domain_range_scale(scale):\n np.testing.assert_almost_equal(\n luminance_ASTMD1535(4.08244375 * factor_a),\n Y * factor_b,\n decimal=7)\n\n @ignore_numpy_errors\n def test_nan_luminance_ASTMD1535(self):\n \"\"\"\n Tests :func:`colour.colorimetry.luminance.luminance_ASTMD1535`\n definition nan support.\n \"\"\"\n\n luminance_ASTMD1535(\n np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))\n\n\nclass TestIntermediateLuminanceFunctionCIE1976(unittest.TestCase):\n \"\"\"\n Defines 
:func:`colour.colorimetry.luminance.\\\nintermediate_luminance_function_CIE1976` definition unit tests methods.\n \"\"\"\n\n def test_intermediate_luminance_function_CIE1976(self):\n \"\"\"\n Tests :func:`colour.colorimetry.luminance.\\\nintermediate_luminance_function_CIE1976` definition.\n \"\"\"\n\n self.assertAlmostEqual(\n intermediate_luminance_function_CIE1976(0.495929964178047),\n 12.197225350000002,\n places=7)\n\n self.assertAlmostEqual(\n intermediate_luminance_function_CIE1976(0.613072093530391),\n 23.042767810000004,\n places=7)\n\n self.assertAlmostEqual(\n intermediate_luminance_function_CIE1976(0.394876333449113),\n 6.157200790000001,\n places=7)\n\n def test_n_dimensional_intermediate_luminance_function_CIE1976(self):\n \"\"\"\n Tests :func:`colour.colorimetry.luminance.\\\nintermediate_luminance_function_CIE1976` definition n-dimensional arrays\nsupport.\n \"\"\"\n\n f_Y_Y_n = 0.495929964178047\n Y = intermediate_luminance_function_CIE1976(f_Y_Y_n)\n\n f_Y_Y_n = np.tile(f_Y_Y_n, 6)\n Y = np.tile(Y, 6)\n np.testing.assert_almost_equal(\n intermediate_luminance_function_CIE1976(f_Y_Y_n), Y, decimal=7)\n\n f_Y_Y_n = np.reshape(f_Y_Y_n, (2, 3))\n Y = np.reshape(Y, (2, 3))\n np.testing.assert_almost_equal(\n intermediate_luminance_function_CIE1976(f_Y_Y_n), Y, decimal=7)\n\n f_Y_Y_n = np.reshape(f_Y_Y_n, (2, 3, 1))\n Y = np.reshape(Y, (2, 3, 1))\n np.testing.assert_almost_equal(\n intermediate_luminance_function_CIE1976(f_Y_Y_n), Y, decimal=7)\n\n def test_domain_range_scale_intermediate_luminance_function_CIE1976(self):\n \"\"\"\n Tests :func:`colour.colorimetry.luminance.\\\nintermediate_luminance_function_CIE1976` definition domain and range scale\nsupport.\n \"\"\"\n\n Y = intermediate_luminance_function_CIE1976(41.527875844653451, 100)\n\n for scale in ('reference', 1, 100):\n with domain_range_scale(scale):\n np.testing.assert_almost_equal(\n intermediate_luminance_function_CIE1976(\n 41.527875844653451, 100),\n Y,\n decimal=7)\n\n @ignore_numpy_errors\n def test_nan_intermediate_luminance_function_CIE1976(self):\n \"\"\"\n Tests :func:`colour.colorimetry.luminance.\\\nintermediate_luminance_function_CIE1976` definition nan support.\n \"\"\"\n\n intermediate_luminance_function_CIE1976(\n np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))\n\n\nclass TestLuminanceCIE1976(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.colorimetry.luminance.luminance_CIE1976` definition\n unit tests methods.\n \"\"\"\n\n def test_luminance_CIE1976(self):\n \"\"\"\n Tests :func:`colour.colorimetry.luminance.luminance_CIE1976`\n definition.\n \"\"\"\n\n self.assertAlmostEqual(\n luminance_CIE1976(41.527875844653451),\n 12.197225350000002,\n places=7)\n\n self.assertAlmostEqual(\n luminance_CIE1976(55.116362849525402),\n 23.042767810000004,\n places=7)\n\n self.assertAlmostEqual(\n luminance_CIE1976(29.805654680097106), 6.157200790000001, places=7)\n\n self.assertAlmostEqual(\n luminance_CIE1976(56.480581732417676, 50),\n 12.197225349999998,\n places=7)\n\n self.assertAlmostEqual(\n luminance_CIE1976(47.317620274162735, 75),\n 12.197225350000002,\n places=7)\n\n self.assertAlmostEqual(\n luminance_CIE1976(42.519930728120940, 95),\n 12.197225350000005,\n places=7)\n\n def test_n_dimensional_luminance_CIE1976(self):\n \"\"\"\n Tests :func:`colour.colorimetry.luminance.luminance_CIE1976`\n definition n-dimensional arrays support.\n \"\"\"\n\n L_star = 41.527875844653451\n Y = luminance_CIE1976(L_star)\n\n L_star = np.tile(L_star, 6)\n Y = np.tile(Y, 6)\n 
np.testing.assert_almost_equal(luminance_CIE1976(L_star), Y, decimal=7)\n\n L_star = np.reshape(L_star, (2, 3))\n Y = np.reshape(Y, (2, 3))\n np.testing.assert_almost_equal(luminance_CIE1976(L_star), Y, decimal=7)\n\n L_star = np.reshape(L_star, (2, 3, 1))\n Y = np.reshape(Y, (2, 3, 1))\n np.testing.assert_almost_equal(luminance_CIE1976(L_star), Y, decimal=7)\n\n def test_domain_range_scale_luminance_CIE1976(self):\n \"\"\"\n Tests :func:`colour.colorimetry.luminance.luminance_CIE1976`\n definition domain and range scale support.\n \"\"\"\n\n Y = luminance_CIE1976(41.527875844653451, 100)\n\n d_r = (('reference', 1), (1, 0.01), (100, 1))\n for scale, factor in d_r:\n with domain_range_scale(scale):\n np.testing.assert_almost_equal(\n luminance_CIE1976(41.527875844653451 * factor, 100),\n Y * factor,\n decimal=7)\n\n @ignore_numpy_errors\n def test_nan_luminance_CIE1976(self):\n \"\"\"\n Tests :func:`colour.colorimetry.luminance.luminance_CIE1976`\n definition nan support.\n \"\"\"\n\n luminance_CIE1976(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))\n\n\nclass TestLuminanceFairchild2010(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.colorimetry.luminance.luminance_Fairchild2010`\n definition unit tests methods.\n \"\"\"\n\n def test_luminance_Fairchild2010(self):\n \"\"\"\n Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2010`\n definition.\n \"\"\"\n\n self.assertAlmostEqual(\n luminance_Fairchild2010(31.996390226262736),\n 0.12197225350000002,\n places=7)\n\n self.assertAlmostEqual(\n luminance_Fairchild2010(60.203153682783302),\n 0.23042767809999998,\n places=7)\n\n self.assertAlmostEqual(\n luminance_Fairchild2010(11.836517240976489),\n 0.06157200790000001,\n places=7)\n\n self.assertAlmostEqual(\n luminance_Fairchild2010(24.424283249379986, 2.75),\n 0.12197225350000002,\n places=7)\n\n self.assertAlmostEqual(\n luminance_Fairchild2010(100.019986327374240),\n 1008.00000024,\n places=7)\n\n self.assertAlmostEqual(\n luminance_Fairchild2010(100.019999997090270),\n 100799.92312466,\n places=7)\n\n def test_n_dimensional_luminance_Fairchild2010(self):\n \"\"\"\n Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2010`\n definition n-dimensional arrays support.\n \"\"\"\n\n L_hdr = 31.996390226262736\n Y = luminance_Fairchild2010(L_hdr)\n\n L_hdr = np.tile(L_hdr, 6)\n Y = np.tile(Y, 6)\n np.testing.assert_almost_equal(\n luminance_Fairchild2010(L_hdr), Y, decimal=7)\n\n L_hdr = np.reshape(L_hdr, (2, 3))\n Y = np.reshape(Y, (2, 3))\n np.testing.assert_almost_equal(\n luminance_Fairchild2010(L_hdr), Y, decimal=7)\n\n L_hdr = np.reshape(L_hdr, (2, 3, 1))\n Y = np.reshape(Y, (2, 3, 1))\n np.testing.assert_almost_equal(\n luminance_Fairchild2010(L_hdr), Y, decimal=7)\n\n def test_domain_range_scale_luminance_Fairchild2010(self):\n \"\"\"\n Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2010`\n definition domain and range scale support.\n \"\"\"\n\n Y = luminance_Fairchild2010(31.996390226262736)\n\n d_r = (('reference', 1, 1), (1, 0.01, 1), (100, 1, 100))\n for scale, factor_a, factor_b in d_r:\n with domain_range_scale(scale):\n np.testing.assert_almost_equal(\n luminance_Fairchild2010(31.996390226262736 * factor_a),\n Y * factor_b,\n decimal=7)\n\n @ignore_numpy_errors\n def test_nan_luminance_Fairchild2010(self):\n \"\"\"\n Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2010`\n definition nan support.\n \"\"\"\n\n luminance_Fairchild2010(\n np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))\n\n\nclass 
TestLuminanceFairchild2011(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.colorimetry.luminance.luminance_Fairchild2011`\n definition unit tests methods.\n \"\"\"\n\n def test_luminance_Fairchild2011(self):\n \"\"\"\n Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2011`\n definition.\n \"\"\"\n\n self.assertAlmostEqual(\n luminance_Fairchild2011(51.852958445912506),\n 0.12197225350000007,\n places=7)\n\n self.assertAlmostEqual(\n luminance_Fairchild2011(65.275207956353853),\n 0.23042767809999998,\n places=7)\n\n self.assertAlmostEqual(\n luminance_Fairchild2011(39.818935510715917),\n 0.061572007900000038,\n places=7)\n\n self.assertAlmostEqual(\n luminance_Fairchild2011(0.13268968410139345, 2.75),\n 0.12197225350000002,\n places=7)\n\n self.assertAlmostEqual(\n luminance_Fairchild2011(234.72925681957565),\n 1008.00000000,\n places=7)\n\n self.assertAlmostEqual(\n luminance_Fairchild2011(245.57059778237573),\n 100800.00000000,\n places=7)\n\n def test_n_dimensional_luminance_Fairchild2011(self):\n \"\"\"\n Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2011`\n definition n-dimensional arrays support.\n \"\"\"\n\n L_hdr = 51.852958445912506\n Y = luminance_Fairchild2011(L_hdr)\n\n L_hdr = np.tile(L_hdr, 6)\n Y = np.tile(Y, 6)\n np.testing.assert_almost_equal(\n luminance_Fairchild2011(L_hdr), Y, decimal=7)\n\n L_hdr = np.reshape(L_hdr, (2, 3))\n Y = np.reshape(Y, (2, 3))\n np.testing.assert_almost_equal(\n luminance_Fairchild2011(L_hdr), Y, decimal=7)\n\n L_hdr = np.reshape(L_hdr, (2, 3, 1))\n Y = np.reshape(Y, (2, 3, 1))\n np.testing.assert_almost_equal(\n luminance_Fairchild2011(L_hdr), Y, decimal=7)\n\n def test_domain_range_scale_luminance_Fairchild2011(self):\n \"\"\"\n Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2011`\n definition domain and range scale support.\n \"\"\"\n\n Y = luminance_Fairchild2011(26.459509817572265)\n\n d_r = (('reference', 1, 1), (1, 0.01, 1), (100, 1, 100))\n for scale, factor_a, factor_b in d_r:\n with domain_range_scale(scale):\n np.testing.assert_almost_equal(\n luminance_Fairchild2011(26.459509817572265 * factor_a),\n Y * factor_b,\n decimal=7)\n\n @ignore_numpy_errors\n def test_nan_luminance_Fairchild2011(self):\n \"\"\"\n Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2011`\n definition nan support.\n \"\"\"\n\n luminance_Fairchild2011(\n np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))\n\n\nclass TestLuminanceAbebe2017(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.colorimetry.luminance.luminance_Abebe2017`\n definition unit tests methods.\n \"\"\"\n\n def test_luminance_Abebe2017(self):\n \"\"\"\n Tests :func:`colour.colorimetry.luminance.luminance_Abebe2017`\n definition.\n \"\"\"\n\n self.assertAlmostEqual(\n luminance_Abebe2017(0.486955571109229),\n 12.197225350000004,\n places=7)\n\n self.assertAlmostEqual(\n luminance_Abebe2017(0.474544792145434, method='Stevens'),\n 12.197225350000025,\n places=7)\n\n self.assertAlmostEqual(\n luminance_Abebe2017(0.286847428534793, 1000),\n 12.197225350000046,\n places=7)\n\n self.assertAlmostEqual(\n luminance_Abebe2017(0.192145492588158, 4000),\n 12.197225350000121,\n places=7)\n\n self.assertAlmostEqual(\n luminance_Abebe2017(0.170365211220992, 4000, method='Stevens'),\n 12.197225349999933,\n places=7)\n\n def test_n_dimensional_luminance_Abebe2017(self):\n \"\"\"\n Tests :func:`colour.colorimetry.luminance.luminance_Abebe2017`\n definition n-dimensional arrays support.\n \"\"\"\n\n L = 0.486955571109229\n Y = luminance_Abebe2017(L)\n\n 
L = np.tile(L, 6)\n Y = np.tile(Y, 6)\n np.testing.assert_almost_equal(luminance_Abebe2017(L), Y, decimal=7)\n\n L = np.reshape(L, (2, 3))\n Y = np.reshape(Y, (2, 3))\n np.testing.assert_almost_equal(luminance_Abebe2017(L), Y, decimal=7)\n\n L = np.reshape(L, (2, 3, 1))\n Y = np.reshape(Y, (2, 3, 1))\n np.testing.assert_almost_equal(luminance_Abebe2017(L), Y, decimal=7)\n\n def test_domain_range_scale_luminance_Abebe2017(self):\n \"\"\"\n Tests :func:`colour.colorimetry.luminance.luminance_Abebe2017`\n definition domain and range scale support.\n \"\"\"\n\n L = luminance_Abebe2017(0.486955571109229)\n\n d_r = (('reference', 1), (1, 1), (100, 1))\n for scale, factor in d_r:\n with domain_range_scale(scale):\n np.testing.assert_almost_equal(\n luminance_Abebe2017(0.486955571109229 * factor,\n 100 * factor),\n L * factor,\n decimal=7)\n\n @ignore_numpy_errors\n def test_nan_luminance_Abebe2017(self):\n \"\"\"\n Tests :func:`colour.colorimetry.luminance.luminance_Abebe2017`\n definition nan support.\n \"\"\"\n\n luminance_Abebe2017(\n *[np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan])] * 2)\n\n\nclass TestLuminance(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.colorimetry.luminance.luminance` definition unit\n tests methods.\n \"\"\"\n\n def test_domain_range_scale_luminance(self):\n \"\"\"\n Tests :func:`colour.colorimetry.luminance.luminance` definition\n domain and range scale support.\n \"\"\"\n\n m = ('Newhall 1943', 'ASTM D1535', 'CIE 1976', 'Fairchild 2010',\n 'Fairchild 2011', 'Abebe 2017')\n v = [luminance(41.527875844653451, method, Y_n=100) for method in m]\n\n d_r = (('reference', 1), (1, 0.01), (100, 1))\n for method, value in zip(m, v):\n for scale, factor in d_r:\n with domain_range_scale(scale):\n np.testing.assert_almost_equal(\n luminance(\n 41.527875844653451 * factor, method, Y_n=100),\n value * factor,\n decimal=7)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "# -*- coding: utf-8 -*-\n\"\"\"\nDefines the unit tests for the :mod:`colour.io.luts.__init__` module.\n\"\"\"\n\nimport numpy as np\nimport os\nimport shutil\nimport tempfile\nimport unittest\n\nfrom colour.io import LUTSequence, read_LUT, write_LUT\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = '[email protected]'\n__status__ = 'Production'\n\n__all__ = ['LUTS_DIRECTORY', 'TestReadLUT', 'TestWriteLUT']\n\nLUTS_DIRECTORY = os.path.join(os.path.dirname(__file__), 'resources')\n\n\nclass TestReadLUT(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.io.luts.__init__.read_LUT` definition unit tests\n methods.\n \"\"\"\n\n def test_read_LUT(self):\n \"\"\"\n Tests :func:`colour.io.luts.__init__.read_LUT` definition.\n \"\"\"\n\n LUT_1 = read_LUT(\n os.path.join(LUTS_DIRECTORY, 'sony_spi1d', 'eotf_sRGB_1D.spi1d'))\n\n np.testing.assert_almost_equal(\n LUT_1.table,\n np.array([\n -7.73990000e-03, 5.16000000e-04, 1.22181000e-02,\n 3.96819000e-02, 8.71438000e-02, 1.57439400e-01, 2.52950100e-01,\n 3.75757900e-01, 5.27729400e-01, 7.10566500e-01, 9.25840600e-01,\n 1.17501630e+00, 1.45946870e+00, 1.78049680e+00, 2.13933380e+00,\n 2.53715520e+00\n ]))\n self.assertEqual(LUT_1.name, 'eotf sRGB 1D')\n self.assertEqual(LUT_1.dimensions, 1)\n np.testing.assert_array_equal(LUT_1.domain, np.array([-0.1, 1.5]))\n self.assertEqual(LUT_1.size, 16)\n self.assertListEqual(\n LUT_1.comments,\n ['Generated by \"Colour 0.3.11\".', 
'\"colour.models.eotf_sRGB\".'])\n\n LUT_2 = read_LUT(\n os.path.join(LUTS_DIRECTORY, 'resolve_cube', 'LogC_Video.cube'))\n np.testing.assert_almost_equal(\n LUT_2[0].table,\n np.array([\n [0.00000000, 0.00000000, 0.00000000],\n [0.02708500, 0.02708500, 0.02708500],\n [0.06304900, 0.06304900, 0.06304900],\n [0.11314900, 0.11314900, 0.11314900],\n [0.18304900, 0.18304900, 0.18304900],\n [0.28981100, 0.28981100, 0.28981100],\n [0.41735300, 0.41735300, 0.41735300],\n [0.54523100, 0.54523100, 0.54523100],\n [0.67020500, 0.67020500, 0.67020500],\n [0.78963000, 0.78963000, 0.78963000],\n [0.88646800, 0.88646800, 0.88646800],\n [0.94549100, 0.94549100, 0.94549100],\n [0.97644900, 0.97644900, 0.97644900],\n [0.98924800, 0.98924800, 0.98924800],\n [0.99379700, 0.99379700, 0.99379700],\n [1.00000000, 1.00000000, 1.00000000],\n ]),\n )\n self.assertEqual(LUT_2[1].size, 4)\n\n def test_raise_exception_read_LUT(self):\n \"\"\"\n Tests :func:`colour.io.luts.__init__.read_LUT` definition raised\n exception.\n \"\"\"\n\n self.assertRaises(\n ValueError, read_LUT,\n os.path.join(LUTS_DIRECTORY, 'sony_spi1d',\n 'Exception_Raising.spi1d'))\n\n\nclass TestWriteLUT(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.io.luts.__init__.write_LUT` definition unit tests\n methods.\n \"\"\"\n\n def setUp(self):\n \"\"\"\n Initialises common tests attributes.\n \"\"\"\n\n self._temporary_directory = tempfile.mkdtemp()\n\n def tearDown(self):\n \"\"\"\n After tests actions.\n \"\"\"\n\n shutil.rmtree(self._temporary_directory)\n\n def test_write_LUT(self):\n \"\"\"\n Tests :func:`colour.io.luts.__init__.write_LUT` definition.\n \"\"\"\n\n LUT_1_r = read_LUT(\n os.path.join(LUTS_DIRECTORY, 'sony_spi1d', 'eotf_sRGB_1D.spi1d'))\n\n write_LUT(\n LUT_1_r,\n os.path.join(self._temporary_directory, 'eotf_sRGB_1D.spi1d'))\n\n LUT_1_t = read_LUT(\n os.path.join(self._temporary_directory, 'eotf_sRGB_1D.spi1d'))\n\n self.assertEqual(LUT_1_r, LUT_1_t)\n\n write_LUT(\n LUTSequence(LUT_1_r),\n os.path.join(self._temporary_directory, 'eotf_sRGB_1D.spi1d'))\n\n self.assertEqual(LUT_1_r, LUT_1_t)\n\n LUT_2_r = read_LUT(\n os.path.join(LUTS_DIRECTORY, 'resolve_cube',\n 'Three_Dimensional_Table_With_Shaper.cube'))\n\n write_LUT(\n LUT_2_r,\n os.path.join(self._temporary_directory,\n 'Three_Dimensional_Table_With_Shaper.cube'))\n\n LUT_2_t = read_LUT(\n os.path.join(self._temporary_directory,\n 'Three_Dimensional_Table_With_Shaper.cube'))\n\n self.assertEqual(LUT_2_r, LUT_2_t)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "# -*- coding: utf-8 -*-\n\"\"\"\nBarten (1999) Contrast Sensitivity Function\n===========================================\n\nDefines the *Barten (1999)* contrast sensitivity function:\n\n- :func:`colour.contrast.contrast_sensitivity_function_Barten1999`\n\nReferences\n----------\n- :cite:`Barten1999` : Barten, P. G. (1999). Contrast Sensitivity of the\n Human Eye and Its Effects on Image Quality. SPIE. doi:10.1117/3.353254\n- :cite:`Barten2003` : Barten, P. G. J. (2003). Formula for the contrast\n sensitivity of the human eye. In Y. Miyake & D. R. Rasmussen (Eds.),\n Proceedings of SPIE (Vol. 5294, pp. 231-238). doi:10.1117/12.537476\n- :cite:`Cowan2004` : Cowan, M., Kennel, G., Maier, T., & Walker, B. (2004).\n Contrast Sensitivity Experiment to Determine the Bit Depth for Digital\n Cinema. SMPTE Motion Imaging Journal, 113(9), 281-292. doi:10.5594/j11549\n- :cite:`InternationalTelecommunicationUnion2015` : International\n Telecommunication Union. (2015). 
Report ITU-R BT.2246-4 - The present\n state of ultra-high definition television BT Series Broadcasting service\n (Vol. 5, pp. 1-92).\n https://www.itu.int/dms_pub/itu-r/opb/rep/R-REP-BT.2246-4-2015-PDF-E.pdf\n\"\"\"\n\nimport numpy as np\n\nfrom colour.utilities import as_float_array, as_float\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = '[email protected]'\n__status__ = 'Production'\n\n__all__ = [\n 'optical_MTF_Barten1999', 'pupil_diameter_Barten1999', 'sigma_Barten1999',\n 'retinal_illuminance_Barten1999', 'maximum_angular_size_Barten1999',\n 'contrast_sensitivity_function_Barten1999'\n]\n\n\ndef optical_MTF_Barten1999(u, sigma=0.01):\n \"\"\"\n Returns the optical modulation transfer function (MTF) :math:`M_{opt}` of\n the eye using *Barten (1999)* method.\n\n Parameters\n ----------\n u : numeric or array_like\n Spatial frequency :math:`u`, the cycles per degree.\n sigma : numeric or array_like, optional\n Standard deviation :math:`\\\\sigma` of the line-spread function\n resulting from the convolution of the different elements of the\n convolution process.\n\n Returns\n -------\n numeric or array_like\n Optical modulation transfer function (MTF) :math:`M_{opt}` of the eye.\n\n References\n ----------\n :cite:`Barten1999`, :cite:`Barten2003`, :cite:`Cowan2004`,\n :cite:`InternationalTelecommunicationUnion2015`,\n\n Examples\n --------\n >>> optical_MTF_Barten1999(4, 0.01) # doctest: +ELLIPSIS\n 0.9689107...\n \"\"\"\n\n u = as_float_array(u)\n sigma = as_float_array(sigma)\n\n return as_float(np.exp(-2 * np.pi ** 2 * sigma ** 2 * u ** 2))\n\n\ndef pupil_diameter_Barten1999(L, X_0=60, Y_0=None):\n \"\"\"\n Returns the pupil diameter for given luminance and object or stimulus\n angular size using *Barten (1999)* method.\n\n Parameters\n ----------\n L : numeric or array_like\n Average luminance :math:`L` in :math:`cd/m^2`.\n X_0 : numeric or array_like, optional\n Angular size of the object :math:`X_0` in degrees in the x direction.\n Y_0 : numeric or array_like, optional\n Angular size of the object :math:`X_0` in degrees in the y direction.\n\n References\n ----------\n :cite:`Barten1999`, :cite:`Barten2003`, :cite:`Cowan2004`,\n :cite:`InternationalTelecommunicationUnion2015`,\n\n Examples\n --------\n >>> pupil_diameter_Barten1999(100, 60, 60) # doctest: +ELLIPSIS\n 2.0777571...\n \"\"\"\n\n L = as_float_array(L)\n X_0 = as_float_array(X_0)\n Y_0 = X_0 if Y_0 is None else as_float_array(Y_0)\n\n return as_float(5 - 3 * np.tanh(0.4 * np.log(L * X_0 * Y_0 / 40 ** 2)))\n\n\ndef sigma_Barten1999(sigma_0=0.5 / 60, C_ab=0.08 / 60, d=2.1):\n \"\"\"\n Returns the standard deviation :math:`\\\\sigma` of the line-spread function\n resulting from the convolution of the different elements of the convolution\n process using *Barten (1999)* method.\n\n The :math:`\\\\sigma` quantity depends on the pupil diameter :math:`d` of the\n eye lens. 
For very small pupil diameters, :math:`\\\\sigma` increases\n inversely proportionally with pupil size because of diffraction, and for\n large pupil diameters, :math:`\\\\sigma` increases about linearly with pupil\n size because of chromatic aberration and others aberrations.\n\n Parameters\n ----------\n sigma_0 : numeric or array_like, optional\n Constant :math:`\\\\sigma_{0}` in degrees.\n C_ab : numeric or array_like, optional\n Spherical aberration of the eye :math:`C_{ab}` in\n :math:`degrees\\\\div mm`.\n d : numeric or array_like, optional\n Pupil diameter :math:`d` in millimeters.\n\n Returns\n -------\n ndarray\n Standard deviation :math:`\\\\sigma` of the line-spread function\n resulting from the convolution of the different elements of the\n convolution process.\n\n Warnings\n --------\n This definition expects :math:`\\\\sigma_{0}` and :math:`C_{ab}` to be given\n in degrees and :math:`degrees\\\\div mm` respectively. However, in the\n literature, the values for :math:`\\\\sigma_{0}` and\n :math:`C_{ab}` are usually given in :math:`arc min` and\n :math:`arc min\\\\div mm` respectively, thus they need to be divided by 60.\n\n References\n ----------\n :cite:`Barten1999`, :cite:`Barten2003`, :cite:`Cowan2004`,\n :cite:`InternationalTelecommunicationUnion2015`,\n\n Examples\n --------\n >>> sigma_Barten1999(0.5 / 60, 0.08 / 60, 2.1) # doctest: +ELLIPSIS\n 0.0087911...\n \"\"\"\n\n sigma_0 = as_float_array(sigma_0)\n C_ab = as_float_array(C_ab)\n d = as_float_array(d)\n\n return as_float(np.sqrt(sigma_0 ** 2 + (C_ab * d) ** 2))\n\n\ndef retinal_illuminance_Barten1999(\n L, d=2.1, apply_stiles_crawford_effect_correction=True):\n \"\"\"\n Returns the retinal illuminance :math:`E` in Trolands for given average\n luminance :math:`L` and pupil diameter :math:`d` using *Barten (1999)*\n method.\n\n Parameters\n ----------\n L : numeric or array_like\n Average luminance :math:`L` in :math:`cd/m^2`.\n d : numeric or array_like, optional\n Pupil diameter :math:`d` in millimeters.\n apply_stiles_crawford_effect_correction : bool, optional\n Whether to apply the correction for *Stiles-Crawford* effect.\n\n Returns\n -------\n ndarray\n Retinal illuminance :math:`E` in Trolands.\n\n Notes\n -----\n - This definition is for use with photopic viewing conditions and thus\n corrects for the Stiles-Crawford effect by default, i.e. 
directional\n sensitivity of the cone cells with lower response of cone cells\n receiving light from the edge of the pupil.\n\n References\n ----------\n :cite:`Barten1999`, :cite:`Barten2003`, :cite:`Cowan2004`,\n :cite:`InternationalTelecommunicationUnion2015`,\n\n Examples\n --------\n >>> retinal_illuminance_Barten1999(100, 2.1) # doctest: +ELLIPSIS\n 330.4115803...\n >>> retinal_illuminance_Barten1999(100, 2.1, False) # doctest: +ELLIPSIS\n 346.3605900...\n \"\"\"\n\n d = as_float_array(d)\n L = as_float_array(L)\n\n E = (np.pi * d ** 2) / 4 * L\n\n if apply_stiles_crawford_effect_correction:\n E *= (1 - (d / 9.7) ** 2 + (d / 12.4) ** 4)\n\n return E\n\n\ndef maximum_angular_size_Barten1999(u, X_0=60, X_max=12, N_max=15):\n \"\"\"\n Returns the maximum angular size :math:`X` of the object considered using\n *Barten (1999)* method.\n\n Parameters\n ----------\n u : numeric\n Spatial frequency :math:`u`, the cycles per degree.\n X_0 : numeric or array_like, optional\n Angular size :math:`X_0` in degrees of the object in the x direction.\n X_max : numeric or array_like, optional\n Maximum angular size :math:`X_{max}` in degrees of the integration\n area in the x direction.\n N_max : numeric or array_like, optional\n Maximum number of cycles :math:`N_{max}` over which the eye can\n integrate the information.\n\n Returns\n -------\n numeric or ndarray\n Maximum angular size :math:`X` of the object considered.\n\n References\n ----------\n :cite:`Barten1999`, :cite:`Barten2003`, :cite:`Cowan2004`,\n :cite:`InternationalTelecommunicationUnion2015`,\n\n Examples\n --------\n >>> maximum_angular_size_Barten1999(4) # doctest: +ELLIPSIS\n 3.5729480...\n \"\"\"\n\n u = as_float_array(u)\n X_0 = as_float_array(X_0)\n X_max = as_float_array(X_max)\n N_max = as_float_array(N_max)\n\n return (1 / X_0 ** 2 + 1 / X_max ** 2 + u ** 2 / N_max ** 2) ** -0.5\n\n\ndef contrast_sensitivity_function_Barten1999(u,\n sigma=sigma_Barten1999(\n 0.5 / 60, 0.08 / 60, 2.1),\n k=3.0,\n T=0.1,\n X_0=60,\n Y_0=None,\n X_max=12,\n Y_max=None,\n N_max=15,\n n=0.03,\n p=1.2274 * 10 ** 6,\n E=retinal_illuminance_Barten1999(\n 20, 2.1),\n phi_0=3 * 10 ** -8,\n u_0=7):\n \"\"\"\n Returns the contrast sensitivity :math:`S` of the human eye according to\n the contrast sensitivity function (CSF) described by *Barten (1999)*.\n\n Contrast sensitivity is defined as the inverse of the modulation threshold\n of a sinusoidal luminance pattern. The modulation threshold of this pattern\n is generally defined by 50% probability of detection. The contrast\n sensitivity function or CSF gives the contrast sensitivity as a function of\n spatial frequency. In the CSF, the spatial frequency is expressed in\n angular units with respect to the eye. 
It reaches a maximum between 1 and\n 10 cycles per degree with a fall off at higher and lower spatial\n frequencies.\n\n Parameters\n ----------\n u : numeric\n Spatial frequency :math:`u`, the cycles per degree.\n sigma : numeric or array_like, optional\n Standard deviation :math:`\\\\sigma` of the line-spread function\n resulting from the convolution of the different elements of the\n convolution process.\n k : numeric or array_like, optional\n Signal-to-noise (SNR) ratio :math:`k`.\n T : numeric or array_like, optional\n Integration time :math:`T` in seconds of the eye.\n X_0 : numeric or array_like, optional\n Angular size :math:`X_0` in degrees of the object in the x direction.\n Y_0 : numeric or array_like, optional\n Angular size :math:`Y_0` in degrees of the object in the y direction.\n X_max : numeric or array_like, optional\n Maximum angular size :math:`X_{max}` in degrees of the integration\n area in the x direction.\n Y_max : numeric or array_like, optional\n Maximum angular size :math:`Y_{max}` in degrees of the integration\n area in the y direction.\n N_max : numeric or array_like, optional\n Maximum number of cycles :math:`N_{max}` over which the eye can\n integrate the information.\n n : numeric or array_like, optional\n Quantum efficiency of the eye :math:`n`.\n p : numeric or array_like, optional\n Photon conversion factor :math:`p` in\n :math:`photons\\\\div seconds\\\\div degrees^2\\\\div Trolands` that\n depends on the light source.\n E : numeric or array_like, optional\n Retinal illuminance :math:`E` in Trolands.\n phi_0 : numeric or array_like, optional\n Spectral density :math:`\\\\phi_0` in :math:`seconds degrees^2` of the\n neural noise.\n u_0 : numeric or array_like, optional\n Spatial frequency :math:`u_0` in :math:`cycles\\\\div degrees` above\n which the lateral inhibition ceases.\n\n Returns\n -------\n ndarray\n Contrast sensitivity :math:`S`.\n\n Warnings\n --------\n This definition expects :math:`\\\\sigma_{0}` and :math:`C_{ab}` used in the\n computation of :math:`\\\\sigma` to be given in degrees and\n :math:`degrees\\\\div mm` respectively. However, in the literature, the\n values for :math:`\\\\sigma_{0}` and :math:`C_{ab}` are usually given in\n :math:`arc min` and :math:`arc min\\\\div mm` respectively, thus they need to\n be divided by 60.\n\n Notes\n -----\n - The formula holds for bilateral viewing and for equal dimensions of\n the object in x and y direction. For monocular vision, the contrast\n sensitivity is a factor :math:`\\\\sqrt{2}` smaller.\n - *Barten (1999)* CSF default values for the :math:`k`,\n :math:`\\\\sigma_{0}`, :math:`C_{ab}`, :math:`T`, :math:`X_{max}`,\n :math:`N_{max}`, :math:`n`, :math:`\\\\phi_{0}` and :math:`u_0` constants\n are valid for a standard observer with good vision and with an age\n between 20 and 30 years.\n - The other constants have been filled using reference data from\n *Figure 31* in :cite:`InternationalTelecommunicationUnion2015` but\n must be adapted to the current use case.\n - The product of :math:`u`, the cycles per degree, and :math:`X_0`,\n the number of degrees, gives the number of cycles :math:`P_c` in a\n pattern. 
Therefore, :math:`X_0` can be made a variable dependent on\n :math:`u` such as :math:`X_0 = P_c / u`.\n\n References\n ----------\n :cite:`Barten1999`, :cite:`Barten2003`, :cite:`Cowan2004`,\n :cite:`InternationalTelecommunicationUnion2015`,\n\n Examples\n --------\n >>> contrast_sensitivity_function_Barten1999(4) # doctest: +ELLIPSIS\n 360.8691122...\n\n Reproducing *Figure 31* in \\\n:cite:`InternationalTelecommunicationUnion2015` illustrating the minimum\n detectable contrast according to *Barten (1999)* model with the assumed\n conditions for UHDTV applications. The minimum detectable contrast\n :math:`MDC` is then defined as follows::\n\n :math:`MDC = 1 / CSF * 2 * (1 / 1.27)`\n\n where :math:`2` is used for the conversion from modulation to contrast and\n :math:`1 / 1.27` is used for the conversion from sinusoidal to rectangular\n waves.\n\n >>> from scipy.optimize import fmin\n >>> settings_BT2246 = {\n ... 'k': 3.0,\n ... 'T': 0.1,\n ... 'X_max': 12,\n ... 'N_max': 15,\n ... 'n': 0.03,\n ... 'p': 1.2274 * 10 ** 6,\n ... 'phi_0': 3 * 10 ** -8,\n ... 'u_0': 7,\n ... }\n >>>\n >>> def maximise_spatial_frequency(L):\n ... maximised_spatial_frequency = []\n ... for L_v in L:\n ... X_0 = 60\n ... d = pupil_diameter_Barten1999(L_v, X_0)\n ... sigma = sigma_Barten1999(0.5 / 60, 0.08 / 60, d)\n ... E = retinal_illuminance_Barten1999(L_v, d, True)\n ... maximised_spatial_frequency.append(\n ... fmin(lambda x: (\n ... -contrast_sensitivity_function_Barten1999(\n ... u=x,\n ... sigma=sigma,\n ... X_0=X_0,\n ... E=E,\n ... **settings_BT2246)\n ... ), 0, disp=False)[0])\n ... return as_float(np.array(maximised_spatial_frequency))\n >>>\n >>> L = np.logspace(np.log10(0.01), np.log10(100), 10)\n >>> X_0 = Y_0 = 60\n >>> d = pupil_diameter_Barten1999(L, X_0, Y_0)\n >>> sigma = sigma_Barten1999(0.5 / 60, 0.08 / 60, d)\n >>> E = retinal_illuminance_Barten1999(L, d)\n >>> u = maximise_spatial_frequency(L)\n >>> (1 / contrast_sensitivity_function_Barten1999(\n ... u=u, sigma=sigma, E=E, X_0=X_0, Y_0=Y_0, **settings_BT2246)\n ... * 2 * (1/ 1.27))\n ... # doctest: +ELLIPSIS\n array([ 0.0207396..., 0.0133019..., 0.0089256..., 0.0064202..., \\\n0.0050275...,\n 0.0041933..., 0.0035573..., 0.0030095..., 0.0025803..., \\\n0.0022897...])\n \"\"\"\n\n u = as_float_array(u)\n k = as_float_array(k)\n T = as_float_array(T)\n X_0 = as_float_array(X_0)\n Y_0 = X_0 if Y_0 is None else as_float_array(Y_0)\n X_max = as_float_array(X_max)\n Y_max = X_max if Y_max is None else as_float_array(Y_max)\n N_max = as_float_array(N_max)\n n = as_float_array(n)\n p = as_float_array(p)\n E = as_float_array(E)\n phi_0 = as_float_array(phi_0)\n u_0 = as_float_array(u_0)\n\n M_opt = optical_MTF_Barten1999(u, sigma)\n\n M_as = 1 / (maximum_angular_size_Barten1999(u, X_0, X_max, N_max) *\n maximum_angular_size_Barten1999(u, Y_0, Y_max, N_max))\n\n S = (M_opt / k) / np.sqrt(2 / T * M_as * (1 / (n * p * E) + phi_0 /\n (1 - np.exp(-(u / u_0) ** 2))))\n\n return as_float(S)\n", "# -*- coding: utf-8 -*-\n\"\"\"\nHunt Colour Appearance Model\n============================\n\nDefines the *Hunt* colour appearance model objects:\n\n- :class:`colour.appearance.InductionFactors_Hunt`\n- :attr:`colour.VIEWING_CONDITIONS_HUNT`\n- :class:`colour.CAM_Specification_Hunt`\n- :func:`colour.XYZ_to_Hunt`\n\nReferences\n----------\n- :cite:`Fairchild2013u` : Fairchild, M. D. (2013). The Hunt Model. In Color\n Appearance Models (3rd ed., pp. 5094-5556). Wiley. ISBN:B00DAYO8E2\n- :cite:`Hunt2004b` : Hunt, R. W. G. (2004). 
The Reproduction of Colour (6th\n ed.). John Wiley & Sons, Ltd. doi:10.1002/0470024275\n\"\"\"\n\nimport numpy as np\nfrom collections import namedtuple\n\nfrom colour.algebra import spow, vector_dot\nfrom colour.utilities import (CaseInsensitiveMapping, as_float_array,\n from_range_degrees, full, ones, to_domain_100,\n tsplit, tstack, usage_warning, zeros)\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = '[email protected]'\n__status__ = 'Production'\n\n__all__ = [\n 'InductionFactors_Hunt', 'VIEWING_CONDITIONS_HUNT',\n 'HUE_DATA_FOR_HUE_QUADRATURE', 'MATRIX_XYZ_TO_HPE', 'MATRIX_HPE_TO_XYZ',\n 'CAM_ReferenceSpecification_Hunt', 'CAM_Specification_Hunt', 'XYZ_to_Hunt',\n 'luminance_level_adaptation_factor', 'illuminant_scotopic_luminance',\n 'XYZ_to_rgb', 'f_n', 'chromatic_adaptation',\n 'adjusted_reference_white_signals', 'achromatic_post_adaptation_signal',\n 'colour_difference_signals', 'hue_angle', 'eccentricity_factor',\n 'low_luminance_tritanopia_factor', 'yellowness_blueness_response',\n 'redness_greenness_response', 'overall_chromatic_response',\n 'saturation_correlate', 'achromatic_signal', 'brightness_correlate',\n 'lightness_correlate', 'chroma_correlate', 'colourfulness_correlate'\n]\n\n\nclass InductionFactors_Hunt(\n namedtuple('InductionFactors_Hunt', ('N_c', 'N_b', 'N_cb', 'N_bb'))):\n \"\"\"\n *Hunt* colour appearance model induction factors.\n\n Parameters\n ----------\n N_c : numeric or array_like\n Chromatic surround induction factor :math:`N_c`.\n N_b : numeric or array_like\n *Brightness* surround induction factor :math:`N_b`.\n N_cb : numeric or array_like, optional\n Chromatic background induction factor :math:`N_{cb}`, approximated\n using tristimulus values :math:`Y_w` and :math:`Y_b` of\n respectively the reference white and the background if not specified.\n N_bb : numeric or array_like, optional\n *Brightness* background induction factor :math:`N_{bb}`, approximated\n using tristimulus values :math:`Y_w` and :math:`Y_b` of\n respectively the reference white and the background if not specified.\n\n References\n ----------\n :cite:`Fairchild2013u`, :cite:`Hunt2004b`\n \"\"\"\n\n def __new__(cls, N_c, N_b, N_cb=None, N_bb=None):\n \"\"\"\n Returns a new instance of the\n :class:`colour.appearance.InductionFactors_Hunt` class.\n \"\"\"\n\n return super(InductionFactors_Hunt, cls).__new__(\n cls, N_c, N_b, N_cb, N_bb)\n\n\nVIEWING_CONDITIONS_HUNT = CaseInsensitiveMapping({\n 'Small Areas, Uniform Background & Surrounds':\n InductionFactors_Hunt(1, 300),\n 'Normal Scenes':\n InductionFactors_Hunt(1, 75),\n 'Television & CRT, Dim Surrounds':\n InductionFactors_Hunt(1, 25),\n 'Large Transparencies On Light Boxes':\n InductionFactors_Hunt(0.7, 25),\n 'Projected Transparencies, Dark Surrounds':\n InductionFactors_Hunt(0.7, 10)\n})\nVIEWING_CONDITIONS_HUNT.__doc__ = \"\"\"\nReference *Hunt* colour appearance model viewing conditions.\n\nReferences\n----------\n:cite:`Fairchild2013u`, :cite:`Hunt2004b`\n\nVIEWING_CONDITIONS_HUNT : CaseInsensitiveMapping\n **{'Small Areas, Uniform Background & Surrounds',\n 'Normal Scenes',\n 'Television & CRT, Dim Surrounds',\n 'Large Transparencies On Light Boxes',\n 'Projected Transparencies, Dark Surrounds'}**\n\nAliases:\n\n- 'small_uniform': 'Small Areas, Uniform Background & Surrounds'\n- 'normal': 'Normal Scenes'\n- 'tv_dim': 'Television & CRT, Dim 
Surrounds'\n- 'light_boxes': 'Large Transparencies On Light Boxes'\n- 'projected_dark': 'Projected Transparencies, Dark Surrounds'\n\n\"\"\"\nVIEWING_CONDITIONS_HUNT['small_uniform'] = (\n VIEWING_CONDITIONS_HUNT['Small Areas, Uniform Background & Surrounds'])\nVIEWING_CONDITIONS_HUNT['normal'] = (VIEWING_CONDITIONS_HUNT['Normal Scenes'])\nVIEWING_CONDITIONS_HUNT['tv_dim'] = (\n VIEWING_CONDITIONS_HUNT['Television & CRT, Dim Surrounds'])\nVIEWING_CONDITIONS_HUNT['light_boxes'] = (\n VIEWING_CONDITIONS_HUNT['Large Transparencies On Light Boxes'])\nVIEWING_CONDITIONS_HUNT['projected_dark'] = (\n VIEWING_CONDITIONS_HUNT['Projected Transparencies, Dark Surrounds'])\n\nHUE_DATA_FOR_HUE_QUADRATURE = {\n 'h_s': np.array([20.14, 90.00, 164.25, 237.53]),\n 'e_s': np.array([0.8, 0.7, 1.0, 1.2])\n}\n\nMATRIX_XYZ_TO_HPE = np.array([\n [0.38971, 0.68898, -0.07868],\n [-0.22981, 1.18340, 0.04641],\n [0.00000, 0.00000, 1.00000],\n])\n\"\"\"\n*Hunt* colour appearance model *CIE XYZ* tristimulus values to\n*Hunt-Pointer-Estevez* :math:`\\\\rho\\\\gamma\\\\beta` colourspace matrix.\n\nMATRIX_XYZ_TO_HPE : array_like, (3, 3)\n\"\"\"\n\nMATRIX_HPE_TO_XYZ = np.linalg.inv(MATRIX_XYZ_TO_HPE)\n\"\"\"\n*Hunt* colour appearance model *Hunt-Pointer-Estevez*\n:math:`\\\\rho\\\\gamma\\\\beta` colourspace to *CIE XYZ* tristimulus values matrix.\n\nMATRIX_HPE_TO_XYZ : array_like, (3, 3)\n\"\"\"\n\n\nclass CAM_ReferenceSpecification_Hunt(\n namedtuple('CAM_ReferenceSpecification_Hunt',\n ('J', 'C_94', 'h_S', 's', 'Q', 'M_94', 'H', 'H_C'))):\n \"\"\"\n Defines the *Hunt* colour appearance model reference specification.\n\n This specification has field names consistent with *Fairchild (2013)*\n reference.\n\n Parameters\n ----------\n J : numeric or array_like\n Correlate of *Lightness* :math:`J`.\n C_94 : numeric or array_like\n Correlate of *chroma* :math:`C_94`.\n h_S : numeric or array_like\n *Hue* angle :math:`h_S` in degrees.\n s : numeric or array_like\n Correlate of *saturation* :math:`s`.\n Q : numeric or array_like\n Correlate of *brightness* :math:`Q`.\n M_94 : numeric or array_like\n Correlate of *colourfulness* :math:`M_94`.\n H : numeric or array_like\n *Hue* :math:`h` quadrature :math:`H`.\n H_C : numeric or array_like\n *Hue* :math:`h` composition :math:`H_C`.\n\n References\n ----------\n :cite:`Fairchild2013u`, :cite:`Hunt2004b`\n \"\"\"\n\n\nclass CAM_Specification_Hunt(\n namedtuple('CAM_Specification_Hunt',\n ('J', 'C', 'h', 's', 'Q', 'M', 'H', 'HC'))):\n \"\"\"\n Defines the *Hunt* colour appearance model specification.\n\n This specification has field names consistent with the remaining colour\n appearance models in :mod:`colour.appearance` but diverge from\n *Fairchild (2013)* reference.\n\n Parameters\n ----------\n J : numeric or array_like\n Correlate of *Lightness* :math:`J`.\n C : numeric or array_like\n Correlate of *chroma* :math:`C_94`.\n h : numeric or array_like\n *Hue* angle :math:`h_S` in degrees.\n s : numeric or array_like\n Correlate of *saturation* :math:`s`.\n Q : numeric or array_like\n Correlate of *brightness* :math:`Q`.\n M : numeric or array_like\n Correlate of *colourfulness* :math:`M_94`.\n H : numeric or array_like\n *Hue* :math:`h` quadrature :math:`H`.\n HC : numeric or array_like\n *Hue* :math:`h` composition :math:`H_C`.\n\n Notes\n -----\n - This specification is the one used in the current model implementation.\n\n References\n ----------\n :cite:`Fairchild2013u`, :cite:`Hunt2004b`\n \"\"\"\n\n\ndef XYZ_to_Hunt(XYZ,\n XYZ_w,\n XYZ_b,\n L_A,\n 
surround=VIEWING_CONDITIONS_HUNT['Normal Scenes'],\n L_AS=None,\n CCT_w=None,\n XYZ_p=None,\n p=None,\n S=None,\n S_w=None,\n helson_judd_effect=False,\n discount_illuminant=True):\n \"\"\"\n Computes the *Hunt* colour appearance model correlates.\n\n Parameters\n ----------\n XYZ : array_like\n *CIE XYZ* tristimulus values of test sample / stimulus.\n XYZ_w : array_like\n *CIE XYZ* tristimulus values of reference white.\n XYZ_b : array_like\n *CIE XYZ* tristimulus values of background.\n L_A : numeric or array_like\n Adapting field *luminance* :math:`L_A` in :math:`cd/m^2`.\n surround : InductionFactors_Hunt, optional\n Surround viewing conditions induction factors.\n L_AS : numeric or array_like, optional\n Scotopic luminance :math:`L_{AS}` of the illuminant, approximated if\n not specified.\n CCT_w : numeric or array_like, optional\n Correlated color temperature :math:`T_{cp}` of the illuminant, needed\n to approximate :math:`L_{AS}`.\n XYZ_p : array_like, optional\n *CIE XYZ* tristimulus values of proximal field, assumed to be equal to\n background if not specified.\n p : numeric or array_like, optional\n Simultaneous contrast / assimilation factor :math:`p` with value\n normalised to domain [-1, 0] when simultaneous contrast occurs and\n normalised to domain [0, 1] when assimilation occurs.\n S : numeric or array_like, optional\n Scotopic response :math:`S` to the stimulus, approximated using\n tristimulus values :math:`Y` of the stimulus if not specified.\n S_w : numeric or array_like, optional\n Scotopic response :math:`S_w` for the reference white, approximated\n using the tristimulus values :math:`Y_w` of the reference white if not\n specified.\n helson_judd_effect : bool, optional\n Truth value indicating whether the *Helson-Judd* effect should be\n accounted for.\n discount_illuminant : bool, optional\n Truth value indicating if the illuminant should be discounted.\n\n Returns\n -------\n CAM_Specification_Hunt\n *Hunt* colour appearance model specification.\n\n Raises\n ------\n ValueError\n If an illegal combination of arguments is specified.\n\n Notes\n -----\n\n +--------------------------+-----------------------+---------------+\n | **Domain** | **Scale - Reference** | **Scale - 1** |\n +==========================+=======================+===============+\n | ``XYZ`` | [0, 100] | [0, 1] |\n +--------------------------+-----------------------+---------------+\n | ``XYZ_w`` | [0, 100] | [0, 1] |\n +--------------------------+-----------------------+---------------+\n | ``XYZ_b`` | [0, 100] | [0, 1] |\n +--------------------------+-----------------------+---------------+\n | ``XYZ_p`` | [0, 100] | [0, 1] |\n +--------------------------+-----------------------+---------------+\n\n +------------------------------+-----------------------+---------------+\n | **Range** | **Scale - Reference** | **Scale - 1** |\n +==============================+=======================+===============+\n | ``CAM_Specification_Hunt.h`` | [0, 360] | [0, 1] |\n +------------------------------+-----------------------+---------------+\n\n References\n ----------\n :cite:`Fairchild2013u`, :cite:`Hunt2004b`\n\n Examples\n --------\n >>> XYZ = np.array([19.01, 20.00, 21.78])\n >>> XYZ_w = np.array([95.05, 100.00, 108.88])\n >>> XYZ_b = np.array([95.05, 100.00, 108.88])\n >>> L_A = 318.31\n >>> surround = VIEWING_CONDITIONS_HUNT['Normal Scenes']\n >>> CCT_w = 6504\n >>> XYZ_to_Hunt(XYZ, XYZ_w, XYZ_b, L_A, surround, CCT_w=CCT_w)\n ... 
# doctest: +ELLIPSIS\n CAM_Specification_Hunt(J=30.0462678..., C=0.1210508..., h=269.2737594..., \\\ns=0.0199093..., Q=22.2097654..., M=0.1238964..., H=array(nan), HC=array(nan))\n \"\"\"\n XYZ = to_domain_100(XYZ)\n XYZ_w = to_domain_100(XYZ_w)\n XYZ_b = to_domain_100(XYZ_b)\n _X, Y, _Z = tsplit(XYZ)\n _X_w, Y_w, _Z_w = tsplit(XYZ_w)\n X_b, Y_b, _Z_b = tsplit(XYZ_b)\n\n # Arguments handling.\n if XYZ_p is not None:\n X_p, Y_p, Z_p = tsplit(to_domain_100(XYZ_p))\n else:\n X_p = X_b\n Y_p = Y_b\n Z_p = Y_b\n usage_warning('Unspecified proximal field \"XYZ_p\" argument, using '\n 'background \"XYZ_b\" as approximation!')\n\n if surround.N_cb is None:\n N_cb = 0.725 * spow(Y_w / Y_b, 0.2)\n usage_warning('Unspecified \"N_cb\" argument, using approximation: '\n '\"{0}\"'.format(N_cb))\n else:\n N_cb = surround.N_cb\n if surround.N_bb is None:\n N_bb = 0.725 * spow(Y_w / Y_b, 0.2)\n usage_warning('Unspecified \"N_bb\" argument, using approximation: '\n '\"{0}\"'.format(N_bb))\n else:\n N_bb = surround.N_bb\n\n if L_AS is None and CCT_w is None:\n raise ValueError('Either the scotopic luminance \"L_AS\" of the '\n 'illuminant or its correlated colour temperature '\n '\"CCT_w\" must be specified!')\n if L_AS is None:\n L_AS = illuminant_scotopic_luminance(L_A, CCT_w)\n usage_warning(\n 'Unspecified \"L_AS\" argument, using approximation from \"CCT\": '\n '\"{0}\"'.format(L_AS))\n\n if (S is None and S_w is not None) or (S is not None and S_w is None):\n raise ValueError('Either both stimulus scotopic response \"S\" and '\n 'reference white scotopic response \"S_w\" arguments '\n 'need to be specified or none of them!')\n elif S is None and S_w is None:\n S = Y\n S_w = Y_w\n usage_warning(\n 'Unspecified stimulus scotopic response \"S\" and reference '\n 'white scotopic response \"S_w\" arguments, using '\n 'approximation: \"{0}\", \"{1}\"'.format(S, S_w))\n\n if p is None:\n usage_warning(\n 'Unspecified simultaneous contrast / assimilation \"p\" '\n 'argument, model will not account for simultaneous chromatic '\n 'contrast!')\n\n XYZ_p = tstack([X_p, Y_p, Z_p])\n\n # Computing luminance level adaptation factor :math:`F_L`.\n F_L = luminance_level_adaptation_factor(L_A)\n\n # Computing test sample chromatic adaptation.\n rgb_a = chromatic_adaptation(XYZ, XYZ_w, XYZ_b, L_A, F_L, XYZ_p, p,\n helson_judd_effect, discount_illuminant)\n\n # Computing reference white chromatic adaptation.\n rgb_aw = chromatic_adaptation(XYZ_w, XYZ_w, XYZ_b, L_A, F_L, XYZ_p, p,\n helson_judd_effect, discount_illuminant)\n\n # Computing opponent colour dimensions.\n # Computing achromatic post adaptation signals.\n A_a = achromatic_post_adaptation_signal(rgb_a)\n A_aw = achromatic_post_adaptation_signal(rgb_aw)\n\n # Computing colour difference signals.\n C = colour_difference_signals(rgb_a)\n C_w = colour_difference_signals(rgb_aw)\n\n # -------------------------------------------------------------------------\n # Computing the *hue* angle :math:`h_s`.\n # -------------------------------------------------------------------------\n h = hue_angle(C)\n # hue_w = hue_angle(C_w)\n # TODO: Implement hue quadrature & composition computation.\n\n # -------------------------------------------------------------------------\n # Computing the correlate of *saturation* :math:`s`.\n # -------------------------------------------------------------------------\n # Computing eccentricity factors.\n e_s = eccentricity_factor(h)\n\n # Computing low luminance tritanopia factor :math:`F_t`.\n F_t = low_luminance_tritanopia_factor(L_A)\n\n M_yb = yellowness_blueness_response(C, e_s, surround.N_c, N_cb, 
F_t)\n M_rg = redness_greenness_response(C, e_s, surround.N_c, N_cb)\n M_yb_w = yellowness_blueness_response(C_w, e_s, surround.N_c, N_cb, F_t)\n M_rg_w = redness_greenness_response(C_w, e_s, surround.N_c, N_cb)\n\n # Computing overall chromatic response.\n M = overall_chromatic_response(M_yb, M_rg)\n M_w = overall_chromatic_response(M_yb_w, M_rg_w)\n\n s = saturation_correlate(M, rgb_a)\n\n # -------------------------------------------------------------------------\n # Computing the correlate of *brightness* :math:`Q`.\n # -------------------------------------------------------------------------\n # Computing achromatic signal :math:`A`.\n A = achromatic_signal(L_AS, S, S_w, N_bb, A_a)\n A_w = achromatic_signal(L_AS, S_w, S_w, N_bb, A_aw)\n\n Q = brightness_correlate(A, A_w, M, surround.N_b)\n brightness_w = brightness_correlate(A_w, A_w, M_w, surround.N_b)\n # TODO: Implement whiteness-blackness :math:`Q_{wb}` computation.\n\n # -------------------------------------------------------------------------\n # Computing the correlate of *Lightness* :math:`J`.\n # -------------------------------------------------------------------------\n J = lightness_correlate(Y_b, Y_w, Q, brightness_w)\n\n # -------------------------------------------------------------------------\n # Computing the correlate of *chroma* :math:`C_{94}`.\n # -------------------------------------------------------------------------\n C_94 = chroma_correlate(s, Y_b, Y_w, Q, brightness_w)\n\n # -------------------------------------------------------------------------\n # Computing the correlate of *colourfulness* :math:`M_{94}`.\n # -------------------------------------------------------------------------\n M_94 = colourfulness_correlate(F_L, C_94)\n\n return CAM_Specification_Hunt(J, C_94, from_range_degrees(h), s, Q, M_94,\n full(J.shape, np.nan), full(J.shape, np.nan))\n\n\ndef luminance_level_adaptation_factor(L_A):\n \"\"\"\n Returns the *luminance* level adaptation factor :math:`F_L`.\n\n Parameters\n ----------\n L_A : numeric or array_like\n Adapting field *luminance* :math:`L_A` in :math:`cd/m^2`.\n\n Returns\n -------\n numeric or ndarray\n *Luminance* level adaptation factor :math:`F_L`\n\n Examples\n --------\n >>> luminance_level_adaptation_factor(318.31) # doctest: +ELLIPSIS\n 1.1675444...\n \"\"\"\n\n L_A = as_float_array(L_A)\n\n k = 1 / (5 * L_A + 1)\n k4 = k ** 4\n F_L = 0.2 * k4 * (5 * L_A) + 0.1 * (1 - k4) ** 2 * spow(5 * L_A, 1 / 3)\n\n return F_L\n\n\ndef illuminant_scotopic_luminance(L_A, CCT):\n \"\"\"\n Returns the approximate scotopic luminance :math:`L_{AS}` of the\n illuminant.\n\n Parameters\n ----------\n L_A : numeric or array_like\n Adapting field *luminance* :math:`L_A` in :math:`cd/m^2`.\n CCT : numeric or array_like\n Correlated color temperature :math:`T_{cp}` of the illuminant.\n\n Returns\n -------\n numeric or ndarray\n Approximate scotopic luminance :math:`L_{AS}`.\n\n Examples\n --------\n >>> illuminant_scotopic_luminance(318.31, 6504.0) # doctest: +ELLIPSIS\n 769.9376286...\n \"\"\"\n\n L_A = as_float_array(L_A)\n CCT = as_float_array(CCT)\n\n CCT = 2.26 * L_A * spow((CCT / 4000) - 0.4, 1 / 3)\n\n return CCT\n\n\ndef XYZ_to_rgb(XYZ):\n \"\"\"\n Converts from *CIE XYZ* tristimulus values to *Hunt-Pointer-Estevez*\n :math:`\\\\rho\\\\gamma\\\\beta` colourspace.\n\n Parameters\n ----------\n XYZ : array_like\n *CIE XYZ* tristimulus values.\n\n Returns\n -------\n ndarray\n *Hunt-Pointer-Estevez* :math:`\\\\rho\\\\gamma\\\\beta` colourspace.\n\n Examples\n --------\n >>> XYZ = 
np.array([19.01, 20.00, 21.78])\n >>> XYZ_to_rgb(XYZ) # doctest: +ELLIPSIS\n array([ 19.4743367..., 20.3101217..., 21.78 ])\n \"\"\"\n\n return vector_dot(MATRIX_XYZ_TO_HPE, XYZ)\n\n\ndef f_n(x):\n \"\"\"\n Defines the nonlinear response function of the *Hunt* colour appearance\n model used to model the nonlinear behaviour of various visual responses.\n\n Parameters\n ----------\n x : numeric or array_like\n Visual response variable :math:`x`.\n\n Returns\n -------\n numeric or array_like\n Modeled visual response variable :math:`x`.\n\n Examples\n --------\n >>> x = np.array([0.23350512, 0.23351103, 0.23355179])\n >>> f_n(x) # doctest: +ELLIPSIS\n array([ 5.8968592..., 5.8969521..., 5.8975927...])\n \"\"\"\n\n x = as_float_array(x)\n\n x_p = spow(x, 0.73)\n x_m = 40 * (x_p / (x_p + 2))\n\n return x_m\n\n\ndef chromatic_adaptation(XYZ,\n XYZ_w,\n XYZ_b,\n L_A,\n F_L,\n XYZ_p=None,\n p=None,\n helson_judd_effect=False,\n discount_illuminant=True):\n \"\"\"\n Applies chromatic adaptation to given *CIE XYZ* tristimulus values.\n\n Parameters\n ----------\n XYZ : array_like\n *CIE XYZ* tristimulus values of test sample.\n XYZ_w : array_like\n *CIE XYZ* tristimulus values of reference white.\n XYZ_b : array_like\n *CIE XYZ* tristimulus values of background.\n L_A : numeric or array_like\n Adapting field *luminance* :math:`L_A` in :math:`cd/m^2`.\n F_L : numeric or array_like\n Luminance adaptation factor :math:`F_L`.\n XYZ_p : array_like, optional\n *CIE XYZ* tristimulus values of proximal field, assumed to be equal to\n background if not specified.\n p : numeric or array_like, optional\n Simultaneous contrast / assimilation factor :math:`p` with value\n normalised to domain [-1, 0] when simultaneous contrast occurs and\n normalised to domain [0, 1] when assimilation occurs.\n helson_judd_effect : bool, optional\n Truth value indicating whether the *Helson-Judd* effect should be\n accounted for.\n discount_illuminant : bool, optional\n Truth value indicating if the illuminant should be discounted.\n\n Returns\n -------\n ndarray\n Adapted *CIE XYZ* tristimulus values.\n\n Examples\n --------\n >>> XYZ = np.array([19.01, 20.00, 21.78])\n >>> XYZ_b = np.array([95.05, 100.00, 108.88])\n >>> XYZ_w = np.array([95.05, 100.00, 108.88])\n >>> L_A = 318.31\n >>> F_L = 1.16754446415\n >>> chromatic_adaptation(XYZ, XYZ_w, XYZ_b, L_A, F_L) # doctest: +ELLIPSIS\n array([ 6.8959454..., 6.8959991..., 6.8965708...])\n\n # Coverage Doctests\n\n >>> chromatic_adaptation(XYZ, XYZ_w, XYZ_b, L_A, F_L,\n ... discount_illuminant=False) # doctest: +ELLIPSIS\n array([ 6.8525880..., 6.8874417..., 6.9461478...])\n >>> chromatic_adaptation(XYZ, XYZ_w, XYZ_b, L_A, F_L,\n ... helson_judd_effect=True) # doctest: +ELLIPSIS\n array([ 6.8959454..., 6.8959991..., 6.8965708...])\n >>> chromatic_adaptation(XYZ, XYZ_w, XYZ_b, L_A, F_L,\n ... 
XYZ_p=XYZ_b, p=0.5) # doctest: +ELLIPSIS\n array([ 9.2069020..., 9.2070219..., 9.2078373...])\n \"\"\"\n\n XYZ_w = as_float_array(XYZ_w)\n XYZ_b = as_float_array(XYZ_b)\n L_A = as_float_array(L_A)\n F_L = as_float_array(F_L)\n\n rgb = XYZ_to_rgb(XYZ)\n rgb_w = XYZ_to_rgb(XYZ_w)\n Y_w = XYZ_w[..., 1]\n Y_b = XYZ_b[..., 1]\n\n h_rgb = 3 * rgb_w / np.sum(rgb_w, axis=-1)[..., np.newaxis]\n\n # Computing chromatic adaptation factors.\n if not discount_illuminant:\n L_A_p = spow(L_A, 1 / 3)\n F_rgb = ((1 + L_A_p + h_rgb) / (1 + L_A_p + (1 / h_rgb)))\n else:\n F_rgb = ones(h_rgb.shape)\n\n # Computing Helson-Judd effect parameters.\n if helson_judd_effect:\n D_rgb = (f_n((Y_b / Y_w) * F_L * F_rgb[..., 1]) - f_n(\n (Y_b / Y_w) * F_L * F_rgb))\n else:\n D_rgb = zeros(F_rgb.shape)\n\n # Computing cone bleach factors.\n B_rgb = (10 ** 7) / ((10 ** 7) + 5 * L_A[..., np.newaxis] * (rgb_w / 100))\n\n # Computing adjusted reference white signals.\n if XYZ_p is not None and p is not None:\n rgb_p = XYZ_to_rgb(XYZ_p)\n rgb_w = adjusted_reference_white_signals(rgb_p, B_rgb, rgb_w, p)\n\n # Computing adapted cone responses.\n rgb_a = 1\n rgb_a += B_rgb * (f_n(F_L[..., np.newaxis] * F_rgb * rgb / rgb_w) + D_rgb)\n\n return rgb_a\n\n\ndef adjusted_reference_white_signals(rgb_p, rgb_b, rgb_w, p):\n \"\"\"\n Adjusts the white point for simultaneous chromatic contrast.\n\n Parameters\n ----------\n rgb_p : array_like\n Cone signals *Hunt-Pointer-Estevez* :math:`\\\\rho\\\\gamma\\\\beta`\n colourspace array of the proximal field.\n rgb_b : array_like\n Cone signals *Hunt-Pointer-Estevez* :math:`\\\\rho\\\\gamma\\\\beta`\n colourspace array of the background.\n rgb_w : array_like\n Cone signals array *Hunt-Pointer-Estevez* :math:`\\\\rho\\\\gamma\\\\beta`\n colourspace array of the reference white.\n p : numeric or array_like\n Simultaneous contrast / assimilation factor :math:`p` with value\n normalised to domain [-1, 0] when simultaneous contrast occurs and\n normalised to domain [0, 1] when assimilation occurs.\n\n Returns\n -------\n ndarray\n Adjusted cone signals *Hunt-Pointer-Estevez* :math:`\\\\rho\\\\gamma\\\\beta`\n colourspace array of the reference white.\n\n Examples\n --------\n >>> rgb_p = np.array([98.07193550, 101.13755950, 100.00000000])\n >>> rgb_b = np.array([0.99984505, 0.99983840, 0.99982674])\n >>> rgb_w = np.array([97.37325710, 101.54968030, 108.88000000])\n >>> p = 0.1\n >>> adjusted_reference_white_signals(rgb_p, rgb_b, rgb_w, p)\n ... 
# doctest: +ELLIPSIS\n array([ 88.0792742..., 91.8569553..., 98.4876543...])\n \"\"\"\n\n rgb_p = as_float_array(rgb_p)\n rgb_b = as_float_array(rgb_b)\n rgb_w = as_float_array(rgb_w)\n p = as_float_array(p)\n\n p_rgb = rgb_p / rgb_b\n rgb_w = (rgb_w * (spow((1 - p) * p_rgb + (1 + p) / p_rgb, 0.5)) / (spow(\n (1 + p) * p_rgb + (1 - p) / p_rgb, 0.5)))\n\n return rgb_w\n\n\ndef achromatic_post_adaptation_signal(rgb):\n \"\"\"\n Returns the achromatic post adaptation signal :math:`A` from given\n *Hunt-Pointer-Estevez* :math:`\\\\rho\\\\gamma\\\\beta` colourspace array.\n\n Parameters\n ----------\n rgb : array_like\n *Hunt-Pointer-Estevez* :math:`\\\\rho\\\\gamma\\\\beta` colourspace array.\n\n Returns\n -------\n numeric or ndarray\n Achromatic post adaptation signal :math:`A`.\n\n Examples\n --------\n >>> rgb = np.array([6.89594549, 6.89599915, 6.89657085])\n >>> achromatic_post_adaptation_signal(rgb) # doctest: +ELLIPSIS\n 18.9827186...\n \"\"\"\n\n r, g, b = tsplit(rgb)\n\n A = 2 * r + g + (1 / 20) * b - 3.05 + 1\n\n return A\n\n\ndef colour_difference_signals(rgb):\n \"\"\"\n Returns the colour difference signals :math:`C_1`, :math:`C_2` and\n :math:`C_3` from given *Hunt-Pointer-Estevez* :math:`\\\\rho\\\\gamma\\\\beta`\n colourspace array.\n\n Parameters\n ----------\n rgb : array_like\n *Hunt-Pointer-Estevez* :math:`\\\\rho\\\\gamma\\\\beta` colourspace array.\n\n Returns\n -------\n ndarray\n Colour difference signals :math:`C_1`, :math:`C_2` and :math:`C_3`.\n\n Examples\n --------\n >>> rgb = np.array([6.89594549, 6.89599915, 6.89657085])\n >>> colour_difference_signals(rgb) # doctest: +ELLIPSIS\n array([ -5.3660000...e-05, -5.7170000...e-04, 6.2536000...e-04])\n \"\"\"\n\n r, g, b = tsplit(rgb)\n\n C_1 = r - g\n C_2 = g - b\n C_3 = b - r\n\n C = tstack([C_1, C_2, C_3])\n\n return C\n\n\ndef hue_angle(C):\n \"\"\"\n Returns the *hue* angle :math:`h` in degrees from given colour difference\n signals :math:`C`.\n\n Parameters\n ----------\n C : array_like\n Colour difference signals :math:`C`.\n\n Returns\n -------\n numeric or ndarray\n *Hue* angle :math:`h` in degrees.\n\n Examples\n --------\n >>> C = np.array([\n ... -5.365865581996587e-05,\n ... -0.000571699383647,\n ... 0.000625358039467\n ... 
])\n >>> hue_angle(C) # doctest: +ELLIPSIS\n 269.2737594...\n \"\"\"\n\n C_1, C_2, C_3 = tsplit(C)\n\n hue = (180 * np.arctan2(0.5 * (C_2 - C_3) / 4.5, C_1 -\n (C_2 / 11)) / np.pi) % 360\n return hue\n\n\ndef eccentricity_factor(hue):\n \"\"\"\n Returns eccentricity factor :math:`e_s` from given hue angle :math:`h`\n in degrees.\n\n Parameters\n ----------\n hue : numeric or array_like\n Hue angle :math:`h` in degrees.\n\n Returns\n -------\n numeric or ndarray\n Eccentricity factor :math:`e_s`.\n\n Examples\n --------\n >>> eccentricity_factor(269.273759) # doctest: +ELLIPSIS\n array(1.1108365...)\n \"\"\"\n\n hue = as_float_array(hue)\n\n h_s = HUE_DATA_FOR_HUE_QUADRATURE['h_s']\n e_s = HUE_DATA_FOR_HUE_QUADRATURE['e_s']\n\n x = np.interp(hue, h_s, e_s)\n x = np.where(hue < 20.14, 0.856 - (hue / 20.14) * 0.056, x)\n x = np.where(hue > 237.53, 0.856 + 0.344 * (360 - hue) / (360 - 237.53), x)\n\n return x\n\n\ndef low_luminance_tritanopia_factor(L_A):\n \"\"\"\n Returns the low luminance tritanopia factor :math:`F_t` from given adapting\n field *luminance* :math:`L_A` in :math:`cd/m^2`.\n\n Parameters\n ----------\n L_A : numeric or array_like\n Adapting field *luminance* :math:`L_A` in :math:`cd/m^2`.\n\n Returns\n -------\n numeric or ndarray\n Low luminance tritanopia factor :math:`F_t`.\n\n Examples\n --------\n >>> low_luminance_tritanopia_factor(318.31) # doctest: +ELLIPSIS\n 0.9996859...\n \"\"\"\n\n L_A = as_float_array(L_A)\n\n F_t = L_A / (L_A + 0.1)\n\n return F_t\n\n\ndef yellowness_blueness_response(C, e_s, N_c, N_cb, F_t):\n \"\"\"\n Returns the yellowness / blueness response :math:`M_{yb}`.\n\n Parameters\n ----------\n C : array_like\n Colour difference signals :math:`C`.\n e_s : numeric or array_like\n Eccentricity factor :math:`e_s`.\n N_c : numeric or array_like\n Chromatic surround induction factor :math:`N_c`.\n N_cb : numeric or array_like\n Chromatic background induction factor :math:`N_{cb}`.\n F_t : numeric or array_like\n Low luminance tritanopia factor :math:`F_t`.\n\n Returns\n -------\n numeric or ndarray\n Yellowness / blueness response :math:`M_{yb}`.\n\n Examples\n --------\n >>> C = np.array([\n ... -5.365865581996587e-05,\n ... -0.000571699383647,\n ... 0.000625358039467\n ... ])\n >>> e_s = 1.110836504862630\n >>> N_c = 1.0\n >>> N_cb = 0.725000000000000\n >>> F_t = 0.99968593951195\n >>> yellowness_blueness_response(C, e_s, N_c, N_cb, F_t)\n ... # doctest: +ELLIPSIS\n -0.0082372...\n \"\"\"\n\n _C_1, C_2, C_3 = tsplit(C)\n e_s = as_float_array(e_s)\n N_c = as_float_array(N_c)\n N_cb = as_float_array(N_cb)\n F_t = as_float_array(F_t)\n\n M_yb = (\n 100 * (0.5 * (C_2 - C_3) / 4.5) * (e_s * (10 / 13) * N_c * N_cb * F_t))\n\n return M_yb\n\n\ndef redness_greenness_response(C, e_s, N_c, N_cb):\n \"\"\"\n Returns the redness / greenness response :math:`M_{rg}`.\n\n Parameters\n ----------\n C : array_like\n Colour difference signals :math:`C`.\n e_s : numeric or array_like\n Eccentricity factor :math:`e_s`.\n N_c : numeric or array_like\n Chromatic surround induction factor :math:`N_c`.\n N_cb : numeric or array_like\n Chromatic background induction factor :math:`N_{cb}`.\n\n Returns\n -------\n numeric or ndarray\n Redness / greenness response :math:`M_{rg}`.\n\n Examples\n --------\n >>> C = np.array([\n ... -5.365865581996587e-05,\n ... -0.000571699383647,\n ... 0.000625358039467\n ... 
])\n >>> e_s = 1.110836504862630\n >>> N_c = 1.0\n >>> N_cb = 0.725000000000000\n >>> redness_greenness_response(C, e_s, N_c, N_cb) # doctest: +ELLIPSIS\n -0.0001044...\n \"\"\"\n\n C_1, C_2, _C_3 = tsplit(C)\n e_s = as_float_array(e_s)\n N_c = as_float_array(N_c)\n N_cb = as_float_array(N_cb)\n\n M_rg = 100 * (C_1 - (C_2 / 11)) * (e_s * (10 / 13) * N_c * N_cb)\n\n return M_rg\n\n\ndef overall_chromatic_response(M_yb, M_rg):\n \"\"\"\n Returns the overall chromatic response :math:`M`.\n\n Parameters\n ----------\n M_yb : numeric or array_like\n Yellowness / blueness response :math:`M_{yb}`.\n M_rg : numeric or array_like\n Redness / greenness response :math:`M_{rg}`.\n\n Returns\n -------\n numeric or ndarray\n Overall chromatic response :math:`M`.\n\n Examples\n --------\n >>> M_yb = -0.008237223618825\n >>> M_rg = -0.000104447583276\n >>> overall_chromatic_response(M_yb, M_rg) # doctest: +ELLIPSIS\n 0.0082378...\n \"\"\"\n\n M_yb = as_float_array(M_yb)\n M_rg = as_float_array(M_rg)\n\n M = spow((M_yb ** 2) + (M_rg ** 2), 0.5)\n\n return M\n\n\ndef saturation_correlate(M, rgb_a):\n \"\"\"\n Returns the *saturation* correlate :math:`s`.\n\n Parameters\n ----------\n M : numeric or array_like\n Overall chromatic response :math:`M`.\n rgb_a : array_like\n Adapted *Hunt-Pointer-Estevez* :math:`\\\\rho\\\\gamma\\\\beta` colourspace\n array.\n\n Returns\n -------\n numeric or ndarray\n *Saturation* correlate :math:`s`.\n\n Examples\n --------\n >>> M = 0.008237885787274\n >>> rgb_a = np.array([6.89594549, 6.89599915, 6.89657085])\n >>> saturation_correlate(M, rgb_a) # doctest: +ELLIPSIS\n 0.0199093...\n \"\"\"\n\n M = as_float_array(M)\n rgb_a = as_float_array(rgb_a)\n\n s = 50 * M / np.sum(rgb_a, axis=-1)\n\n return s\n\n\ndef achromatic_signal(L_AS, S, S_w, N_bb, A_a):\n \"\"\"\n Returns the achromatic signal :math:`A`.\n\n Parameters\n ----------\n L_AS : numeric or array_like\n Scotopic luminance :math:`L_{AS}` of the illuminant.\n S : numeric or array_like\n Scotopic response :math:`S` to the stimulus.\n S_w : numeric or array_like\n Scotopic response :math:`S_w` for the reference white.\n N_bb : numeric or array_like\n Brightness background induction factor :math:`N_{bb}`.\n A_a: numeric or array_like\n Achromatic post adaptation signal of the stimulus :math:`A_a`.\n\n Returns\n -------\n numeric or ndarray\n Achromatic signal :math:`A`.\n\n Examples\n --------\n >>> L_AS = 769.9376286541402\n >>> S = 20.0\n >>> S_w = 100.0\n >>> N_bb = 0.725000000000000\n >>> A_a = 18.982718664838487\n >>> achromatic_signal(L_AS, S, S_w, N_bb, A_a) # doctest: +ELLIPSIS\n 15.5068546...\n \"\"\"\n\n L_AS = as_float_array(L_AS)\n S = as_float_array(S)\n S_w = as_float_array(S_w)\n N_bb = as_float_array(N_bb)\n A_a = as_float_array(A_a)\n\n j = 0.00001 / ((5 * L_AS / 2.26) + 0.00001)\n\n # Computing scotopic luminance level adaptation factor :math:`F_{LS}`.\n F_LS = 3800 * (j ** 2) * (5 * L_AS / 2.26)\n F_LS += 0.2 * (spow(1 - (j ** 2), 0.4)) * (spow(5 * L_AS / 2.26, 1 / 6))\n\n # Computing cone bleach factors :math:`B_S`.\n B_S = 0.5 / (1 + 0.3 * spow((5 * L_AS / 2.26) * (S / S_w), 0.3))\n B_S += 0.5 / (1 + 5 * (5 * L_AS / 2.26))\n\n # Computing adapted scotopic signal :math:`A_S`.\n A_S = (f_n(F_LS * S / S_w) * 3.05 * B_S) + 0.3\n\n # Computing achromatic signal :math:`A`.\n A = N_bb * (A_a - 1 + A_S - 0.3 + np.sqrt((1 + (0.3 ** 2))))\n\n return A\n\n\ndef brightness_correlate(A, A_w, M, N_b):\n \"\"\"\n Returns the *brightness* correlate :math:`Q`.\n\n Parameters\n ----------\n A : numeric or array_like\n 
Achromatic signal :math:`A`.\n A_w : numeric or array_like\n Achromatic post adaptation signal of the reference white :math:`A_w`.\n M : numeric or array_like\n Overall chromatic response :math:`M`.\n N_b : numeric or array_like\n Brightness surround induction factor :math:`N_b`.\n\n Returns\n -------\n numeric or ndarray\n *Brightness* correlate :math:`Q`.\n\n Examples\n --------\n >>> A = 15.506854623621885\n >>> A_w = 35.718916676317086\n >>> M = 0.008237885787274\n >>> N_b = 75.0\n >>> brightness_correlate(A, A_w, M, N_b) # doctest: +ELLIPSIS\n 22.2097654...\n \"\"\"\n\n A = as_float_array(A)\n A_w = as_float_array(A_w)\n M = as_float_array(M)\n N_b = as_float_array(N_b)\n\n N_1 = (spow(7 * A_w, 0.5)) / (5.33 * spow(N_b, 0.13))\n N_2 = (7 * A_w * spow(N_b, 0.362)) / 200\n\n Q = spow(7 * (A + (M / 100)), 0.6) * N_1 - N_2\n\n return Q\n\n\ndef lightness_correlate(Y_b, Y_w, Q, Q_w):\n \"\"\"\n Returns the *Lightness* correlate :math:`J`.\n\n Parameters\n ----------\n Y_b : numeric or array_like\n Tristimulus values :math:`Y_b` of the background.\n Y_w : numeric or array_like\n Tristimulus values :math:`Y_w` of the reference white.\n Q : numeric or array_like\n *Brightness* correlate :math:`Q` of the stimulus.\n Q_w : numeric or array_like\n *Brightness* correlate :math:`Q` of the reference white.\n\n Returns\n -------\n numeric or ndarray\n *Lightness* correlate :math:`J`.\n\n Examples\n --------\n >>> Y_b = 100.0\n >>> Y_w = 100.0\n >>> Q = 22.209765491265024\n >>> Q_w = 40.518065821226081\n >>> lightness_correlate(Y_b, Y_w, Q, Q_w) # doctest: +ELLIPSIS\n 30.0462678...\n \"\"\"\n\n Y_b = as_float_array(Y_b)\n Y_w = as_float_array(Y_w)\n Q = as_float_array(Q)\n Q_w = as_float_array(Q_w)\n\n Z = 1 + spow(Y_b / Y_w, 0.5)\n J = 100 * spow(Q / Q_w, Z)\n\n return J\n\n\ndef chroma_correlate(s, Y_b, Y_w, Q, Q_w):\n \"\"\"\n Returns the *chroma* correlate :math:`C_94`.\n\n Parameters\n ----------\n s : numeric or array_like\n *Saturation* correlate :math:`s`.\n Y_b : numeric or array_like\n Tristimulus values :math:`Y_b` of the background.\n Y_w : numeric or array_like\n Tristimulus values :math:`Y_w` of the reference white.\n Q : numeric or array_like\n *Brightness* correlate :math:`Q` of the stimulus.\n Q_w : numeric or array_like\n *Brightness* correlate :math:`Q` of the reference white.\n\n Returns\n -------\n numeric or ndarray\n *Chroma* correlate :math:`C_94`.\n\n Examples\n --------\n >>> s = 0.0199093206929\n >>> Y_b = 100.0\n >>> Y_w = 100.0\n >>> Q = 22.209765491265024\n >>> Q_w = 40.518065821226081\n >>> chroma_correlate(s, Y_b, Y_w, Q, Q_w) # doctest: +ELLIPSIS\n 0.1210508...\n \"\"\"\n\n s = as_float_array(s)\n Y_b = as_float_array(Y_b)\n Y_w = as_float_array(Y_w)\n Q = as_float_array(Q)\n Q_w = as_float_array(Q_w)\n\n C_94 = (2.44 * spow(s, 0.69) * (spow(Q / Q_w, Y_b / Y_w)) *\n (1.64 - spow(0.29, Y_b / Y_w)))\n\n return C_94\n\n\ndef colourfulness_correlate(F_L, C_94):\n \"\"\"\n Returns the *colourfulness* correlate :math:`M_94`.\n\n Parameters\n ----------\n F_L : numeric or array_like\n Luminance adaptation factor :math:`F_L`.\n C_94 : numeric\n *Chroma* correlate :math:`C_94`.\n\n Returns\n -------\n numeric\n *Colourfulness* correlate :math:`M_94`.\n\n Examples\n --------\n >>> F_L = 1.16754446414718\n >>> C_94 = 0.121050839936176\n >>> colourfulness_correlate(F_L, C_94) # doctest: +ELLIPSIS\n 0.1238964...\n \"\"\"\n\n F_L = as_float_array(F_L)\n C_94 = as_float_array(C_94)\n\n M_94 = spow(F_L, 0.15) * C_94\n\n return M_94\n", "# -*- coding: utf-8 -*-\n\"\"\"\nRIMM, ROMM and ERIMM 
Encodings\n==============================\n\nDefines the *RIMM, ROMM and ERIMM* encodings opto-electrical transfer functions\n(OETF / OECF) and electro-optical transfer functions (EOTF / EOCF):\n\n- :func:`colour.models.cctf_encoding_ROMMRGB`\n- :func:`colour.models.cctf_decoding_ROMMRGB`\n- :func:`colour.models.cctf_encoding_ProPhotoRGB`\n- :func:`colour.models.cctf_decoding_ProPhotoRGB`\n- :func:`colour.models.cctf_encoding_RIMMRGB`\n- :func:`colour.models.cctf_decoding_RIMMRGB`\n- :func:`colour.models.log_encoding_ERIMMRGB`\n- :func:`colour.models.log_decoding_ERIMMRGB`\n\nReferences\n----------\n- :cite:`ANSI2003a` : ANSI. (2003). Specification of ROMM RGB (pp. 1-2).\n http://www.color.org/ROMMRGB.pdf\n- :cite:`Spaulding2000b` : Spaulding, K. E., Woolfe, G. J., & Giorgianni, E.\n J. (2000). Reference Input/Output Medium Metric RGB Color Encodings\n (RIMM/ROMM RGB) (pp. 1-8). http://www.photo-lovers.org/pdf/color/romm.pdf\n\"\"\"\n\nimport numpy as np\n\nfrom colour.algebra import spow\nfrom colour.utilities import (as_float, as_int, domain_range_scale,\n from_range_1, to_domain_1)\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = '[email protected]'\n__status__ = 'Production'\n\n__all__ = [\n 'cctf_encoding_ROMMRGB', 'cctf_decoding_ROMMRGB',\n 'cctf_encoding_ProPhotoRGB', 'cctf_decoding_ProPhotoRGB',\n 'cctf_encoding_RIMMRGB', 'cctf_decoding_RIMMRGB', 'log_encoding_ERIMMRGB',\n 'log_decoding_ERIMMRGB'\n]\n\n\ndef cctf_encoding_ROMMRGB(X, bit_depth=8, out_int=False):\n \"\"\"\n Defines the *ROMM RGB* encoding colour component transfer function\n (Encoding CCTF).\n\n Parameters\n ----------\n X : numeric or array_like\n Linear data :math:`X_{ROMM}`.\n bit_depth : int, optional\n Bit depth used for conversion.\n out_int : bool, optional\n Whether to return value as integer code value or float equivalent of a\n code value at a given bit depth.\n\n Returns\n -------\n numeric or ndarray\n Non-linear data :math:`X'_{ROMM}`.\n\n Notes\n -----\n\n +----------------+-----------------------+---------------+\n | **Domain \\\\*** | **Scale - Reference** | **Scale - 1** |\n +================+=======================+===============+\n | ``X`` | [0, 1] | [0, 1] |\n +----------------+-----------------------+---------------+\n\n +----------------+-----------------------+---------------+\n | **Range \\\\*** | **Scale - Reference** | **Scale - 1** |\n +================+=======================+===============+\n | ``X_p`` | [0, 1] | [0, 1] |\n +----------------+-----------------------+---------------+\n\n \\\\* This definition has an output integer switch, thus the domain-range\n scale information is only given for the floating point mode.\n\n References\n ----------\n :cite:`ANSI2003a`, :cite:`Spaulding2000b`\n\n Examples\n --------\n >>> cctf_encoding_ROMMRGB(0.18) # doctest: +ELLIPSIS\n 0.3857114...\n >>> cctf_encoding_ROMMRGB(0.18, out_int=True)\n 98\n \"\"\"\n\n X = to_domain_1(X)\n\n I_max = 2 ** bit_depth - 1\n\n E_t = 16 ** (1.8 / (1 - 1.8))\n\n X_p = np.where(X < E_t, X * 16 * I_max, spow(X, 1 / 1.8) * I_max)\n\n if out_int:\n return as_int(np.round(X_p))\n else:\n return as_float(from_range_1(X_p / I_max))\n\n\ndef cctf_decoding_ROMMRGB(X_p, bit_depth=8, in_int=False):\n \"\"\"\n Defines the *ROMM RGB* decoding colour component transfer function\n (Decoding CCTF).\n\n Parameters\n ----------\n X_p : numeric or 
array_like\n Non-linear data :math:`X'_{ROMM}`.\n bit_depth : int, optional\n Bit depth used for conversion.\n in_int : bool, optional\n Whether to treat the input value as integer code value or float\n equivalent of a code value at a given bit depth.\n\n Returns\n -------\n numeric or ndarray\n Linear data :math:`X_{ROMM}`.\n\n Notes\n -----\n\n +----------------+-----------------------+---------------+\n | **Domain \\\\*** | **Scale - Reference** | **Scale - 1** |\n +================+=======================+===============+\n | ``X_p`` | [0, 1] | [0, 1] |\n +----------------+-----------------------+---------------+\n\n +----------------+-----------------------+---------------+\n | **Range \\\\*** | **Scale - Reference** | **Scale - 1** |\n +================+=======================+===============+\n | ``X`` | [0, 1] | [0, 1] |\n +----------------+-----------------------+---------------+\n\n \\\\* This definition has an input integer switch, thus the domain-range\n scale information is only given for the floating point mode.\n\n References\n ----------\n :cite:`ANSI2003a`, :cite:`Spaulding2000b`\n\n Examples\n --------\n >>> cctf_decoding_ROMMRGB(0.385711424751138) # doctest: +ELLIPSIS\n 0.1...\n >>> cctf_decoding_ROMMRGB(98, in_int=True) # doctest: +ELLIPSIS\n 0.1...\n \"\"\"\n\n X_p = to_domain_1(X_p)\n\n I_max = 2 ** bit_depth - 1\n\n if not in_int:\n X_p = X_p * I_max\n\n E_t = 16 ** (1.8 / (1 - 1.8))\n\n X = np.where(\n X_p < 16 * E_t * I_max,\n X_p / (16 * I_max),\n spow(X_p / I_max, 1.8),\n )\n\n return as_float(from_range_1(X))\n\n\ncctf_encoding_ProPhotoRGB = cctf_encoding_ROMMRGB\ncctf_encoding_ProPhotoRGB.__doc__ = cctf_encoding_ProPhotoRGB.__doc__.replace(\n '*ROMM RGB*', '*ProPhoto RGB*')\ncctf_decoding_ProPhotoRGB = cctf_decoding_ROMMRGB\ncctf_decoding_ProPhotoRGB.__doc__ = cctf_decoding_ROMMRGB.__doc__.replace(\n '*ROMM RGB*', '*ProPhoto RGB*')\n\n\ndef cctf_encoding_RIMMRGB(X, bit_depth=8, out_int=False, E_clip=2.0):\n \"\"\"\n Defines the *RIMM RGB* encoding colour component transfer function\n (Encoding CCTF).\n\n *RIMM RGB* encoding non-linearity is based on that specified by\n *Recommendation ITU-R BT.709-6*.\n\n Parameters\n ----------\n X : numeric or array_like\n Linear data :math:`X_{RIMM}`.\n bit_depth : int, optional\n Bit depth used for conversion.\n out_int : bool, optional\n Whether to return value as integer code value or float equivalent of a\n code value at a given bit depth.\n E_clip : numeric, optional\n Maximum exposure level.\n\n Returns\n -------\n numeric or ndarray\n Non-linear data :math:`X'_{RIMM}`.\n\n Notes\n -----\n\n +----------------+-----------------------+---------------+\n | **Domain \\\\*** | **Scale - Reference** | **Scale - 1** |\n +================+=======================+===============+\n | ``X`` | [0, 1] | [0, 1] |\n +----------------+-----------------------+---------------+\n\n +----------------+-----------------------+---------------+\n | **Range \\\\*** | **Scale - Reference** | **Scale - 1** |\n +================+=======================+===============+\n | ``X_p`` | [0, 1] | [0, 1] |\n +----------------+-----------------------+---------------+\n\n \\\\* This definition has an output integer switch, thus the domain-range\n scale information is only given for the floating point mode.\n\n References\n ----------\n :cite:`Spaulding2000b`\n\n Examples\n --------\n >>> cctf_encoding_RIMMRGB(0.18) # doctest: +ELLIPSIS\n 0.2916737...\n >>> cctf_encoding_RIMMRGB(0.18, out_int=True)\n 74\n \"\"\"\n\n X = to_domain_1(X)\n\n I_max = 2 ** 
bit_depth - 1\n\n V_clip = 1.099 * spow(E_clip, 0.45) - 0.099\n q = I_max / V_clip\n\n # \"np.select\" uses the first matching condition, so the \"E_clip\" clip\n # must be tested before the power-law branch; clipped exposures map to\n # \"V_clip\", i.e. \"I_max\" once scaled by \"q\".\n X_p = q * np.select(\n [X < 0.0, X < 0.018, X > E_clip, X >= 0.018],\n [0, 4.5 * X, V_clip, 1.099 * spow(X, 0.45) - 0.099])\n\n if out_int:\n return as_int(np.round(X_p))\n else:\n return as_float(from_range_1(X_p / I_max))\n\n\ndef cctf_decoding_RIMMRGB(X_p, bit_depth=8, in_int=False, E_clip=2.0):\n \"\"\"\n Defines the *RIMM RGB* decoding colour component transfer function\n (Decoding CCTF).\n\n Parameters\n ----------\n X_p : numeric or array_like\n Non-linear data :math:`X'_{RIMM}`.\n bit_depth : int, optional\n Bit depth used for conversion.\n in_int : bool, optional\n Whether to treat the input value as integer code value or float\n equivalent of a code value at a given bit depth.\n E_clip : numeric, optional\n Maximum exposure level.\n\n Returns\n -------\n numeric or ndarray\n Linear data :math:`X_{RIMM}`.\n\n Notes\n -----\n\n +----------------+-----------------------+---------------+\n | **Domain \\\\*** | **Scale - Reference** | **Scale - 1** |\n +================+=======================+===============+\n | ``X_p`` | [0, 1] | [0, 1] |\n +----------------+-----------------------+---------------+\n\n +----------------+-----------------------+---------------+\n | **Range \\\\*** | **Scale - Reference** | **Scale - 1** |\n +================+=======================+===============+\n | ``X`` | [0, 1] | [0, 1] |\n +----------------+-----------------------+---------------+\n\n \\\\* This definition has an input integer switch, thus the domain-range\n scale information is only given for the floating point mode.\n\n References\n ----------\n :cite:`Spaulding2000b`\n\n Examples\n --------\n >>> cctf_decoding_RIMMRGB(0.291673732475746) # doctest: +ELLIPSIS\n 0.1...\n >>> cctf_decoding_RIMMRGB(74, in_int=True) # doctest: +ELLIPSIS\n 0.1...\n \"\"\"\n\n X_p = to_domain_1(X_p)\n\n I_max = 2 ** bit_depth - 1\n\n if not in_int:\n X_p = X_p * I_max\n\n V_clip = 1.099 * spow(E_clip, 0.45) - 0.099\n\n m = V_clip * X_p / I_max\n\n with domain_range_scale('ignore'):\n X = np.where(\n X_p / I_max < cctf_encoding_RIMMRGB(\n 0.018, bit_depth, E_clip=E_clip),\n m / 4.5,\n spow((m + 0.099) / 1.099, 1 / 0.45),\n )\n\n return as_float(from_range_1(X))\n\n\ndef log_encoding_ERIMMRGB(X,\n bit_depth=8,\n out_int=False,\n E_min=0.001,\n E_clip=316.2):\n \"\"\"\n Defines the *ERIMM RGB* log encoding curve / opto-electronic transfer\n function (OETF / OECF).\n\n Parameters\n ----------\n X : numeric or array_like\n Linear data :math:`X_{ERIMM}`.\n bit_depth : int, optional\n Bit depth used for conversion.\n out_int : bool, optional\n Whether to return value as integer code value or float equivalent of a\n code value at a given bit depth.\n E_min : numeric, optional\n Minimum exposure limit.\n E_clip : numeric, optional\n Maximum exposure limit.\n\n Returns\n -------\n numeric or ndarray\n Non-linear data :math:`X'_{ERIMM}`.\n\n Notes\n -----\n\n +----------------+-----------------------+---------------+\n | **Domain \\\\*** | **Scale - Reference** | **Scale - 1** |\n +================+=======================+===============+\n | ``X`` | [0, 1] | [0, 1] |\n +----------------+-----------------------+---------------+\n\n +----------------+-----------------------+---------------+\n | **Range \\\\*** | **Scale - Reference** | **Scale - 1** |\n +================+=======================+===============+\n | ``X_p`` | [0, 1] | [0, 1] |\n +----------------+-----------------------+---------------+\n\n \\\\* This definition has an output 
integer switch, thus the domain-range\n scale information is only given for the floating point mode.\n\n References\n ----------\n :cite:`Spaulding2000b`\n\n Examples\n --------\n >>> log_encoding_ERIMMRGB(0.18) # doctest: +ELLIPSIS\n 0.4100523...\n >>> log_encoding_ERIMMRGB(0.18, out_int=True)\n 105\n \"\"\"\n\n X = to_domain_1(X)\n\n I_max = 2 ** bit_depth - 1\n\n E_t = np.exp(1) * E_min\n\n # \"np.select\" uses the first matching condition, so exposures above\n # \"E_clip\" must be clipped to \"I_max\" before the logarithmic branch is\n # tested.\n X_p = np.select([\n X < 0.0,\n X <= E_t,\n X > E_clip,\n X > E_t,\n ], [\n 0,\n I_max * ((np.log(E_t) - np.log(E_min)) /\n (np.log(E_clip) - np.log(E_min))) * (X / E_t),\n I_max,\n I_max * (\n (np.log(X) - np.log(E_min)) / (np.log(E_clip) - np.log(E_min))),\n ])\n\n if out_int:\n return as_int(np.round(X_p))\n else:\n return as_float(from_range_1(X_p / I_max))\n\n\ndef log_decoding_ERIMMRGB(X_p,\n bit_depth=8,\n in_int=False,\n E_min=0.001,\n E_clip=316.2):\n \"\"\"\n Defines the *ERIMM RGB* log decoding curve / electro-optical transfer\n function (EOTF / EOCF).\n\n Parameters\n ----------\n X_p : numeric or array_like\n Non-linear data :math:`X'_{ERIMM}`.\n bit_depth : int, optional\n Bit depth used for conversion.\n in_int : bool, optional\n Whether to treat the input value as integer code value or float\n equivalent of a code value at a given bit depth.\n E_min : numeric, optional\n Minimum exposure limit.\n E_clip : numeric, optional\n Maximum exposure limit.\n\n Returns\n -------\n numeric or ndarray\n Linear data :math:`X_{ERIMM}`.\n\n Notes\n -----\n\n +----------------+-----------------------+---------------+\n | **Domain \\\\*** | **Scale - Reference** | **Scale - 1** |\n +================+=======================+===============+\n | ``X_p`` | [0, 1] | [0, 1] |\n +----------------+-----------------------+---------------+\n\n +----------------+-----------------------+---------------+\n | **Range \\\\*** | **Scale - Reference** | **Scale - 1** |\n +================+=======================+===============+\n | ``X`` | [0, 1] | [0, 1] |\n +----------------+-----------------------+---------------+\n\n \\\\* This definition has an input integer switch, thus the domain-range\n scale information is only given for the floating point mode.\n\n References\n ----------\n :cite:`Spaulding2000b`\n\n Examples\n --------\n >>> log_decoding_ERIMMRGB(0.410052389492129) # doctest: +ELLIPSIS\n 0.1...\n >>> log_decoding_ERIMMRGB(105, in_int=True) # doctest: +ELLIPSIS\n 0.1...\n \"\"\"\n\n X_p = to_domain_1(X_p)\n\n I_max = 2 ** bit_depth - 1\n\n if not in_int:\n X_p = X_p * I_max\n\n E_t = np.exp(1) * E_min\n\n X = np.where(\n X_p <= I_max * (\n (np.log(E_t) - np.log(E_min)) / (np.log(E_clip) - np.log(E_min))),\n ((np.log(E_clip) - np.log(E_min)) / (np.log(E_t) - np.log(E_min))) * (\n (X_p * E_t) / I_max),\n np.exp((X_p / I_max) * (np.log(E_clip) - np.log(E_min)) +\n np.log(E_min)),\n )\n\n return as_float(from_range_1(X))\n", "# -*- coding: utf-8 -*-\n\"\"\"\nDefines the unit tests for the :mod:`colour.temperature.krystek1985` module.\n\"\"\"\n\nimport numpy as np\nimport unittest\nfrom itertools import permutations\n\nfrom colour.temperature import uv_to_CCT_Krystek1985, CCT_to_uv_Krystek1985\nfrom colour.utilities import ignore_numpy_errors\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = '[email protected]'\n__status__ = 'Production'\n\n__all__ = ['TestUv_to_CCT_Krystek1985', 'TestCCT_to_uv_Krystek1985']\n\n\nclass 
TestUv_to_CCT_Krystek1985(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.temperature.krystek1985.uv_to_CCT_Krystek1985`\n definition unit tests methods.\n \"\"\"\n\n def test_uv_to_CCT_Krystek1985(self):\n \"\"\"\n Tests :func:`colour.temperature.krystek1985.uv_to_CCT_Krystek1985`\n definition.\n \"\"\"\n\n np.testing.assert_allclose(\n uv_to_CCT_Krystek1985(\n np.array([0.448087794140145, 0.354731965027727]),\n {'method': 'Nelder-Mead'}),\n 1000,\n rtol=0.0000001,\n atol=0.0000001)\n\n np.testing.assert_allclose(\n uv_to_CCT_Krystek1985(\n np.array([0.198152565091092, 0.307023596915037]),\n {'method': 'Nelder-Mead'}),\n 7000,\n rtol=0.0000001,\n atol=0.0000001)\n\n np.testing.assert_allclose(\n uv_to_CCT_Krystek1985(\n np.array([0.185675876767054, 0.282233658593898]),\n {'method': 'Nelder-Mead'}),\n 15000,\n rtol=0.0000001,\n atol=0.0000001)\n\n def test_n_dimensional_uv_to_CCT_Krystek1985(self):\n \"\"\"\n Tests :func:`colour.temperature.krystek1985.uv_to_CCT_Krystek1985`\n definition n-dimensional arrays support.\n \"\"\"\n\n uv = np.array([0.198152565091092, 0.307023596915037])\n CCT = uv_to_CCT_Krystek1985(uv)\n\n uv = np.tile(uv, (6, 1))\n CCT = np.tile(CCT, 6)\n np.testing.assert_almost_equal(\n uv_to_CCT_Krystek1985(uv), CCT, decimal=7)\n\n uv = np.reshape(uv, (2, 3, 2))\n CCT = np.reshape(CCT, (2, 3))\n np.testing.assert_almost_equal(\n uv_to_CCT_Krystek1985(uv), CCT, decimal=7)\n\n @ignore_numpy_errors\n def test_nan_uv_to_CCT_Krystek1985(self):\n \"\"\"\n Tests :func:`colour.temperature.krystek1985.uv_to_CCT_Krystek1985`\n definition nan support.\n \"\"\"\n\n cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]\n cases = set(permutations(cases * 3, r=2))\n for case in cases:\n uv_to_CCT_Krystek1985(case)\n\n\nclass TestCCT_to_uv_Krystek1985(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.temperature.krystek1985.CCT_to_uv_Krystek1985`\n definition unit tests methods.\n \"\"\"\n\n def test_CCT_to_uv_Krystek1985(self):\n \"\"\"\n Tests :func:`colour.temperature.krystek1985.CCT_to_uv_Krystek1985`\n definition.\n \"\"\"\n\n np.testing.assert_almost_equal(\n CCT_to_uv_Krystek1985(1000),\n np.array([0.448087794140145, 0.354731965027727]),\n decimal=7)\n\n np.testing.assert_almost_equal(\n CCT_to_uv_Krystek1985(7000),\n np.array([0.198152565091092, 0.307023596915037]),\n decimal=7)\n\n np.testing.assert_almost_equal(\n CCT_to_uv_Krystek1985(15000),\n np.array([0.185675876767054, 0.282233658593898]),\n decimal=7)\n\n def test_n_dimensional_CCT_to_uv_Krystek1985(self):\n \"\"\"\n Tests :func:`colour.temperature.krystek1985.CCT_to_uv_Krystek1985`\n definition n-dimensional arrays support.\n \"\"\"\n\n CCT = 7000\n uv = CCT_to_uv_Krystek1985(CCT)\n\n CCT = np.tile(CCT, 6)\n uv = np.tile(uv, (6, 1))\n np.testing.assert_almost_equal(\n CCT_to_uv_Krystek1985(CCT), uv, decimal=7)\n\n CCT = np.reshape(CCT, (2, 3))\n uv = np.reshape(uv, (2, 3, 2))\n np.testing.assert_almost_equal(\n CCT_to_uv_Krystek1985(CCT), uv, decimal=7)\n\n @ignore_numpy_errors\n def test_nan_CCT_to_uv_Krystek1985(self):\n \"\"\"\n Tests :func:`colour.temperature.krystek1985.CCT_to_uv_Krystek1985`\n definition nan support.\n \"\"\"\n\n cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]\n cases = set(permutations(cases * 3, r=1))\n for case in cases:\n CCT_to_uv_Krystek1985(case)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
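The two Krystek (1985) conversions exercised above are inverses of one another. As a quick editorial sketch (not part of the original test file), here is a round trip through the 7000 K values from these tests, using the same positional options dict the tests pass:

import numpy as np
from colour.temperature import CCT_to_uv_Krystek1985, uv_to_CCT_Krystek1985

uv = CCT_to_uv_Krystek1985(7000)
# Expected per the tests above: [0.198152565091092, 0.307023596915037]
CCT = uv_to_CCT_Krystek1985(uv, {'method': 'Nelder-Mead'})
np.testing.assert_allclose(CCT, 7000, rtol=0.0000001, atol=0.0000001)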
[ [ "numpy.arange", "numpy.linalg.norm", "numpy.cos", "numpy.sin", "numpy.mean", "numpy.floor", "numpy.sum", "numpy.empty" ], [ "numpy.reshape", "numpy.array", "numpy.tile" ], [ "numpy.array" ], [ "numpy.log", "numpy.exp", "numpy.sqrt" ], [ "numpy.sqrt", "numpy.linalg.inv", "numpy.arctan2", "numpy.interp", "numpy.array", "numpy.where", "numpy.sum" ], [ "numpy.round", "numpy.log", "numpy.exp" ], [ "numpy.reshape", "numpy.array", "numpy.tile" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jjohnson-arm/incubator-tvm
[ "2b6d69c62c07acc102c6ca42ee5c4edcc3de41f1" ]
[ "python/tvm/relay/frontend/tensorflow.py" ]
[ "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=import-self, invalid-name, unused-argument, too-many-lines, len-as-condition, broad-except\n# pylint: disable=import-outside-toplevel\n\"\"\"TF: Tensorflow frontend.\"\"\"\nimport warnings\nfrom collections import defaultdict\n\n# Numpy support\nimport numpy as np\nimport tvm\n\nfrom tvm.ir import IRModule\nfrom tvm.relay.prelude import Prelude\nfrom tvm.ir import structural_hash as s_hash\n\nfrom .. import analysis\nfrom .. import expr as _expr\nfrom .. import function as _function\nfrom .. import op as _op\nfrom ..expr_functor import ExprMutator, ExprVisitor\nfrom .common import AttrCvt, get_relay_op\nfrom .common import infer_type as _infer_type\nfrom .common import infer_shape as _infer_shape\nfrom .common import infer_channels as _infer_channels\nfrom .common import infer_value as _infer_value\nfrom .common import infer_value_simulated as _infer_value_simulated\n\n__all__ = ['from_tensorflow']\n\n\ndef _get_pad_pair(input1d, kernel1d, stride1d):\n if input1d % stride1d == 0:\n pad = max(kernel1d - stride1d, 0)\n else:\n pad = max(kernel1d - (input1d % stride1d), 0)\n\n pad_before = pad // 2\n pad_after = pad - pad_before\n\n return [pad_before, pad_after]\n\ndef _math_name_picker(surfix):\n def _impl(attr):\n return 'broadcast_' + surfix\n return _impl\n\ndef _dimension_picker(prefix, surfix=''):\n def _impl(attr):\n kernel = attr['kernel_shape']\n if len(kernel) == 2:\n return prefix + '2d' + surfix\n if len(kernel) == 3:\n return prefix + '3d' + surfix\n raise tvm.error.OpAttributeInvalid(\n 'Only 2D or 3D kernels are supported for operator {}'.format(prefix + '2d or 3d'))\n return _impl\n\ndef _dimension_constraint():\n def _dim_check(attrs):\n if len(attrs['kernel_shape']) in (2, 3):\n return True\n return False\n return _dim_check, \"Only 2d or 3d kernel supported.\"\n\ndef _get_param(params, input_node):\n if isinstance(input_node, _expr.Constant):\n return np.atleast_1d(input_node.data.asnumpy())\n return params[input_node.name_hint].asnumpy()\n\ndef _get_num_param(params, input_node):\n return _get_param(params, input_node).item()\n\ndef _get_list_param(params, input_node):\n return _get_param(params, input_node).tolist()\n\ndef _get_tuple_param(params, input_node):\n return tuple(_get_param(params, input_node))\n\ndef _need_prelude_for_shape_inference(op):\n return \"TensorArray\" in op\n\ndef _rsqrt():\n def _impl(inputs, attr, params, mod):\n inputs.append(tvm.relay.const(-0.5, attr['T'].name))\n return AttrCvt(op_name=\"power\")(inputs, attr)\n return _impl\n\ndef _argx(func, func_name):\n \"\"\" A common wrapper for argmin and argmax operations \"\"\"\n def _impl(inputs, attr, params, mod):\n try:\n # In Tensorflow, `axis` argument is a Tensor, not attribute. 
We\n # support the case where it inputs from a scalar constant.\n axis_input_value = [_get_num_param(params, inputs[1])]\n except (IndexError, KeyError):\n raise TypeError(\n \"Unsupported argument for `{}` : `axis` should be a constant\".format(func_name))\n return func(inputs[0], axis=axis_input_value, keepdims=False)\n return _impl\n\ndef _elemwise(name):\n def _impl(inputs, attr, params, mod):\n assert len(inputs) == 2, \"{} take 2 inputs, {} given\".format(name, len(inputs))\n return get_relay_op(name)(*inputs)\n return _impl\n\ndef _pool3d(name):\n def _impl(inputs, attr, params, mod):\n attr['data_format'] = attr['data_format'].decode(\"utf-8\")\n flip_layout = False\n\n input_shape = _infer_shape(inputs[0], mod)\n\n if attr['data_format'] == 'NDHWC':\n attr['kernel_shape'] = (attr['ksize'][1], attr['ksize'][2], attr['ksize'][3])\n attr['strides'] = (attr['strides'][1], attr['strides'][2], attr['strides'][3])\n elif attr['data_format'] == 'NCDHW':\n attr['kernel_shape'] = (attr['ksize'][2], attr['ksize'][3], attr['ksize'][4])\n attr['strides'] = (attr['strides'][2], attr['strides'][3], attr['strides'][4])\n else:\n msg = 'Value {} of attribute \"data_format\" of operator Pooling ' \\\n 'is not valid.'\n raise tvm.error.OpAttributeInvalid(msg.format(attr['data_format']))\n if attr['data_format'] == \"NDHWC\":\n input_shape = [_infer_shape(inputs[0], mod)[i] for i in (0, 4, 1, 2, 3)]\n inputs[0] = _op.transpose(inputs[0], axes=(0, 4, 1, 2, 3))\n attr['data_format'] = \"NCDHW\"\n flip_layout = True\n\n attr['padding'] = attr['padding'].decode(\"utf-8\")\n\n if attr['padding'] == 'VALID':\n attr['padding'] = [0, 0, 0, 0, 0, 0]\n elif attr['padding'] == 'SAME':\n stride_d, stride_h, stride_w = attr['strides']\n kernel_d, kernel_h, kernel_w = attr['kernel_shape']\n if attr['data_format'] == 'NDHWC':\n in_d = input_shape[1]\n in_h = input_shape[2]\n in_w = input_shape[3]\n else:\n in_d = input_shape[2]\n in_h = input_shape[3]\n in_w = input_shape[4]\n pad_d = _get_pad_pair(in_d, kernel_d, stride_d)\n pad_v = _get_pad_pair(in_h, kernel_h, stride_h)\n pad_h = _get_pad_pair(in_w, kernel_w, stride_w)\n\n attr['padding'] = [pad_d[0], pad_v[0], pad_h[0], pad_d[1], pad_v[1], pad_h[1]]\n else:\n msg = 'Value {} in attribute \"padding\" of operator Pooling is ' \\\n 'not valid.'\n raise tvm.error.OpAttributeInvalid(msg.format(attr['padding']))\n\n if name == \"avg_pool\":\n attr['count_include_pad'] = False\n attr['ceil_mode'] = False\n out = AttrCvt(\n op_name=name,\n transforms={\n 'kernel_shape': 'pool_size',\n 'data_format': 'layout'},\n ignores=['ksize'])(inputs, attr)\n if flip_layout:\n out = _op.transpose(out, axes=(0, 2, 3, 4, 1))\n return out\n\n return _impl\n\ndef _pooling(name):\n def _impl(inputs, attr, params, mod):\n\n attr['data_format'] = attr['data_format'].decode(\"utf-8\")\n flip_layout = False\n\n input_shape = _infer_shape(inputs[0], mod)\n\n if attr['data_format'] == 'NHWC':\n attr['kernel_shape'] = (attr['ksize'][1], attr['ksize'][2])\n attr['strides'] = (attr['strides'][1], attr['strides'][2])\n elif attr['data_format'] == 'NCHW':\n attr['kernel_shape'] = (attr['ksize'][2], attr['ksize'][3])\n attr['strides'] = (attr['strides'][2], attr['strides'][3])\n else:\n msg = 'Value {} of attribute \"data_format\" of operator Pooling ' \\\n 'is not valid.'\n raise tvm.error.OpAttributeInvalid(msg.format(attr['data_format']))\n\n if attr['_target_layout'] == \"NCHW\" and attr['data_format'] == \"NHWC\":\n tmp_shape = _infer_shape(inputs[0], mod)\n input_shape = [tmp_shape[ii] for ii 
in (0, 3, 1, 2)]\n inputs[0] = _op.transpose(inputs[0], axes=(0, 3, 1, 2))\n attr['data_format'] = \"NCHW\"\n flip_layout = True\n\n # Fix padding\n attr['padding'] = attr['padding'].decode(\"utf-8\")\n\n if attr['padding'] == 'VALID':\n attr['padding'] = [0, 0]\n elif attr['padding'] == 'SAME':\n stride_h, stride_w = attr['strides']\n kernel_h, kernel_w = attr['kernel_shape']\n if attr['data_format'] == 'NHWC':\n in_h = input_shape[1]\n in_w = input_shape[2]\n else:\n in_h = input_shape[2]\n in_w = input_shape[3]\n\n pad_v = _get_pad_pair(in_h, kernel_h, stride_h)\n pad_h = _get_pad_pair(in_w, kernel_w, stride_w)\n\n attr['padding'] = [pad_v[0], pad_h[0], pad_v[1], pad_h[1]]\n else:\n msg = 'Value {} in attribute \"padding\" of operator Pooling is ' \\\n 'not valid.'\n raise tvm.error.OpAttributeInvalid(msg.format(attr['padding']))\n\n if name == \"avg_pool\":\n attr['count_include_pad'] = False\n\n out = AttrCvt(\n op_name=_dimension_picker(name),\n transforms={\n 'kernel_shape':'pool_size',\n 'data_format':'layout'},\n ignores=['ksize'],\n extras={'ceil_mode': False},\n custom_check=_dimension_constraint())(inputs, attr)\n\n if flip_layout:\n out = _op.transpose(out, axes=(0, 2, 3, 1))\n\n return out\n return _impl\n\ndef _conv(opname):\n def _impl(inputs, attr, params, mod):\n attr['data_format'] = attr['data_format'].decode(\"utf-8\")\n flip_layout = False\n\n if opname == 'conv_transpose' and attr['data_format'] == 'NHWC':\n # transform to NCHW for TVM backend compatible and set 'flip_layout'\n # to have output flip back to NHWC\n tmp_shape = _infer_shape(inputs[2], mod)\n tmp_shape = [tmp_shape[ii] for ii in (0, 3, 1, 2)]\n inputs[2] = _op.transpose(inputs[2], axes=(0, 3, 1, 2))\n attr['strides'][1], attr['strides'][2], attr['strides'][3] = \\\n attr['strides'][3], attr['strides'][1], attr['strides'][2]\n attr['data_format'] = 'NCHW'\n\n if opname == 'conv_transpose' and len(attr['_output_shapes']) > 0:\n tmp_shape = attr['_output_shapes'][0]\n tmp_shape = [tmp_shape[ii] for ii in (0, 3, 1, 2)]\n attr['_output_shapes'][0] = tmp_shape\n\n flip_layout = True\n\n inputs_data = inputs[0] if opname != 'conv_transpose' else inputs[2]\n\n # NCHW Layout require weights transpose\n weights_shape = _infer_shape(inputs[1])\n if attr['data_format'] == 'NCHW':\n tmp_shape = weights_shape\n if opname in ['conv', 'conv_transpose']:\n tmp_shape = [tmp_shape[ii] for ii in (3, 2, 0, 1)]\n inputs[1] = _op.transpose(inputs[1], axes=(3, 2, 0, 1))\n else:\n tmp_shape = [tmp_shape[ii] for ii in (2, 3, 0, 1)]\n inputs[1] = _op.transpose(inputs[1], axes=(2, 3, 0, 1))\n weights_shape = tmp_shape\n\n\n input_shape = _infer_shape(inputs_data)\n if attr['_target_layout'] == \"NCHW\" and attr['data_format'] == \"NHWC\":\n input_shape = [input_shape[ii] for ii in (0, 3, 1, 2)]\n inputs_data = _op.transpose(inputs_data, axes=(0, 3, 1, 2))\n if opname in ['conv', 'conv_transpose']:\n weights_shape = [weights_shape[ii] for ii in (3, 2, 0, 1)]\n inputs[1] = _op.transpose(inputs[1], axes=(3, 2, 0, 1))\n else:\n weights_shape = [weights_shape[ii] for ii in (2, 3, 0, 1)]\n inputs[1] = _op.transpose(inputs[1], axes=(2, 3, 0, 1))\n\n attr['data_format'] = \"NCHW\"\n attr['strides'] = [attr['strides'][ii] for ii in (0, 3, 1, 2)]\n flip_layout = True\n\n if attr['data_format'] == 'NHWC':\n in_channels = input_shape[3]\n kernel_h, kernel_w, _, depth_mult = weights_shape\n attr['kernel_shape'] = (weights_shape[0], weights_shape[1])\n if opname == 'conv':\n attr['channels'] = weights_shape[3]\n elif opname == 
'conv_transpose':\n attr['channels'] = weights_shape[2]\n else:\n attr['channels'] = input_shape[3] * depth_mult\n\n if 'dilations' in attr:\n attr['dilations'] = (attr['dilations'][1], attr['dilations'][2])\n attr['strides'] = (attr['strides'][1], attr['strides'][2])\n elif attr['data_format'] == 'NCHW':\n in_channels = input_shape[1]\n _, depth_mult, kernel_h, kernel_w = weights_shape\n attr['kernel_shape'] = (weights_shape[2], weights_shape[3])\n if opname == 'conv':\n attr['channels'] = weights_shape[0]\n elif opname == 'conv_transpose':\n attr['channels'] = weights_shape[1]\n else:\n attr['channels'] = input_shape[1] * depth_mult\n if attr['channels'] < 0:\n attr['channels'] *= -1\n\n if 'dilations' in attr:\n attr['dilations'] = (attr['dilations'][2], attr['dilations'][3])\n attr['strides'] = (attr['strides'][2], attr['strides'][3])\n else:\n msg = 'Value {} in attribute \"data_format\" of operator Conv is ' \\\n 'not valid.'\n raise tvm.error.OpAttributeInvalid(msg.format(attr['data_format']))\n\n if opname == 'depthwise':\n attr['groups'] = in_channels\n\n # Fix padding\n attr['padding'] = attr['padding'].decode(\"utf-8\")\n\n if attr['padding'] == 'VALID':\n attr['padding'] = [0, 0]\n elif attr['padding'] == 'SAME':\n stride_h, stride_w = attr['strides']\n kernel_h, kernel_w = attr['kernel_shape']\n\n pdata_shape = input_shape\n if opname == 'conv_transpose' and len(attr['_output_shapes']) > 0:\n pdata_shape = attr['_output_shapes'][0]\n\n if attr['data_format'] == 'NHWC':\n in_h = pdata_shape[1]\n in_w = pdata_shape[2]\n else:\n in_h = pdata_shape[2]\n in_w = pdata_shape[3]\n\n dilation_h = attr['dilations'][0]\n dilation_w = attr['dilations'][1]\n dilated_kernel_h = (kernel_h - 1) * dilation_h + 1\n dilated_kernel_w = (kernel_w - 1) * dilation_w + 1\n pad_v = _get_pad_pair(in_h, dilated_kernel_h, stride_h)\n pad_h = _get_pad_pair(in_w, dilated_kernel_w, stride_w)\n\n attr['padding'] = [pad_v[0], pad_h[0], pad_v[1], pad_h[1]]\n else:\n msg = 'Value {} in attribute \"padding\" of operator Conv is not ' \\\n 'valid.'\n raise tvm.error.OpAttributeInvalid(msg.format(attr['padding']))\n\n if 'kernel_layout' not in attr:\n if opname in ['conv', 'conv_transpose']:\n attr['kernel_layout'] = 'HWIO' if attr['data_format'] == 'NHWC' else 'OIHW'\n else:\n attr['kernel_layout'] = 'HWOI' if attr['data_format'] == 'NHWC' else 'OIHW'\n\n use_bias = len(inputs) == (3 if opname != 'conv_transpose' else 4)\n channel_axis = 1 if attr['data_format'] == \"NCHW\" else 3\n\n # Ignore the new attributes from TF2.0, for now.\n out = AttrCvt(\n op_name=_dimension_picker('conv',\n surfix=\"_transpose\" if opname == 'conv_transpose' else \"\"),\n ignores=['explicit_paddings'],\n transforms={\n 'kernel_shape': 'kernel_size',\n 'data_format': 'data_layout',\n 'dilations': ('dilation', (0, 0)),\n 'group': ('groups', 1)},\n custom_check=_dimension_constraint())([inputs_data, inputs[1]], attr)\n\n if use_bias:\n out = _op.nn.bias_add(out,\n inputs[2] if opname != 'conv_transpose' else inputs[3],\n axis=channel_axis)\n\n if flip_layout:\n out = _op.transpose(out, axes=(0, 2, 3, 1))\n\n return out\n return _impl\n\n\n# Dilation2d\ndef _dilation2d():\n def _impl(inputs, attr, params, mod):\n if 'data_format' not in attr:\n attr['data_format'] = 'NHWC'\n\n input_shape = _infer_shape(inputs[0], mod)\n weights_shape = _infer_shape(inputs[1], mod)\n\n if attr['_target_layout'] == \"NCHW\" and attr['data_format'] == \"NHWC\":\n input_shape = [input_shape[ii] for ii in (0, 3, 1, 2)]\n inputs[0] = _op.transpose(inputs[0], 
axes=(0, 3, 1, 2))\n weights_shape = [weights_shape[ii] for ii in (2, 0, 1)]\n inputs[1] = _op.transpose(inputs[1], axes=(2, 0, 1))\n attr['data_format'] = \"NCHW\"\n\n if attr['data_format'] in ['NHWC', 'NCHW']:\n if 'rates' in attr:\n attr['dilations'] = attr['rates']\n if 'dilations' in attr:\n attr['dilations'] = (attr['dilations'][1], attr['dilations'][2])\n attr['strides'] = (attr['strides'][1], attr['strides'][2])\n else:\n msg = 'Value {} in attribute \"data_format\" of operator Dilation2D is ' \\\n 'not valid.'\n raise tvm.error.OpAttributeInvalid(msg.format(attr['data_format']))\n\n attr['padding'] = attr['padding'].decode(\"utf-8\")\n if attr['padding'] == 'VALID':\n attr['padding'] = [0, 0]\n elif attr['padding'] == 'SAME':\n stride_h, stride_w = attr['strides']\n if attr['data_format'] == 'NHWC':\n kernel_h, kernel_w = weights_shape[0], weights_shape[1]\n else:\n kernel_h, kernel_w = weights_shape[1], weights_shape[2]\n if attr['data_format'] == 'NHWC':\n in_h = input_shape[1]\n in_w = input_shape[2]\n else:\n in_h = input_shape[2]\n in_w = input_shape[3]\n\n dilation_h = attr['dilations'][0]\n dilation_w = attr['dilations'][1]\n dilated_kernel_h = (kernel_h - 1) * dilation_h + 1\n dilated_kernel_w = (kernel_w - 1) * dilation_w + 1\n pad_v = _get_pad_pair(in_h, dilated_kernel_h, stride_h)\n pad_h = _get_pad_pair(in_w, dilated_kernel_w, stride_w)\n\n if attr['data_format'] == 'NHWC':\n inputs[0] = _op.nn.pad(data=inputs[0],\n pad_width=((0, 0),\n (pad_v[0], pad_v[1]),\n (pad_h[0], pad_h[1]),\n (0, 0)))\n else:\n inputs[0] = _op.nn.pad(data=inputs[0],\n pad_width=((0, 0),\n (0, 0),\n (pad_v[0], pad_v[1]),\n (pad_h[0], pad_h[1])))\n\n attr['padding'] = [0, 0]\n\n else:\n msg = 'Value {} in attribute \"padding\" of operator Dilation2d is not ' \\\n 'valid.'\n raise tvm.error.OpAttributeInvalid(msg.format(attr['padding']))\n\n attr['kernel_layout'] = 'HWI' if attr['data_format'] == 'NHWC' else 'IHW'\n out = AttrCvt(\n op_name='dilation2d',\n ignores=['explicit_paddings', 'rates'],\n transforms={\n 'data_format': 'data_layout',\n })([inputs[0], inputs[1]], attr)\n if attr['_target_layout'] == \"NCHW\":\n out = _op.transpose(out, axes=(0, 2, 3, 1))\n return out\n\n return _impl\n\n\ndef _conv3d(opname):\n def _impl(inputs, attr, params, mod):\n attr['data_format'] = attr['data_format'].decode(\"utf-8\")\n flip_layout = False\n\n inputs_data = inputs[0] if opname != 'conv_transpose' else inputs[2]\n\n # NCDHW Layout require weights transpose\n weights_shape = _infer_shape(inputs[1], mod)\n if attr['data_format'] == 'NCDHW':\n tmp_shape = weights_shape\n tmp_shape = [tmp_shape[ii] for ii in (4, 3, 0, 1, 2)]\n inputs[1] = _op.transpose(inputs[1], axes=(4, 3, 0, 1, 2))\n weights_shape = tmp_shape\n\n input_shape = _infer_shape(inputs_data, mod)\n\n if attr['_target_layout'] == \"NCDHW\" and attr['data_format'] == \"NDHWC\":\n input_shape = [input_shape[ii] for ii in (0, 4, 1, 2, 3)]\n inputs_data = _op.transpose(inputs_data, axes=(0, 4, 1, 2, 3))\n weights_shape = [weights_shape[ii] for ii in (4, 3, 0, 1, 2)]\n inputs[1] = _op.transpose(inputs[1], axes=(4, 3, 0, 1, 2))\n\n attr['data_format'] = \"NCDHW\"\n attr['strides'] = [attr['strides'][ii] for ii in (0, 4, 1, 2, 3)]\n flip_layout = True\n\n if attr['data_format'] == 'NDHWC':\n kernel_d, kernel_h, kernel_w, _, _ = weights_shape\n attr['kernel_shape'] = (kernel_d, kernel_h, kernel_w)\n if opname == 'conv':\n attr['channels'] = weights_shape[4]\n elif opname == 'conv_transpose':\n attr['channels'] = weights_shape[3]\n\n if 'dilations' 
in attr:\n attr['dilations'] = \\\n (attr['dilations'][1], attr['dilations'][2], attr['dilations'][3])\n attr['strides'] = (attr['strides'][1], attr['strides'][2], attr['strides'][3])\n elif attr['data_format'] == 'NCDHW':\n _, _, kernel_d, kernel_h, kernel_w = weights_shape\n attr['kernel_shape'] = (kernel_d, kernel_h, kernel_w)\n if opname == 'conv':\n attr['channels'] = weights_shape[0]\n elif opname == 'conv_transpose':\n attr['channels'] = weights_shape[1]\n\n if 'dilations' in attr:\n attr['dilations'] = \\\n (attr['dilations'][2], attr['dilations'][3], attr['dilations'][4])\n attr['strides'] = (attr['strides'][2], attr['strides'][3], attr['strides'][4])\n else:\n msg = 'Value {} in attribute \"data_format\" of operator Conv is ' \\\n 'not valid.'\n raise tvm.error.OpAttributeInvalid(msg.format(attr['data_format']))\n\n # Fix padding\n attr['padding'] = attr['padding'].decode(\"utf-8\")\n\n if attr['padding'] == 'VALID':\n attr['padding'] = [0, 0, 0]\n elif attr['padding'] == 'SAME':\n stride_d, stride_h, stride_w = attr['strides']\n kernel_d, kernel_h, kernel_w = attr['kernel_shape']\n\n pdata_shape = input_shape\n if opname == 'conv_transpose' and len(attr['_output_shapes']) > 0:\n pdata_shape = attr['_output_shapes'][0]\n\n if attr['data_format'] == 'NDHWC':\n in_d = pdata_shape[1]\n in_h = pdata_shape[2]\n in_w = pdata_shape[3]\n else:\n in_d = pdata_shape[2]\n in_h = pdata_shape[3]\n in_w = pdata_shape[4]\n\n dilation_d = attr['dilations'][0]\n dilation_h = attr['dilations'][1]\n dilation_w = attr['dilations'][2]\n dilated_kernel_d = (kernel_d - 1) * dilation_d + 1\n dilated_kernel_h = (kernel_h - 1) * dilation_h + 1\n dilated_kernel_w = (kernel_w - 1) * dilation_w + 1\n pad_d = _get_pad_pair(in_d, dilated_kernel_d, stride_d)\n pad_v = _get_pad_pair(in_h, dilated_kernel_h, stride_h)\n pad_h = _get_pad_pair(in_w, dilated_kernel_w, stride_w)\n\n attr['padding'] = [pad_d[0], pad_v[0], pad_h[0], pad_d[1], pad_v[1], pad_h[1]]\n\n else:\n msg = 'Value {} in attribute \"padding\" of operator Conv is not ' \\\n 'valid.'\n raise tvm.error.OpAttributeInvalid(msg.format(attr['padding']))\n\n if 'kernel_layout' not in attr:\n attr['kernel_layout'] = 'DHWIO' if attr['data_format'] == 'NDHWC' else 'OIDHW'\n\n use_bias = len(inputs) == (3 if opname != 'conv_transpose' else 4)\n channel_axis = 1 if attr['data_format'] == \"NCDHW\" else 4\n\n # Ignore the new attributes from TF2.0, for now.\n out = AttrCvt(\n op_name=_dimension_picker('conv',\n surfix=\"_transpose\" if opname == 'conv_transpose' else \"\"),\n ignores=['explicit_paddings'],\n transforms={\n 'kernel_shape': 'kernel_size',\n 'data_format': 'data_layout',\n 'dilations': ('dilation', (0, 0)),\n 'group': ('groups', 1)},\n custom_check=_dimension_constraint())([inputs_data, inputs[1]], attr)\n\n if use_bias:\n out = _op.nn.bias_add(out,\n inputs[2] if opname != 'conv_transpose' else inputs[3],\n axis=channel_axis)\n\n if flip_layout:\n out = _op.transpose(out, axes=(0, 2, 3, 4, 1))\n\n return out\n return _impl\n\ndef _decode_image():\n def _impl(inputs, attr, params, mod):\n # Image decode wrapper: Expecting user to feed decoded input to next layer drop this layer.\n warnings.warn(\"DecodeJpeg: It's a pass through, please handle preprocessing before input\")\n return inputs[0]\n return _impl\n\ndef _unravel_index():\n def _impl(inputs, attr, params, mod):\n return _op.unravel_index(inputs[0], inputs[1])\n return _impl\n\ndef _crop_and_resize():\n def _impl(inputs, attr, params, mod):\n # input image is a 4-D tensor of shape [batch, 
image_height, image_width, depth]\n # boxes is a 2-D tensor of shape [num_boxes, 4], 4 is for [y1, x1, y2, x2]\n try:\n crop_size = _get_list_param(params, inputs[3])\n except (IndexError, KeyError):\n crop_size = _infer_value(inputs[3], params).asnumpy().tolist()\n\n method = attr['method'].decode()\n method = 'nearest_neighbor' if method == 'nearest' else method\n if method not in ['bilinear', 'nearest_neighbor']:\n raise tvm.error.OpAttributeUnImplemented(\n 'Method {} is not supported'.format(method))\n layout = attr['layout'] if 'layout' in attr else 'NHWC'\n extrapolation_value = attr['extrapolation_value']\n\n return get_relay_op(\"crop_and_resize\")(inputs[0], inputs[1], inputs[2], crop_size,\n layout, method, extrapolation_value)\n return _impl\n\ndef _cast():\n def _impl(inputs, attr, params, mod):\n return inputs[0].astype(attr['DstT'].name)\n return _impl\n\ndef _expand_dims():\n def _impl(inputs, attr, params, mod):\n dim_input = inputs.pop(1)\n axis = _get_num_param(params, dim_input)\n return AttrCvt(op_name=\"expand_dims\", ignores=['Tdim', 'N'],\n extras={'axis': int(axis), 'num_newaxis': 1})(inputs, attr)\n return _impl\n\ndef _resize(method):\n def _impl(inputs, attr, params, mod):\n if attr['_output_shapes'][0] is not None:\n size = attr['_output_shapes'][0][1:3]\n # Important that the size is defined. If an axis is not, we need to infer what\n # the shape should be.\n if -1 in size:\n size = _infer_value(inputs[1], params).asnumpy().reshape([-1]).tolist()\n else:\n size = _infer_value(inputs[1], params).asnumpy().reshape([-1]).tolist()\n\n attr['size'] = size\n inputs.pop(1)\n # NHWC\n attr['layout'] = 'NHWC'\n if attr.pop('align_corners') is True:\n attr['coordinate_transformation_mode'] = 'align_corners'\n else:\n attr['coordinate_transformation_mode'] = 'asymmetric'\n\n # Ignore the new attributes from TF2.0, for now.\n return AttrCvt(op_name='resize',\n ignores=['Tdim', 'half_pixel_centers'],\n extras={'method': method})(inputs, attr)\n return _impl\n\ndef _check_numerics():\n def _impl(inputs, attr, params, mod):\n # Making a copy node assuming no need to verify\n return AttrCvt(op_name=\"copy\", ignores=['message'])(inputs, attr)\n return _impl\n\ndef _assert():\n # ToDo: In general people want asserts to be gone from TensorFlow graphs\n # when they are optimizing them, so converting it to a no-op is\n # reasonable. However, it would be nice to have the option to keep them\n # once Relay gets a Halt or Assert op.\n return _no_op()\n\ndef _no_op():\n def _impl(inputs, attr, params, mod):\n # ToDo: This should really be an op that returns nothing, which could\n # be represented as an empty tuple. It turns out that TVM\n # infrastructure doesn't like running functions that return None and\n # also don't like running functions that return an empty tuple. So it\n # doesn't work, but it should be made to work and then this could be\n # improved. 
In the mean time, it is hard to imagine a case where it\n # matters in any real way that a no-op is converted to a constant 0.\n return tvm.relay.const(0)\n return _impl\n\ndef _matmul():\n def _impl(inputs, attr, params, mod):\n channels = _infer_channels(inputs[1], not attr['transpose_b'])\n if attr['transpose_a']:\n inputs[0] = _op.transpose(inputs[0], axes=(1, 0))\n if not attr['transpose_b']:\n inputs[1] = _op.transpose(inputs[1], axes=(1, 0))\n return AttrCvt(op_name=\"dense\",\n extras={'units': channels},\n ignores=['transpose_a', 'transpose_b', 'T'])(inputs, attr)\n\n return _impl\n\ndef _batch_matmul():\n def _impl(inputs, attr, params, mod):\n input_x = inputs[0]\n input_y = inputs[1]\n orig_shape_x = _infer_shape(input_x, mod)\n orig_shape_y = _infer_shape(input_y, mod)\n\n # reshape n-dimensional batch matmul into 3d\n if len(orig_shape_x) > 3:\n outer_dims = [orig_shape_x[i] for i in range(0, len(orig_shape_x) - 2)]\n num_outer_elts = np.prod(outer_dims)\n new_shape_x = (num_outer_elts, orig_shape_x[-2], orig_shape_x[-1])\n new_shape_y = (num_outer_elts, orig_shape_y[-2], orig_shape_y[-1])\n input_x = _op.reshape(input_x, newshape=new_shape_x)\n input_y = _op.reshape(input_y, newshape=new_shape_y)\n\n adj_x = attr['adj_x']\n adj_y = attr['adj_y']\n input_x = _op.transpose(input_x, axes=[0, 2, 1]) if adj_x else input_x\n input_y = _op.transpose(input_y, axes=[0, 2, 1]) if not adj_y else input_y\n ret = get_relay_op('batch_matmul')(input_x, input_y)\n\n # reshape result back to n-dimensional\n if len(orig_shape_x) > 3:\n final_shape = list(orig_shape_x)\n final_shape[-2] = orig_shape_x[-1] if adj_x else orig_shape_x[-2]\n final_shape[-1] = orig_shape_y[-2] if adj_y else orig_shape_y[-1]\n ret = _op.reshape(ret, newshape=final_shape)\n\n return ret\n return _impl\n\ndef _identity():\n def _impl(inputs, attr, params, mod):\n return inputs[0]\n return _impl\n\ndef _concatV2():\n def _impl(inputs, attr, params, mod):\n pop_node = inputs.pop(len(inputs)-1)\n axis = int(_get_num_param(params, pop_node))\n return AttrCvt(\n op_name=\"concatenate\", ignores=['T', 'N', 'Tidx'],\n extras={'axis': axis})([inputs], attr)\n return _impl\n\ndef _concat():\n def _impl(inputs, attr, params, mod):\n pop_node = inputs.pop(0)\n axis = int(_get_num_param(params, pop_node))\n return AttrCvt(\n op_name=\"concatenate\", ignores=['N'],\n extras={'axis': axis})([inputs], attr)\n return _impl\n\ndef _pack():\n def _impl(inputs, attr, params, mod):\n axis = int(attr[\"axis\"])\n inputs_reshaped = [_op.expand_dims(i, axis=axis, num_newaxis=1) for i in inputs]\n return _op.concatenate(inputs_reshaped, axis)\n return _impl\n\ndef _tensor_array():\n def _impl(inputs, attr, params, prelude):\n dtype_str = attr.get('dtype').name\n tensor_array_constructor = prelude.get_var('tensor_array', dtype_str)\n return tensor_array_constructor(_op.take(inputs[0], tvm.relay.const(0)))\n return _impl\n\ndef _tensor_array_scatter():\n def _impl(inputs, attr, params, prelude):\n dtype_str = attr.get('T').name\n values_rank = len(inputs[2].type_annotation.shape)\n unstack_name = \"tensor_array_unstack_tensor{}\".format(values_rank)\n unstack_function = prelude.get_var(unstack_name, dtype_str)\n values = unstack_function(inputs[2])\n tensor_array_scatter_func = prelude.get_var('tensor_array_scatter', dtype_str)\n return tensor_array_scatter_func(inputs[0], inputs[1], values)\n return _impl\n\ndef _tensor_array_gather():\n def _impl(inputs, attr, params, prelude):\n return prelude.tensor_array_gather(inputs[2], inputs[1])\n 
return _impl\n\ndef _tensor_array_size():\n def _impl(inputs, attr, params, prelude):\n return prelude.length(inputs[0])\n return _impl\n\ndef _tensor_array_write():\n def _impl(inputs, attr, params, prelude):\n input_rank = len(inputs[2].type_annotation.shape)\n dtype = attr.get('T').name\n\n tensor_name = 'tensor{}'.format(input_rank)\n tensor_func = prelude.get_var(tensor_name, dtype)\n v = tensor_func(inputs[2])\n write_func = prelude.get_var('tensor_array_write', dtype)\n\n return write_func(inputs[3], _op.take(inputs[1], tvm.relay.const(0)), v)\n return _impl\n\ndef _tensor_array_read():\n def _impl(inputs, attr, params, prelude):\n read_func = prelude.get_var('tensor_array_read', attr.get('dtype').name)\n return read_func(inputs[2], _op.take(inputs[1], tvm.relay.const(0)))\n return _impl\n\ndef _tensor_array_split():\n def _impl(inputs, attr, params, prelude):\n input_rank = len(inputs[1].type_annotation.shape)\n dtype_str = attr.get('T').name\n v = prelude.get_var(\"tensor{}\".format(input_rank), dtype_str)(inputs[1])\n lengths = _op.cast(inputs[2], 'int32')\n split_var = prelude.get_var('tensor_array_split', dtype_str)\n return split_var(inputs[0], v, lengths)\n return _impl\n\ndef _tensor_array_concat():\n def _impl(inputs, attr, params, prelude):\n concat_func = prelude.get_var('tensor_array_concat', attr['dtype'].name)\n return concat_func(inputs[1])\n return _impl\n\ndef _tile():\n def _impl(inputs, attr, params, mod):\n reps = _get_list_param(params, inputs.pop())\n new_input = []\n new_input.append(inputs.pop(0))\n\n return AttrCvt(\n op_name='tile',\n extras={'reps': tuple(reps)},\n ignores=['Tmultiples'])(new_input, attr)\n return _impl\n\ndef _slice():\n def _impl(inputs, attr, params, mod):\n try:\n begin = _get_list_param(params, inputs[1])\n except (IndexError, KeyError, AttributeError):\n begin = _infer_value(inputs[1], params).asnumpy().tolist()[0]\n try:\n size = _get_list_param(params, inputs[2])\n except (IndexError, KeyError, AttributeError):\n # Handle symbolic size\n try:\n size = _infer_value(inputs[2], params).asnumpy().tolist()[0]\n except Exception:\n size = inputs[2]\n data_shape = _infer_shape(inputs[0], mod)\n data_dim = len(data_shape)\n end = size\n if not isinstance(end, (_expr.Call, _expr.Var)):\n for i in range(data_dim):\n if size[i] == -1:\n end[i] = data_shape[i]\n else:\n end[i] += begin[i]\n return _op.strided_slice(inputs[0], begin=begin, end=end)\n return _impl\n\n\ndef _reshape():\n def _impl(inputs, attr, params, mod):\n pop_node = inputs.pop(1)\n\n try:\n shape_arg = _get_tuple_param(params, pop_node)\n except AttributeError:\n # Shape operator is already pruned, hence\n # try to infer shape by precompute prune if possible.\n try:\n params_new = _infer_value(pop_node, params)\n shape_arg = tuple(params_new.asnumpy().astype('int64').flatten())\n except Exception:\n # Deal with symbolic shape case.\n # Currently only shape_of can be the direct ancestor.\n if not isinstance(pop_node, tvm.relay.expr.Call) or \\\n \"shape_of\" not in str(pop_node.op):\n raise RuntimeError(\"If shape operator is used in reshape to \"\n \"express reshape_like, shape_of must be \"\n \"the direct ancestor of reshape when input \"\n \"shape is symbolic.\")\n return _op.reshape_like(inputs[0], pop_node.args[0])\n return AttrCvt(\n op_name=\"reshape\",\n extras={'newshape': shape_arg},\n ignores=['Tshape'])(inputs, attr)\n return _impl\n\n\ndef _depth_to_space():\n def _impl(inputs, attr, params, mod):\n block_size = int(attr['block_size'])\n layout = 
attr['data_format'].decode(\"utf-8\")\n return _op.nn.depth_to_space(inputs[0], block_size, layout)\n\n return _impl\n\n\ndef _space_to_depth():\n def _impl(inputs, attr, params, mod):\n block_size = int(attr['block_size'])\n layout = attr['data_format'].decode(\"utf-8\")\n return _op.nn.space_to_depth(inputs[0], block_size, layout)\n\n return _impl\n\n\ndef _bias_add():\n def _impl(inputs, attr, params, mod):\n # Must expand for proper broadcasting in NCHW.\n if attr['data_format'].decode(\"utf-8\") == 'NCHW':\n bias = _op.reshape(inputs[1], newshape=(1, -1, 1, 1))\n else:\n bias = inputs[1]\n return _op.add(inputs[0], bias)\n return _impl\n\ndef _broadcast_to():\n def _impl(inputs, attr, params, mod):\n if isinstance(inputs[1], _expr.Var):\n shape = params[inputs[1].name_hint]\n else:\n shape = _infer_value(inputs[1], params)\n shape = list(shape.asnumpy().reshape([-1]))\n return _op.broadcast_to(inputs[0], shape)\n return _impl\n\ndef _squeeze():\n def _impl(inputs, attr, params, mod):\n if len(attr['squeeze_dims']) == 0:\n attr['squeeze_dims'] = None\n return AttrCvt(\n op_name=\"squeeze\",\n transforms={'squeeze_dims':'axis'},\n ignores=['T'])(inputs, attr)\n return _impl\n\ndef _fused_batch_norm():\n def _impl(inputs, attr, params, mod):\n # Tensorflow: (data, gamma, beta, moving_mean, moving_variance)\n # Relay: (data, gamma, beta, moving_mean, moving_varience)\n assert len(inputs) == 5\n axis = 3\n need_cast = False\n\n if 'data_format' in attr:\n attr['data_format'] = attr['data_format'].decode(\"utf-8\")\n if attr['data_format'] == 'NCHW':\n axis = 1\n if 'U' in attr:\n need_cast = True\n inputs[0] = _op.cast(inputs[0], dtype=attr['U'].name)\n # Check if mean and variance are empty\n # If so, replace them with Mean and Variance Ops\n # For run-time calculation\n moving_mean_shape = [int(n) for n in inputs[3].type_annotation.shape]\n moving_variance_shape = [int(n) for n in inputs[4].type_annotation.shape]\n if (moving_mean_shape[0] == 0 and moving_variance_shape[0] == 0):\n inputs[3] = _op.mean(inputs[0], axis=axis, keepdims=False, exclude=True)\n inputs[4] = _op.variance(inputs[0], axis=axis, keepdims=False, exclude=True)\n out = AttrCvt(op_name='batch_norm',\n transforms={'scale_after_normalization':'scale',\n 'variance_epsilon':'epsilon'},\n extras={'axis': axis},\n ignores=['data_format', 'U'],\n disables=['momentum'])(inputs, attr)\n\n if need_cast:\n out = _expr.TupleGetItem(out.astuple(), 0)\n out = _op.cast(out, dtype=attr['T'].name)\n return out\n return _impl\n\ndef _batch_norm():\n def _impl(inputs, attr, params, mod):\n # Rearrange inputs from\n # (data, moving_mean, moving_variance, beta, gamma)\n # to\n # (data, gamma, beta, moving_mean, moving_var)\n new_inputs = [inputs[0], inputs[4], inputs[3], inputs[1], inputs[2]]\n\n axis = 3\n if 'data_format' in attr:\n attr['data_format'] = attr['data_format'].decode(\"utf-8\")\n if attr['data_format'] == 'NCHW':\n axis = 1\n\n return AttrCvt(\n op_name='batch_norm',\n transforms={'scale_after_normalization':'scale', 'variance_epsilon':'epsilon'},\n extras={'axis': axis},\n ignores=['data_format'],\n disables=['momentum'])(new_inputs, attr)\n return _impl\n\ndef _relu6():\n def _impl(inputs, attr, params, mod):\n return _op.clip(inputs[0], a_min=0, a_max=6)\n return _impl\n\ndef _shape():\n def _impl(inputs, attr, params, mod):\n is_symbolic_shape = False\n input_shape = _infer_shape(inputs[0], mod)\n for axis in input_shape:\n if not isinstance(axis, (int, tvm.tir.IntImm)):\n is_symbolic_shape = True\n break\n\n if 
is_symbolic_shape:\n ret = _op.shape_of(inputs[0], dtype='int32')\n else:\n ret = np.array(input_shape, dtype='int32')\n return ret\n\n return _impl\n\ndef _fill():\n def _impl(inputs, attr, params, mod):\n output_shape = attr['_output_shapes'][0]\n # Output shape must be defined to avoid errors. If any axis is not, we must\n # try to compute its shape.\n if output_shape is None or -1 in output_shape:\n output_shape = _infer_value(inputs[0], params).asnumpy().reshape([-1]).tolist()\n\n fill_arg = _get_num_param(params, inputs.pop(1))\n dtype = attr['T'].name\n return _op.full(tvm.relay.const(fill_arg, dtype),\n output_shape, dtype)\n return _impl\n\ndef _lrn():\n def _impl(inputs, attr, params, mod):\n attr_new = {}\n depth_radius = attr.get('depth_radius', 5)\n size = (depth_radius * 2) + 1\n attr_new['axis'] = 3 # Fix axis, NHWC format\n attr_new['size'] = size\n attr_new['bias'] = attr.get('bias', 1)\n attr_new['alpha'] = attr.get('alpha', 1) * size\n attr_new['beta'] = attr.get('beta', 0.5)\n return AttrCvt(op_name='lrn')(inputs, attr_new)\n return _impl\n\ndef _sum():\n def _impl(inputs, attr, params, mod):\n axis = _get_tuple_param(params, inputs[1])\n return AttrCvt(\n op_name='sum',\n extras={'axis': axis},\n transforms={'keep_dims':'keepdims'},\n ignores=['name', 'Tidx'])([inputs[0]], attr)\n return _impl\n\ndef _reduce(op):\n def _impl(inputs, attr, params, mod):\n axis = _get_list_param(params, inputs[1])\n axis = tuple(axis)\n return AttrCvt(\n op_name=op,\n extras={'axis': axis},\n transforms={'keep_dims':'keepdims'},\n ignores=['name', 'Tidx'])([inputs[0]], attr)\n return _impl\n\ndef _square():\n def _impl(inputs, attr, params, mod):\n return _op.multiply(inputs[0], inputs[0])\n return _impl\n\ndef _gather():\n \"GatherV2, Gather\"\n def _impl(inputs, attr, params, mod):\n if len(inputs) > 2:\n axis = _get_num_param(params, inputs.pop(2))\n else:\n axis = 0\n if int(attr.get('batch_dims', 0)) != 0:\n raise tvm.error.OpAttributeUnImplemented(\n 'Attribute batch_dims is not supported')\n new_input = inputs[0:2]\n return AttrCvt(op_name=\"take\",\n extras={'axis': tvm.tir.const(axis, 'int32')},\n ignores=['Tindices', 'Tparams', 'validate_indices',\n 'Taxis', '_class', 'batch_dims'])(new_input, attr)\n return _impl\n\ndef _gather_nd():\n \"\"\"GatherNd\"\"\"\n def _impl(inputs, attr, params, mod):\n return AttrCvt(op_name=\"gather_nd\",\n ignores=['Tindices', 'Tparams',\\\n 'Taxis', '_class'])(inputs, attr)\n return _impl\n\ndef _stridedSlice():\n def _impl(inputs, attr, params, mod):\n \"\"\"Strided Slice.\n Operator description: https://www.tensorflow.org/api_docs/python/tf/strided_slice\n Tensorflow mask validation: https://github.com/tensorflow/tensorflow/blob/master/\n tensorflow/core/util/strided_slice_op.cc#L147-L368\n \"\"\"\n begin = _get_list_param(params, inputs[1])\n end = _get_list_param(params, inputs[2])\n stride = _get_list_param(params, inputs[3])\n begin_mask = int(attr.get('begin_mask', 0))\n end_mask = int(attr.get('end_mask', 0))\n ellipsis_mask = int(attr.get('ellipsis_mask', 0))\n new_axis_mask = int(attr.get('new_axis_mask', 0))\n shrink_axis_mask = int(attr.get('shrink_axis_mask', 0))\n data_shape = _infer_shape(inputs[0], mod)\n data_dim = len(data_shape)\n stride_dim = len(stride)\n\n def _transform_mask(stride_dim, ellipsis_mask):\n \"\"\"Handle mask inputs to create new begin, end, stride and output shape\"\"\"\n m_begin = [0] * data_dim\n m_end = [0] * data_dim\n m_stride = [0] * data_dim\n fshape_indices = []\n #Count new axis after ellipsis_mask, 
consider while applying ellipsis_mask.\n ellipsis_seen = False\n new_axes_after_ellipsis = 0\n for i in range(stride_dim):\n mask = 1 << i\n if ellipsis_seen and (mask & new_axis_mask) != 0:\n new_axes_after_ellipsis += 1\n if (mask & ellipsis_mask) != 0:\n ellipsis_seen = True\n if not ellipsis_seen:\n #Used later for extending the stride attributes in the below loop.\n ellipsis_mask |= (1 << stride_dim)\n stride_dim += 1\n final_index = 0\n for index in range(stride_dim):\n mask = 1 << index\n if mask & ellipsis_mask:\n #Identify the end index for applying ellipsis_mask\n to_index = min(((data_dim - (stride_dim-index)) + 1\n + new_axes_after_ellipsis), data_dim)\n for i in range(final_index, to_index):\n m_begin[final_index] = 0\n m_end[final_index] = data_shape[final_index]\n m_stride[final_index] = 1\n fshape_indices.append(final_index)\n final_index += 1\n elif mask &new_axis_mask:\n fshape_indices.append(-1)\n elif not mask & new_axis_mask:\n if final_index == len(m_begin):\n break\n if mask & begin_mask:\n m_begin[final_index] = data_shape[final_index] \\\n if stride[index] < 0 else 0\n elif begin[index]:\n m_begin[final_index] = begin[index]\n if mask & end_mask:\n m_end[final_index] = 0 if stride[index] < 0 \\\n else data_shape[final_index]\n elif end[index]:\n m_end[final_index] = end[index]\n m_stride[final_index] = stride[index]\n if mask & shrink_axis_mask:\n #Tensorflow make axis with shrink_axis_mask as dimension 1\n m_begin[final_index] = data_shape[final_index] + begin[index] \\\n if begin[index] < 0 else begin[index]\n m_end[final_index] = begin[index] + 1\n m_stride[final_index] = 1\n fshape_indices.append(-2)\n else:\n fshape_indices.append(final_index)\n\n final_index += 1\n return m_begin, m_end, m_stride, fshape_indices\n\n fshape_indices = None\n if begin_mask or end_mask or ellipsis_mask or new_axis_mask or shrink_axis_mask:\n begin, end, stride, fshape_indices = _transform_mask(stride_dim, ellipsis_mask)\n out = _op.strided_slice(inputs[0], begin=begin, end=end, strides=stride)\n out_shape = _infer_shape(out, mod)\n if not fshape_indices:\n fshape_indices = range(len(out_shape))\n\n #Create final output shape.\n final_output = []\n for gather_index in fshape_indices:\n if gather_index == -1:\n final_output.append(1)\n elif gather_index == -2:\n pass\n else:\n final_output.append(out_shape[gather_index])\n\n if not final_output:\n if not shrink_axis_mask:\n ret = out\n else:\n final_shape = []\n for dim in out_shape:\n if dim != 1:\n final_shape.append(dim)\n if len(final_shape) == 0:\n ret = _op.squeeze(out)\n else:\n # We need reshape to handle dynamic shape.\n ret = _op.reshape(out, newshape=tuple(final_shape))\n else:\n ret = _op.reshape(out, newshape=tuple(final_output))\n return ret\n return _impl\n\ndef _pad(name):\n def _impl(inputs, attr, params, mod):\n padlist = _get_param(params, inputs[1])\n paddings = tuple(tuple(l) for l in padlist)\n attr['pad_width'] = paddings\n attr['pad_value'] = 0\n new_inputs = [inputs[0]]\n if name == 'PadV2':\n constant_values = _get_num_param(params, inputs[2])\n attr['pad_value'] = constant_values\n return AttrCvt(\n op_name='pad',\n ignores=['Tpaddings'],)(new_inputs, attr)\n return _impl\n\ndef _mirror_pad():\n def _impl(inputs, attr, params, mod):\n padlist = _get_param(params, inputs[1])\n paddings = tuple(tuple(l) for l in padlist)\n attr['pad_width'] = paddings\n mode = attr['mode'].decode('utf-8')\n attr['mode'] = mode\n new_inputs = [inputs[0]]\n return AttrCvt(\n op_name='mirror_pad',\n 
ignores=['Tpaddings'],)(new_inputs, attr)\n return _impl\n\ndef _transpose():\n def _impl(inputs, attr, params, mod):\n # If perm is not specified, axes is left empty;\n # otherwise its value is taken from params\n try:\n axes = _get_list_param(params, inputs[1])\n except (IndexError, KeyError, AttributeError):\n axes = _infer_value_simulated(inputs[1], params).asnumpy()\n return _op.transpose(inputs[0], axes=axes)\n return _impl\n\ndef _where():\n def _impl(inputs, attr, params, mod):\n if len(inputs) == 1:\n return AttrCvt(op_name=\"argwhere\")(inputs, attr)\n return AttrCvt(op_name=\"where\")(inputs, attr)\n return _impl\n\ndef _clip_by_value():\n def _impl(inputs, attr, params, mod):\n a_min = _get_num_param(params, inputs[1])\n a_max = _get_num_param(params, inputs[2])\n return _op.clip(inputs[0], a_min=a_min, a_max=a_max)\n return _impl\n\ndef _reverse_v2():\n def _impl(inputs, attr, params, mod):\n axis = _get_num_param(params, inputs[1])\n return AttrCvt(\n op_name=\"reverse\",\n ignores=['Tidx'],\n extras={'axis': int(axis)})([inputs[0]], attr)\n return _impl\n\ndef _rank():\n def _impl(inputs, attr, params, mod):\n input_shape = _infer_shape(inputs[0], mod)\n\n name = attr[\"_node_name\"]\n params[name] = tvm.nd.array([len(input_shape)])\n return [_expr.var(name,\n shape=params[name].shape,\n dtype='int32')]\n\n return _impl\n\ndef _range():\n def _impl(inputs, attr, params, mod):\n try:\n start = _get_param(params, inputs[0])[0]\n except (IndexError, KeyError, AttributeError):\n try:\n start = _infer_value(inputs[0], params).asnumpy().tolist()\n start = start if not isinstance(start, list) else start[0]\n except Exception:\n # Symbolic start\n start = inputs[0]\n\n if hasattr(inputs[1], \"name_hint\") or isinstance(inputs[1], _expr.Constant):\n limit = _get_param(params, inputs[1])[0]\n else:\n if any(['Rank' in param for param in params]):\n limit = params.pop('Rank').asnumpy()[0]\n else:\n try:\n limit = _infer_value(inputs[1], params, mod).asnumpy().tolist()\n limit = limit if not isinstance(limit, list) else limit[0]\n except Exception:\n # Symbolic limit\n limit = inputs[1]\n\n try:\n delta = _get_param(params, inputs[2])[0]\n except (IndexError, KeyError, AttributeError):\n try:\n delta = _infer_value(inputs[2], params, mod).asnumpy().tolist()\n delta = delta if not isinstance(delta, list) else delta[0]\n except Exception:\n # Symbolic delta\n delta = inputs[2]\n\n\n dtype = attr['Tidx'].name if 'Tidx' in attr else str(start.dtype)\n if isinstance(start, (np.int32, np.int64, int, np.float32, np.float64, float)):\n start = _expr.const(start)\n if isinstance(limit, (np.int32, np.int64, int, np.float32, np.float64, float)):\n limit = _expr.const(limit)\n if isinstance(delta, (np.int32, np.int64, int, np.float32, np.float64, float)):\n delta = _expr.const(delta)\n\n return AttrCvt(\n op_name=\"arange\",\n ignores=['Tidx'],\n extras={'start': start,\n 'stop': limit,\n 'step': delta,\n 'dtype': dtype})([], attr)\n return _impl\n\ndef _elu():\n def _impl(inputs, attr, params, mod):\n dtype = attr['T'].name\n alpha = tvm.relay.const(-1.0, dtype)\n return alpha * _op.nn.relu(tvm.relay.const(1, dtype) \\\n - _op.exp(inputs[0])) + _op.nn.relu(inputs[0])\n return _impl\n\ndef _selu():\n def _impl(inputs, attr, params, mod):\n dtype = attr['T'].name\n alpha = tvm.relay.const(-1.6732632423543772848170429916717, dtype)\n gamma = tvm.relay.const(1.0507009873554804934193349852946, dtype)\n return gamma * (alpha * _op.nn.relu(tvm.relay.const(1, dtype)\n - _op.exp(inputs[0])) + 
_op.nn.relu(inputs[0]))\n return _impl\n\ndef _mean():\n def _impl(inputs, attr, params, mod):\n axis = _get_tuple_param(params, inputs[1])\n return AttrCvt(op_name=\"mean\", ignores=['Tdim', 'Tidx'],\n transforms={'keep_dims': 'keepdims'},\n extras={'axis': axis})([inputs[0]], attr)\n return _impl\n\ndef _broadcast(name):\n def _impl(inputs, attr, params, mod):\n return AttrCvt(\n op_name=name,\n ignores=['name', 'incompatible_shape_error', 'Tidx']\n )(inputs, attr)\n return _impl\n\ndef _split(has_size_vector):\n # TF documentation https://www.tensorflow.org/api_docs/python/tf/split\n def _impl(inputs, attr, params, mod):\n try:\n # order and number of inputs are different:\n # if has_size_vector:\n # https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/split-v\n # else:\n # https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/split\n\n # in addition, `axis` and `num_or_size_splits` can be tensors in TensorFlow,\n # we can only support constants\n if has_size_vector:\n input_node_index = 0\n input_axis_index = 2\n size_splits = _get_param(params, inputs[1])\n section_beginnings = np.cumsum(size_splits)[:-1]\n indices_or_sections = tuple(section_beginnings)\n else:\n input_node_index = 1\n input_axis_index = 0\n indices_or_sections = attr['num_split']\n input_node = inputs[input_node_index]\n axis_input_value = _get_num_param(params, inputs[input_axis_index])\n except (IndexError, KeyError):\n raise TypeError(\n \"Unsupported argument for split: `axis` and `num_or_size_splits` \"\n \"should be constants\")\n return _op.split(input_node,\n indices_or_sections=indices_or_sections,\n axis=int(axis_input_value))\n return _impl\n\ndef _unpack():\n def _impl(inputs, attr, params, mod):\n input_node = inputs[0]\n axis = attr['axis']\n input_shape = _infer_shape(input_node, mod)\n axis_length = input_shape[axis]\n if axis_length < 0:\n raise TypeError(\"Unstack with unknown axis length\")\n splitted = _op.split(input_node,\n indices_or_sections=axis_length,\n axis=axis)\n axis = [axis]\n return _expr.TupleWrapper(\n _expr.Tuple([_op.squeeze(split_item, axis=axis) \\\n for split_item in splitted]), len(splitted))\n return _impl\n\ndef _softmax():\n def _impl(inputs, attr, params, mod):\n return AttrCvt(op_name='softmax',\n transforms={'axis': ('axis', 1)})([inputs[0]], attr)\n return _impl\n\ndef _softplus():\n # op description: https://www.tensorflow.org/api_docs/python/tf/math/softplus\n def _impl(inputs, attr, params, mod):\n exp_out = AttrCvt('exp')(inputs, attr)\n inputs.append(tvm.relay.const(1, attr['T'].name))\n rh = tvm.relay.const(1, attr['T'].name)\n add_out = get_relay_op('add')(exp_out, rh)\n return get_relay_op('log')(add_out)\n return _impl\n\ndef _topk():\n def _impl(inputs, attr, params, mod):\n k_input = inputs.pop(1)\n try:\n k = int(_get_num_param(params, k_input))\n except (IndexError, KeyError, AttributeError):\n k = int(_infer_value(k_input, params).asnumpy().tolist())\n if k < 1:\n raise tvm.error.OpAttributeInvalid(\n 'Attribute k must be positive in operator TopKV2')\n if attr['sorted'] is False:\n raise tvm.error.OpAttributeUnImplemented(\n 'Attribute sorted=False is not supported in operator TopKV2')\n return AttrCvt(op_name='topk',\n ignores=['sorted'],\n extras={'k': k, 'is_ascend': False, 'dtype': 'int32'})(inputs, attr)\n return _impl\n\ndef _floordiv():\n def _impl(inputs, attr, params, mod):\n assert len(inputs) == 2\n return AttrCvt('floor_divide')(inputs, attr)\n return _impl\n\ndef _floormod():\n def _impl(inputs, attr, params, mod):\n assert 
len(inputs) == 2\n return AttrCvt('floor_mod')(inputs, attr)\n return _impl\n\ndef _logical(name):\n def _impl(inputs, attr, params, mod):\n return AttrCvt(op_name=name)(inputs, attr)\n return _impl\n\ndef _space_to_batch_nd():\n def _impl(inputs, attr, params, mod):\n input_node = inputs[0]\n input_shape = _infer_shape(input_node, mod)\n try:\n block_shape = _get_list_param(params, inputs[1])\n except (IndexError, KeyError, AttributeError):\n block_shape = _infer_value(inputs[1], params).asnumpy().tolist()\n\n try:\n paddings = _get_list_param(params, inputs[2])\n except (IndexError, KeyError, AttributeError):\n paddings = _infer_value(inputs[2], params).asnumpy()\n paddings = np.squeeze(paddings)\n if len(paddings.shape) == 1:\n paddings = np.expand_dims(paddings, axis=0)\n paddings = paddings.tolist()\n N = len(input_shape)\n M = len(block_shape)\n batch = input_shape[0]\n remaining_shape_length = N - M - 1\n paddings = [(0, 0)] + paddings + [(0, 0)] * remaining_shape_length\n # From https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/space-to-batch-n-d:\n # Zero-pad the start and end of dimensions [1, ..., M] of the input according to paddings\n # to produce padded of shape padded_shape.\n padded = tvm.relay.nn.pad(input_node, pad_width=paddings)\n # Reshape padded to reshaped_padded of shape:\n # [batch] + [padded_shape[1] / block_shape[0], block_shape[0], ...,\n # padded_shape[M] / block_shape[M-1], block_shape[M-1]] + remaining_shape\n shape1 = [batch] + [item for i in range(M) for item in [-4, -1, block_shape[i]]] + [-2]\n reshaped_padded = tvm.relay.reshape(padded, newshape=shape1)\n # Permute dimensions of reshaped_padded to produce permuted_reshaped_padded of shape:\n # block_shape + [batch] + [padded_shape[1] / block_shape[0], ...,\n # padded_shape[M] / block_shape[M-1]] + remaining_shape\n axes = [2 * i + 2 for i in range(M)] + [0] + [2 * i + 1 for i in range(M)] + \\\n list(range(1 + 2 * M, 1 + 2 * M + remaining_shape_length))\n permuted_reshaped_padded = tvm.relay.transpose(reshaped_padded, axes=axes)\n permuted_reshaped_padded_shape = _infer_shape(permuted_reshaped_padded)\n # Reshape permuted_reshaped_padded to flatten block_shape into the batch dimension,\n # producing an output tensor of shape:\n # [batch * prod(block_shape)] + [padded_shape[1] / block_shape[0], ...,\n # padded_shape[M] / block_shape[M-1]] + remaining_shape\n shape2 = [batch * np.prod(block_shape)] + list(permuted_reshaped_padded_shape)[M + 1:]\n reshaped_permuted_reshaped_padded = tvm.relay.reshape(permuted_reshaped_padded,\n newshape=shape2)\n return reshaped_permuted_reshaped_padded\n\n return _impl\n\n\ndef _batch_to_space_nd():\n def _impl(inputs, attr, params, mod):\n input_node = inputs[0]\n input_shape = _infer_shape(input_node, mod)\n try:\n block_shape = _get_list_param(params, inputs[1])\n except (IndexError, KeyError, AttributeError):\n block_shape = _infer_value(inputs[1], params).asnumpy().tolist()\n\n try:\n crops = _get_list_param(params, inputs[2])\n except (IndexError, KeyError, AttributeError):\n crops = _infer_value(inputs[2], params).asnumpy()\n crops = np.squeeze(crops)\n if len(crops.shape) == 1:\n crops = np.expand_dims(crops, axis=0)\n crops = crops.tolist()\n M = len(block_shape)\n batch = input_shape[0]\n # From https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/batch-to-space-n-d:\n # Reshape input to reshaped of shape:\n # [block_shape[0], ..., block_shape[M-1], batch / prod(block_shape),\n # input_shape[1], ..., input_shape[N-1]]\n shape1 = block_shape + 
[batch // np.prod(block_shape)] + list(input_shape[1:])\n reshaped = tvm.relay.reshape(input_node, newshape=shape1)\n # Permute dimensions of reshaped to produce permuted of shape\n # [batch / prod(block_shape), input_shape[1], block_shape[0], ...,\n # input_shape[M], block_shape[M-1], input_shape[M+1], ..., input_shape[N-1]]\n axes = [M] + [axis for i in range(M) for axis in [M + i + 1, i]] + \\\n list(range(2 * M + 1, len(shape1)))\n permuted = tvm.relay.transpose(reshaped, axes=axes)\n # Reshape permuted to produce reshaped_permuted of shape\n # [batch / prod(block_shape), input_shape[1] * block_shape[0], ...,\n # input_shape[M] * block_shape[M-1], input_shape[M+1], ..., input_shape[N-1]]\n shape2 = [0] + [-3] * M + [-2]\n reshaped_permuted = tvm.relay.reshape(permuted, newshape=shape2)\n # Crop the start and end of dimensions [1, ..., M] of reshaped_permuted according to crops\n # to produce the output of shape:\n # [batch / prod(block_shape), input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1],\n # ..., input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],\n # input_shape[M+1], ..., input_shape[N-1]]\n reshaped_permuted_shape = _infer_shape(reshaped_permuted)\n cropped = reshaped_permuted\n for axis in range(1, M+1):\n crop = crops[axis - 1]\n if crop != [0, 0]:\n indices = tvm.relay.arange(\n _expr.const(crop[0]),\n _expr.const(reshaped_permuted_shape[axis] - crop[1]),\n dtype='int32'\n )\n cropped = tvm.relay.take(cropped, indices=indices, axis=axis)\n\n return cropped\n\n return _impl\n\ndef _atan2():\n def _impl(inputs, attr, params, mod):\n divide = _elemwise(\"divide\")(inputs, attr, params, mod)\n return get_relay_op(\"atan\")(divide)\n return _impl\n\ndef _prod():\n def _impl(inputs, attr, params, mod):\n axis = _get_num_param(params, inputs[1])\n keepdims = attr['keep_dims']\n return _op.prod(inputs[0], int(axis), keepdims=keepdims)\n return _impl\n\ndef _log1p():\n # op description: https://www.tensorflow.org/api_docs/python/tf/math/log1p\n def _impl(inputs, attr, params, mod):\n one = tvm.relay.const(1, attr['T'].name)\n add_out = get_relay_op('add')(inputs[0], one)\n return get_relay_op('log')(add_out)\n return _impl\n\ndef _one_hot():\n def _impl(inputs, attr, params, mod):\n depth = int(_get_num_param(params, inputs[1]))\n dtype = attr['T'].name\n\n on_value = _get_num_param(params, inputs[2])\n off_value = _get_num_param(params, inputs[3])\n new_inputs = [inputs[0],\n tvm.relay.const(on_value, dtype),\n tvm.relay.const(off_value, dtype)]\n return AttrCvt('one_hot',\n ignores=['TI'],\n extras={'depth' : depth, 'dtype' : dtype})(new_inputs, attr)\n return _impl\n\ndef _squared_difference():\n def _impl(inputs, attr, params, mod):\n difference = _op.subtract(inputs[0], inputs[1])\n return _op.multiply(difference, difference)\n return _impl\n\ndef _size():\n def _impl(inputs, attr, params, mod):\n new_attr = attr\n new_attr['out_type'] = attr['out_type'].name\n return AttrCvt('ndarray_size', transforms={'out_type' : 'dtype'})(inputs, new_attr)\n return _impl\n\ndef _add_n():\n def _impl(inputs, attr, params, mod):\n if not isinstance(inputs, tuple):\n inputs = list(inputs)\n assert len(inputs) > 0, \"add_n take >=1 inputs, but 0 given.\"\n _res = inputs[0]\n for each in inputs[1:]:\n _res = _op.add(_res, each)\n return _res\n return _impl\n\n\n# compatible operators that do NOT require any conversion.\n_identity_list = []\n\n# Operators that get pruned away when the complete graph is frozen.\n# These operators are not needed for 
inference.\n_freezed_graph_pruned_op_list = ['ReadVariableOp', 'ResourceGather', 'Variable',\n 'VariableV2', 'VarHandleOp', 'Assign', 'AssignVariableOp']\n\n\n# _convert_map defines maps of name to converter functor(callable)\n# for 1 to 1 mapping, use Renamer if nothing but name is different\n# use AttrCvt if attributes need to be converted\n# for 1 to N mapping(composed), use custom callable functions\n# for N to 1 mapping, currently not supported(?)\n_convert_map = {\n 'Abs' : AttrCvt('abs'),\n 'Add' : _elemwise('add'),\n 'AddV2' : _elemwise('add'),\n 'AddN' : _add_n(),\n 'All' : _reduce('all'),\n 'Any' : _reduce('any'),\n 'ArgMax' : _argx(_op.argmax, 'argmax'),\n 'ArgMin' : _argx(_op.argmin, 'argmin'),\n 'Assert' : _assert(),\n 'Atan' : AttrCvt('atan'),\n 'Atan2' : _atan2(),\n 'AvgPool' : _pooling('avg_pool'),\n 'AvgPool3D' : _pool3d('avg_pool3d'),\n 'BatchMatMul' : _batch_matmul(),\n 'BatchMatMulV2' : _batch_matmul(),\n 'BatchNormWithGlobalNormalization' : _batch_norm(),\n 'BatchToSpaceND' : _batch_to_space_nd(),\n 'BiasAdd' : _bias_add(),\n 'BroadcastTo' : _broadcast_to(),\n 'Cast' : _cast(),\n 'Ceil' : AttrCvt('ceil'),\n 'CheckNumerics' : _check_numerics(),\n 'ClipByValue' : _clip_by_value(),\n 'Concat' : _concat(),\n 'ConcatV2' : _concatV2(),\n 'Conv2D' : _conv('conv'),\n 'Conv3D' : _conv3d('conv'),\n 'Conv2DBackpropInput' : _conv('conv_transpose'),\n 'CropAndResize' : _crop_and_resize(),\n 'DecodeJpeg' : _decode_image(),\n 'DepthwiseConv2dNative' : _conv('depthwise'),\n 'DepthToSpace' : _depth_to_space(),\n 'Dilation2D' : _dilation2d(),\n 'Equal' : _broadcast('equal'),\n 'Elu' : _elu(),\n 'Erf' : AttrCvt('erf'),\n 'Exp' : AttrCvt('exp'),\n 'ExpandDims' : _expand_dims(),\n 'Fill' : _fill(),\n 'Floor' : AttrCvt('floor'),\n 'FloorDiv' : _floordiv(),\n 'FloorMod' : _floormod(),\n 'FusedBatchNorm' : _fused_batch_norm(),\n 'FusedBatchNormV2' : _fused_batch_norm(),\n 'FusedBatchNormV3' : _fused_batch_norm(),\n 'Gather' : _gather(),\n 'GatherNd' : _gather_nd(),\n 'GatherV2' : _gather(),\n 'Greater' : _broadcast('greater'),\n 'GreaterEqual' : _broadcast('greater_equal'),\n 'Identity' : _identity(),\n 'IsFinite' : AttrCvt('isfinite'),\n 'IsInf' : AttrCvt('isinf'),\n 'LeakyRelu' : AttrCvt('leaky_relu'),\n 'LeftShift' : AttrCvt('left_shift'),\n 'Less' : _broadcast('less'),\n 'LessEqual' : _broadcast('less_equal'),\n 'Log' : AttrCvt('log'),\n 'Log1p' : _log1p(),\n 'Tan' : AttrCvt('tan'),\n 'Cos' : AttrCvt('cos'),\n 'Sin' : AttrCvt('sin'),\n 'LogicalAnd' : _logical('logical_and'),\n 'LogicalOr' : _logical('logical_or'),\n 'LogicalNot' : _logical('logical_not'),\n 'LogSoftmax' : AttrCvt('log_softmax'),\n 'LRN' : _lrn(),\n 'MatMul' : _matmul(),\n 'Max' : _reduce('max'),\n 'MaxPool' : _pooling('max_pool'),\n 'MaxPool3D' : _pool3d('max_pool3d'),\n 'Maximum' : _elemwise('maximum'),\n 'Mean' : _mean(),\n 'Min' : _reduce('min'),\n 'Minimum' : _elemwise('minimum'),\n 'MirrorPad' : _mirror_pad(),\n 'Mod' : _elemwise('mod'),\n 'Mul' : _elemwise('multiply'),\n 'Neg' : AttrCvt('negative'),\n 'NoOp' : _no_op(),\n 'NotEqual' : _broadcast('not_equal'),\n 'OneHot' : _one_hot(),\n 'Pack' : _pack(),\n 'TensorArrayV3' : _tensor_array(),\n 'TensorArrayScatterV3' : _tensor_array_scatter(),\n 'TensorArrayGatherV3' : _tensor_array_gather(),\n 'TensorArraySizeV3' : _tensor_array_size(),\n 'TensorArrayWriteV3' : _tensor_array_write(),\n 'TensorArrayReadV3' : _tensor_array_read(),\n 'TensorArraySplitV3' : _tensor_array_split(),\n 'TensorArrayConcatV3' : _tensor_array_concat(),\n 'Pad' : _pad('Pad'),\n 'PadV2' : 
_pad('PadV2'),\n 'Pow' : _elemwise('power'),\n 'Prod' : _prod(),\n 'Range' : _range(),\n 'Rank' : _rank(),\n 'RealDiv' : _elemwise('divide'),\n 'Relu' : AttrCvt('relu'),\n 'Relu6' : _relu6(),\n 'Reshape' : _reshape(),\n 'ResizeBilinear' : _resize('bilinear'),\n 'ResizeBicubic' : _resize('bilinear'),\n 'ResizeNearestNeighbor' : _resize('nearest_neighbor'),\n 'ReverseV2' : _reverse_v2(),\n 'RightShift' : AttrCvt('right_shift'),\n 'Round' : AttrCvt('round'),\n 'Rsqrt' : _rsqrt(),\n 'Select' : _where(),\n 'Selu' : _selu(),\n 'Shape' : _shape(),\n 'Sigmoid' : AttrCvt('sigmoid'),\n 'Sign' : AttrCvt('sign'),\n 'Size' : _size(),\n 'Slice' : _slice(),\n 'Softmax' : _softmax(),\n 'Softplus' : _softplus(),\n 'SpaceToBatchND' : _space_to_batch_nd(),\n 'SpaceToDepth' : _space_to_depth(),\n 'Split' : _split(False),\n 'SplitV' : _split(True),\n 'Sqrt' : AttrCvt('sqrt'),\n 'Square' : _square(),\n 'SquaredDifference' : _squared_difference(),\n 'Squeeze' : _squeeze(),\n 'StopGradient' : _identity(),\n 'StridedSlice' : _stridedSlice(),\n 'Sub' : _elemwise('subtract'),\n 'Sum' : _sum(),\n 'Tanh' : AttrCvt('tanh'),\n 'Tile' : _tile(),\n 'TopKV2' : _topk(),\n 'Transpose' : _transpose(),\n 'TruncateMod' : _elemwise('mod'),\n 'Unpack' : _unpack(),\n 'UnravelIndex' : _unravel_index(),\n 'Where' : _where(),\n 'ZerosLike' : AttrCvt('zeros_like'),\n\n}\n\ndef _LSTMBlockCell():\n def _impl(inputs, in_state_c, in_state_h, attr, params, mod):\n \"\"\"LSTM Block cell.\n Calculations are described in: https://github.com/tensorflow/tensorflow/blob/\n r1.8/tensorflow/contrib/rnn/python/ops/lstm_ops.py#L41-L114\n\n Parameters\n ----------\n inputs : relay.Expr\n Input data\n in_state_c: list of relay.Expr\n Cell state input values for all the layers\n in_state_h: list of relay.Expr\n Hidden state input values for all the layers\n attrs : dict\n Dict of operator attributes\n params : dict\n List of pretrained weights and bias\n\n Returns\n -------\n sym : relay.Expr\n Converted relay.Expr\n output: relay.Expr\n Output state value.\n \"\"\"\n in_data = inputs[0]\n in_weight = inputs[3]\n in_bias = inputs[7]\n forget_bias = attr.pop('forget_bias')\n input_shape = _infer_shape(inputs[0], mod)\n weight_shape = _infer_shape(inputs[3], mod)\n batch_size, input_size = input_shape[0], input_shape[1]\n num_hidden_layers = weight_shape[1]\n num_hidden = num_hidden_layers // 4\n\n in_data = _op.reshape(in_data,\n newshape=(batch_size, input_size))\n ixh = _op.concatenate([in_data, in_state_h], axis=1)\n in_weight = _op.transpose(in_weight, axes=None)\n gates = _op.nn.dense(ixh, in_weight,\n units=num_hidden_layers)\n gates_bias = _op.add(gates, in_bias)\n gate_list = _op.split(gates_bias, indices_or_sections=4, axis=1)\n in_gate = _op.sigmoid(gate_list[0])\n in_transform = _op.tanh(gate_list[1])\n forget_gate = _op.add(gate_list[2], tvm.relay.const(forget_bias, attr['T'].name))\n forget_gate = _op.sigmoid(forget_gate)\n out_gate = _op.sigmoid(gate_list[3])\n next_c = _op.add(_op.multiply(forget_gate, in_state_c),\n _op.multiply(in_gate, in_transform))\n next_h = out_gate * _op.tanh(next_c)\n out_state = _op.concatenate([next_c, next_h], axis=1)\n out_state = _op.reshape(out_state,\n newshape=(2, batch_size, num_hidden))\n return next_h, out_state\n return _impl\n\n# _convert_map_rnn defines maps of rnn operator name to\n# converter functor(callable) for 1 to 1 mapping.\n_convert_map_rnn = {\n 'LSTMBlockCell' : _LSTMBlockCell(),\n}\n\nclass RecurrentNetworks(object):\n \"\"\"Recurrent network layer handlers.\n\n Handle Layer 
operations.\n ToDo: Operators like RNN/GRU layer concepts also can be handled here\n\n Parameters\n ----------\n nodes : list\n list of graph nodes used for tensorflow parsing.\n\n out_rnn : list\n List of RecurrentNetwork outputs. This output will be appended to the\n 'head' nodes of the graph.\n\n graph : tensorflow graph definition object\n The loaded tensorflow GraphDef\n\n convert_map : dict\n Dict of name : callable, where name is the op's name that\n require conversion to relay, callable are functions which\n take attrs and return (new_op_name, new_attrs)\n \"\"\"\n def __init__(self, nodes, out_rnn, graph, convert_map):\n self._graph = graph\n self._convert_map = convert_map\n self._nodes = nodes\n self._out_rnn = out_rnn\n self._cur_lstm_layer = 0\n self._layer_name_list = []\n self._recurrent_ops_layer_map = {\n 'LSTMBlockCell' : self._LSTMBlockCellLayer(),\n }\n\n def _LSTMBlockCellLayer(self):\n \"\"\"LSTMBlockCell layer handler.\n\n Parameters\n ----------\n op_name : str\n Operator name, eg:LSTMBlockCell\n\n layer_name : str list\n Layer name is used for creating the state input placeholder.\n\n inputs : relay.Expr\n Input data\n\n attrs : dict\n Dict of operator attributes\n\n params : dict\n List of pretrained weights and bias\n\n num_layers : int\n Total number of LSTM layer presented in the graph\n\n Returns\n -------\n sym : relay.Expr\n The returned relay Expr\n \"\"\"\n def _impl(op_name, layer_name, inputs, attrs, params, num_layers, mod):\n in_state_c_name = layer_name+'_c'\n in_state_h_name = layer_name+'_h'\n\n def _init_state(num_layers, batch_size, num_hidden):\n \"\"\"Create the initial states for the first layer in the graph.\"\"\"\n in_state_c = [_expr.var(in_state_c_name,\n shape=(num_layers, batch_size, num_hidden),\n dtype='float32')]\n\n in_state_h = [_expr.var(in_state_h_name,\n shape=(num_layers, batch_size, num_hidden),\n dtype='float32')]\n return in_state_c, in_state_h\n\n def _get_cur_input_state(in_state_c, in_state_h, num_layers,\n layer, batch_size, num_hidden):\n \"\"\"Select the appropriate states for the current layer\"\"\"\n in_state_c_tup = _op.split(in_state_c[0],\n indices_or_sections=num_layers, axis=0)\n in_state_h_tup = _op.split(in_state_h[0],\n indices_or_sections=num_layers, axis=0)\n cur_in_state_c = _op.reshape(in_state_c_tup[layer],\n newshape=(batch_size, num_hidden))\n cur_in_state_h = _op.reshape(in_state_h_tup[layer],\n newshape=(batch_size, num_hidden))\n return cur_in_state_c, cur_in_state_h\n\n def _LSTMBlockCellWrapper(inputs, attr, params,\n num_layers, layer):\n \"\"\"LSTM cell warapper to prepare the inputs\"\"\"\n input_shape = _infer_shape(inputs[0], mod)\n weight_shape = _infer_shape(inputs[3], mod)\n\n batch_size = input_shape[0]\n num_hidden = weight_shape[1] // 4\n\n if layer == 0:\n #Create initial states placeholder in case of first layer\n in_state_c, in_state_h = _init_state(num_layers,\n batch_size, num_hidden)\n else:\n in_state_c = self._nodes[in_state_c_name]\n in_state_h = self._nodes[in_state_h_name]\n\n cur_in_state_c, cur_in_state_h = _get_cur_input_state(\n in_state_c, in_state_h,\n num_layers, layer,\n batch_size, num_hidden)\n output, out_state = self._convert_map[op_name](inputs, cur_in_state_c,\n cur_in_state_h,\n attr, params, mod)\n return output, out_state, in_state_c, in_state_h\n\n sym, cur_out_state, in_state_c, in_state_h = \\\n _LSTMBlockCellWrapper(inputs, attrs, params,\n num_layers, self._cur_lstm_layer)\n self._nodes[in_state_c_name] = in_state_c\n self._nodes[in_state_h_name] = 
in_state_h\n cur_out_state = _op.expand_dims(cur_out_state, axis=0, num_newaxis=1)\n self._out_rnn.append(cur_out_state)\n self._cur_lstm_layer += 1\n return sym\n return _impl\n\n def process_op(self, op_name, inputs, attrs, params, mod):\n \"\"\"Process recurrent layer operators.\n\n The list '_recurrent_ops_layer_map' maps each layer-based operator to its\n layer handler. The total number of layers is calculated to form the input\n data shapes.\n\n Parameters\n ----------\n op_name : str\n Operator name, such as LSTMBlockCell\n\n inputs : relay.Expr\n Input data\n\n attrs : dict\n Dict of operator attributes\n\n params : dict\n List of pretrained weights and bias\n\n Returns\n -------\n sym : relay.Expr\n Returns relay.Expr\n \"\"\"\n def _get_abs_layer_name(node):\n \"\"\"Identify whether the layer name is already handled. Return the absolute name.\n \"\"\"\n if not self._layer_name_list:\n self._layer_name_list.append(node.name)\n return node.name\n\n for _name in self._layer_name_list:\n if _name in node.name:\n abs_name = _name\n else:\n self._layer_name_list.append(node.name)\n abs_name = node.name\n return abs_name\n\n #Find number of layers of this same operator node in the graph\n #and also read the inputs name for the current op.\n num_layers = 0\n for _, node in enumerate(self._graph.node):\n if node.op == op_name:\n layer_name = _get_abs_layer_name(node)\n num_layers += 1\n\n sym = self._recurrent_ops_layer_map[op_name](op_name, layer_name, inputs, attrs,\n params, num_layers, mod)\n return sym\n\n# An internal list to contain all the control flow primitives used in Tensorflow\n# 1.x.\n_control_flow_nodes = ['Merge', 'Switch', 'NextIteration', 'Exit', 'Enter', 'LoopCond']\n\nclass RewriteSubgraph(ExprMutator):\n \"\"\"\n A helper class to rewrite expr in while loop function to variable\n\n Parameters\n ----------\n rewrite_map : Dict[expr, expr]\n A dictionary that contains a set of expr to var mappings.\n \"\"\"\n def __init__(self, rewrite_map):\n ExprMutator.__init__(self)\n self.rewrite_map = rewrite_map\n\n def visit(self, expr):\n if expr in self.rewrite_map:\n return self.rewrite_map[expr]\n return super().visit(expr)\n\ndef rewrite_subgraph(expr, rewrites):\n return RewriteSubgraph(rewrites).visit(expr)\n\ndef _in_while_loop(control_flow_node_map, op_name):\n \"\"\"\n Check if a given control flow operator is part of a while loop execution\n frame. 
This is based on the fact that there is only one occurrence of\n `LoopCond` for a loop execution frame and it is only present in the loop\n construct.\n\n Parameters\n ----------\n control_flow_node_map : Dict[str, Set[str]]\n A dictionary that maps the unique control flow execution frame name to\n a set of primitive operators.\n\n op_name : str\n The name of a control flow primitive.\n\n Returns\n -------\n ret : bool\n Return true if the operator is in a while loop execution frame,\n otherwise, return false.\n \"\"\"\n return op_name in control_flow_node_map and \\\n \"LoopCond\" in control_flow_node_map[op_name]\n\nclass Branch:\n \"\"\"A class that contains the components used to build up a Relay if\n node.\n\n Parameters\n ----------\n cond : tvm.relay.Expr\n The condition of an if node.\n\n true_branch : tvm.relay.Expr\n The body of the true branch of an if expression.\n\n false_branch: tvm.relay.Expr\n The body of the false branch of an if expression.\n\n _if : tvm.relay.Expr\n An internal variable that indicates whether an if expression has already\n been created for a matched TF condition construct.\n\n Examples\n --------\n The following is a cond statement written in TensorFlow:\n\n .. code-block:: python\n\n def vanilla_cond():\n i = tf.constant(1)\n j = tf.constant(4)\n\n def f1():\n return tf.multiply(1, 17)\n\n def f2():\n return tf.add(4, 23)\n r = tf.cond(tf.less(i, j), f1, f2)\n\n This condition statement should be converted into Relay in the following\n form:\n\n .. code-block:: python\n\n fn (%Const: Tensor[(1,), int32],\n %Const_1: Tensor[(1,), int32],\n %cond/Mul/x: Tensor[(1,), int32],\n %cond/Mul/y: Tensor[(1,), int32],\n %cond/Add/x: Tensor[(1,), int32],\n %cond/Add/y: Tensor[(1,), int32]) {\n %0 = less(%Const, %Const_1) # ty=Tensor[(1,), bool]\n %1 = min(%0)\n if (%1) {\n %2 = multiply(%cond/Mul/x, %cond/Mul/y)\n %2\n } else {\n %3 = add(%cond/Add/x, %cond/Add/y)\n %3\n }\n }\n \"\"\"\n def __init__(self):\n self._if = None\n self.cond = None\n self.true_branch = None\n self.false_branch = None\n\n def _if_node(self):\n \"\"\"An internal API to create a relay if node from the matched TF\n condition construct.\n \"\"\"\n # `cond` returns a tensor that contains boolean values. We add a `min`\n # operator to check if there is any false value. If so, this condition\n # doesn't hold.\n cond = tvm.relay.op.min(self.cond)\n return tvm.relay.If(cond, self.true_branch, self.false_branch)\n\n def if_node(self):\n \"\"\"Create a tvm.relay.If node if it hasn't been created yet.\"\"\"\n if self._if is None:\n self._if = self._if_node()\n return self._if\n\n\nclass LoopBound(ExprVisitor):\n \"\"\"\n When a loop body is created, we get a Relay expression backtracking all\n the way back to the input node. This results in lots of unnecessary\n expressions being placed into the loop body and computed multiple times. For example,\n consider the following tensorflow code:\n\n .. 
code-block:: python\n\n i = tf.constant(0)\n data = tf.compat.v1.placeholder(tf.float32, shape=(1024, 1024))\n slice = tf.strided_slice(data, 0, 512)\n def c(i): return tf.less(i, 10)\n def b(i): return [tf.add(i, 1), tf.add(i, 1) + slice]\n r = tf.while_loop(c, b, [i])\n\n If we directly create a recursive function, slice will be placed into the function body.\n Instead, we recognize whether slice is inside the while_loop block and pass it as an\n extra loop variable to avoid duplicate computation.\n\n TODO(kevinthesun): Add a LICM pass for Relay to handle generic loop/function.\n \"\"\"\n def __init__(self, loop_name, hash2tfnode, while_loop_name_set):\n ExprVisitor.__init__(self)\n self._loop_name = loop_name\n self._hash2tfnode = hash2tfnode\n self._while_loop_name_set = while_loop_name_set\n self.extra_loop_var_names = set()\n\n def _find_parent_loop_name(self, node_name):\n \"\"\"Find the name of the direct parent while loop.\"\"\"\n ploop_name = \"\"\n name_prefix = node_name.rsplit('/', 1)[0]\n if name_prefix.startswith(\"^\"):\n name_prefix = name_prefix[1:]\n # To get the name of the direct parent while loop for a given node,\n # we iterate over all the while loop names inside the TensorFlow graph def.\n # If we find a loop name with which the current node name starts,\n # it means the current node is under this loop. However, due to nested\n # loops, this loop may not be the direct parent while loop of the current\n # node. We need to keep the longest loop name, which represents the\n # innermost while loop corresponding to the current node.\n for lname in self._while_loop_name_set:\n if name_prefix.startswith(lname) and len(ploop_name) < len(lname):\n ploop_name = lname\n\n if len(ploop_name) == 0:\n ploop_name = name_prefix\n\n return ploop_name\n\n def visit(self, expr):\n \"\"\"\n For each expression in the body, look up the corresponding\n TensorFlow node with its structural hash. If the current loop is the\n direct parent of this node, we check whether every one of its input nodes\n belongs to the current loop. If not, we mark this input node as an extra loop\n variable to the current loop.\n \"\"\"\n expr_hash = s_hash(expr)\n\n if expr_hash in self._hash2tfnode:\n node = self._hash2tfnode[expr_hash]\n ploop_name = self._find_parent_loop_name(node.name)\n # It is possible that a node is under a nested loop of the current loop.\n # We only check the direct children of the current loop.\n if ploop_name == self._loop_name:\n for iname in node.input:\n iploop_name = self._find_parent_loop_name(iname)\n # Use startswith to deal with nested loops\n if not iploop_name.startswith(self._loop_name):\n if iname not in self.extra_loop_var_names:\n self.extra_loop_var_names.add(iname)\n super().visit(expr)\n\n\nclass Loop:\n \"\"\"\n A class that contains the components used to build up a Relay\n recursive call.\n\n Parameters\n ----------\n loop_vars : List[tvm.relay.Expr]\n The loop variables that are used in a while loop.\n\n cond : tvm.relay.Expr\n The condition of a while loop.\n\n body : tvm.relay.Expr\n The body of a matched while loop.\n\n _loop : tvm.relay.Expr\n An internal variable that indicates whether a recursive call has already\n been created for a matched TF while loop construct.\n\n Examples\n --------\n The following is a vanilla loop from TensorFlow:\n\n .. code-block:: python\n\n i = tf.constant(0)\n c = lambda i: tf.less(i, 10)\n b = lambda i: tf.add(i, 1)\n r = tf.while_loop(c, b, [i])\n\n It will be converted to the following recursive call in Relay:\n\n .. 
code-block:: python\n\n fn (%while/Less/y: Tensor[(1,), int32],\n %while/Add/y: Tensor[(1,), int32],\n %Const: Tensor[(1,), int32]) {\n %0 = fn(%loop_var0: Tensor[(1,), int32]) {\n %1 = less(%loop_var0, %while/Less/y)\n %2 = min(%1)\n if (%2) {\n %3 = add(%loop_var0, %while/Add/y)\n free_var %while_loop\n %4 = %while_loop(%3)\n %4\n } else {\n %5 = (%loop_var0,)\n %5\n }\n }\n let %while_loop1 = %0\n %6 = %while_loop1(%Const)\n %6\n }\n \"\"\"\n def __init__(self, mod, loop_name, hash2tfnode,\n node_map, while_loop_name_set):\n self.loop_vars = []\n self.cond = None\n self.body = []\n self._loop = None\n self._mod = mod\n self._loop_name = loop_name\n self._hash2tfnode = hash2tfnode\n self._node_map = node_map\n self._while_loop_name_set = while_loop_name_set\n self.aligned = False\n\n def _while_loop(self):\n \"\"\"An internal API to create a Relay recursive call for a matched TF\n `while_loop` construct.\n \"\"\"\n wl = tvm.relay.var('while_loop')\n\n sb = tvm.relay.scope_builder.ScopeBuilder()\n\n loop_checker = LoopBound(self._loop_name,\n self._hash2tfnode,\n self._while_loop_name_set)\n for body in self.body:\n loop_checker.visit(body)\n\n loop_vars = []\n bind_map = {}\n loop_var_hash_set = set()\n for var in self.loop_vars:\n loop_var_hash_set.add(s_hash(var))\n\n extra_nodes = []\n for extra_loop_var_name in loop_checker.extra_loop_var_names:\n extra_loop_var_name = extra_loop_var_name.split(':')[0].split(\"^\")[-1]\n extra_node = self._node_map[extra_loop_var_name]\n extra_node = extra_node if isinstance(extra_node, _expr.Tuple) else extra_node[0]\n if s_hash(extra_node) not in loop_var_hash_set:\n self.loop_vars.append(extra_node)\n extra_nodes.append(extra_node)\n\n for i, var in enumerate(self.loop_vars):\n if not isinstance(var, _expr.Var):\n var_chk = _infer_type(var, self._mod)\n var_type = var_chk.checked_type\n else:\n var_type = var.type_annotation\n\n v = tvm.relay.var(\"loop_var\" + str(i), type_annotation=var_type)\n loop_vars.append(v)\n bind_map[var] = v\n\n\n self.cond = rewrite_subgraph(self.cond, bind_map)\n self.body = [rewrite_subgraph(b, bind_map) for b in self.body]\n\n self.body_shape = []\n for body in self.body:\n current_node = body\n shape = _infer_shape(current_node, self._mod)\n while not isinstance(shape, (tuple, list)):\n current_node = current_node.args[-1]\n shape = _infer_shape(current_node, self._mod)\n self.body_shape.append(shape)\n\n cond = tvm.relay.op.min(self.cond)\n\n with sb.if_scope(cond):\n extra_args = []\n if extra_nodes:\n extra_args = list(loop_vars[-len(extra_nodes):])\n sb.ret(wl(*list(self.body + extra_args)))\n with sb.else_scope():\n sb.ret(tvm.relay.Tuple(loop_vars))\n\n loop_fn = tvm.relay.Function(loop_vars, sb.get())\n sb = tvm.relay.scope_builder.ScopeBuilder()\n sb.let(wl, loop_fn)\n loop_ret = wl(*self.loop_vars)\n\n sb.ret(loop_ret)\n ret = sb.get()\n return ret\n\n def while_loop(self):\n \"\"\"Instantiate a while loop if it has not been created yet.\"\"\"\n if self._loop is None:\n self._loop = self._while_loop()\n return self._loop\n return self._loop\n\n\nclass GraphProto(object):\n \"\"\" A helper class for handling relay graph copying from Tensorflow GraphDef.\n Definition:\n https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/graph.proto\n \"\"\"\n def __init__(self):\n self._nodes = {}\n self._tf_node_map = {}\n self._params = {}\n self._input_shapes = {}\n self._output_shapes = {}\n self._num_rnn_layer = False\n self._input_shapes = {}\n self._loops = {}\n self._branches = {}\n 
self._mod = IRModule({})\n self._prelude = Prelude(self._mod)\n self._control_flow_node_map = defaultdict(set)\n self._loop_body_order = {}\n self._loop_var_order = {}\n self._hash2tfnode = {}\n self._while_loop_name_set = set()\n\n def from_tensorflow(self, graph, layout=\"NHWC\", shape=None, outputs=None):\n \"\"\"Construct relay nodes from tensorflow graph definition - GraphDef.\n\n Follow the tensorflow graph definition to parse and convert it to Relay.\n Some of the assumptions listed below.\n\n -> All Placeholders are considered as graph input.\n -> All Const nodes are params.\n -> Last node is assumed as graph output.\n -> _output_shapes : Graph should be frozen with add_shapes=True.\n Or user can pass input shape dictionary optionally.\n -> DecodeJpeg, ResizeBilinear: These are dummy operators.\n Hence user should handle preprocessing outside.\n -> CheckNumerics: No implementation as of now for this.\n Just copies input to output.\n\n Parameters\n ----------\n graph : tensorflow graph definition object\n The loaded tensorflow GraphDef\n\n layout : target layout to be used (Optional)\n NCHW only supported now to enable NHWC models on GPU.\n\n shape : Dictionary of input dimensions (Optional)\n Graph level input shape dictionary.\n\n outputs : List of output tensor names (Optional)\n if not specified then the last node is assumed as graph output.\n\n Returns\n -------\n mod : tvm.IRModule\n The module that optimizations will be performed on.\n\n params : dict\n A dict of name: tvm.nd.array pairs, used as pretrained weights\n \"\"\"\n try:\n from tensorflow.python.framework import tensor_util\n except ImportError as e:\n raise ImportError(\n \"Unable to import tensorflow which is required {}\".format(e))\n\n missing_operators = self._parse_import_prerequisites(graph)\n control_flow_nodes = []\n self._in_shape = shape\n self._layout = layout\n self._graph = graph\n\n if missing_operators:\n freezed_ops = [op for op in missing_operators if op in _freezed_graph_pruned_op_list]\n if freezed_ops:\n raise Exception(\"Graph is not frozen. Provide a frozen graph. 
\"\n \"Found operators {}\".format(freezed_ops))\n\n raise NotImplementedError(\n \"The following operators are not implemented: {}\".format(missing_operators))\n\n for node in graph.node:\n node_name_prefix = node.name.rsplit('/', 1)[0]\n self._control_flow_node_map[node_name_prefix].add(node.op)\n self._tf_node_map[node.name] = node\n\n # Parse output_shapes attribute\n parsed_attr = self._parse_attr(node.attr)\n if '_output_shapes' in parsed_attr:\n self._output_shapes[node.name] = \\\n [tensor_util.TensorShapeProtoToList(tshape) \\\n for tshape in parsed_attr['_output_shapes']]\n else:\n self._output_shapes[node.name] = [None]\n\n # Parse placeholder and const here since input shape info is required.\n if node.op == 'Placeholder' or node.op == 'PlaceholderWithDefault':\n # Give priority to user argument.\n if shape and node.name in shape:\n self._input_shapes[node.name] = list(shape[node.name])\n else:\n self._input_shapes[node.name] = \\\n tensor_util.TensorShapeProtoToList(node.attr['shape'].shape)\n for idx, dim in enumerate(self._input_shapes[node.name]):\n if dim < 0:\n self._input_shapes[node.name][idx] = 1\n warnings.warn(\"Use 1 instead of -1 in shape of operator %s.\"\n % node.name)\n\n self._output_shapes[node.name] = [self._input_shapes[node.name]]\n attr = self._parse_attr(node.attr)\n self._nodes[node.name] = [_expr.var(node.name,\n shape=self._input_shapes[node.name],\n dtype=attr['dtype'].name)]\n\n # Ignore user's input shape for Non placeholder\n elif node.op == 'Const':\n tensor_value = node.attr['value'].tensor\n self._input_shapes[node.name] = \\\n tensor_util.TensorShapeProtoToList(tensor_value.tensor_shape)\n self._output_shapes[node.name] = [self._input_shapes[node.name]]\n if shape and node.name in shape:\n warnings.warn(\"Ignore the passed shape. 
Shape in graphdef \"\n \"will be used for operator %s.\" % node.name)\n for key, value in node.attr.items():\n self._parse_param(key, value, node.name, self._in_shape)\n elif node.op in _control_flow_nodes:\n # We assume that the direct parent node of Exit is a while loop block\n if node.op == \"Exit\":\n self._while_loop_name_set.add(node_name_prefix)\n control_flow_nodes.append(node)\n\n # First, parse all control flow nodes.\n # Convert tf.cond to Branch and tf.while_loop to Loop.\n sorted_cf_nodes = []\n current_node_name_prefix = None\n exits = []\n # Sort control flow nodes to move all Exit nodes to the end\n # of corresponding while_loop block.\n for i, node in enumerate(control_flow_nodes):\n node_name_prefix = node.name.rsplit('/', 1)[0]\n if current_node_name_prefix is None or current_node_name_prefix != node_name_prefix:\n if node_name_prefix in self._while_loop_name_set:\n sorted_cf_nodes.extend(exits)\n exits.clear()\n current_node_name_prefix = node_name_prefix\n\n if node.op == \"Exit\":\n exits.append(node)\n else:\n sorted_cf_nodes.append(node)\n\n if i == len(control_flow_nodes) - 1:\n sorted_cf_nodes.extend(exits)\n\n for node in sorted_cf_nodes:\n self._backtrack_construct(node.name)\n\n # Second, parse other nodes to re-create TF graph using Relay operators.\n for node in graph.node:\n self._backtrack_construct(node.name)\n\n out = []\n if outputs is None:\n last_node = graph.node[-1]\n op = self._nodes[last_node.name.split(\":\")[0]]\n if last_node.op == \"Exit\":\n out = [op[0].tuple_value]\n else:\n out = op\n else:\n for out_name in outputs:\n if \":\" in out_name:\n out_name, out_num = out_name.split(\":\")\n out_num = int(out_num)\n out.append(self._nodes[out_name][out_num])\n else:\n out.append(self._nodes[out_name][0])\n\n #Add the RNN outputs also with 'head' nodes of the relay graph\n if self._num_rnn_layer:\n if len(self._out_rnn) == 1:\n out.append(self._out_rnn[0])\n else:\n out_rnn = _op.concatenate(self._out_rnn, axis=0)\n out.append(out_rnn)\n\n out = out[0] if len(out) == 1 else _expr.Tuple(out)\n func = _function.Function(analysis.free_vars(out), out)\n self._mod[\"main\"] = func\n return self._mod, self._params\n\n def _parse_import_prerequisites(self, graph):\n \"\"\" Calculate the named preconditions from TensorFlow `graph`.\n Return prerequisites for parsing:\n a. 
Set of operator names which don't have their mapping in TVM, i.e.\n which are not supported\n \"\"\"\n missing_operators = set()\n for node in graph.node:\n if node.op == \"Placeholder\" or node.op == 'PlaceholderWithDefault':\n pass\n elif node.op == \"Const\":\n pass\n else:\n if any([node.op in t for t in [_identity_list, _convert_map,\n _convert_map_rnn,\n _control_flow_nodes]]):\n pass\n else:\n missing_operators.add(node.op)\n\n return missing_operators\n\n def _parse_param(self, key, value, name, shape):\n try:\n from tensorflow.python.framework import tensor_util\n except ImportError as e:\n raise ImportError(\n \"Unable to import tensorflow which is required {}\".format(e))\n\n if key == 'value':\n np_array = tensor_util.MakeNdarray(value.tensor)\n\n if np_array.dtype == np.dtype(object):\n # Object types are generally tensorflow DT_STRING (DecodeJpeg op).\n # Just leave it as placeholder.\n if shape and name in shape:\n var_shape = shape[name]\n else:\n var_shape = tensor_util.TensorShapeProtoToList(value.tensor.tensor_shape)\n self._nodes[name] = [_expr.var(name, shape=var_shape, dtype='uint8')]\n return\n\n array_ndim = len(np_array.shape)\n if array_ndim == 0:\n self._nodes[name] = [tvm.relay.const(np_array)]\n else:\n self._params[name] = tvm.nd.array(np_array)\n self._nodes[name] = [_expr.var(name,\n shape=self._params[name].shape,\n dtype=self._params[name].dtype)]\n else:\n if key not in ('dtype', '_output_shapes', '_class'):\n raise NotImplementedError \\\n (\"Other attributes for a Const(param) Node {} ? .\".format(key))\n\n def _get_attr(self, buf):\n \"\"\"Returns the value of the attr of this buf with the given `name`.\n\n Args:\n buf: attrvalue protobuf.\n\n Returns:\n The value of the attr, as a Python object.\n\n Raises:\n ValueError: If this op does not have an attr with the given `name`.\n \"\"\"\n fields = [\"s\", \"i\", \"f\", \"b\", \"type\", \"shape\", \"tensor\", \"func\"]\n\n x = buf\n\n ret = []\n\n try:\n from tensorflow.python.framework import dtypes\n except ImportError as e:\n raise ImportError(\n \"Unable to import tensorflow which is required {}\".format(e))\n\n # Treat an empty oneof value as an empty list.\n if not x.WhichOneof(\"value\"):\n return ret\n if x.HasField(\"list\"):\n for f in fields:\n if getattr(x.list, f):\n if f == \"type\":\n ret += [dtypes.as_dtype(x) for x in list(getattr(x.list, f))]\n else:\n ret += list(getattr(x.list, f))\n else:\n for f in fields:\n if x.HasField(f):\n if f == \"type\":\n ret = dtypes.as_dtype(getattr(x, f))\n else:\n ret = getattr(x, f)\n return ret\n\n def _parse_attr(self, attr_proto):\n \"\"\"Convert a list of AttributeProto to a dict, with names as keys.\"\"\"\n attrs = {}\n for key, value in attr_proto.items():\n attrs[key] = self._get_attr(value)\n\n return attrs\n\n def _convert_rnn_operator(self, op_name, inputs,\n attrs, params, graph, convert_map):\n \"\"\"Convert RNN and its variant operators to Relay operators.\n This converter read the input states of each layers and\n also maintain the output states of each layer in a list.\n\n Parameters\n ----------\n op_name : str\n Operator name, such as LSTMBlockCell\n inputs : list of relay.Expr\n List of input symbols.\n attrs : dict\n Dict of operator attributes\n params : dict\n List of pretrained weights and bias\n graph : Tensorflow graph object\n Graph is to find the number of upcoming same operator to\n calculate the number of layers.\n convert_map : dict\n Dict of name : callable, where name is the op's name that\n require conversion to relay, 
callable are functions which\n take attrs and return (new_op_name, new_attrs)\n\n Returns\n -------\n sym : relay.Expr\n Converted relay.Expr\n \"\"\"\n if not self._num_rnn_layer:\n self._out_rnn = []\n self.rnn = RecurrentNetworks(self._nodes, self._out_rnn, graph, convert_map)\n self._num_rnn_layer = True\n sym = self.rnn.process_op(op_name, inputs, attrs, params, self._mod)\n return sym\n\n def _convert_control_flow_operator(self, node, inputs, attrs, control_flow_node_map):\n \"\"\"\n Convert a TensorFlow control flow primitive into the corresponding component\n of a Relay control flow construct, i.e. `tf.cond` and `tf.while_loop`\n are converted into a Relay `If` and a recursive call, respectively.\n\n Parameters\n ----------\n node: TensorFlow graph node object.\n A TensorFlow graph node object.\n\n inputs : List[tvm.relay.Expr]\n List of input symbols.\n\n attrs : Dict[tvm.Attrs]\n Dict of operator attributes.\n\n control_flow_node_map : Dict[str, Set[str]]\n A dictionary that maps the execution frame name to a set of\n primitives.\n\n Returns\n -------\n op : tvm.relay.Expr\n Converted relay expression.\n \"\"\"\n node_name_prefix = node.name.rsplit('/', 1)[0]\n if node.op == \"Merge\":\n if _in_while_loop(self._control_flow_node_map, node_name_prefix):\n op = self._backtrack_construct(node.input[0])\n if node_name_prefix not in self._loops:\n self._loops[node_name_prefix] = Loop(self._mod,\n node_name_prefix,\n self._hash2tfnode,\n self._nodes,\n self._while_loop_name_set)\n else:\n if len(self._branches) == 0:\n raise RuntimeError(\"Cannot find a created \"\n \"conditional for merge node\")\n branch = self._branches[node_name_prefix]\n false_br = self._backtrack_construct(node.input[0])\n true_br = self._backtrack_construct(node.input[1])\n assert len(true_br) == 1\n assert len(false_br) == 1\n branch.true_branch = true_br[0]\n branch.false_branch = false_br[0]\n op = [branch.if_node()]\n if node_name_prefix not in self._while_loop_name_set:\n try:\n cond_val = np.all(_infer_value(branch.cond, self._params,\n self._mod).asnumpy())\n if cond_val:\n op = [branch.true_branch]\n else:\n op = [branch.false_branch]\n except Exception:\n op = [branch.if_node()]\n elif node.op == \"Exit\":\n loop = self._loops[node_name_prefix]\n\n # Check whether the order of loop variables aligns\n # with loop body. 
If not, create new loop variable list\n # with correct order.\n if not loop.aligned:\n loop_vars = []\n for i in self._loop_body_order[node_name_prefix]:\n for j, k in enumerate(self._loop_var_order[node_name_prefix]):\n if k == i:\n loop_vars.append(loop.loop_vars[j])\n loop.loop_vars = loop_vars\n loop.aligned = True\n exit_name = node.name.split('/')[-1]\n if '_' in exit_name:\n exit_number = int(exit_name[5:])\n else:\n exit_number = 0\n expr = loop.while_loop()\n body_pos = exit_number\n for i, j in enumerate(self._loop_body_order[node_name_prefix]):\n if exit_number == j:\n body_pos = i\n break\n op = [_expr.TupleGetItem(expr, body_pos)]\n elif node.op == \"Enter\":\n op = self._backtrack_construct(node.input[0])\n elif node.op == \"LoopCond\":\n op = self._backtrack_construct(node.input[0])\n assert len(op) == 1\n self._loops[node_name_prefix].cond = op[0]\n elif node.op == \"Switch\":\n op = self._backtrack_construct(node.input[0])\n cond = self._backtrack_construct(node.input[1])\n assert len(op) == 1\n if _in_while_loop(self._control_flow_node_map, node_name_prefix):\n if node_name_prefix not in self._loop_var_order:\n self._loop_var_order[node_name_prefix] = []\n if node.name.endswith(\"Switch\"):\n self._loop_var_order[node_name_prefix].append(0)\n else:\n self._loop_var_order[node_name_prefix].\\\n append(int(node.name.split(\"Switch_\")[-1]))\n self._loops[node_name_prefix].loop_vars.append(op[0])\n else:\n if node_name_prefix not in self._branches:\n self._branches[node_name_prefix] = Branch()\n self._branches[node_name_prefix].cond = cond[0]\n elif node.op == \"NextIteration\":\n if node_name_prefix not in self._loop_body_order:\n self._loop_body_order[node_name_prefix] = []\n if node.name.endswith(\"NextIteration\"):\n self._loop_body_order[node_name_prefix].append(0)\n else:\n self._loop_body_order[node_name_prefix].\\\n append(int(node.name.split(\"NextIteration_\")[-1]))\n op = self._backtrack_construct(node.input[0])\n\n assert len(op) == 1\n self._loops[node_name_prefix].body.append(op[0])\n else:\n raise Exception(\"Cannot identify control flow operator: \" +\n \"{}\".format(node.op))\n\n return op\n\n def _convert_operator(self, op_name, inputs, attrs,\n graph, identity_list=None, convert_map=None):\n \"\"\"Convert from Tensorflow operator to relay operator.\n The converter must specify conversions explicitly for incompatible name, and\n apply handlers to operator attributes.\n\n Parameters\n ----------\n op_name : str\n Operator name, such as Conv2D, AvgPool\n inputs : list of relay.op\n List of input symbols.\n attrs : dict\n Dict of operator attributes\n identity_list : list\n List of operators that don't require conversion\n convert_map : dict\n Dict of name : callable, where name is the op's name that\n require conversion to relay, callable are functions which\n take attrs and return (new_op_name, new_attrs)\n\n Returns\n -------\n sym : relay.op\n Converted relay operator\n \"\"\"\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if convert_map else _convert_map\n convert_map_rnn = _convert_map_rnn\n if op_name in identity_list:\n sym = get_relay_op(op_name)(*inputs, **attrs)\n elif op_name in convert_map:\n if _need_prelude_for_shape_inference(op_name):\n sym = convert_map[op_name](inputs, attrs, self._params, self._prelude)\n else:\n sym = convert_map[op_name](inputs, attrs, self._params, self._mod)\n\n elif op_name in convert_map_rnn:\n sym = self._convert_rnn_operator(op_name, inputs, attrs,\n self._params, 
graph,\n convert_map_rnn)\n else:\n raise NotImplementedError(\"Operator {} not implemented.\".format(op_name))\n return sym\n\n def _backtrack_construct(self, node_name):\n \"\"\"Convert a specific tensorflow node to a relay expression.\n\n If any of its ancestor nodes are not converted yet, backtrack as\n far as the input node and convert all nodes on the path.\n\n This is required when parsing control flow nodes, since the parsing\n order may not follow the original graph def.\n\n Parameters\n ----------\n node_name : str\n Tensorflow node name.\n\n Returns\n -------\n op : relay.Expr\n Converted relay expression\n \"\"\"\n node_name = node_name.split(':')[0].split(\"^\")[-1]\n\n if node_name not in self._nodes:\n node = self._tf_node_map[node_name]\n attr = self._parse_attr(node.attr)\n\n if node.op in _control_flow_nodes:\n attr = self._parse_attr(node.attr)\n op = self._convert_control_flow_operator(node, [],\n attr,\n self._control_flow_node_map)\n else:\n attr[\"_output_shapes\"] = self._output_shapes[node_name]\n attr[\"_node_name\"] = node.name\n attr[\"_target_layout\"] = self._layout\n inputs = []\n for iname in node.input:\n in_op = self._backtrack_construct(iname)\n if isinstance(in_op, _expr.TupleWrapper):\n tn = iname.split(':')\n tensor_slot = int(tn[1]) if len(tn) > 1 else 0\n in_op = in_op[tensor_slot]\n else:\n in_op = in_op[0]\n\n inputs.append(in_op)\n op = self._convert_operator(node.op, inputs, attr, self._graph)\n\n if isinstance(op, np.ndarray):\n self._params[node.name] = tvm.nd.array(op)\n op = [_expr.var(node.name,\n shape=self._params[node.name].shape,\n dtype=self._params[node.name].dtype)]\n\n elif isinstance(op, (_expr.Expr, _expr.TupleGetItem)):\n op = [op]\n\n node_hash = s_hash(op) if isinstance(op, _expr.Tuple) else s_hash(op[0])\n self._hash2tfnode[node_hash] = node\n self._nodes[node_name] = op\n\n return self._nodes[node_name]\n\ndef from_tensorflow(graph, layout=\"NHWC\", shape=None, outputs=None):\n \"\"\"Load a tensorflow graph, which is a python tensorflow graph object, into relay.\n The companion parameters will be handled automatically.\n\n Parameters\n ----------\n graph : GraphDef object\n Tensorflow GraphDef\n\n layout : target layout to be used (Optional)\n NCHW only supported now to enable NHWC models on GPU.\n\n shape : Dictionary of input dimensions (Optional)\n Graph level input shape dictionary.\n\n outputs : List of output tensor names (Optional)\n if not specified then the last node is assumed as graph output.\n\n Returns\n -------\n mod : tvm.IRModule\n The module that optimizations will be performed on.\n\n params : dict of str to tvm.nd.NDArray\n Dict of converted parameters stored in tvm.nd.NDArray format\n \"\"\"\n g = GraphProto()\n mod, params = g.from_tensorflow(graph, layout, shape, outputs)\n return mod, params\n" ]
[ [ "tensorflow.python.framework.tensor_util.MakeNdarray", "numpy.expand_dims", "numpy.squeeze", "numpy.cumsum", "numpy.dtype", "tensorflow.python.framework.dtypes.as_dtype", "numpy.prod", "numpy.array", "tensorflow.python.framework.tensor_util.TensorShapeProtoToList" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] } ]
cair/covid-19-us-dataset
[ "f4193c6c5ee8a7176c851065ed0a5879149f617a" ]
[ "main.py" ]
[ "import datetime\nfrom functools import reduce\n\nimport gym\nimport numpy as np\nimport pandas as pd\nfrom loguru import logger\nimport us_state_abbrev\nimport util\nimport matplotlib.pyplot as plt\nimport argparse\nimport seaborn as sns\npd.options.mode.chained_assignment = None\n\ndef print_full(x):\n pd.set_option('display.max_rows', None)\n pd.set_option('display.max_columns', None)\n pd.set_option('display.width', 2000)\n pd.set_option('display.float_format', '{:20,.2f}'.format)\n pd.set_option('display.max_colwidth', None)\n print(x)\n pd.reset_option('display.max_rows')\n pd.reset_option('display.max_columns')\n pd.reset_option('display.width')\n pd.reset_option('display.float_format')\n pd.reset_option('display.max_colwidth')\n\n\n# Simulation based of\n# https://github.com/owid/covid-19-data/tree/master/public/data/vaccinations\n# . Vaccinations began on December 14, 2020.\n\n\nclass BaseEnvironment(gym.Env):\n\n def __init__(self):\n self.action_space = gym.spaces.Discrete(12)\n # self.observation_space = gym.spaces.Box()\n\n def step(self, action):\n pass\n\n\nclass USCountry(BaseEnvironment):\n\n def __init__(self, date_from=datetime.datetime(2019, 1, 1), date_to=datetime.datetime.now()):\n super().__init__()\n\n self.date_from = date_from\n self.date_to = date_to\n\n self.population = self.load_population()\n self.dataset = self.load_data()\n\n\n def load_population(self):\n # https://www2.census.gov/programs-surveys/popest/technical-documentation/file-layouts/2010-2019/sc-est2019-agesex-civ.pdf\n # https://www2.census.gov/programs-surveys/popest/tables/2010-2019/state/asrh/sc-est2019-agesex-civ.csv\n population = util.download(\"us-residents\", \"https://www2.census.gov/programs-surveys/popest/tables/2010-2019/state/asrh/sc-est2019-agesex-civ.csv\")\n population = population[population[\"SUMLEV\"] == 40]\n population.drop('REGION', inplace=True, axis=1)\n population.drop('DIVISION', inplace=True, axis=1)\n population.drop('STATE', inplace=True, axis=1)\n population.drop('SUMLEV', inplace=True, axis=1)\n population.drop('ESTBASE2010_CIV', inplace=True, axis=1)\n\n for x in range(2010, 2019):\n population.drop(f'POPEST{x}_CIV', inplace=True, axis=1)\n\n #bins = pd.cut(population['AGE'], [-1, 18, 65, 200])\n #9 778 694\n\n rename_dict = {\n \"POPEST2019_CIV\": \"population\",\n \"NAME\": \"state\"\n }\n population = population.rename(columns=rename_dict)\n rename_dict.update({k: k.lower() for k in population.columns.values})\n population = population.rename(columns=rename_dict)\n\n population = population[population.age != 999]\n # example: sum of alabama population with all genders (0)\n # population[(population[\"name\"] == \"Alabama\") & (population[\"sex\"] == 0)][\"population\"].sum()\n return population\n\n\n def load_data(self):\n\n\n vaccinations = util.download(\"us-vaccinations\",\n \"https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/vaccinations/us_state_vaccinations.csv\")\n vaccinations = vaccinations.rename(\n columns={\n 'location': 'state'\n }\n )\n\n vaccine_allocations_pfizer = util.download(\"us-vaccine-allocations-pfizer\",\n \"https://data.cdc.gov/api/views/saz5-9hgg/rows.csv?accessType=DOWNLOAD\")\n vaccine_allocations_pfizer = vaccine_allocations_pfizer.rename(\n columns={\n 'Jurisdiction': \"state\",\n 'Week of Allocations': 'date',\n '1st Dose Allocations': 'vaccine_alloc_1_pfizer',\n '2nd Dose Allocations': 'vaccine_alloc_2_pfizer'\n })\n\n vaccine_allocations_janssen = util.download(\"us-vaccine-allocations-janssen\",\n 
\"https://data.cdc.gov/api/views/w9zu-fywh/rows.csv?accessType=DOWNLOAD\")\n vaccine_allocations_janssen = vaccine_allocations_janssen.rename(\n columns={\n 'Jurisdiction': \"state\",\n 'Week of Allocations': 'date',\n '1st Dose Allocations': 'vaccine_alloc_1_janssen',\n '2nd Dose Allocations': 'vaccine_alloc_2_jannsen'\n })\n\n vaccine_allocations_moderna = util.download(\"us-vaccine-allocations-moderna\",\n \"https://data.cdc.gov/api/views/b7pe-5nws/rows.csv?accessType=DOWNLOAD\")\n vaccine_allocations_moderna = vaccine_allocations_moderna.rename(\n columns={\n 'Jurisdiction': \"state\",\n 'Week of Allocations': 'date',\n '1st Dose Allocations': 'vaccine_alloc_1_moderna',\n '2nd Dose Allocations': 'vaccine_alloc_2_moderna'\n })\n\n death_counts = util.download(\"us-death-counts-advanced\",\n \"https://data.cdc.gov/api/views/9bhg-hcku/rows.csv?accessType=DOWNLOAD\")\n death_counts = death_counts.rename(columns={'State': \"state\", 'Data As Of': 'date'})\n\n cases_and_deaths = util.download(\"us-cases-death\",\n \"https://data.cdc.gov/api/views/9mfq-cb36/rows.csv?accessType=DOWNLOAD\")\n cases_and_deaths = cases_and_deaths.rename(columns={'submission_date': \"date\"})\n\n cases_and_deaths = cases_and_deaths.replace({\"state\": us_state_abbrev.abbrev_us_state})\n cases_and_deaths = cases_and_deaths.replace({\"state\": us_state_abbrev.abbrev_us_state_3})\n\n # Datasets tend to name stuff differently. Above, we try to normalize state names.\n # A warning is output with states that does not match all dataset sources.\n # You can choose to ignore the warning, or fix the 'problem' (We remove these from the datasets)\n common_states, uncommon_states = util.count_common_states(\n vaccine_allocations_pfizer[\"state\"].unique(),\n vaccine_allocations_janssen[\"state\"].unique(),\n vaccine_allocations_moderna[\"state\"].unique(),\n death_counts[\"state\"].unique(),\n cases_and_deaths[\"state\"].unique(),\n vaccinations[\"state\"].unique()\n )\n logger.warning(\"The following states are excluded from the dataset: {}\", uncommon_states)\n\n vaccine_allocations_pfizer, \\\n vaccine_allocations_janssen, \\\n vaccine_allocations_moderna, \\\n death_counts, \\\n cases_and_deaths, \\\n vaccinations = util.remove_uncommon_states(uncommon_states, vaccine_allocations_pfizer,\n vaccine_allocations_janssen,\n vaccine_allocations_moderna,\n death_counts,\n cases_and_deaths,\n vaccinations)\n assert len(vaccine_allocations_janssen[\"state\"].unique()) == len(common_states), \"State count not valid!\"\n\n logger.info(vaccine_allocations_pfizer.columns.values)\n logger.info(vaccine_allocations_janssen.columns.values)\n logger.info(vaccine_allocations_moderna.columns.values)\n logger.info(death_counts.columns.values)\n logger.info(vaccinations.columns.values)\n logger.info(cases_and_deaths.columns.values)\n\n all_data_sources = [\n vaccine_allocations_pfizer,\n vaccine_allocations_janssen,\n vaccine_allocations_moderna,\n vaccinations,\n # death_counts,\n cases_and_deaths\n ]\n\n util.convert_datestring_to_datetime(*all_data_sources)\n\n combined_data = reduce(lambda left, right: pd.merge(left, right, on=['date', 'state'],\n how='outer'), all_data_sources).sort_values(by=\"date\")#.interpolate(method='linear', limit_direction='forward', axis=0)\n\n # Fill specific missing values\n util.fill_na_by_partial_name(\"vaccine_alloc\", combined_data)\n\n # Interpolate where applicable\n #combined_data = combined_data.loc[combined_data[\"state\"] == \"Ohio\"]\n combined_data = combined_data.interpolate()\n\n # Fill rest with 
0\n combined_data = combined_data.fillna(0)\n\n combined_data = combined_data.set_index(\"date\")\n\n # Now you should quality-check the dataset\n\n combined_data.to_csv(\"test.csv\")\n\n print(len(vaccine_allocations_moderna[\"state\"].unique()))\n print(len(cases_and_deaths[\"state\"].unique()))\n\n # Limit between dates\n #mask_date = (combined_data['date'] > self.date_to) & (combined_data['date'] <= self.date_from)\n combined_data = combined_data.loc[self.date_from:self.date_to]\n\n return combined_data\n\n def dump_dataset(self, is_last=False, filenames=None):\n # Use None instead of a mutable default argument: a shared default\n # list would accumulate file names across repeated calls.\n if filenames is None:\n filenames = []\n postfix = \"latest\" if is_last else str(datetime.datetime.now())\n pop_name = f\"population-{postfix}.csv\"\n data_name = f\"data-{postfix}.csv\"\n filenames.extend([pop_name, data_name])\n\n self.population.to_csv(pop_name)\n self.dataset.to_csv(data_name)\n\n if not is_last:\n return self.dump_dataset(is_last=True, filenames=filenames)\n return filenames\n\nclass QAgent:\n\n def __init__(self):\n self.q = np.zeros(shape=(200, 200), dtype=np.float32)\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--dump-only\", type=bool, default=True)\n parser.add_argument(\"--clean-dataset\", type=bool, default=False)\n parser.add_argument('--date-from', default=datetime.datetime(2020, 5, 1), type=lambda s: datetime.datetime.strptime(s, '%Y-%m-%d'))\n parser.add_argument('--date-to', default=datetime.datetime(2021, 3, 21), type=lambda s: datetime.datetime.strptime(s, '%Y-%m-%d'))\n args = parser.parse_args()\n\n if args.clean_dataset:\n # Cleans the current data directory (makes a backup in case external sources go away)\n util.FileUtils.backup_directory(\"data\", \"data\")\n util.FileUtils.remove_directory(\"data\")\n\n env = USCountry(\n date_from=args.date_from,\n date_to=args.date_to\n )\n\n if args.dump_only:\n from git import Repo\n repo = Repo()\n git = repo.git\n\n git.checkout(b='dataset')\n\n # Only dump the current dataset\n files = env.dump_dataset()\n\n for f in files:\n git.add(f)\n\n logger.info(\"Dataset is dumped. Closing.\")\n exit(0)\n\n \"\"\"\n ['New Jersey' 'Kansas' 'Illinois' 'Montana' 'Florida' 'Alaska' 'Kentucky'\n 'Massachusetts' 'Delaware' 'Oklahoma' 'Tennessee' 'Alabama'\n 'South Dakota' 'Vermont' 'Arkansas' 'California' 'Utah' 'Michigan'\n 'Washington' 'Connecticut' 'Wisconsin' 'Rhode Island' 'Nebraska' 'Idaho'\n 'New Mexico' 'Virginia' 'Missouri' 'Iowa' 'Louisiana' 'Minnesota'\n 'Oregon' 'New Hampshire' 'North Dakota' 'Colorado' 'Puerto Rico'\n 'South Carolina' 'Wyoming' 'West Virginia' 'Ohio' 'Georgia'\n 'North Carolina' 'Arizona' 'Texas' 'Pennsylvania' 'District of Columbia'\n 'Nevada' 'Hawaii' 'Mississippi' 'Indiana' 'Maryland' 'Maine']\n \"\"\"\n state_name = \"Texas\"\n # Per-state slice of the merged dataset.\n state_df = env.dataset[env.dataset[\"state\"] == state_name]\n state_pop = env.population[(env.population[\"state\"] == state_name) & (env.population[\"sex\"] == 0)][\"population\"].sum()\n\n state_df[\"people_vaccinated_per_hundred\"] /= 100.0\n state_df[\"tot_cases\"] /= state_pop\n print(state_df[\"tot_cases\"])\n state_df[[\"tot_cases\", \"people_vaccinated_per_hundred\"]].plot(title=state_name)\n plt.savefig(\"test.png\")\n\n #qagent = QAgent(shape=(30, 30))\n" ]
[ [ "pandas.reset_option", "pandas.merge", "matplotlib.pyplot.savefig", "pandas.set_option", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
spring-epfl/trickster
[ "070a8ea8894d8bf3e97d0774b12c64458aa2c219", "070a8ea8894d8bf3e97d0774b12c64458aa2c219" ]
[ "scripts/legacy/wfp_adversarial_deterministic_raw_features.py", "scripts/legacy/malware.py" ]
[ "import sys\n\nsys.path.append(\"..\")\n\nimport os\nimport math\nimport pickle\nimport argparse\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom itertools import groupby\nfrom IPython.display import display, HTML\n\nfrom trickster.search import a_star_search\nfrom trickster.wfp_helper import (\n extract,\n load_cell,\n onehot,\n pad_and_onehot,\n reverse_onehot,\n)\n\nfrom sklearn.utils import shuffle\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nfrom sklearn.linear_model import LogisticRegression, LogisticRegressionCV\nfrom sklearn.svm import SVC\nfrom scipy.spatial import distance\nfrom tqdm import tqdm\n\nfrom defaultcontext import with_default_context\nfrom profiled import Profiler, profiled\n\nseed = 2018\n\n\ndef load_data(path=\"./data/wfp_traces/\"):\n labels = []\n data = []\n for fn in tqdm(os.listdir(path)):\n file_path = path + fn\n if os.path.isfile(file_path):\n cell_list = load_cell(file_path)\n if \"-\" in str(fn):\n labels.append(1)\n data.append(cell_list)\n else:\n labels.append(0)\n data.append(cell_list)\n labels = np.array(labels)\n data = np.array(data)\n return data, labels\n\n\nX, y = load_data(path=\"./data/wfp_traces_toy/\")\nX, y = X[:500], y[:500]\n\ntrace_len, X = pad_and_onehot(X)\nprint(\"Shape of data: {}, Shape of labels: {}\".format(X.shape, y.shape))\n\n# Split into training and test sets\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.2, random_state=seed\n)\nprint(\n \"Number train samples: {}, Number test samples: {}\".format(\n X_train.shape[0], X_test.shape[0]\n )\n)\n\n# Fit logistic regression and perform CV\nclf = LogisticRegression()\nclf.fit(X_train, y_train)\n\n# Get best score and C value\nprint(\"Test score is: {:.2f}%.\".format(clf.score(X_test, y_test) * 100))\n\n\n@with_default_context(use_empty_init=True)\nclass Counter:\n def __init__(self):\n self.cnt = 0\n\n def increment(self):\n self.cnt += 1\n\n def count(self):\n return self.cnt\n\n\n# Define `BruteNode` class transformation code:\n# If at level i a node contains an input of length n, there will be n+1 branches at\n# level i+1 with a new request at every possible index.\n\n\nclass BruteNode:\n def __init__(self, x):\n self.root = list(reverse_onehot(x, trace_len))\n\n def expand(self):\n # Increment the counter of expanded nodes.\n counter = Counter.get_default()\n counter.increment()\n\n children = []\n for idx in range(len(self.root) + 1):\n expanded_node = self.root[:idx] + [1] + self.root[idx:]\n children.append(np.array(expanded_node)[:trace_len])\n\n return onehot(children)\n\n def __repr__(self):\n return \"BruteNode({})\".format(self.root)\n\n\n# All the functions that need to be passed into the search, in the expected format.\n\n\ndef _expand_fn(x, p_norm=1):\n \"\"\"Wrap the example in `Node`, expand the node, and compute the costs.\n\n Returns a list of tuples (child, cost)\n \"\"\"\n children = BruteNode(x).expand()\n # Use Lp distance in the feature space.\n costs = [np.linalg.norm(np.array(x) - np.array(c), ord=p_norm) for c in children]\n\n # Poor man's logging.\n n = Counter().get_default().count()\n if n % 5 == 0:\n print(\"Current level :\", x.level)\n print(\"Branches :\", len(children))\n print(\"Number of expands :\", n)\n print(\n \"Cost stats : %f / %f / %f\"\n % (min(costs), float(sum(costs)) / len(children), max(costs))\n )\n print()\n\n return list(zip(children, costs))\n\n\ndef _goal_fn(x, clf, target_confidence=0.5):\n \"\"\"Tell whether the example has reached the goal.\"\"\"\n 
return clf.predict_proba([x])[0, 1] >= target_confidence\n\n\ndef _heuristic_fn(x, clf, q_norm=np.inf, eps=1., offset=0):\n \"\"\"Distance to the decision boundary of a logistic regression classifier.\n\n By default the distance is w.r.t. L1 norm. This means that the denominator\n has to be in terms of the Holder dual norm (`q_norm`), so L-inf. I know,\n this interface is horrible.\n\n NOTE: The value has to be zero if the example is already on the target side\n of the boundary.\n \"\"\"\n score = clf.decision_function([x])[0]\n if score >= 0:\n return 0.0\n h = np.abs(score) / np.linalg.norm(clf.coef_, ord=q_norm)\n return eps * (h + offset)\n\n\ndef hash_fn(x):\n \"\"\"Hash function for examples.\"\"\"\n x_str = str(x)\n return hash(x_str)\n\n\n@profiled\ndef find_adversarial(\n x,\n clf,\n p_norm=1,\n q_norm=np.inf,\n target_confidence=0.5,\n eps=1.0,\n offset=0.,\n return_path=False,\n):\n \"\"\"Transform an example until it is classified with target confidence.\"\"\"\n\n if clf.predict_proba([x])[0, 1] >= target_confidence:\n raise Exception(\"Initial example is already classified as positive.\")\n return a_star_search(\n start_node=x,\n expand_fn=lambda x: _expand_fn(x, p_norm=p_norm),\n goal_fn=lambda x: _goal_fn(x, clf, target_confidence),\n heuristic_fn=lambda x: _heuristic_fn(x, clf, q_norm=q_norm),\n iter_lim=int(1e1),\n hash_fn=hash_fn,\n return_path=return_path,\n )\n\n\ndef find_adv_examples(X, target_confidence, out_path, p_norm=1, q_norm=np.inf, eps=1.0):\n \"\"\"Find adversarial examples for a whole dataset\"\"\"\n\n # Dataframe for storing the results.\n results = pd.DataFrame(\n columns=[\n \"index\",\n \"found\",\n \"confidence\",\n \"original_confidence\",\n \"x\",\n \"adv_x\",\n \"real_cost\",\n \"path_cost\",\n \"nodes_expanded\",\n \"runtime\",\n \"conf_level\",\n ]\n )\n\n # Indices of examples classified as negative.\n neg_indices, = np.where(clf.predict_proba(X)[:, 1] < target_confidence)\n\n for i, original_index in enumerate(tqdm(neg_indices)):\n x = X[original_index]\n\n # Instantiate a counter for expanded nodes, and a profiler.\n expanded_counter = Counter()\n per_example_profiler = Profiler()\n\n with expanded_counter.as_default(), per_example_profiler.as_default():\n x_adv, path_cost = find_adversarial(\n x, clf, target_confidence=target_confidence, eps=eps\n )\n\n nodes_expanded = expanded_counter.count()\n runtime = per_example_profiler.compute_stats()[\"find_adversarial\"][\"tot\"]\n original_confidence = clf.predict_proba([x])[0, 1]\n\n # If an adversarial example was not found, only record index, runtime, and\n # the number of expanded nodes.\n if x_adv is None:\n results.loc[i] = [\n original_index,\n False,\n None,\n original_confidence,\n x,\n None,\n None,\n None,\n nodes_expanded,\n runtime,\n target_confidence,\n ]\n else:\n confidence = clf.predict_proba([x_adv])[0, 1]\n real_cost = np.linalg.norm(x_adv, ord=p_norm) - np.linalg.norm(\n x, ord=p_norm\n )\n\n results.loc[i] = [\n original_index,\n True,\n confidence,\n original_confidence,\n x,\n x_adv,\n real_cost,\n path_cost,\n nodes_expanded,\n runtime,\n target_confidence,\n ]\n print(results.loc[i])\n\n with open(out_path, \"wb\") as f:\n pickle.dump(results_graph, f)\n return results\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"wfp deterministic example\")\n parser.add_argument(\n \"--confidence-level\",\n type=float,\n default=0.5,\n metavar=\"N\",\n help=\"confidence level for adversarial example (default: 0.5)\",\n )\n parser.add_argument(\n \"--num-examples\", 
type=int, default=2, metavar=\"N\", help=\"number of examples\"\n )\n parser.add_argument(\n \"--epsilon\", type=int, default=1., metavar=\"N\", help=\"greediness parameter\"\n )\n args = parser.parse_args()\n\n out_path = \"./data/wfp_raw_det_conf_l_%.2f_eps_%.2f.pkl\" % (\n args.confidence_level,\n args.epsilon,\n )\n results_graph = find_adv_examples(\n X, args.confidence_level, out_path, eps=args.epsilon\n )\n\n\nif __name__ == \"__main__\":\n main()\n", "#!/usr/bin/env python3\n\n# TODO: Update interface usage.\n\nimport sys\n\nsys.path.append(\"..\")\n\n# Ignore warnings.\nimport warnings\n\nwarnings.filterwarnings(\"ignore\")\n\nimport pickle\nimport logging\nimport numpy as np\nimport pandas as pd\nfrom os import listdir\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.linear_model import LogisticRegressionCV\nfrom scipy.sparse import csr_matrix, save_npz, load_npz, issparse\nfrom tqdm import tqdm, trange\n\nfrom trickster.search import a_star_search, ida_star_search\nfrom trickster.adversarial_helper import *\nfrom trickster.expansions import *\n\nfrom defaultcontext import with_default_context\nfrom profiled import Profiler, profiled\n\n\nCOUNTER_LIM = 50000\nDEBUG_FREQ = 500\n\nlogger = None\nSEED = 1\nnp.random.seed(seed=SEED)\n\n\nclass LogisticRegressionScikitSaliencyOracle:\n def __init__(self, model):\n self.model = model\n\n def eval(self, _):\n return self.model.coef_[0]\n\n\nclass DistortionBoundReachedError(Exception):\n pass\n\n\nclass ExampleDoesNotExistError(Exception):\n pass\n\n\n@profiled\ndef find_adversarial_jsma(\n x,\n clf,\n oracle,\n transformable_feature_idxs,\n target_confidence=0.5,\n k=20,\n return_path=False,\n):\n \"\"\"\n Perform adversarial example search using Grosse et al. algorithm based on JSMA.\n \"\"\"\n if clf.predict_proba([x])[0, 1] <= target_confidence:\n raise Exception(\"Initial example is already classified as bening.\")\n if return_path:\n path = [x]\n\n x_star = np.array(x, dtype=\"float\")\n distortions = 0\n\n while clf.predict_proba([x_star])[0, 1] > target_confidence and distortions < k:\n derivative = oracle.eval(x_star)\n idxs = np.argsort(derivative)\n\n for i, idx in enumerate(idxs):\n # Check if changing the feature is permitted.\n if x_star[idx] == 0 and idx in transformable_feature_idxs:\n x_star[idx] = 1\n if return_path:\n path.append(np.array(x_star))\n break\n if i == len(idxs) - 1:\n e = \"Adversarial example is impossible to create. 
Tried {} distortions.\".format(\n distortions\n )\n raise ExampleDoesNotExistError(e)\n\n distortions += 1\n\n if distortions == k:\n e = \"Distortion bound {} reached.\".format(k)\n raise DistortionBoundReachedError(e)\n\n if return_path:\n return x_star, distortions, path\n else:\n return x_star, distortions\n\n\n# Define experiment helper functions.\ndef load_transform_data_fn(data_file, **kwargs):\n \"\"\"\n Load and preprocess data, returning the examples and labels as numpy.\n \"\"\"\n # Try loading saved preprocessed data and classifier.\n with open(data_file, \"rb\") as f:\n obj = pickle.load(f)\n X, y = obj[\"X\"], obj[\"y\"]\n\n return X, y, None\n\n\ndef clf_fit_fn(X_train, y_train, data_file, **kwargs):\n # Try loading saved preprocessed data and classifier.\n with open(data_file, \"rb\") as f:\n obj = pickle.load(f)\n clf = obj[\"clf\"]\n return clf\n\n\ndef get_expansions_fn(_, data_file, feat_count, feature_selection_seed, **kwargs):\n \"\"\"\n Define expansions to perform on features and obtain feature indexes.\n \"\"\"\n with open(data_file, \"rb\") as f:\n obj = pickle.load(f)\n label_encoder = obj[\"label_encoder\"]\n\n features = np.array([c.split(\"::\")[0] for c in label_encoder.classes_])\n\n # Find indexes of required features in the original feature space.\n idxs_provider = find_substring_occurences(features, \"provider\")\n idxs_permission = find_substring_occurences(features, \"permission\")\n idxs_activity = find_substring_occurences(features, \"activity\")\n idxs_service_receiver = find_substring_occurences(features, \"service_receiver\")\n idxs_intent = find_substring_occurences(features, \"intent\")\n\n # Concatenate indexes of transformable features.\n transformable_feature_idxs = idxs_provider + idxs_permission + idxs_activity\n transformable_feature_idxs += idxs_service_receiver + idxs_intent\n\n # Choose randomly features to perturb.\n np.random.seed(feature_selection_seed)\n transformable_feature_idxs = np.random.choice(\n transformable_feature_idxs, size=feat_count, replace=False\n )\n transformable_feature_idxs.sort()\n\n # Find indexes of required features in the reduced feature space.\n reduced_features = features[transformable_feature_idxs]\n reduced_transformable_feature_idxs = find_substring_occurences(\n reduced_features, \"provider\"\n )\n reduced_transformable_feature_idxs += find_substring_occurences(\n reduced_features, \"permission\"\n )\n reduced_transformable_feature_idxs += find_substring_occurences(\n reduced_features, \"activity\"\n )\n reduced_transformable_feature_idxs += find_substring_occurences(\n reduced_features, \"service_receiver\"\n )\n reduced_transformable_feature_idxs += find_substring_occurences(\n reduced_features, \"intent\"\n )\n\n # Set required expansions for features.\n expansions = [(reduced_transformable_feature_idxs, expand_collection_set)]\n\n return expansions, transformable_feature_idxs\n\n\ndef baseline_search_fn(\n X,\n idxs,\n clf,\n target_confidence,\n transformable_feature_idxs,\n p_norm,\n logger_name,\n **kwargs\n):\n \"\"\"Perform JSMA adversarial example search to baseline against A* search.\"\"\"\n logger = logging.getLogger(logger_name)\n\n # Dataframe for storing the results.\n results = pd.DataFrame(\n columns=[\n \"index\",\n \"found\",\n \"x\",\n \"init_confidence\",\n \"x_adv\",\n \"adv_confidence\",\n \"real_cost\",\n \"distortions\",\n \"optimal_path\",\n \"difference\",\n \"runtime\",\n ]\n )\n\n # Oracle and distortion bound required by the JSMA algorithm.\n k, oracle = 20, 
LogisticRegressionScikitSaliencyOracle(clf)\n\n # Find adversarial examples using JSMA and record their costs.\n for i, idx in enumerate(tqdm(idxs, ascii=True)):\n\n logger.debug(\n \"[JSMA] Searching for adversarial example {}/{} using initial observation at index: {}.\".format(\n i, len(idxs), idx\n )\n )\n\n if issparse(X):\n x = X[idx].toarray()[0]\n else:\n x = X[idx]\n\n # Instantiate a profiler to analyse runtime.\n per_example_profiler = Profiler()\n\n x_adv, adv_found = None, None\n adv_confidence, difference = None, None\n real_cost, distortions = None, None\n runtime, optimal_path = None, None\n\n with per_example_profiler.as_default():\n try:\n x_adv, distortions, optimal_path = find_adversarial_jsma(\n x=x,\n clf=clf,\n oracle=oracle,\n transformable_feature_idxs=transformable_feature_idxs,\n target_confidence=target_confidence,\n k=k,\n return_path=True,\n )\n adv_found = False if x_adv is None else True\n\n except (DistortionBoundReachedError, ExampleDoesNotExistError) as e:\n logger.debug(\n \"[JSMA] WARN! For observation at index {}: {}\".format(idx, e)\n )\n\n # Record some basic statistics.\n init_confidence = clf.predict_proba([x])[0, 1]\n runtime_stats = per_example_profiler.compute_stats()\n if \"find_adversarial\" in runtime_stats:\n runtime = runtime_stats[\"find_adversarial\"][\"tot\"]\n\n if x_adv is not None:\n logger.debug(\n \"[JSMA] Adversarial example found {}/{} found using initial observation at index: {}!\".format(\n i, len(idxs), idx\n )\n )\n # Compute further statistics.\n adv_confidence = clf.predict_proba([x_adv])[0, 1]\n real_cost = np.linalg.norm(x - x_adv, ord=p_norm)\n difference, = np.where(x != x_adv)\n\n results.loc[i] = [\n idx,\n adv_found,\n x,\n init_confidence,\n x_adv,\n adv_confidence,\n real_cost,\n distortions,\n optimal_path,\n difference,\n runtime,\n ]\n\n return results\n\n\nif __name__ == \"__main__\":\n # Setup a custom logger.\n log_file = \"../logging/malware_output.log\"\n logger = setup_custom_logger(log_file)\n\n # Define debug parameters (set to None to disable).\n counter_lim = 1000000\n debug_freq = 10000\n\n # Define experiment parameters.\n data_file = \"../scripts/tmp/preprocessed.pickle\"\n target_confidence = 0.5\n confidence_margin = 0.35\n\n p_norm, q_norm = 1, np.inf\n feature_selection_iterations = 25\n feat_counts = np.arange(200, 49, -50)\n\n # Perform the experiments.\n logger.info(\"Starting experiments for the DREBIN malware dataset.\")\n\n for feat_count in feat_counts:\n\n for i in range(feature_selection_iterations):\n\n output_file = \"results/malware_{}_{}.pickle\".format(feat_count, i)\n logger.info(\n \"Experiment iteration {}/{} using {} features.\".format(\n i, feature_selection_iterations, feat_count\n )\n )\n\n result = experiment_wrapper(\n load_transform_data_fn=load_transform_data_fn,\n data_file=data_file,\n feat_count=feat_count,\n feature_selection_seed=SEED + i,\n p_norm=p_norm,\n q_norm=q_norm,\n clf_fit_fn=clf_fit_fn,\n get_expansions_fn=get_expansions_fn,\n expand_quantized_fn=expand_quantized,\n target_confidence=target_confidence,\n confidence_margin=confidence_margin,\n baseline_search_fn=baseline_search_fn,\n zero_to_one=False,\n random_state=SEED,\n counter_lim=counter_lim,\n debug_freq=debug_freq,\n logger=logger,\n )\n\n result[\"feature_count\"] = feat_count\n result[\"feature_selection_iteration\"] = i\n result[\"p_norm\"] = p_norm\n result[\"q_norm\"] = q_norm\n\n assert len(result[\"search_results\"]) == len(result[\"baseline_results\"])\n\n # Compare our approach with JSMA 
approach.\n N = len(result[\"search_results\"])\n\n for j in range(N):\n astr_series = result[\"search_results\"].loc[j]\n jsma_series = result[\"baseline_results\"].loc[j]\n\n assert astr_series[\"index\"] == jsma_series[\"index\"]\n idx = astr_series[\"index\"]\n\n if astr_series[\"real_cost\"] != jsma_series[\"real_cost\"]:\n logger.info(\n \"Real cost differs for A* and JSMA for example at index: {}!\".format(\n idx\n )\n )\n if astr_series[\"path_cost\"] != jsma_series[\"distortions\"]:\n logger.info(\n \"Path cost differs for A* and JSMA for example at index: {}!\".format(\n idx\n )\n )\n\n # Output results.\n logger.debug(\"Saving results to {}.\".format(output_file))\n with open(output_file, \"wb\") as f:\n pickle.dump(result, f)\n" ]
[ [ "numpy.abs", "sklearn.linear_model.LogisticRegression", "sklearn.model_selection.train_test_split", "pandas.DataFrame", "numpy.linalg.norm", "numpy.array" ], [ "scipy.sparse.issparse", "numpy.random.seed", "numpy.random.choice", "numpy.arange", "numpy.linalg.norm", "pandas.DataFrame", "numpy.argsort", "numpy.array", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
jinyongyoo/QACProject
[ "2f5d63f68fb6d4959852b1b997abb4358d416ba4" ]
[ "data.py" ]
[ "import os\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import IterableDataset\n\nclass Dictionary(object):\n def __init__(self, paths):\n self.char2idx = {}\n self.idx2char = []\n self.max_seq_len = 0\n self.build_dictionary(paths)\n\n def build_dictionary(self, paths):\n self.add_char(\"<pad>\")\n for path in paths:\n assert os.path.exists(path)\n with open(path, \"r\", encoding=\"utf8\") as f:\n for line in f:\n chars = list(line)\n chars.append(\"<eos>\")\n self.max_seq_len = max(len(chars), self.max_seq_len)\n for char in chars:\n self.add_char(char.lower())\n\n def add_char(self, char):\n if char not in self.char2idx:\n self.idx2char.append(char)\n self.char2idx[char] = len(self.idx2char) - 1\n\n def get_idx(self, char):\n return self.char2idx[char]\n\n def get_char(self, idx):\n return idx2char[idx]\n\n def __len__(self):\n return len(self.idx2char)\n\nclass QueryDataset(IterableDataset):\n def __init__(self, path, dictionary):\n self.path = path\n self.dictionary = dictionary\n self.len = 0\n with open(self.path, \"r\") as f:\n for line in f:\n self.len+=1\n\n def __len__(self):\n return self.len\n\n def prepare_data(self, text):\n chars = list(text)\n chars.append(\"<eos>\")\n text_length = len(chars)\n pad_tokens_to_add = self.dictionary.max_seq_len - text_length + 1\n chars += [\"<pad>\"] * pad_tokens_to_add\n ids = [self.dictionary.get_idx(c.lower()) for c in chars]\n\n input_tensor = nn.functional.one_hot(torch.LongTensor(ids[:-1]), num_classes=len(self.dictionary)).float()\n target_tensor = torch.LongTensor(ids[1:])\n\n return input_tensor, target_tensor, text_length\n\n def __iter__(self):\n file_itr = open(self.path)\n worker_info = torch.utils.data.get_worker_info()\n if worker_info is None:\n return map(self.prepare_data, file_itr)\n else:\n jump = self.len // worker_info.num_workers * worker_info.id\n for i in range(jump):\n next(file_itr)\n \n return map(self.prepare_data, file_itr)\n" ]
[ [ "torch.LongTensor", "torch.utils.data.get_worker_info" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ubik2/PEGAS-kRPC
[ "8f6628743a48a2cc700d57e62c0a49c94846f8c8" ]
[ "kRPC/plane_error.py" ]
[ "import numpy as np\r\n\r\n\r\ndef plane_error(results, target):\r\n \"\"\"\r\n Computes angle between target orbital plane and actually achieved plane.\r\n \r\n :param results: Results struct as output by flight_manager (NOT flight_sim_3d).\r\n :param target: Target struct as output by launch_targeting.\r\n :return: Angle between the two orbital planes.\r\n \"\"\"\r\n inc = results.powered[results.n-1].orbit.inc\r\n lan = results.powered[results.n-1].orbit.lan\r\n \r\n Rx = np.array([[1, 0, 0],\r\n [0, np.cos(np.deg2rad(inc)), -np.sin(np.deg2rad(inc))],\r\n [0, np.sin(np.deg2rad(inc)), np.cos(np.deg2rad(inc))]])\r\n Rz = np.array([[np.cos(np.deg2rad(lan)), -np.sin(np.deg2rad(lan)), 0],\r\n [np.sin(np.deg2rad(lan)), np.cos(np.deg2rad(lan)), 0],\r\n [0, 0, 1]])\r\n reached = np.matmul(Rz, np.matmul(Rx, np.array([0, 0, -1])))\r\n error = np.rad2deg(np.arccos(np.vdot(target.normal, reached)))\r\n return error\r\n" ]
[ [ "numpy.deg2rad", "numpy.array", "numpy.vdot" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
espittle/aws-deepracer-workshops
[ "aa3679f98d83fac7239e939ad593ca2876559519", "aa3679f98d83fac7239e939ad593ca2876559519" ]
[ "Advanced workshops/AI Driving Olympics 2019/challenge_train_w_PPO/src/markov/utils.py", "Advanced workshops/AI Driving Olympics 2019/challenge_train_w_PPO/src/markov/environments/deepracer_racetrack_env_original.py" ]
[ "import json\nimport logging\nimport os\nimport sys\nimport signal\nimport socket\nimport time\nimport datetime\nimport inspect\nfrom collections import OrderedDict\n\nSIMAPP_VERSION=\"1.0\"\n\nSIMAPP_SIMULATION_WORKER_EXCEPTION = \"simulation_worker.exceptions\"\nSIMAPP_TRAINING_WORKER_EXCEPTION = \"training_worker.exceptions\"\nSIMAPP_S3_DATA_STORE_EXCEPTION = \"s3_datastore.exceptions\"\nSIMAPP_ENVIRONMENT_EXCEPTION = \"environment.exceptions\"\nSIMAPP_MEMORY_BACKEND_EXCEPTION = \"memory_backend.exceptions\"\n\nSIMAPP_EVENT_SYSTEM_ERROR = \"system_error\"\nSIMAPP_EVENT_USER_ERROR = \"user_error\"\n\nSIMAPP_EVENT_ERROR_CODE_500 = \"500\"\nSIMAPP_EVENT_ERROR_CODE_503 = \"503\"\nSIMAPP_EVENT_ERROR_CODE_400 = \"400\"\nSIMAPP_EVENT_ERROR_CODE_401 = \"401\"\n\nclass Logger(object):\n counter = 0\n \"\"\"\n Logger class for all DeepRacer Simulation Application logging\n \"\"\"\n def __init__(self, logger_name=__name__, log_level=logging.INFO):\n self.logger = logging.getLogger(logger_name)\n self.logger.setLevel(log_level)\n\n handler = logging.StreamHandler()\n handler.setLevel(log_level)\n self.logger.addHandler(handler)\n\n def get_logger(self):\n \"\"\"\n Returns the logger object with all the required log settings.\n \"\"\"\n return self.logger\n\nlogger = Logger(__name__, logging.INFO).get_logger()\n\nimport tensorflow as tf\n\nSM_MODEL_OUTPUT_DIR = os.environ.get(\"ALGO_MODEL_DIR\", \"/opt/ml/model\")\n\ndef json_format_logger (msg, *args, **kwargs):\n dict_obj = OrderedDict()\n json_format_log = dict()\n log_error = False\n\n message = msg.format(args)\n dict_obj['version'] = SIMAPP_VERSION\n dict_obj['date'] = str(datetime.datetime.now())\n dict_obj['function'] = inspect.stack()[1][3]\n dict_obj['message'] = message\n for key, value in kwargs.items():\n if key == \"log_level\":\n log_error = kwargs[key] == \"ERROR\"\n else:\n dict_obj[key] = value\n if log_error:\n json_format_log[\"simapp_exception\"] = dict_obj\n logger.error (json.dumps(json_format_log))\n else:\n json_format_log[\"simapp_info\"] = dict_obj\n logger.info (json.dumps(json_format_log))\n\ndef build_system_error_dict(exception_type, errcode):\n \"\"\"\n Creates system exception dictionary to be printed in the logs\n \"\"\"\n return {\"exceptionType\":exception_type,\\\n \"eventType\":SIMAPP_EVENT_SYSTEM_ERROR,\\\n \"errorCode\":errcode, \"log_level\":\"ERROR\"}\n\ndef build_user_error_dict(exception_type, errcode):\n \"\"\"\n Creates user exception dictionary to be printed in the logs\n \"\"\"\n return {\"exceptionType\":exception_type,\\\n \"eventType\":SIMAPP_EVENT_USER_ERROR,\\\n \"errorCode\":errcode, \"log_level\":\"ERROR\"}\n\ndef get_ip_from_host(timeout=100):\n counter = 0\n ip_address = None\n\n host_name = socket.gethostname()\n logger.debug(\"Hostname: %s\" % host_name)\n while counter < timeout and not ip_address:\n try:\n ip_address = socket.gethostbyname(host_name)\n break\n except Exception as e:\n counter += 1\n time.sleep(1)\n\n if counter == timeout and not ip_address:\n error_string = \"Environment Error: Could not retrieve IP address \\\n for %s in past %s seconds. 
Job failed!\" % (host_name, timeout)\n json_format_logger (error_string,\n **build_system_error_dict(SIMAPP_ENVIRONMENT_EXCEPTION, SIMAPP_EVENT_ERROR_CODE_503))\n sys.exit(1)\n\n return ip_address\n\ndef write_frozen_graph(graph_manager):\n if not os.path.exists(SM_MODEL_OUTPUT_DIR):\n os.makedirs(SM_MODEL_OUTPUT_DIR)\n output_head = ['main_level/agent/main/online/network_1/ppo_head_0/policy']\n frozen = tf.graph_util.convert_variables_to_constants(graph_manager.sess, graph_manager.sess.graph_def, output_head)\n tf.train.write_graph(frozen, SM_MODEL_OUTPUT_DIR, 'model.pb', as_text=False)\n\n\ndef load_model_metadata(s3_client, model_metadata_s3_key, model_metadata_local_path):\n \"\"\"Loads the model metadata.\n \"\"\"\n\n # Try to download the custom model metadata from s3 first\n download_success = False;\n if not model_metadata_s3_key:\n logger.info(\"Custom model metadata key not provided, using defaults.\")\n else:\n # Strip the s3://<bucket> prefix if it exists\n model_metadata_s3_key = model_metadata_s3_key.replace('s3://{}/'.format(s3_client.bucket), '')\n download_success = s3_client.download_file(s3_key=model_metadata_s3_key,\n local_path=model_metadata_local_path)\n if download_success:\n logger.info(\"Successfully downloaded model metadata from {}.\".format(model_metadata_s3_key))\n else:\n logger.info(\"Could not download custom model metadata from {}, using defaults.\".format(model_metadata_s3_key))\n\n # If the download was successful, validate the contents\n if download_success:\n try:\n with open(model_metadata_local_path, 'r') as f:\n model_metadata = json.load(f)\n if 'action_space' not in model_metadata:\n logger.info(\"Custom model metadata does not define an action space.\")\n download_success = False\n except:\n logger.info(\"Could not download custom model metadata, using defaults.\")\n\n # If the download was unsuccessful, load the default model metadata instead\n if not download_success:\n from markov.defaults import model_metadata\n with open(model_metadata_local_path, 'w') as f:\n json.dump(model_metadata, f, indent=4)\n logger.info(\"Loaded default action space.\")\n\n\nclass DoorMan:\n def __init__(self):\n self.terminate_now = False\n signal.signal(signal.SIGINT, self.exit_gracefully)\n signal.signal(signal.SIGTERM, self.exit_gracefully)\n\n def exit_gracefully(self, signum, frame):\n self.terminate_now = True\n", "from __future__ import print_function\n\nimport bisect\nimport boto3\nimport json\nimport logging\nimport math\nimport os\nimport time\nimport traceback\nimport sys\nfrom collections import OrderedDict\n\nimport gym\nimport queue\nimport numpy as np\nfrom gym import spaces\nfrom PIL import Image\nfrom markov import utils\n\nlogger = utils.Logger(__name__, logging.INFO).get_logger()\n\n# Type of worker\nSIMULATION_WORKER = \"SIMULATION_WORKER\"\nSAGEMAKER_TRAINING_WORKER = \"SAGEMAKER_TRAINING_WORKER\"\n\nnode_type = os.environ.get(\"NODE_TYPE\", SIMULATION_WORKER)\nif node_type == SIMULATION_WORKER:\n import rospy\n from std_msgs.msg import Float64\n from gazebo_msgs.msg import ModelState\n from gazebo_msgs.srv import GetLinkState, GetModelState, JointRequest\n from scipy.spatial.transform import Rotation\n from sensor_msgs.msg import Image as sensor_image\n from shapely.geometry import Point, Polygon\n from shapely.geometry.polygon import LinearRing, LineString\n from deepracer_simulation_environment.srv import GetWaypointSrv, ResetCarSrv\n\n# Type of job\nTRAINING_JOB = 'TRAINING'\nEVALUATION_JOB = 'EVALUATION'\n\n# Dimensions of the input 
training image\nTRAINING_IMAGE_SIZE = (160, 120)\n\n# Local offset of the front of the car\nRELATIVE_POSITION_OF_FRONT_OF_CAR = [0.14, 0, 0]\n\n# Normalized track distance to move with each reset\nROUND_ROBIN_ADVANCE_DIST = 0.05\n\n# Reward to give the car when it \"crashes\"\nCRASHED = 1e-8\n\n# Size of the image queue buffer, we want this to be one so that we consume 1 image\n# at a time, but may want to change this as we add more algorithms\nIMG_QUEUE_BUF_SIZE = 1\n\n# List of required velocity topics, one topic per wheel\nVELOCITY_TOPICS = ['/racecar/left_rear_wheel_velocity_controller/command',\n '/racecar/right_rear_wheel_velocity_controller/command',\n '/racecar/left_front_wheel_velocity_controller/command',\n '/racecar/right_front_wheel_velocity_controller/command']\n\n# List of required steering hinges\nSTEERING_TOPICS = ['/racecar/left_steering_hinge_position_controller/command',\n '/racecar/right_steering_hinge_position_controller/command']\n\n# List of all effort joints\nEFFORT_JOINTS = ['/racecar/left_rear_wheel_joint', '/racecar/right_rear_wheel_joint',\n '/racecar/left_front_wheel_joint','/racecar/right_front_wheel_joint',\n '/racecar/left_steering_hinge_joint','/racecar/right_steering_hinge_joint']\n# Radius of the wheels of the car in meters\nWHEEL_RADIUS = 0.1\n\n# The number of steps to wait before checking if the car is stuck\n# This number should corespond to the camera FPS, since it is pacing the\n# step rate.\nNUM_STEPS_TO_CHECK_STUCK = 15\n\n### Gym Env ###\nclass DeepRacerRacetrackEnv(gym.Env):\n\n def __init__(self):\n\n # Create the observation space\n self.observation_space = spaces.Box(low=0, high=255,\n shape=(TRAINING_IMAGE_SIZE[1], TRAINING_IMAGE_SIZE[0], 3),\n dtype=np.uint8)\n # Create the action space\n self.action_space = spaces.Box(low=np.array([-1, 0]), high=np.array([+1, +1]), dtype=np.float32)\n\n if node_type == SIMULATION_WORKER:\n\n # ROS initialization\n rospy.init_node('rl_coach', anonymous=True)\n\n # wait for required services\n rospy.wait_for_service('/deepracer_simulation_environment/get_waypoints')\n rospy.wait_for_service('/deepracer_simulation_environment/reset_car')\n rospy.wait_for_service('/gazebo/get_model_state')\n rospy.wait_for_service('/gazebo/get_link_state')\n rospy.wait_for_service('/gazebo/clear_joint_forces')\n\n self.get_model_state = rospy.ServiceProxy('/gazebo/get_model_state', GetModelState)\n self.get_link_state = rospy.ServiceProxy('/gazebo/get_link_state', GetLinkState)\n self.clear_forces_client = rospy.ServiceProxy('/gazebo/clear_joint_forces',\n JointRequest)\n self.reset_car_client = rospy.ServiceProxy('/deepracer_simulation_environment/reset_car',\n ResetCarSrv)\n get_waypoints_client = rospy.ServiceProxy('/deepracer_simulation_environment/get_waypoints',\n GetWaypointSrv)\n\n # Create the publishers for sending speed and steering info to the car\n self.velocity_pub_dict = OrderedDict()\n self.steering_pub_dict = OrderedDict()\n\n for topic in VELOCITY_TOPICS:\n self.velocity_pub_dict[topic] = rospy.Publisher(topic, Float64, queue_size=1)\n\n for topic in STEERING_TOPICS:\n self.steering_pub_dict[topic] = rospy.Publisher(topic, Float64, queue_size=1)\n\n # Read in parameters\n self.world_name = rospy.get_param('WORLD_NAME')\n self.job_type = rospy.get_param('JOB_TYPE')\n self.aws_region = rospy.get_param('AWS_REGION')\n self.metrics_s3_bucket = rospy.get_param('METRICS_S3_BUCKET')\n self.metrics_s3_object_key = rospy.get_param('METRICS_S3_OBJECT_KEY')\n self.metrics = []\n self.simulation_job_arn = 
'arn:aws:robomaker:' + self.aws_region + ':' + \\\n rospy.get_param('ROBOMAKER_SIMULATION_JOB_ACCOUNT_ID') + \\\n ':simulation-job/' + rospy.get_param('AWS_ROBOMAKER_SIMULATION_JOB_ID')\n\n if self.job_type == TRAINING_JOB:\n from custom_files.customer_reward_function import reward_function\n self.reward_function = reward_function\n self.metric_name = rospy.get_param('METRIC_NAME')\n self.metric_namespace = rospy.get_param('METRIC_NAMESPACE')\n self.training_job_arn = rospy.get_param('TRAINING_JOB_ARN')\n self.target_number_of_episodes = rospy.get_param('NUMBER_OF_EPISODES')\n self.target_reward_score = rospy.get_param('TARGET_REWARD_SCORE')\n else:\n from markov.defaults import reward_function\n self.reward_function = reward_function\n self.number_of_trials = 0\n self.target_number_of_trials = rospy.get_param('NUMBER_OF_TRIALS')\n\n # Request the waypoints\n waypoints = None\n try:\n resp = get_waypoints_client()\n waypoints = np.array(resp.waypoints).reshape(resp.row, resp.col)\n except Exception as ex:\n utils.json_format_logger(\"Unable to retrieve waypoints: {}\".format(ex),\n **utils.build_system_error_dict(utils.SIMAPP_ENVIRONMENT_EXCEPTION,\n utils.SIMAPP_EVENT_ERROR_CODE_500))\n\n is_loop = np.all(waypoints[0,:] == waypoints[-1,:])\n if is_loop:\n self.center_line = LinearRing(waypoints[:,0:2])\n self.inner_border = LinearRing(waypoints[:,2:4])\n self.outer_border = LinearRing(waypoints[:,4:6])\n self.road_poly = Polygon(self.outer_border, [self.inner_border])\n else:\n self.center_line = LineString(waypoints[:,0:2])\n self.inner_border = LineString(waypoints[:,2:4])\n self.outer_border = LineString(waypoints[:,4:6])\n self.road_poly = Polygon(np.vstack((self.outer_border, np.flipud(self.inner_border))))\n self.center_dists = [self.center_line.project(Point(p), normalized=True) for p in self.center_line.coords[:-1]] + [1.0]\n self.track_length = self.center_line.length\n # Queue used to maintain image consumption synchronicity\n self.image_queue = queue.Queue(IMG_QUEUE_BUF_SIZE)\n rospy.Subscriber('/camera/zed/rgb/image_rect_color', sensor_image, self.callback_image)\n\n # Initialize state data\n self.episodes = 0\n self.start_ndist = 0.0\n self.reverse_dir = False\n self.change_start = rospy.get_param('CHANGE_START_POSITION', (self.job_type == TRAINING_JOB))\n self.alternate_dir = rospy.get_param('ALTERNATE_DRIVING_DIRECTION', False)\n self.is_simulation_done = False\n self.steering_angle = 0\n self.speed = 0\n self.action_taken = 0\n self.prev_progress = 0\n self.prev_point = Point(0, 0)\n self.prev_point_2 = Point(0, 0)\n self.next_state = None\n self.reward = None\n self.reward_in_episode = 0\n self.done = False\n self.steps = 0\n self.simulation_start_time = 0\n self.allow_servo_step_signals = False\n\n def reset(self):\n if node_type == SAGEMAKER_TRAINING_WORKER:\n return self.observation_space.sample()\n \n # Simulation is done - so RoboMaker will start to shut down the app.\n # Till RoboMaker shuts down the app, do nothing more else metrics may show unexpected data.\n if (node_type == SIMULATION_WORKER) and self.is_simulation_done:\n while True:\n time.sleep(1)\n\n self.steering_angle = 0\n self.speed = 0\n self.action_taken = 0\n self.prev_progress = 0\n self.prev_point = Point(0, 0)\n self.prev_point_2 = Point(0, 0)\n self.next_state = None\n self.reward = None\n self.reward_in_episode = 0\n self.done = False\n # Reset the car and record the simulation start time\n if self.allow_servo_step_signals:\n self.send_action(0, 0)\n\n self.racecar_reset()\n self.steps = 0\n 
self.simulation_start_time = time.time()\n self.infer_reward_state(0, 0)\n\n return self.next_state\n\n def set_next_state(self):\n # Make sure the first image is the starting image\n image_data = self.image_queue.get(block=True, timeout=None)\n # Read the image and resize to get the state\n image = Image.frombytes('RGB', (image_data.width, image_data.height), image_data.data, 'raw', 'RGB', 0, 1)\n image = image.resize(TRAINING_IMAGE_SIZE, resample=2)\n self.next_state = np.array(image)\n\n def racecar_reset(self):\n try:\n for joint in EFFORT_JOINTS:\n self.clear_forces_client(joint)\n prev_index, next_index = self.find_prev_next_waypoints(self.start_ndist)\n self.reset_car_client(self.start_ndist, next_index)\n # First clear the queue so that we set the state to the start image\n _ = self.image_queue.get(block=True, timeout=None)\n self.set_next_state()\n\n except Exception as ex:\n utils.json_format_logger(\"Unable to reset the car: {}\".format(ex),\n **utils.build_system_error_dict(utils.SIMAPP_ENVIRONMENT_EXCEPTION,\n utils.SIMAPP_EVENT_ERROR_CODE_500))\n\n def set_allow_servo_step_signals(self, allow_servo_step_signals):\n self.allow_servo_step_signals = allow_servo_step_signals\n\n def step(self, action):\n if node_type == SAGEMAKER_TRAINING_WORKER:\n return self.observation_space.sample(), 0, False, {}\n\n # Initialize next state, reward, done flag\n self.next_state = None\n self.reward = None\n self.done = False\n\n # Send this action to Gazebo and increment the step count\n self.steering_angle = float(action[0])\n self.speed = float(action[1])\n if self.allow_servo_step_signals:\n self.send_action(self.steering_angle, self.speed)\n self.steps += 1\n\n # Compute the next state and reward\n self.infer_reward_state(self.steering_angle, self.speed)\n return self.next_state, self.reward, self.done, {}\n\n def callback_image(self, data):\n try:\n self.image_queue.put_nowait(data)\n except queue.Full:\n pass\n except Exception as ex:\n utils.json_format_logger(\"Error retrieving frame from gazebo: {}\".format(ex),\n **utils.build_system_error_dict(utils.SIMAPP_ENVIRONMENT_EXCEPTION, utils.SIMAPP_EVENT_ERROR_CODE_500))\n\n def send_action(self, steering_angle, speed):\n # Simple v/r to computes the desired rpm\n wheel_rpm = speed/WHEEL_RADIUS\n\n for _, pub in self.velocity_pub_dict.items():\n pub.publish(wheel_rpm)\n\n for _, pub in self.steering_pub_dict.items():\n pub.publish(steering_angle)\n\n def infer_reward_state(self, steering_angle, speed):\n try:\n self.set_next_state()\n except Exception as ex:\n utils.json_format_logger(\"Unable to retrieve image from queue: {}\".format(ex),\n **utils.build_system_error_dict(utils.SIMAPP_ENVIRONMENT_EXCEPTION, utils.SIMAPP_EVENT_ERROR_CODE_500))\n\n # Read model state from Gazebo\n model_state = self.get_model_state('racecar', '')\n model_orientation = Rotation.from_quat([\n model_state.pose.orientation.x,\n model_state.pose.orientation.y,\n model_state.pose.orientation.z,\n model_state.pose.orientation.w])\n model_location = np.array([\n model_state.pose.position.x,\n model_state.pose.position.y,\n model_state.pose.position.z]) + \\\n model_orientation.apply(RELATIVE_POSITION_OF_FRONT_OF_CAR)\n model_point = Point(model_location[0], model_location[1])\n model_heading = model_orientation.as_euler('zyx')[0]\n\n # Read the wheel locations from Gazebo\n left_rear_wheel_state = self.get_link_state('racecar::left_rear_wheel', '')\n left_front_wheel_state = self.get_link_state('racecar::left_front_wheel', '')\n right_rear_wheel_state = 
self.get_link_state('racecar::right_rear_wheel', '')\n right_front_wheel_state = self.get_link_state('racecar::right_front_wheel', '')\n wheel_points = [\n Point(left_rear_wheel_state.link_state.pose.position.x,\n left_rear_wheel_state.link_state.pose.position.y),\n Point(left_front_wheel_state.link_state.pose.position.x,\n left_front_wheel_state.link_state.pose.position.y),\n Point(right_rear_wheel_state.link_state.pose.position.x,\n right_rear_wheel_state.link_state.pose.position.y),\n Point(right_front_wheel_state.link_state.pose.position.x,\n right_front_wheel_state.link_state.pose.position.y)\n ]\n\n # Project the current location onto the center line and find nearest points\n current_ndist = self.center_line.project(model_point, normalized=True)\n prev_index, next_index = self.find_prev_next_waypoints(current_ndist)\n distance_from_prev = model_point.distance(Point(self.center_line.coords[prev_index]))\n distance_from_next = model_point.distance(Point(self.center_line.coords[next_index]))\n closest_waypoint_index = (prev_index, next_index)[distance_from_next < distance_from_prev]\n\n # Compute distance from center and road width\n nearest_point_center = self.center_line.interpolate(current_ndist, normalized=True)\n nearest_point_inner = self.inner_border.interpolate(self.inner_border.project(nearest_point_center))\n nearest_point_outer = self.outer_border.interpolate(self.outer_border.project(nearest_point_center))\n distance_from_center = nearest_point_center.distance(model_point)\n distance_from_inner = nearest_point_inner.distance(model_point)\n distance_from_outer = nearest_point_outer.distance(model_point)\n track_width = nearest_point_inner.distance(nearest_point_outer)\n is_left_of_center = (distance_from_outer < distance_from_inner) if self.reverse_dir \\\n else (distance_from_inner < distance_from_outer)\n\n # Convert current progress to be [0,100] starting at the initial waypoint\n if self.reverse_dir:\n current_progress = self.start_ndist - current_ndist\n else:\n current_progress = current_ndist - self.start_ndist\n if current_progress < 0.0: current_progress = current_progress + 1.0\n current_progress = 100 * current_progress\n if current_progress < self.prev_progress:\n # Either: (1) we wrapped around and have finished the track,\n delta1 = current_progress + 100 - self.prev_progress\n # or (2) for some reason the car went backwards (this should be rare)\n delta2 = self.prev_progress - current_progress\n current_progress = (self.prev_progress, 100)[delta1 < delta2]\n\n # Car is off track if all wheels are outside the borders\n wheel_on_track = [self.road_poly.contains(p) for p in wheel_points]\n all_wheels_on_track = all(wheel_on_track)\n any_wheels_on_track = any(wheel_on_track)\n\n # Compute the reward\n if any_wheels_on_track:\n done = False\n params = {\n 'all_wheels_on_track': all_wheels_on_track,\n 'x': model_point.x,\n 'y': model_point.y,\n 'heading': model_heading * 180.0 / math.pi,\n 'distance_from_center': distance_from_center,\n 'progress': current_progress,\n 'steps': self.steps,\n 'speed': speed,\n 'steering_angle': steering_angle * 180.0 / math.pi,\n 'track_width': track_width,\n 'waypoints': list(self.center_line.coords),\n 'closest_waypoints': [prev_index, next_index],\n 'is_left_of_center': is_left_of_center,\n 'is_reversed': self.reverse_dir\n }\n try:\n reward = float(self.reward_function(params))\n except Exception as e:\n utils.json_format_logger(\"Exception {} in customer reward function. 
Job failed!\".format(e),\n **utils.build_user_error_dict(utils.SIMAPP_SIMULATION_WORKER_EXCEPTION, utils.SIMAPP_EVENT_ERROR_CODE_400))\n traceback.print_exc()\n sys.exit(1)\n else:\n done = True\n reward = CRASHED\n\n # Reset if the car position hasn't changed in the last 2 steps\n prev_pnt_dist = min(model_point.distance(self.prev_point), model_point.distance(self.prev_point_2))\n\n if prev_pnt_dist <= 0.0001 and self.steps % NUM_STEPS_TO_CHECK_STUCK == 0:\n done = True\n reward = CRASHED # stuck\n\n # Simulation jobs are done when progress reaches 100\n if current_progress >= 100:\n done = True\n\n # Keep data from the previous step around\n self.prev_point_2 = self.prev_point\n self.prev_point = model_point\n self.prev_progress = current_progress\n\n # Set the reward and done flag\n self.reward = reward\n self.reward_in_episode += reward\n self.done = done\n\n # Trace logs to help us debug and visualize the training runs\n # btown TODO: This should be written to S3, not to CWL.\n logger.info('SIM_TRACE_LOG:%d,%d,%.4f,%.4f,%.4f,%.2f,%.2f,%d,%.4f,%s,%s,%.4f,%d,%.2f,%s\\n' % (\n self.episodes, self.steps, model_location[0], model_location[1], model_heading,\n self.steering_angle,\n self.speed,\n self.action_taken,\n self.reward,\n self.done,\n all_wheels_on_track,\n current_progress,\n closest_waypoint_index,\n self.track_length,\n time.time()))\n\n # Terminate this episode when ready\n if done and node_type == SIMULATION_WORKER:\n self.finish_episode(current_progress)\n\n def find_prev_next_waypoints(self, ndist):\n if self.reverse_dir:\n next_index = bisect.bisect_left(self.center_dists, ndist) - 1\n prev_index = next_index + 1\n if next_index == -1: next_index = len(self.center_dists) - 1\n else:\n next_index = bisect.bisect_right(self.center_dists, ndist)\n prev_index = next_index - 1\n if next_index == len(self.center_dists): next_index = 0\n return prev_index, next_index\n\n def stop_car(self):\n self.steering_angle = 0\n self.speed = 0\n self.action_taken = 0\n self.send_action(0, 0)\n self.racecar_reset()\n\n def finish_episode(self, progress):\n # Increment episode count, update start position and direction\n self.episodes += 1\n if self.change_start:\n self.start_ndist = (self.start_ndist + ROUND_ROBIN_ADVANCE_DIST) % 1.0\n if self.alternate_dir:\n self.reverse_dir = not self.reverse_dir\n # Reset the car\n self.stop_car()\n\n # Update metrics based on job type\n if self.job_type == TRAINING_JOB:\n self.send_reward_to_cloudwatch(self.reward_in_episode)\n self.update_training_metrics()\n self.write_metrics_to_s3()\n if self.is_training_done():\n self.cancel_simulation_job()\n elif self.job_type == EVALUATION_JOB:\n self.number_of_trials += 1\n self.update_eval_metrics(progress)\n self.write_metrics_to_s3()\n\n def update_eval_metrics(self, progress):\n eval_metric = {}\n eval_metric['completion_percentage'] = int(progress)\n eval_metric['metric_time'] = int(round(time.time() * 1000))\n eval_metric['start_time'] = int(round(self.simulation_start_time * 1000))\n eval_metric['elapsed_time_in_milliseconds'] = int(round((time.time() - self.simulation_start_time) * 1000))\n eval_metric['trial'] = int(self.number_of_trials)\n self.metrics.append(eval_metric)\n\n def update_training_metrics(self):\n training_metric = {}\n training_metric['reward_score'] = int(round(self.reward_in_episode))\n training_metric['metric_time'] = int(round(time.time() * 1000))\n training_metric['start_time'] = int(round(self.simulation_start_time * 1000))\n training_metric['elapsed_time_in_milliseconds'] = 
int(round((time.time() - self.simulation_start_time) * 1000))\n training_metric['episode'] = int(self.episodes)\n self.metrics.append(training_metric)\n\n def write_metrics_to_s3(self):\n session = boto3.session.Session()\n s3_client = session.client('s3', region_name=self.aws_region)\n metrics_body = json.dumps({'metrics': self.metrics})\n s3_client.put_object(\n Bucket=self.metrics_s3_bucket,\n Key=self.metrics_s3_object_key,\n Body=bytes(metrics_body, encoding='utf-8')\n )\n\n def is_training_done(self):\n if ((self.target_number_of_episodes > 0) and (self.target_number_of_episodes == self.episodes)) or \\\n ((isinstance(self.target_reward_score, (int, float))) and (self.target_reward_score <= self.reward_in_episode)):\n self.is_simulation_done = True\n return self.is_simulation_done\n\n def cancel_simulation_job(self):\n session = boto3.session.Session()\n robomaker_client = session.client('robomaker', region_name=self.aws_region)\n robomaker_client.cancel_simulation_job(\n job=self.simulation_job_arn\n )\n\n def send_reward_to_cloudwatch(self, reward):\n session = boto3.session.Session()\n cloudwatch_client = session.client('cloudwatch', region_name=self.aws_region)\n cloudwatch_client.put_metric_data(\n MetricData=[\n {\n 'MetricName': self.metric_name,\n 'Dimensions': [\n {\n 'Name': 'TRAINING_JOB_ARN',\n 'Value': self.training_job_arn\n },\n ],\n 'Unit': 'None',\n 'Value': reward\n },\n ],\n Namespace=self.metric_namespace\n )\n\nclass DeepRacerRacetrackCustomActionSpaceEnv(DeepRacerRacetrackEnv):\n def __init__(self):\n DeepRacerRacetrackEnv.__init__(self)\n try:\n # Try loading the custom model metadata (may or may not be present)\n with open('./custom_files/model_metadata.json', 'r') as f:\n model_metadata = json.load(f)\n self.json_actions = model_metadata['action_space']\n logger.info(\"Loaded action space from file: {}\".format(self.json_actions))\n except Exception as ex:\n # Failed to load, fall back on the default action space\n from markov.defaults import model_metadata\n self.json_actions = model_metadata['action_space']\n logger.info(\"Exception {} on loading custom action space, using default: {}\".format(ex, self.json_actions))\n self.action_space = spaces.Discrete(len(self.json_actions))\n\n def step(self, action):\n self.steering_angle = float(self.json_actions[action]['steering_angle']) * math.pi / 180.0\n self.speed = float(self.json_actions[action]['speed'])\n self.action_taken = action\n return super().step([self.steering_angle, self.speed])\n" ]
[ [ "tensorflow.graph_util.convert_variables_to_constants", "tensorflow.train.write_graph" ], [ "numpy.all", "numpy.array", "numpy.flipud", "scipy.spatial.transform.Rotation.from_quat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.5", "1.2", "1.3", "1.4" ], "tensorflow": [] } ]
Ung0d/NeuroAlign
[ "c73fd6f2d9c2fdb2e627a13ea1c45fb069e36ca4" ]
[ "code/TestProteinGNN.py" ]
[ "#third party imports\nimport tensorflow as tf\n\nfrom graph_nets import utils_np\nfrom graph_nets import utils_tf\nfrom graph_nets.demos import models\nfrom graph_nets import modules\nfrom graph_nets import blocks\n\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport argparse\nimport multiprocessing\nimport os\nimport random\nimport numpy as np\n\n#project imports\nimport ProteinGraphNN\nimport ModelParameters\nimport sys\nsys.path.append('./ProcessSeq')\nimport AnchorSet\nimport PatternSet\n\nparser = argparse.ArgumentParser(description='Computes edge sets of alignment graphs for all balibase ref alignments')\nparser.add_argument(\"-r\", type=int, default=10, help=\"the kmer radius used for training\")\nparser.add_argument(\"-model_steps\", type=int, default=0, help=\"stepcount of the model to load\")\nparser.add_argument(\"-draw\", type=int, default=0, help=\"if 1, each prediction is also rendered for debugging; comps are paused while a plot is open\")\nparser.add_argument(\"-t\", type=int, default=0, help=\"treshold\")\nparser.add_argument(\"-a\", type=int, default=200, help=\"maximum number of anchors allowed\")\nparser.add_argument(\"-start_at\", type=int, default=0, help=\"starting test instances, skips previous\")\nargs = parser.parse_args()\n\n#threads for data loading\ndata_threads = 4\n\n#number of message passing iterations testing\ntest_mp_iterations = 30\n\nprint(\"Data reading and preprocessing. This may take some time...\")\n\nfilenames = []\nwith open(\"../data/model_proteinGNN/test_instances.txt\", \"r\") as f:\n for line in f:\n filenames.append(line.strip())\n\n#load a previously trained model and make predictions\nanchor_set = AnchorSet.anchor_set_from_file(\"../data/\"+filenames[0])\nAnchorSet.read_solution(\"../data/data/\"+filenames[0].split('/')[1]+\".fasta\", anchor_set)\npattern_set = PatternSet.find_patterns(anchor_set)\nPatternSet.compute_targets(pattern_set)\nexample_seq_graph, example_pattern_graph, example_target_graph = ProteinGraphNN.pattern_set_to_input_target_dicts(pattern_set)\n\n\npredictor = ProteinGraphNN.Predictor(ModelParameters.param,\n example_seq_graph,\n example_pattern_graph,\n example_target_graph)\n\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\n\nsaver = tf.train.Saver()\nsaver.restore(sess, \"../data/model_proteinGNN/it_\"+str(args.model_steps)+\".ckpt\")\n\nglobal_pred = np.zeros((0))\nglobal_ref = np.zeros((0))\nfor f in filenames[args.start_at:]:\n print(\"testing \", f)\n anchor_set = AnchorSet.anchor_set_from_file(\"../data/\"+f)\n AnchorSet.read_solution(\"../data/data/\"+f.split('/')[1]+\".fasta\", anchor_set)\n pattern_set = PatternSet.find_patterns(anchor_set)\n PatternSet.compute_targets(pattern_set)\n seq_graph, pattern_graph, target_graph = ProteinGraphNN.pattern_set_to_input_target_dicts(pattern_set)\n rppred, _ = predictor.predict(sess, seq_graph, pattern_graph)\n\n prediction = PatternSet.get_anchor_probs(pattern_set, rppred[:len(pattern_set.region_node_list)])\n greedy_selection = AnchorSet.greedy_best(anchor_set, prediction)\n unwrapped_selection = AnchorSet.unwrap_selection(anchor_set, greedy_selection)\n unwrapped_anchors = AnchorSet.unwrap_anchor_set(anchor_set)\n name = f.split('/')[1]\n AnchorSet.read_solution(\"../data/data/\"+name+\".fasta\", unwrapped_anchors)\n reference_solution = unwrapped_anchors.solution\n score_pred = AnchorSet.jaccard_index(unwrapped_selection, reference_solution)\n print(\"prediction score: \", score_pred)\n\n global_pred = np.concatenate((global_pred, 
unwrapped_selection), axis=0)\n global_ref = np.concatenate((global_ref, reference_solution), axis=0)\n\n if args.draw == 1:\n PatternSet.attach_prediction(pattern_set, rppred[:len(pattern_set.region_node_list)], rppred[len(pattern_set.region_node_list):])\n fig = plt.figure(1, figsize=(20, 6))\n fig.clf()\n ax = fig.add_subplot(1, 1, 1)\n pattern_set.draw_pattern_graph(ax)\n plt.title('score:'+str(score_pred))\n plt.draw()\n plt.show()\n\nprint(\"global prediction score:\", AnchorSet.jaccard_index(global_pred, global_ref))\n" ]
[ [ "matplotlib.pyplot.draw", "numpy.concatenate", "tensorflow.global_variables_initializer", "tensorflow.Session", "tensorflow.train.Saver", "matplotlib.pyplot.show", "numpy.zeros", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
emily101-gif/immport-galaxy
[ "8f353d1f9b4e0d044e1a9d0b1f928b440df78b8c", "8f353d1f9b4e0d044e1a9d0b1f928b440df78b8c" ]
[ "tools/flowtools/ftxt_tools/auto_collapse_pops.py", "tools/flowtools/ftxt_tools/flowclrstats.py" ]
[ "#!/usr/bin/env python\n\n######################################################################\n# Copyright (c) 2016 Northrop Grumman.\n# All rights reserved.\n######################################################################\nfrom __future__ import print_function\nimport sys\nimport pandas as pd\nfrom argparse import ArgumentParser\n\n\ndef auto_collapse(input_file, profile_file, output, report):\n profile_pop_list = {}\n pop_to_collapse = []\n markers = []\n with open(profile_file, \"r\") as pf:\n pffl = pf.readline()\n markers = pffl.strip().split(\"\\t\")\n for pfline in pf:\n line = pfline.strip().split(\"\\t\")\n pop = line[0]\n profil = \"\\t\".join(line[1:-2])\n if profil in profile_pop_list:\n profile_pop_list[profil].append(pop)\n else:\n profile_pop_list[profil] = [pop]\n i = 1\n with open(report, \"w\") as rt:\n rt.write(\"New_Population\\tFormer_Populations\\t\")\n rt.write(\"\\t\".join(markers[1:-2]) + \"\\n\")\n for profs in profile_pop_list:\n pop_to_collapse.append(profile_pop_list[profs])\n pop_ls = \", \".join(profile_pop_list[profs])\n rt.write(\"\\t\".join([str(i), pop_ls, profs]) + \"\\n\")\n i += 1\n df = pd.read_table(input_file, dtype={'Population': object})\n df['new_population'] = df.Population\n for i, sets_pop in enumerate(pop_to_collapse):\n df.loc[df['Population'].isin(sets_pop), ['new_population']] = i + 1\n\n df.Population = df.new_population\n df.drop(['new_population'], inplace=True, axis=1)\n df.to_csv(output, sep=\"\\t\", index=False)\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser(\n prog=\"auto_pop_collapse_from_profile\",\n description=\"collapses FLOCK populations based on profile.\")\n\n parser.add_argument(\n '-i',\n dest=\"input_file\",\n required=True,\n help=\"FLOCK output file\")\n\n parser.add_argument(\n '-o',\n dest=\"output\",\n required=True,\n help=\"Name of the output file.\")\n\n parser.add_argument(\n '-r',\n dest=\"report\",\n required=True,\n help=\"Name of the report file.\")\n\n parser.add_argument(\n '-p',\n dest=\"profile\",\n required=True,\n help=\"File location for the profile.txt from FLOCK.\")\n\n args = parser.parse_args()\n\n auto_collapse(args.input_file, args.profile, args.output, args.report)\n", "#!/usr/bin/env python\n\n######################################################################\n# Copyright (c) 2016 Northrop Grumman.\n# All rights reserved.\n######################################################################\n\nfrom __future__ import print_function\nimport sys\nfrom argparse import ArgumentParser\nimport pandas as pd\n\n\ndef get_FLOCK_stats(input_file, output_file, out_file2):\n df = pd.read_table(input_file)\n summary = df.groupby('Population').describe().round(1)\n counts = df['Population'].value_counts()\n percent = (df['Population'].value_counts(normalize=True) * 100).round(decimals=2)\n tot_count = len(df['Population'])\n\n to_rm = summary.loc(axis=0)[:, ['count']].index.tolist()\n df1 = summary[~summary.index.isin(to_rm)]\n df1.to_csv(out_file2, sep=\"\\t\")\n\n with open(output_file, \"w\") as outf:\n outf.write(\"Population\\tCount\\tPercentage\\n\")\n for pops in set(df.Population):\n outf.write(\"\\t\".join([str(pops), str(counts.loc[pops]), str(percent.loc[pops])]) + \"\\n\")\n outf.write(\"Total\\t\" + str(tot_count) + \"\\t \\n\")\n return\n\n\nif __name__ == '__main__':\n parser = ArgumentParser(\n prog=\"flowstats\",\n description=\"Gets statistics on FLOCK run\")\n\n parser.add_argument(\n '-i',\n dest=\"input_file\",\n required=True,\n help=\"File locations for 
flow clr file.\")\n\n parser.add_argument(\n '-o',\n dest=\"out_file\",\n required=True,\n help=\"Path to the directory for the output file.\")\n\n parser.add_argument(\n '-p',\n dest=\"out_file2\",\n required=True,\n help=\"Path to the directory for the output file.\")\n args = parser.parse_args()\n\n get_FLOCK_stats(args.input_file, args.out_file, args.out_file2)\n" ]
[ [ "pandas.read_table" ], [ "pandas.read_table" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
angusl95/darts-kbc
[ "85fc6f4bdb7ba73c07d96ce47e96634599b346f9" ]
[ "kbc/combined/train_search.py" ]
[ "import os\nimport sys\nimport time\nimport glob\nimport tqdm\nimport numpy as np\nimport torch\nimport utils\nimport logging\nimport argparse\nimport torch.nn as nn\nimport torch.utils\nimport torch.utils.data\nimport torch.nn.functional as F\nimport torch.backends.cudnn as cudnn\n\nfrom torch import optim\nfrom typing import Dict\nfrom datasets import Dataset\nfrom regularizers import N2, N3, Regularizer\nfrom torch.autograd import Variable\nfrom model_search import Network\nfrom architect import Architect\n\n\nparser = argparse.ArgumentParser(\"cifar\")\nparser.add_argument('--data', type=str, default='../data', help='location of the data corpus')\nparser.add_argument('--batch_size', type=int, default=64, help='batch size')\nparser.add_argument('--learning_rate', type=float, default=0.025, help='init learning rate')\nparser.add_argument('--learning_rate_min', type=float, default=0.001, help='min learning rate')\nparser.add_argument('--momentum', type=float, default=0.9, help='momentum')\nparser.add_argument('--weight_decay', type=float, default=3e-4, help='weight decay')\nparser.add_argument('--report_freq', type=float, default=5, help='report frequency')\nparser.add_argument('--gpu', type=int, default=0, help='gpu device id')\nparser.add_argument('--epochs', type=int, default=100, help='num of training epochs')\nparser.add_argument('--channels', type=int, default=16, help='num of channels')\nparser.add_argument('--layers', type=int, default=8, help='total number of layers')\nparser.add_argument('--model_path', type=str, default='saved_models', help='path to save the model')\nparser.add_argument('--drop_path_prob', type=float, default=0.3, help='drop path probability')\nparser.add_argument('--save', type=str, default='EXP', help='experiment name')\nparser.add_argument('--seed', type=int, default=2, help='random seed')\nparser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')\nparser.add_argument('--unrolled', action='store_true', default=False, help='use one-step unrolled validation loss')\nparser.add_argument('--arch_learning_rate', type=float, default=3e-4, help='learning rate for arch encoding')\nparser.add_argument('--arch_weight_decay', type=float, default=1e-3, help='weight decay for arch encoding')\nparser.add_argument('--steps', type=int, default=4, help='number of steps in learned cell')\nparser.add_argument('--interleaved', action='store_true', default=False, help='interleave subject and relation embeddings rather than stacking')\ndatasets = ['FB15K', 'WN', 'WN18RR', 'FB237', 'YAGO3-10']\nparser.add_argument('--dataset', choices=datasets, help=\"Dataset in {}\".format(datasets))\nregularizers = ['N3', 'N2']\nparser.add_argument('--regularizer', choices=regularizers, default='N3', help=\"Regularizer in {}\".format(regularizers))\nparser.add_argument('--emb_dim', default=1000, type=int, help=\"Embedding dimension\")\nparser.add_argument('--init', default=1e-3, type=float, help=\"Initial scale\")\nparser.add_argument('--reg', default=0, type=float, help=\"Regularization weight\")\noptimizers = ['Adagrad', 'Adam', 'SGD']\nparser.add_argument('--optimizer', choices=optimizers, default='Adagrad', help=\"Optimizer in {}\".format(optimizers))\nparser.add_argument('--decay1', default=0.9, type=float, help=\"decay rate for the first moment estimate in Adam\")\nparser.add_argument('--decay2', default=0.999, type=float,help=\"decay rate for second moment estimate in Adam\")\nargs = parser.parse_args()\n\nargs.save = 'search-{}-{}'.format(args.save, 
time.strftime(\"%Y%m%d-%H%M%S\"))\nutils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))\n\nlog_format = '%(asctime)s %(message)s'\nlogging.basicConfig(stream=sys.stdout, level=logging.INFO,\n format=log_format, datefmt='%m/%d %I:%M:%S %p')\nfh = logging.FileHandler(os.path.join(args.save, 'log.txt'))\nfh.setFormatter(logging.Formatter(log_format))\nlogging.getLogger().addHandler(fh)\n\nclass CrossEntropyLabelSmooth(nn.Module):\n\n def __init__(self, num_classes, epsilon):\n super(CrossEntropyLabelSmooth, self).__init__()\n self.num_classes = num_classes\n self.epsilon = epsilon\n self.logsoftmax = nn.LogSoftmax(dim=1)\n\n def forward(self, inputs, targets):\n log_probs = self.logsoftmax(inputs)\n targets = torch.zeros_like(log_probs).scatter_(1, targets.unsqueeze(1), 1)\n targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes\n loss = (-targets * log_probs).mean(0).sum()\n return loss\n\ndef main():\n if not torch.cuda.is_available():\n logging.info('no gpu device available')\n sys.exit(1)\n\n np.random.seed(args.seed)\n torch.cuda.set_device(args.gpu)\n cudnn.benchmark = True\n torch.manual_seed(args.seed)\n cudnn.enabled=True\n torch.cuda.manual_seed(args.seed)\n logging.info('gpu device = %d' % args.gpu)\n logging.info(\"args = %s\", args)\n\n dataset = Dataset(args.dataset)\n train_examples = torch.from_numpy(dataset.get_train().astype('int64'))\n valid_examples = torch.from_numpy(dataset.get_valid().astype('int64'))\n\n CLASSES = dataset.get_shape()[0]\n\n #criterion = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)\n criterion = nn.CrossEntropyLoss(reduction='mean')\n criterion = criterion.cuda()\n\n regularizer = {\n 'N2': N2(args.reg),\n 'N3': N3(args.reg),\n }[args.regularizer]\n\n model = Network(args.channels, CLASSES, args.layers, criterion, \n regularizer, args.interleaved, dataset.get_shape(), args.emb_dim, args.init, args.steps)\n model = model.cuda()\n logging.info(\"param size = %fMB\", utils.count_parameters_in_MB(model))\n\n optimizer = {\n 'Adagrad': lambda: optim.Adagrad(\n model.parameters(), \n lr=args.learning_rate),\n #momentum=args.momentum,\n #weight_decay=args.weight_decay)\n 'Adam': lambda: optim.Adam(model.parameters(), lr=args.learning_rate, betas=(args.decay1, args.decay2)),\n 'SGD': lambda: optim.SGD(model.parameters(), lr=args.learning_rate)\n }[args.optimizer]()\n\n # optimizer = torch.optim.SGD(\n # model.parameters(),\n # args.learning_rate,\n # #TODO can we reintroduce these?\n # momentum=args.momentum,\n # weight_decay=args.weight_decay)\n\n train_queue = torch.utils.data.DataLoader(\n train_examples, batch_size=args.batch_size,\n shuffle = True,\n #sampler=torch.utils.data.sampler.RandomSampler(),\n pin_memory=True, num_workers=2)\n\n valid_queue = torch.utils.data.DataLoader(\n valid_examples, batch_size=args.batch_size,\n shuffle = True,\n #sampler=torch.utils.data.sampler.RandomSampler(),\n pin_memory=True, num_workers=2)\n\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\n optimizer, float(args.epochs), eta_min=args.learning_rate_min)\n best_acc = 0\n patience = 0\n curve = {'valid': [], 'test': []}\n\n architect = Architect(model, args)\n\n for epoch in range(args.epochs):\n scheduler.step()\n lr = scheduler.get_lr()[0]\n logging.info('epoch %d lr %e', epoch, lr)\n\n genotype = model.genotype()\n logging.info('genotype = %s', genotype)\n\n print(F.softmax(model.alphas_normal, dim=-1))\n\n train_epoch(train_examples, train_queue, valid_queue, model, \n architect, criterion, optimizer, regularizer, 
args.batch_size, args.learning_rate)\n\n if (epoch + 1) % args.report_freq == 0:\n valid, test = [\n avg_both(*dataset.eval(model, split, -1 if split != 'train' else 50000))\n for split in ['valid', 'test']\n ]\n curve['valid'].append(valid)\n curve['test'].append(test)\n #curve['train'].append(train)\n\n #print(\"\\t TRAIN: \", train)\n print(\"\\t VALID : \", valid)\n print(\"\\t TEST: \", test)\n\n is_best = False\n if valid['MRR'] > best_acc:\n best_acc = valid['MRR']\n is_best = True\n patience = 0\n else:\n patience += 1\n\n #utils.save(model, os.path.join(args.save, 'weights.pt'))\n\ndef train_epoch(train_examples, train_queue, valid_queue,\n model, architect, criterion, optimizer: optim.Optimizer, \n regularizer: Regularizer, batch_size: int, lr, verbose: bool = True):\n loss = nn.CrossEntropyLoss(reduction='mean')\n print('avg entity embedding norm', torch.norm(model.embeddings[0].weight,dim=1).mean())\n print('avg relation embedding norm', torch.norm(model.embeddings[1].weight,dim=1).mean())\n with tqdm.tqdm(total=train_examples.shape[0], unit='ex', disable=not verbose) as bar:\n bar.set_description('train loss')\n for step, input in enumerate(train_queue):\n\n model.train()\n\n input_var = Variable(input, requires_grad=False).cuda()\n target_var = Variable(input[:,2], requires_grad=False).cuda()#async=True)\n\n input_search = next(iter(valid_queue))\n input_search_var = Variable(input_search, requires_grad=False).cuda()\n target_search_var = Variable(input_search[:,2], requires_grad=False).cuda()#async=True)\n\n architect.step(input_var, target_var, input_search_var, target_search_var, lr, optimizer, unrolled=args.unrolled)\n optimizer.zero_grad()\n\n predictions, factors = model.forward(input_var)\n #truth = input_var[:, 2]\n\n l_fit = loss(predictions, target_var)\n l_reg = regularizer.forward(factors)\n l = l_fit + l_reg\n\n l.backward()\n nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)\n optimizer.step()\n\n bar.update(input_var.shape[0])\n bar.set_postfix(loss=f'{l.item():.0f}')\n\n #TODO from DARTS train fn - grad clipping, accuracy metrics?\n \ndef avg_both(mrrs: Dict[str, float], hits: Dict[str, torch.FloatTensor]):\n \"\"\"\n aggregate metrics for missing lhs and rhs\n :param mrrs: dict of mean reciprocal ranks for the 'lhs' and 'rhs' prediction tasks\n :param hits: dict of hits@[1,3,10] values for 'lhs' and 'rhs'\n :return: dict with the averaged 'MRR' and 'hits@[1,3,10]'\n \"\"\"\n m = (mrrs['lhs'] + mrrs['rhs']) / 2.\n h = (hits['lhs'] + hits['rhs']) / 2.\n return {'MRR': m, 'hits@[1,3,10]': h}\n\nif __name__ == '__main__':\n main() \n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.nn.LogSoftmax", "torch.nn.functional.softmax", "torch.norm", "torch.cuda.set_device", "torch.cuda.manual_seed", "numpy.random.seed", "torch.manual_seed", "torch.utils.data.DataLoader", "torch.zeros_like", "torch.cuda.is_available", "torch.autograd.Variable" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hujianhang2996/slambook_python
[ "26eabfe5a8d6f3e534452f6ccf5b43af838ffc8f" ]
[ "ch7/triangulation.py" ]
[ "import cv2 as cv\nimport numpy as np\nfrom ch7.pose_estimation_2d2d import find_feature_matches, pose_estimation_2d2d, pixel2cam\n\nK = np.array([[520.9, 0, 325.1],\n [0, 521.0, 249.7],\n [0, 0, 1]])\n\n\ndef triangulation(kp_1, kp_2, ms, r_mat, t_vec):\n T1 = np.array([[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0]])\n T2 = np.concatenate((r_mat, t_vec), axis=1)\n\n pts_1 = np.array([pixel2cam(kp_1[match.queryIdx].pt, K) for match in ms]).squeeze().transpose()\n pts_2 = np.array([pixel2cam(kp_2[match.trainIdx].pt, K) for match in ms]).squeeze().transpose()\n\n pts_4d = cv.triangulatePoints(T1, T2, pts_1, pts_2)\n points = pts_4d[:3, :] / pts_4d[3, :]\n return points.transpose()\n\n\nif __name__ == '__main__':\n img_1 = cv.imread('1.png')\n img_2 = cv.imread('2.png')\n\n key_points_1, key_points_2, matches = find_feature_matches(img_1, img_2)\n print('一共找到了', len(matches), '组匹配点')\n R, t, E = pose_estimation_2d2d(key_points_1, key_points_2, matches)\n points = triangulation(key_points_1, key_points_2, matches, R, t)\n\n for match, point in zip(matches, points):\n print('-------------------------------------------------')\n pt1_cam = pixel2cam(key_points_1[match.queryIdx].pt, K)\n pt1_cam_3d = [point[0] / point[2], point[1] / point[2]]\n print('point in the first camera frame: ', pt1_cam.transpose().squeeze())\n print('point projected from 3D ', pt1_cam_3d, ', d=', point[2])\n\n pt2_cam = pixel2cam(key_points_2[match.trainIdx].pt, K)\n pt2_trans = np.matmul(R, point[:, np.newaxis]) + t\n pt2_trans = pt2_trans / pt2_trans[2, 0]\n print('point in the second camera frame: ', pt2_cam.transpose().squeeze())\n print('point reprojected from second frame: ', pt2_trans.transpose().squeeze())\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.matmul" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
leelew/HRSEPP
[ "b841b1abe529e66b428bd7a265292cc1746b431d" ]
[ "src/factory/loss.py" ]
[ "import tensorflow as tf\nfrom tensorflow.keras import losses\nimport tensorflow.python.keras.backend as K\n\nclass ImageGradientDifferenceLoss(losses.Loss):\n def __init__(self):\n super().__init__()\n\n def call(self, y_true, y_pred):\n # for 5D inputs\n gdl = 0\n for i in range(y_true.shape[1]):\n dy_true, dx_true = tf.image.image_gradients(y_true[:,i])\n dy_pred, dx_pred = tf.image.image_gradients(y_pred[:,i])\n gdl+=K.mean(K.abs(dy_pred - dy_true) + K.abs(dx_pred - dx_true))\n print(gdl)\n return gdl\n\nclass LPLoss(losses.Loss):\n def __init__(self, l_num=2):\n self.l_num = l_num #NOTE: tensorflow.loss must set at __init__.\n super().__init__()\n\n def call(self, y_true, y_pred, l_num=2):\n mse = tf.math.reduce_mean((y_true - y_pred)**self.l_num, axis=0)\n return tf.math.reduce_mean(mse)\n\nclass MaskSeq2seqLoss(losses.Loss):\n def __init__(self, mask):\n super().__init__()\n self.mask = mask\n\n def call(self, y_true, y_pred):\n pass\n\nclass MaskMSELoss(tf.keras.losses.Loss):\n def __init__(self, mask):\n super().__init__()\n self.mask = mask\n\n def call(self, y_true, y_pred):\n mse = tf.math.reduce_mean(tf.square(y_true - y_pred), axis=0)\n mask_mse = tf.math.multiply(mse, self.mask)\n return tf.math.reduce_mean(mask_mse)\n\n\nclass MaskSSIMLoss(tf.keras.losses.Loss):\n def __init__(self, mask):\n super().__init__()\n self.mask = mask\n\n def call(self, y_true, y_pred):\n y_true_ = tf.math.multiply(y_true, self.mask)\n y_pred_ = tf.math.multiply(y_pred, self.mask)\n\n return 1 - tf.reduce_mean(\n tf.image.ssim(y_true_, y_pred_, max_val=1.0, filter_size=3))\n" ]
[ [ "tensorflow.math.reduce_mean", "tensorflow.math.multiply", "tensorflow.python.keras.backend.abs", "tensorflow.square", "tensorflow.image.image_gradients", "tensorflow.image.ssim" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
kazu0914/ssd_keras_anotation
[ "079ffb053125c38ee163c78ba0caac235161f1b2" ]
[ "moto/ssd_layers.py" ]
[ "\"\"\"Some special pupropse layers for SSD.\"\"\"\n\nimport keras.backend as K\nfrom keras.engine.topology import InputSpec\nfrom keras.engine.topology import Layer\nimport numpy as np\nimport tensorflow as tf\n\n\nclass Normalize(Layer):\n \"\"\"Normalization layer as described in ParseNet paper.\n\n # Arguments\n scale: Default feature scale.\n\n # Input shape\n 4D tensor with shape:\n `(samples, channels, rows, cols)` if dim_ordering='th'\n or 4D tensor with shape:\n `(samples, rows, cols, channels)` if dim_ordering='tf'.\n\n # Output shape\n Same as input\n\n # References\n http://cs.unc.edu/~wliu/papers/parsenet.pdf\n\n #TODO\n Add possibility to have one scale for all features.\n \"\"\"\n def __init__(self, scale, **kwargs):\n if K.image_dim_ordering() == 'tf':\n self.axis = 3\n else:\n self.axis = 1\n self.scale = scale\n super(Normalize, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.input_spec = [InputSpec(shape=input_shape)]\n shape = (input_shape[self.axis],)\n init_gamma = self.scale * np.ones(shape)\n self.gamma = K.variable(init_gamma, name='{}_gamma'.format(self.name))\n self.trainable_weights = [self.gamma]\n\n def call(self, x, mask=None):\n output = K.l2_normalize(x, self.axis)\n output *= self.gamma\n return output\n\n\nclass PriorBox(Layer):\n \"\"\"Generate the prior boxes of designated sizes and aspect ratios.\n\n # Arguments\n img_size: Size of the input image as tuple (w, h).\n min_size: Minimum box size in pixels.\n max_size: Maximum box size in pixels.\n aspect_ratios: List of aspect ratios of boxes.\n flip: Whether to consider reverse aspect ratios.\n variances: List of variances for x, y, w, h.\n clip: Whether to clip the prior's coordinates\n such that they are within [0, 1].\n\n # Input shape\n 4D tensor with shape:\n `(samples, channels, rows, cols)` if dim_ordering='th'\n or 4D tensor with shape:\n `(samples, rows, cols, channels)` if dim_ordering='tf'.\n\n # Output shape\n 3D tensor with shape:\n (samples, num_boxes, 8)\n\n # References\n https://arxiv.org/abs/1512.02325\n\n #TODO\n Add possibility not to have variances.\n Add Theano support\n \"\"\"\n def __init__(self, img_size, min_size, max_size=None, aspect_ratios=None,\n flip=True, variances=[0.1], clip=True, **kwargs):\n if K.image_dim_ordering() == 'tf':\n self.waxis = 2\n self.haxis = 1\n else:\n self.waxis = 3\n self.haxis = 2\n self.img_size = img_size\n if min_size <= 0:\n raise Exception('min_size must be positive.')\n self.min_size = min_size\n self.max_size = max_size\n self.aspect_ratios = [1.0]\n if max_size:\n if max_size < min_size:\n raise Exception('max_size must be greater than min_size.')\n self.aspect_ratios.append(1.0)\n if aspect_ratios:\n for ar in aspect_ratios:\n if ar in self.aspect_ratios:\n continue\n self.aspect_ratios.append(ar)\n if flip:\n self.aspect_ratios.append(1.0 / ar)\n self.variances = np.array(variances)\n self.clip = True\n super(PriorBox, self).__init__(**kwargs)\n\n # def get_output_shape_for(self, input_shape):\n def compute_output_shape(self, input_shape):\n num_priors_ = len(self.aspect_ratios)\n layer_width = input_shape[self.waxis]\n layer_height = input_shape[self.haxis]\n num_boxes = num_priors_ * layer_width * layer_height\n return (input_shape[0], num_boxes, 8)\n\n def call(self, x, mask=None):\n if hasattr(x, '_keras_shape'):\n input_shape = x._keras_shape\n elif hasattr(K, 'int_shape'):\n input_shape = K.int_shape(x)\n layer_width = input_shape[self.waxis]\n layer_height = input_shape[self.haxis]\n img_width = 
self.img_size[0]\n img_height = self.img_size[1]\n # define prior boxes shapes\n box_widths = []\n box_heights = []\n for ar in self.aspect_ratios:\n if ar == 1 and len(box_widths) == 0:\n box_widths.append(self.min_size)\n box_heights.append(self.min_size)\n elif ar == 1 and len(box_widths) > 0:\n box_widths.append(np.sqrt(self.min_size * self.max_size))\n box_heights.append(np.sqrt(self.min_size * self.max_size))\n elif ar != 1:\n box_widths.append(self.min_size * np.sqrt(ar))\n box_heights.append(self.min_size / np.sqrt(ar))\n box_widths = 0.5 * np.array(box_widths)\n box_heights = 0.5 * np.array(box_heights)\n # define centers of prior boxes\n step_x = img_width / layer_width\n step_y = img_height / layer_height\n linx = np.linspace(0.5 * step_x, img_width - 0.5 * step_x,\n layer_width)\n liny = np.linspace(0.5 * step_y, img_height - 0.5 * step_y,\n layer_height)\n centers_x, centers_y = np.meshgrid(linx, liny)\n centers_x = centers_x.reshape(-1, 1)\n centers_y = centers_y.reshape(-1, 1)\n # define xmin, ymin, xmax, ymax of prior boxes\n num_priors_ = len(self.aspect_ratios)\n prior_boxes = np.concatenate((centers_x, centers_y), axis=1)\n prior_boxes = np.tile(prior_boxes, (1, 2 * num_priors_))\n prior_boxes[:, ::4] -= box_widths\n prior_boxes[:, 1::4] -= box_heights\n prior_boxes[:, 2::4] += box_widths\n prior_boxes[:, 3::4] += box_heights\n prior_boxes[:, ::2] /= img_width\n prior_boxes[:, 1::2] /= img_height\n prior_boxes = prior_boxes.reshape(-1, 4)\n if self.clip:\n prior_boxes = np.minimum(np.maximum(prior_boxes, 0.0), 1.0)\n # define variances\n num_boxes = len(prior_boxes)\n if len(self.variances) == 1:\n variances = np.ones((num_boxes, 4)) * self.variances[0]\n elif len(self.variances) == 4:\n variances = np.tile(self.variances, (num_boxes, 1))\n else:\n raise Exception('Must provide one or four variances.')\n prior_boxes = np.concatenate((prior_boxes, variances), axis=1)\n prior_boxes_tensor = K.expand_dims(K.variable(prior_boxes), 0)\n if K.backend() == 'tensorflow':\n pattern = [tf.shape(x)[0], 1, 1]\n prior_boxes_tensor = tf.tile(prior_boxes_tensor, pattern)\n elif K.backend() == 'theano':\n #TODO\n pass\n return prior_boxes_tensor\n" ]
[ [ "numpy.maximum", "numpy.sqrt", "numpy.linspace", "numpy.meshgrid", "tensorflow.shape", "numpy.tile", "numpy.ones", "numpy.concatenate", "numpy.array", "tensorflow.tile" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
probabilisticdeeplearning/swa_gaussian
[ "033f2b956e98f7050793a0d8a4155feb98931a3d", "033f2b956e98f7050793a0d8a4155feb98931a3d" ]
[ "experiments/uncertainty/temp_scaling.py", "experiments/segmentation/train.py" ]
[ "# The code here is based on the code at\n# https://github.com/gpleiss/temperature_scaling/blob/master/temperature_scaling.py\n\nimport torch\nfrom torch import nn, optim\nfrom torch.nn import functional as F\nfrom torch.autograd import Variable\nimport numpy as np\n\ndef logits_from_probs(prob_arr):\n return np.log(prob_arr)\n\ndef optimal_temp_scale(probs_arr, labels_arr, lr=0.01, max_iter=50):\n probs = torch.from_numpy(probs_arr).float()\n labels = torch.from_numpy(labels_arr.astype(int))\n logits = torch.log(probs + 1e-12)\n nll_criterion = nn.CrossEntropyLoss()\n \n before_temperature_nll = nll_criterion(logits, labels).item()\n print('Before temperature - NLL: %.3f' % (before_temperature_nll))\n \n T = Variable(torch.ones(1,), requires_grad=True)\n \n optimizer = optim.LBFGS([T], lr=lr, max_iter=max_iter)\n def eval():\n loss = nll_criterion(logits / T, labels)\n loss.backward(retain_graph=True)\n return loss\n\n optimizer.step(eval)\n \n after_temperature_nll = nll_criterion(logits / T, labels).item()\n print('After temperature - NLL: %.3f' % (after_temperature_nll), \", Temperature:\", T)\n \n return T.item(), F.softmax(logits / T).data.numpy()\n\ndef rescale_temp(probs_arr, temp):\n logits = np.log(probs_arr)\n logits /= temp\n probs = np.exp(logits)\n probs /= np.sum(probs, axis=1)[:, None]\n return probs\n", "\"\"\"\n training script for segmentation models\n partial port of our own train/run_swag.py file\n note: no options to train swag-diag\n\"\"\"\n\nimport time\nfrom pathlib import Path\nimport numpy as np\nimport os, sys\nimport argparse\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision\nimport torchvision.transforms as transforms\n\nfrom functools import partial\n\nimport utils.training as train_utils\n\nfrom swag import models, losses, data\nfrom swag.posteriors import SWAG\nfrom swag.utils import bn_update, adjust_learning_rate, schedule, save_checkpoint\n\nparser = argparse.ArgumentParser(description='SGD/SWA training')\n\nparser.add_argument('--dataset', type=str, default='CamVid')\nparser.add_argument('--data_path', type=str, default='/home/wesley/Documents/Code/SegNet-Tutorial/CamVid/', metavar='PATH',\n help='path to datasets location (default: None)')\nparser.add_argument('--dir', type=str, default=None, required=True, help='training directory (default: None)')\n\nparser.add_argument('--epochs', type=int, default=850, metavar='N', help='number of epochs to train (default: 850)')\nparser.add_argument('--save_freq', type=int, default=10, metavar='N', help='save frequency (default: 10)')\nparser.add_argument('--eval_freq', type=int, default=5, metavar='N', help='evaluation frequency (default: 5)')\n\nparser.add_argument('--model', type=str, default=None, required=True, metavar='MODEL',\n help='model name (default: None)')\n\nparser.add_argument('--batch_size', type=int, default=2, metavar='N', help='input batch size (default: 2)')\nparser.add_argument('--lr_init', type=float, default=1e-4, metavar='LR', help='initial learning rate (default: 0.01)')\nparser.add_argument('--lr_decay', type=float, default=0.995, help='amount of learning rate decay per epoch (default: 0.995)')\nparser.add_argument('--wd', type=float, default=1e-4, help='weight decay (default: 1e-4)')\nparser.add_argument('--optimizer', type=str, choices=['RMSProp', 'SGD'], default='RMSProp')\nparser.add_argument('--num_workers', type=int, default=4, metavar='N', help='number of workers (default: 4)')\n\nparser.add_argument('--ft_start', type=int, default=750, 
help='begin fine-tuning with full sized images (default: 750)')\nparser.add_argument('--ft_batch_size', type=int, default=1, help='fine-tuning batch size (default: 1)')\n\nparser.add_argument('--swa', action='store_true', help='swa usage flag (default: off)')\nparser.add_argument('--swa_start', type=float, default=800, metavar='N', help='SWA start epoch number (default: 161)')\nparser.add_argument('--swa_lr', type=float, default=0.02, metavar='LR', help='SWA LR (default: 0.02)')\nparser.add_argument('--swa_c_epochs', type=int, default=1, metavar='N',\n help='SWA model collection frequency/cycle length in epochs (default: 1)')\n\nparser.add_argument('--resume', type=str, default=None, metavar='CKPT',\n help='checkpoint to resume training from (default: None)')\nparser.add_argument('--swa_resume', type=str, default=None, metavar='CKPT',\n help='checkpoint to restor SWA from (default: None)')\nparser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')\n\nparser.add_argument('--loss', type=str, choices=['cross_entropy', 'aleatoric'], default='cross_entropy')\nparser.add_argument('--use_weights', action='store_true', help='whether to use weighted loss')\n\nargs = parser.parse_args()\nif torch.cuda.is_available():\n args.device = torch.device('cuda')\nelse:\n args.device = torch.device('cpu')\n \ntorch.backends.cudnn.benchmark = True\ntorch.manual_seed(args.seed)\ntorch.cuda.manual_seed(args.seed)\n\nprint('Preparing directory %s' % args.dir)\nos.makedirs(args.dir, exist_ok=True)\nwith open(os.path.join(args.dir, 'command.sh'), 'w') as f:\n f.write(' '.join(sys.argv))\n f.write('\\n')\n\nprint('Using model %s' % args.model)\nmodel_cfg = getattr(models, args.model)\n\nloaders, num_classes = data.loaders(args.dataset, args.data_path, args.batch_size, args.num_workers, ft_batch_size=args.ft_batch_size, \n transform_train=model_cfg.transform_train, transform_test=model_cfg.transform_test, \n joint_transform=model_cfg.joint_transform, ft_joint_transform=model_cfg.ft_joint_transform,\n target_transform=model_cfg.target_transform)\nprint('Beginning with cropped images')\ntrain_loader = loaders['train']\n\nprint('Preparing model')\nmodel = model_cfg.base(*model_cfg.args, num_classes=num_classes, **model_cfg.kwargs, use_aleatoric=args.loss=='aleatoric')\nmodel.cuda()\nmodel.apply(train_utils.weights_init)\n\nif args.optimizer == 'RMSProp':\n optimizer = torch.optim.RMSprop(model.parameters(), lr=args.lr_init, weight_decay=args.wd)\n scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma = args.lr_decay)\nelse:\n optimizer = torch.optim.SGD(model.parameters(), lr=args.lr_init, weight_decay = args.wd, momentum = 0.9)\n\nstart_epoch = 1\n\nif args.loss == 'cross_entropy':\n criterion = losses.seg_cross_entropy\nelse:\n criterion = losses.seg_ale_cross_entropy\n\nif args.use_weights:\n class_weights = torch.FloatTensor([\n 0.58872014284134, 0.51052379608154, 2.6966278553009,\n 0.45021694898605, 1.1785038709641, 0.77028578519821, 2.4782588481903,\n 2.5273461341858, 1.0122526884079, 3.2375309467316, 4.1312313079834]).cuda()\n\n criterion = partial(criterion, weight=class_weights)\n\nif args.resume is not None:\n print('Resume training from %s' % args.resume)\n checkpoint = torch.load(args.resume)\n start_epoch = checkpoint['epoch']\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n del checkpoint\n\n\nif args.swa:\n print('SWAG training')\n swag_model = SWAG(model_cfg.base, no_cov_mat=False, 
max_num_models=20, *model_cfg.args, num_classes=num_classes, \n use_aleatoric=args.loss=='aleatoric', **model_cfg.kwargs)\n swag_model.to(args.device)\nelse:\n print('SGD training')\n\nif args.swa and args.swa_resume is not None:\n checkpoint = torch.load(args.swa_resume)\n swag_model = SWAG(model_cfg.base, no_cov_mat=False, max_num_models=20, *model_cfg.args, \n num_classes=num_classes, use_aleatoric=args.loss=='aleatoric', **model_cfg.kwargs)\n swag_model.to(args.device)\n swag_model.load_state_dict(checkpoint['state_dict'])\n\nfor epoch in range(start_epoch, args.epochs+1):\n since = time.time()\n\n ### Train ###\n if epoch == args.ft_start:\n print('Now replacing data loader with fine-tuned data loader.')\n train_loader = loaders['fine_tune']\n\n trn_loss, trn_err = train_utils.train(\n model, train_loader, optimizer, criterion)\n print('Epoch {:d}\\nTrain - Loss: {:.4f}, Acc: {:.4f}'.format(\n epoch, trn_loss, 1-trn_err)) \n time_elapsed = time.time() - since \n print('Train Time {:.0f}m {:.0f}s'.format(\n time_elapsed // 60, time_elapsed % 60))\n \n if epoch % args.eval_freq == 0:\n ### Test ###\n val_loss, val_err, val_iou = train_utils.test(model, loaders['val'], criterion)\n print('Val - Loss: {:.4f} | Acc: {:.4f} | IOU: {:.4f}'.format(val_loss, 1-val_err, val_iou))\n \n time_elapsed = time.time() - since \n print('Total Time {:.0f}m {:.0f}s\\n'.format(\n time_elapsed // 60, time_elapsed % 60)) \n\n if args.swa and (epoch + 1) > args.swa_start and (epoch + 1 - args.swa_start) % args.swa_c_epochs == 0:\n print('Saving SWA model at epoch: ', epoch)\n swag_model.collect_model(model)\n \n if epoch % args.eval_freq == 0:\n swag_model.sample(0.0)\n bn_update(train_loader, swag_model)\n val_loss, val_err, val_iou = train_utils.test(swag_model, loaders['val'], criterion)\n print('SWA Val - Loss: {:.4f} | Acc: {:.4f} | IOU: {:.4f}'.format(val_loss, 1-val_err, val_iou))\n \n ### Checkpoint ###\n if epoch % args.save_freq == 0:\n print('Saving model at Epoch: ', epoch)\n save_checkpoint(dir=args.dir, \n epoch=epoch, \n state_dict=model.state_dict(), \n optimizer=optimizer.state_dict()\n )\n if args.swa and (epoch + 1) > args.swa_start:\n save_checkpoint(\n dir=args.dir,\n epoch=epoch,\n name='swag',\n state_dict=swag_model.state_dict(),\n )\n\n if args.optimizer=='RMSProp':\n ### Adjust Lr ###\n if epoch < args.ft_start:\n scheduler.step(epoch=epoch)\n else:\n scheduler.step(epoch=-1) #reset to args.lr_init\n \n elif args.optimizer=='SGD':\n lr = schedule(epoch, args.lr_init, args.epochs, args.swa, args.swa_start, args.swa_lr)\n adjust_learning_rate(optimizer, lr)\n \n### Test set ###\nif args.swa:\n swag_model.sample(0.0)\n bn_update(train_loader, swag_model)\n test_loss, test_err, test_iou = train_utils.test(swag_model, loaders['test'], criterion) \n print('SWA Test - Loss: {:.4f} | Acc: {:.4f} | IOU: {:.4f}'.format(test_loss, 1-test_err, test_iou))\n\ntest_loss, test_err, test_iou = train_utils.test(model, loaders['test'], criterion) \nprint('SGD Test - Loss: {:.4f} | Acc: {:.4f} | IOU: {:.4f}'.format(test_loss, 1-test_err, test_iou))\n" ]
[ [ "torch.nn.CrossEntropyLoss", "numpy.log", "torch.ones", "torch.nn.functional.softmax", "torch.from_numpy", "torch.optim.LBFGS", "torch.log", "numpy.exp", "numpy.sum" ], [ "torch.cuda.manual_seed", "torch.load", "torch.manual_seed", "torch.optim.lr_scheduler.ExponentialLR", "torch.FloatTensor", "torch.cuda.is_available", "torch.device" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sutd-visual-computing-group/dag-gans
[ "68a76153650df6de2a6919a93a2d3b98ca6407e6", "68a76153650df6de2a6919a93a2d3b98ca6407e6" ]
[ "pytorch/examples/wgan-gp/gandag_cifar10.py", "tensorflow/utils.py" ]
[ "import os, sys\nsys.path.append(os.getcwd())\n\nimport time\nimport tflib as lib\nimport tflib.save_images\nimport tflib.mnist\nimport tflib.cifar10\nimport tflib.plot\nimport tflib.inception_score\n\nimport os\nimport numpy as np\n\n\nimport torch\nimport torchvision\nfrom torch import nn\nfrom torch import autograd\nfrom torch import optim\n\nfrom dag.dag import DAG\n\n# Download CIFAR-10 (Python version) at\n# https://www.cs.toronto.edu/~kriz/cifar.html and fill in the path to the\n# extracted files here!\nDATA_DIR = './data/cifar-10-batches-py/'\nif len(DATA_DIR) == 0:\n raise Exception('Please specify path to data directory in gan_cifar.py!')\nOUR_DIR = './tmp/cifar10_dag_fliprot+cropping/'\nif not os.path.exists(OUR_DIR):\n os.mkdir(OUR_DIR)\n\nMODE = 'wgan-gp' # Valid options are dcgan, wgan, or wgan-gp\nDIM = 128 # This overfits substantially; you're probably better off with 64\nLAMBDA = 10 # Gradient penalty lambda hyperparameter\nCRITIC_ITERS = 5 # How many critic iterations per generator iteration\nBATCH_SIZE = 64 # Batch size\nITERS = 200000 # How many generator iterations to train for\nOUTPUT_DIM = 3072 # Number of pixels in CIFAR10 (3*32*32)\n\n\nclass Generator(nn.Module):\n def __init__(self):\n super(Generator, self).__init__()\n preprocess = nn.Sequential(\n nn.Linear(128, 4 * 4 * 4 * DIM),\n nn.BatchNorm1d(4 * 4 * 4 * DIM),\n nn.ReLU(True),\n )\n\n block1 = nn.Sequential(\n nn.ConvTranspose2d(4 * DIM, 2 * DIM, 2, stride=2),\n nn.BatchNorm2d(2 * DIM),\n nn.ReLU(True),\n )\n block2 = nn.Sequential(\n nn.ConvTranspose2d(2 * DIM, DIM, 2, stride=2),\n nn.BatchNorm2d(DIM),\n nn.ReLU(True),\n )\n deconv_out = nn.ConvTranspose2d(DIM, 3, 2, stride=2)\n\n self.preprocess = preprocess\n self.block1 = block1\n self.block2 = block2\n self.deconv_out = deconv_out\n self.tanh = nn.Tanh()\n\n def forward(self, input):\n output = self.preprocess(input)\n output = output.view(-1, 4 * DIM, 4, 4)\n output = self.block1(output)\n output = self.block2(output)\n output = self.deconv_out(output)\n output = self.tanh(output)\n return output.view(-1, 3, 32, 32)\n\n\nclass Discriminator(nn.Module):\n def __init__(self, n_augments):\n super(Discriminator, self).__init__()\n self.n_augments = n_augments\n main = nn.Sequential(\n nn.Conv2d(3, DIM, 3, 2, padding=1),\n nn.LeakyReLU(),\n nn.Conv2d(DIM, 2 * DIM, 3, 2, padding=1),\n nn.LeakyReLU(),\n nn.Conv2d(2 * DIM, 4 * DIM, 3, 2, padding=1),\n nn.LeakyReLU(),\n )\n self.main = main\n self.linear = nn.Linear(4*4*4*DIM, 1)\n self.linears_dag = []\n for i in range(self.n_augments):\n self.linears_dag.append(nn.Linear(4*4*4*DIM, 1))\n self.linears_dag = nn.ModuleList(self.linears_dag)\n\n def forward(self, input):\n output = self.main(input)\n feature = output.view(-1, 4*4*4*DIM)\n # original head\n output = self.linear(feature)\n # dag heads\n outputs_dag = []\n for i in range(self.n_augments):\n outputs_dag.append(self.linears_dag[i](feature))\n return output, outputs_dag\n\n\ndef calc_gradient_penalty(netD, real_data, fake_data, dag=False, dag_idx=0):\n # print \"real_data: \", real_data.size(), fake_data.size()\n alpha = torch.rand(BATCH_SIZE, 1)\n alpha = alpha.expand(BATCH_SIZE, int(real_data.nelement()/BATCH_SIZE)).contiguous().view(BATCH_SIZE, 3, 32, 32)\n alpha = alpha.cuda(gpu) if use_cuda else alpha\n\n interpolates = alpha * real_data + ((1 - alpha) * fake_data)\n \n if use_cuda:\n interpolates = interpolates.cuda(gpu)\n interpolates = autograd.Variable(interpolates, requires_grad=True)\n\n if dag==False:\n disc_interpolates, _ = 
netD(interpolates)\n else:\n _, disc_interpolates = netD(interpolates)\n disc_interpolates = disc_interpolates[dag_idx]\n\n gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,\n grad_outputs=torch.ones(disc_interpolates.size()).cuda(gpu) if use_cuda else torch.ones(\n disc_interpolates.size()),\n create_graph=True, retain_graph=True, only_inputs=True)[0]\n gradients = gradients.view(gradients.size(0), -1)\n\n gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * LAMBDA\n return gradient_penalty\n\ndef D_loss_func(x_real, x_fake, netD, dag=False, dag_idx=0):\n # real\n if dag==False:\n d_real, _ = netD(x_real)\n d_fake, _ = netD(x_fake)\n else:\n _, d_reals = netD(x_real)\n d_real = d_reals[dag_idx]\n _, d_fakes = netD(x_fake)\n d_fake = d_fakes[dag_idx]\n d_real = d_real.mean()\n d_fake = d_fake.mean()\n # train with gradient penalty\n gp = calc_gradient_penalty(netD, x_real, x_fake, dag=dag, dag_idx=dag_idx)\n # D cost\n d_cost = d_fake - d_real + gp\n return d_cost\n\ndef G_loss_func(x_real, x_fake, netD, dag=False, dag_idx=0):\n # fake \n if dag==False:\n d_fake, _ = netD(x_fake)\n else:\n _, d_fakes = netD(x_fake)\n d_fake = d_fakes[dag_idx] \n d_fake = d_fake.mean()\n # D cost\n g_cost = -d_fake\n return g_cost\n\n# define DAG\ndag = DAG(D_loss_func, G_loss_func, augument_type=['rotation'])\nn_augments = dag.get_num_of_augments()\n\nnetG = Generator()\nnetD = Discriminator(n_augments=n_augments)\nprint(netG)\nprint(netD)\n\nuse_cuda = torch.cuda.is_available()\nif use_cuda:\n gpu = 0\nif use_cuda:\n print('Using cuda ...')\n netD = netD.cuda(gpu)\n netG = netG.cuda(gpu)\n\n#one = torch.FloatTensor([1])\none = torch.tensor(1, dtype=torch.float)\nmone = one * -1\nif use_cuda:\n one = one.cuda(gpu)\n mone = mone.cuda(gpu)\n\noptimizerD = optim.Adam(netD.parameters(), lr=1e-4, betas=(0.5, 0.9))\noptimizerG = optim.Adam(netG.parameters(), lr=1e-4, betas=(0.5, 0.9))\n\n# For generating samples\ndef generate_image(frame, netG):\n fixed_noise_128 = torch.randn(128, 128)\n if use_cuda:\n fixed_noise_128 = fixed_noise_128.cuda(gpu)\n noisev = autograd.Variable(fixed_noise_128, volatile=True)\n samples = netG(noisev)\n samples = samples.view(-1, 3, 32, 32)\n samples = samples.mul(0.5).add(0.5)\n samples = samples.cpu().data.numpy()\n\n lib.save_images.save_images(samples, OUR_DIR + '/samples_{}.png'.format(frame))\n\n# For calculating inception score\ndef get_inception_score(G):\n all_samples = []\n for i in range(10):\n samples_100 = torch.randn(100, 128)\n if use_cuda:\n samples_100 = samples_100.cuda(gpu)\n samples_100 = autograd.Variable(samples_100, volatile=True)\n all_samples.append(G(samples_100).cpu().data.numpy())\n\n all_samples = np.concatenate(all_samples, axis=0)\n all_samples = np.multiply(np.add(np.multiply(all_samples, 0.5), 0.5), 255).astype('int32')\n all_samples = all_samples.reshape((-1, 3, 32, 32)).transpose(0, 2, 3, 1)\n return lib.inception_score.get_inception_score(list(all_samples))\n\n# Dataset iterator\ntrain_gen, dev_gen = lib.cifar10.load(BATCH_SIZE, data_dir=DATA_DIR)\ndef inf_train_gen():\n while True:\n #for images, target in train_gen():\n for images in train_gen():\n #yield images.astype('float32').reshape(BATCH_SIZE, 3, 32, 32).transpose(0, 2, 3, 1)\n yield images\ngen = inf_train_gen()\npreprocess = torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ])\n\nfor iteration in range(ITERS):\n start_time = time.time()\n 
############################\n # (1) Update D network\n ###########################\n for p in netD.parameters(): # reset requires_grad\n p.requires_grad = True # they are set to False below in netG update\n for i in range(CRITIC_ITERS):\n _data = next(gen)\n netD.zero_grad()\n\n # train with real\n _data = _data.reshape(BATCH_SIZE, 3, 32, 32).transpose(0, 2, 3, 1)\n real_data = torch.stack([preprocess(item) for item in _data])\n\n if use_cuda:\n real_data = real_data.cuda(gpu)\n real_data_v = autograd.Variable(real_data)\n\n # import torchvision\n # filename = os.path.join(\"test_train_data\", str(iteration) + str(i) + \".jpg\")\n # torchvision.utils.save_image(real_data, filename)\n\n # train with fake\n noise = torch.randn(BATCH_SIZE, 128)\n if use_cuda:\n noise = noise.cuda(gpu)\n noisev = autograd.Variable(noise, volatile=True) # totally freeze netG\n fake = autograd.Variable(netG(noisev).data)\n inputv = fake\n\n D_cost = D_loss_func(real_data_v, inputv, netD) + 0.2 * dag.compute_discriminator_loss(real_data_v, inputv, netD)\n D_cost.backward()\n optimizerD.step()\n ############################\n # (2) Update G network\n ###########################\n for p in netD.parameters():\n p.requires_grad = False # to avoid computation\n netG.zero_grad()\n\n noise = torch.randn(BATCH_SIZE, 128)\n if use_cuda:\n noise = noise.cuda(gpu)\n noisev = autograd.Variable(noise)\n fake = netG(noisev)\n\n G_cost = G_loss_func(None, fake, netD) + 0.2 * dag.compute_generator_loss(real_data_v, inputv, netD)\n G_cost.backward()\n optimizerG.step()\n\n # Write logs and save samples\n lib.plot.plot(OUR_DIR + 'train disc cost', D_cost.cpu().data.numpy())\n lib.plot.plot(OUR_DIR + 'time', time.time() - start_time)\n lib.plot.plot(OUR_DIR + 'train gen cost', G_cost.cpu().data.numpy())\n #lib.plot.plot('./tmp/cifar10/wasserstein distance', Wasserstein_D.cpu().data.numpy())\n\n # Calculate inception score every 1K iters\n if False and iteration % 1000 == 999:\n inception_score = get_inception_score(netG)\n lib.plot.plot(OUR_DIR + 'inception score', inception_score[0])\n\n # Calculate dev loss and generate samples every 100 iters\n if iteration % 100 == 99:\n dev_disc_costs = []\n for images in dev_gen():\n images = images.reshape(BATCH_SIZE, 3, 32, 32).transpose(0, 2, 3, 1)\n imgs = torch.stack([preprocess(item) for item in images])\n\n # imgs = preprocess(images)\n if use_cuda:\n imgs = imgs.cuda(gpu)\n imgs_v = autograd.Variable(imgs, volatile=True)\n\n D = netD(imgs_v)\n #_dev_disc_cost = -D.mean().cpu().data.numpy()\n #dev_disc_costs.append(_dev_disc_cost)\n #lib.plot.plot('./tmp/cifar10/dev disc cost', np.mean(dev_disc_costs))\n\n generate_image(iteration, netG)\n\n # Save logs every 100 iters\n if (iteration < 5) or (iteration % 100 == 99):\n lib.plot.flush()\n lib.plot.tick()\n", "import tensorflow as tf\nimport numpy as np\nimport math\n\ndef rotation(x, degs):\n x_rot = []\n angle = math.pi / 180\n for deg in degs:\n if deg == 0:\n x_rot.append(x)\n elif deg == 90:\n x_rot.append(tf.contrib.image.rotate(x, 90 * angle))\n elif deg == 180:\n x_rot.append(tf.contrib.image.rotate(x, 180 * angle))\n elif deg == 270:\n x_rot.append(tf.contrib.image.rotate(x, 270 * angle))\n return x_rot\n \ndef fliprot(x, aug):\n x_flip = []\n x_flip.append(x)\n x_hflip = tf.image.flip_left_right(x)\n x_flip.append(x_hflip)\n x_flip.append(tf.image.flip_up_down(x))\n x_flip.append(tf.image.flip_up_down(x_hflip))\n return x_flip\n\ndef image_crop(x, offset_h, offset_w, target_h, target_w, size=[32,32]):\n x_crop = 
tf.image.crop_to_bounding_box(x, offset_h, offset_w, target_h, target_w)\n x_crop = tf.image.resize_bilinear(x_crop, size=size, align_corners=True)\n return x_crop\n\ndef cropping(x, aug):\n b, h, w, c = x.get_shape().as_list()\n img_size = [h, w]\n boxes = [[0, 0, h, w],\n [0, 0, h*0.75, w*0.75],\n [0, w*0.25, h*0.75, w*0.75],\n [h*0.25, 0, h*0.75, w*0.75],\n [h*0.25, w*0.25, h*0.75, w*0.75]]\n x_crop = []\n for i in range(np.shape(boxes)[0]):\n cropped = image_crop(x, int(boxes[i][0]), int(boxes[i][1]), int(boxes[i][2]), int(boxes[i][3]), size=img_size)\n x_crop.append(cropped)\n return x_crop\n\ndef augmenting_data(x, aug, aug_list):\n if aug == 'rotation':\n return rotation(x, aug_list)\n elif aug == 'fliprot':\n return fliprot(x, aug_list)\n elif aug == 'cropping':\n return cropping(x, aug_list)\n else:\n print('utils.augmenting_data: the augmentation type is not supported. Exiting ...')\n exit()\n" ]
[ [ "torch.nn.BatchNorm1d", "torch.nn.ConvTranspose2d", "numpy.multiply", "torch.randn", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.tensor", "numpy.concatenate", "torch.nn.Tanh", "torch.nn.Linear", "torch.rand", "torch.cuda.is_available", "torch.nn.BatchNorm2d", "torch.nn.LeakyReLU", "torch.nn.ReLU", "torch.autograd.Variable" ], [ "tensorflow.image.resize_bilinear", "tensorflow.image.crop_to_bounding_box", "tensorflow.contrib.image.rotate", "tensorflow.image.flip_left_right", "tensorflow.image.flip_up_down", "numpy.shape" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
oeg-upm/tada-num-dist
[ "d7845898c5405680a63bc86e60c994816fb4562a" ]
[ "tests/test_clus.py" ]
[ "import unittest\nimport os\nimport pandas as pd\nfrom tadaqq.clus import Clusterer\nfrom collections import Counter\nfrom tadaqq.clus import Clusterer, PMap\nfrom tadaqq.slabmer import SLabMer\n\n\ndef get_test_df():\n dbp = \"http://dbpedia.org/property/\"\n df = pd.DataFrame(\n [\n ['AAA', 'fnameaa1', 0, \"1,2,3\", \"%spropa1;%spropa11\" % (dbp, dbp)],\n ['AAA', 'fnameaa2', 0, \"1,2,3,4\", \"%spropa11;%spropa12\" % (dbp, dbp)],\n ['AAA', 'fnameaa3', 1, \"1,6,5\", \"%spropa13;%spropa14;%spropa1\" % (dbp, dbp, dbp)],\n\n ['AAA', 'fnameaa7', 2, \"70,60,50\", \"%spropa3;%spropa31\" % (dbp, dbp)],\n ['AAA', 'fnameaa8', 3, \"70,60,50\", \"%spropa31\" % dbp],\n ['AAA', 'fnameaa9', 1, \"80,20,40\", \"%spropa31;%spropa34;%spropa3\" % (dbp, dbp, dbp)],\n\n ['BBB', 'fnamebb1', 0, \"1,2,3\", \"%spropb1;%spropb11\" % (dbp, dbp)],\n ['BBB', 'fnamebb2', 0, \"1,2,3,4\", \"%spropb11;%spropb12\" % (dbp, dbp)],\n ['BBB', 'fnamebb3', 1, \"1,6,5\", \"%spropb13;%spropb14;%spropb1\" % (dbp, dbp, dbp)],\n\n ['CCC', 'fnamecc1', 0, \"1000,2000,3000\", \"%spropc1;%spropc11\" % (dbp, dbp)],\n ],\n columns=['concept', 'filename', 'columnid', 'col', 'property']\n )\n return df\n\n\ndef apply_cluster(df, fetch_method, err_meth, same_class, err_cutoff):\n clusterer = Clusterer(save_memory=False)\n pmap = PMap()\n for idx, row_and_i in enumerate(df.iterrows()):\n i, row = row_and_i\n pmap.add(row['property'].split(';'))\n col = [int(d) for d in row['col'].split(',')]\n ele = {\n 'class_uri': 'http://dbpedia.org/ontology/' + row['concept'],\n 'col_id': row['columnid'],\n 'fname': row['filename'],\n 'col': col,\n 'num': len(col),\n 'concept': row['concept'],\n 'property': pmap.get(row['property'].split(';')[0]),\n 'properties': row['property'].split(';')\n }\n clusterer.column_group_matching(ele, fetch_method, err_meth, err_cutoff, same_class)\n return clusterer\n\n\nclass ClusTest(unittest.TestCase):\n\n def setUp(self) -> None:\n pass\n\n def test_evaluation(self):\n b = [\"A\", \"A\", \"A\", \"B\", \"B\", \"B\", \"B\", \"B\", \"C\", \"C\", \"C\"]\n c = Counter(b)\n a = [[\"A\", \"A\", \"A\", \"B\", \"B\", \"B\"], [\"B\", \"B\"], [\"C\", \"C\", \"C\"]]\n groups = []\n for g in a:\n clus = [{'gs_clus': cl} for cl in g]\n groups.append(clus)\n clusterer = Clusterer()\n clusterer.groups = groups\n p, r, f1 = clusterer.evaluate(c)\n self.assertEqual(p, 2/3)\n self.assertEqual(r, 2.6/3)\n\n def test_group_matching(self):\n groups = []\n eles = []\n ele = {'col': [1, 2, 3]}\n eles.append(ele)\n ele = {'col': [1, 2, 4]}\n eles.append(ele)\n ele = {'col': [20, 20, 30]}\n eles.append(ele)\n ele = {'col': [21, 21, 31]}\n eles.append(ele)\n ele = {'col': [24, 24, 34]}\n eles.append(ele)\n ele = {'col': [140, 240, 340]}\n eles.append(ele)\n err_cutoff = 0.3\n err_meth = \"mean_err\"\n fetch_method = \"max\"\n clusterer = Clusterer()\n clusterer.groups = groups\n for ele in eles:\n ele['num'] = len(ele['col'])\n clusterer.column_group_matching(ele, fetch_method, err_meth, err_cutoff, False)\n self.assertEqual(len(groups), 3)\n self.assertEqual(len(groups[0]), 2)\n self.assertEqual(len(groups[1]), 3)\n self.assertEqual(len(groups[2]), 1)\n\n def test_clusterer(self):\n df = get_test_df()\n clusterer = apply_cluster(df, fetch_method=\"max\", err_meth=\"mean_err\", err_cutoff=0.3, same_class=False)\n self.assertEqual(len(clusterer.groups), 3)\n clusterer = apply_cluster(df, fetch_method=\"max\", err_meth=\"mean_err\", err_cutoff=0.3, same_class=True)\n self.assertEqual(len(clusterer.groups), 4)\n\n\nif __name__ == '__main__':\n 
unittest.main()\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
sebasgo/OpenMDAO
[ "b78d840780b73209dc3a00a2fb3dbf729bfeb8d5", "b78d840780b73209dc3a00a2fb3dbf729bfeb8d5" ]
[ "openmdao/solvers/tests/test_solver_parametric_suite.py", "openmdao/visualization/scaling_viewer/scaling_report.py" ]
[ "\"\"\"Runs a parametric test over several of the linear solvers.\"\"\"\n\nimport numpy as np\nimport unittest\n\nfrom openmdao.core.group import Group\nfrom openmdao.core.problem import Problem\nfrom openmdao.core.implicitcomponent import ImplicitComponent\nfrom openmdao.utils.assert_utils import assert_near_equal\nfrom openmdao.solvers.nonlinear.newton import NewtonSolver\nfrom openmdao.solvers.linear.direct import DirectSolver\nfrom openmdao.test_suite.groups.implicit_group import TestImplicitGroup\nfrom openmdao.test_suite.parametric_suite import parametric_suite\n\n\nclass ImplComp4Test(ImplicitComponent):\n\n def setup(self):\n self.add_input('x', np.ones(2))\n self.add_output('y', np.ones(2))\n self.mtx = np.array([\n [3., 4.],\n [2., 3.],\n ])\n # Inverse is\n # [ 3.,-4.],\n # [-2., 3.],\n\n #self.declare_partials('y', 'x', val=-np.eye(2))\n #self.declare_partials('y', 'y', val=self.mtx)\n self.declare_partials('*', '*')\n\n def apply_nonlinear(self, inputs, outputs, residuals):\n residuals['y'] = self.mtx.dot(outputs['y']) - inputs['x']\n\n def linearize(self, inputs, outputs, partials):\n partials['y', 'x'] = -np.eye(2)\n partials['y', 'y'] = self.mtx\n\n\nclass TestLinearSolverParametricSuite(unittest.TestCase):\n\n def test_direct_solver_comp(self):\n \"\"\"\n Test the direct solver on a component.\n \"\"\"\n for jac in [None, 'csc', 'dense']:\n prob = Problem(model=ImplComp4Test())\n prob.model.nonlinear_solver = NewtonSolver(solve_subsystems=False)\n if jac in ('csc', 'dense'):\n prob.model.options['assembled_jac_type'] = jac\n prob.model.linear_solver = DirectSolver(assemble_jac=jac in ('csc','dense'))\n prob.set_solver_print(level=0)\n\n prob.setup()\n\n prob.run_model()\n assert_near_equal(prob['y'], [-1., 1.])\n\n d_inputs, d_outputs, d_residuals = prob.model.get_linear_vectors()\n\n d_residuals.set_val(2.0)\n d_outputs.set_val(0.0)\n prob.model.run_solve_linear(['linear'], 'fwd')\n result = d_outputs.asarray()\n assert_near_equal(result, [-2., 2.])\n\n d_outputs.set_val(2.0)\n d_residuals.set_val(0.0)\n prob.model.run_solve_linear(['linear'], 'rev')\n result = d_residuals.asarray()\n assert_near_equal(result, [2., -2.])\n\n def test_direct_solver_group(self):\n \"\"\"\n Test the direct solver on a group.\n \"\"\"\n prob = Problem(model=TestImplicitGroup(lnSolverClass=DirectSolver))\n\n prob.setup()\n\n # Set this to False because we have matrix-free component(s).\n prob.model.linear_solver.options['assemble_jac'] = False\n\n # Conclude setup but don't run model.\n prob.final_setup()\n\n prob.model.run_linearize()\n\n d_inputs, d_outputs, d_residuals = prob.model.get_linear_vectors()\n\n d_residuals.set_val(1.0)\n d_outputs.set_val(0.0)\n prob.model.run_solve_linear(['linear'], 'fwd')\n result = d_outputs.asarray()\n assert_near_equal(result, prob.model.expected_solution, 1e-15)\n\n d_outputs.set_val(1.0)\n d_residuals.set_val(0.0)\n prob.model.run_solve_linear(['linear'], 'rev')\n result = d_residuals.asarray()\n assert_near_equal(result, prob.model.expected_solution, 1e-15)\n\n @parametric_suite(\n assembled_jac=[False, True],\n jacobian_type=['dense'],\n partial_type=['array', 'sparse', 'aij'],\n num_var=[2, 3],\n var_shape=[(2, 3), (2,)],\n connection_type=['implicit', 'explicit'],\n run_by_default=False,\n )\n def test_subset(self, param_instance):\n param_instance.linear_solver_class = DirectSolver\n param_instance.linear_solver_options = {} # defaults not valid for DirectSolver\n\n param_instance.setup()\n problem = param_instance.problem\n model = 
problem.model\n\n expected_values = model.expected_values\n if expected_values:\n actual = {key: problem[key] for key in expected_values}\n assert_near_equal(actual, expected_values, 1e-8)\n\n expected_totals = model.expected_totals\n if expected_totals:\n # Forward Derivatives Check\n totals = param_instance.compute_totals('fwd')\n assert_near_equal(totals, expected_totals, 1e-8)\n\n # Reverse Derivatives Check\n totals = param_instance.compute_totals('rev')\n assert_near_equal(totals, expected_totals, 1e-8)\n\nif __name__ == \"__main__\":\n unittest.main()\n", "\n\"\"\"Define a function to view driver scaling.\"\"\"\nimport os\nimport sys\nimport json\nfrom itertools import chain\nfrom collections import defaultdict\n\nimport numpy as np\n\nimport openmdao\nfrom openmdao.core.constants import _SetupStatus\nimport openmdao.utils.coloring as coloring_mod\nimport openmdao.utils.hooks as hooks\nfrom openmdao.utils.units import convert_units\nfrom openmdao.utils.mpi import MPI\nfrom openmdao.utils.webview import webview\nfrom openmdao.utils.general_utils import printoptions, ignore_errors, default_noraise\nfrom openmdao.utils.file_utils import _load_and_exec, _to_filename\n\n\ndef _val2str(val):\n if isinstance(val, np.ndarray):\n if val.size > 5:\n return 'array %s' % str(val.shape)\n else:\n return np.array2string(val)\n\n return str(val)\n\n\ndef _unscale(val, scaler, adder, default=''):\n if val is None:\n return default\n if scaler is not None:\n val = val * (1.0 / scaler)\n if adder is not None:\n val = val - adder\n return val\n\n\ndef _scale(val, scaler, adder, unset=''):\n if val is None:\n return unset\n if adder is not None:\n val = val + adder\n if scaler is not None:\n val = val * scaler\n return val\n\n\ndef _getdef(val, unset):\n if val is None:\n return unset\n if np.isscalar(val) and (val == openmdao.INF_BOUND or val == -openmdao.INF_BOUND):\n return unset\n return val\n\n\ndef _get_val_and_size(val, unset=''):\n # return val (or max abs val) and the size of the value\n val = _getdef(val, unset)\n if np.isscalar(val) or val.size == 1:\n return [val, 1]\n return [np.max(np.abs(val)), val.size]\n\n\ndef _get_flat(val, size, unset=''):\n if val is None:\n return val\n if np.isscalar(val):\n if (val == openmdao.INF_BOUND or val == -openmdao.INF_BOUND):\n val = unset\n return np.full(size, val)\n if val.size > 1:\n return val.flatten()\n return np.full(size, val[0])\n\n\ndef _add_child_rows(row, mval, dval, scaler=None, adder=None, ref=None, ref0=None,\n lower=None, upper=None, equals=None, inds=None):\n if not (np.isscalar(mval) or mval.size == 1):\n rowchild = row.copy()\n children = row['_children'] = []\n rowchild['name'] = ''\n rowchild['size'] = ''\n dval_flat = dval.flatten()\n mval_flat = mval.flatten()\n scaler_flat = _get_flat(scaler, mval.size)\n adder_flat = _get_flat(adder, mval.size)\n ref_flat = _get_flat(ref, mval.size)\n ref0_flat = _get_flat(ref0, mval.size)\n upper_flat = _get_flat(upper, mval.size)\n lower_flat = _get_flat(lower, mval.size)\n equals_flat = _get_flat(equals, mval.size)\n\n if inds is None:\n inds = list(range(dval.size))\n else:\n inds = np.atleast_1d(inds).flatten()\n\n for i, idx in enumerate(inds):\n d = rowchild.copy()\n d['index'] = idx\n d['driver_val'] = [dval_flat[i], 1]\n d['model_val'] = [mval_flat[i], 1]\n if scaler_flat is not None:\n d['scaler'] = [scaler_flat[i], 1]\n if adder_flat is not None:\n d['adder'] = [adder_flat[i], 1]\n if ref_flat is not None:\n d['ref'] = [ref_flat[i], 1]\n if ref0_flat is not None:\n d['ref0'] = 
[ref0_flat[i], 1]\n if upper_flat is not None:\n d['upper'] = [upper_flat[i], 1]\n if lower_flat is not None:\n d['lower'] = [lower_flat[i], 1]\n if equals_flat is not None:\n d['equals'] = [equals_flat[i], 1]\n children.append(d)\n\n\ndef _compute_jac_view_info(totals, data, dv_vals, response_vals, coloring):\n start = end = 0\n data['ofslices'] = slices = {}\n for n, v in response_vals.items():\n end += v.size\n slices[n] = [start, end]\n start = end\n\n start = end = 0\n data['wrtslices'] = slices = {}\n for n, v in dv_vals.items():\n end += v.size\n slices[n] = [start, end]\n start = end\n\n nonempty_submats = set() # submats with any nonzero values\n\n var_matrix = np.zeros((len(data['ofslices']), len(data['wrtslices'])))\n\n matrix = np.abs(totals)\n\n if coloring is not None: # factor in the sparsity\n mask = np.zeros(totals.shape, dtype=bool)\n mask[coloring._nzrows, coloring._nzcols] = 1\n\n for i, of in enumerate(response_vals):\n ofstart, ofend = data['ofslices'][of]\n for j, wrt in enumerate(dv_vals):\n wrtstart, wrtend = data['wrtslices'][wrt]\n # use max of abs value here instead of norm to keep coloring consistent between\n # top level jac and subjacs\n var_matrix[i, j] = np.max(matrix[ofstart:ofend, wrtstart:wrtend])\n if var_matrix[i, j] > 0. or (coloring and\n np.any(mask[ofstart:ofend, wrtstart:wrtend])):\n nonempty_submats.add((of, wrt))\n\n matlist = [None] * matrix.size\n idx = 0\n for i in range(matrix.shape[0]):\n for j in range(matrix.shape[1]):\n val = matrix[i, j]\n if coloring and not mask[i, j]:\n val = None\n else:\n if val == 0.:\n val = 0 # set to int 0\n matlist[idx] = [i, j, val]\n idx += 1\n\n data['mat_list'] = matlist\n\n varmatlist = [None] * var_matrix.size\n\n # set up sparsity of var matrix\n idx = 0\n for i, of in enumerate(data['oflabels']):\n for j, wrt in enumerate(data['wrtlabels']):\n if coloring is not None and (of, wrt) not in nonempty_submats:\n val = None\n else:\n val = var_matrix[i, j]\n varmatlist[idx] = [of, wrt, val]\n idx += 1\n\n data['var_mat_list'] = varmatlist\n\n\ndef view_driver_scaling(driver, outfile='driver_scaling_report.html', show_browser=True,\n title=None, jac=True):\n \"\"\"\n Generate a self-contained html file containing a detailed driver scaling report.\n\n Optionally pops up a web browser to view the file.\n\n Parameters\n ----------\n driver : Driver\n The driver used for the scaling report.\n outfile : str, optional\n The name of the output html file. 
Defaults to 'driver_scaling_report.html'.\n show_browser : bool, optional\n If True, pop up a browser to view the generated html file.\n Defaults to True.\n title : str, optional\n Sets the title of the web page.\n jac : bool\n If True, show jacobian information.\n\n Returns\n -------\n dict\n Data used to generate the html file.\n \"\"\"\n if MPI and MPI.COMM_WORLD.rank != 0:\n return\n\n dv_table = []\n con_table = []\n obj_table = []\n\n dv_vals = driver.get_design_var_values(get_remote=True)\n obj_vals = driver.get_objective_values(driver_scaling=True)\n con_vals = driver.get_constraint_values(driver_scaling=True)\n\n mod_meta = driver._problem().model._var_allprocs_abs2meta['output']\n\n if driver._problem()._metadata['setup_status'] < _SetupStatus.POST_FINAL_SETUP:\n raise RuntimeError(\"Driver scaling report cannot be generated before calling final_setup \"\n \"on the Problem.\")\n\n default = ''\n\n idx = 1 # unique ID for use by Tabulator\n\n # set up design vars table data\n for name, meta in driver._designvars.items():\n scaler = meta['total_scaler']\n adder = meta['total_adder']\n ref = meta['ref']\n ref0 = meta['ref0']\n lower = meta['lower']\n upper = meta['upper']\n\n dval = dv_vals[name]\n mval = _unscale(dval, scaler, adder, default)\n\n if dval.size == 1:\n index = meta['indices']\n if index is not None:\n index = index[0]\n index = _getdef(index, '')\n else:\n index = ''\n\n dct = {\n 'id': idx,\n 'name': name,\n 'size': meta['size'],\n 'driver_val': _get_val_and_size(dval),\n 'driver_units': _getdef(meta['units'], default),\n 'model_val': _get_val_and_size(mval),\n 'model_units': _getdef(mod_meta[meta['ivc_source']]['units'], default),\n 'ref': _get_val_and_size(ref, default),\n 'ref0': _get_val_and_size(ref0, default),\n 'scaler': _get_val_and_size(scaler, default),\n 'adder': _get_val_and_size(adder, default),\n 'lower': _get_val_and_size(lower, default), # scaled\n 'upper': _get_val_and_size(upper, default), # scaled\n 'index': index,\n }\n\n dv_table.append(dct)\n\n _add_child_rows(dct, mval, dval, scaler=scaler, adder=adder, ref=ref, ref0=ref0,\n lower=lower, upper=upper, inds=meta['indices'])\n\n idx += 1\n\n # set up constraints table data\n for name, meta in driver._cons.items():\n scaler = meta['total_scaler']\n adder = meta['total_adder']\n ref = meta['ref']\n ref0 = meta['ref0']\n lower = meta['lower']\n upper = meta['upper']\n equals = meta['equals']\n\n dval = con_vals[name]\n mval = _unscale(dval, scaler, adder, default)\n\n if dval.size == 1:\n index = meta['indices']\n if index is not None:\n index = index[0]\n index = _getdef(index, '')\n else:\n index = ''\n\n dct = {\n 'id': idx,\n 'name': name,\n 'size': meta['size'],\n 'index': index,\n 'driver_val': _get_val_and_size(dval),\n 'driver_units': _getdef(meta['units'], default),\n 'model_val': _get_val_and_size(mval),\n 'model_units': _getdef(mod_meta[meta['ivc_source']]['units'], default),\n 'ref': _get_val_and_size(meta['ref'], default),\n 'ref0': _get_val_and_size(meta['ref0'], default),\n 'scaler': _get_val_and_size(scaler, default),\n 'adder': _get_val_and_size(adder, default),\n 'lower': _get_val_and_size(meta['lower'], default), # scaled\n 'upper': _get_val_and_size(meta['upper'], default), # scaled\n 'equals': _get_val_and_size(meta['equals'], default), # scaled\n 'linear': meta['linear'],\n }\n\n con_table.append(dct)\n _add_child_rows(dct, mval, dval, scaler=scaler, adder=adder, ref=ref, ref0=ref0,\n lower=lower, upper=upper, equals=equals, inds=meta['indices'])\n\n idx += 1\n\n # set up objectives table 
data\n for name, meta in driver._objs.items():\n scaler = meta['total_scaler']\n adder = meta['total_adder']\n ref = meta['ref']\n ref0 = meta['ref0']\n\n dval = obj_vals[name]\n mval = _unscale(dval, scaler, adder, default)\n\n if dval.size == 1:\n index = meta['indices']\n if index is not None:\n index = index[0]\n index = _getdef(index, '')\n else:\n index = ''\n\n dct = {\n 'id': idx,\n 'name': name,\n 'size': meta['size'],\n 'index': index,\n 'driver_val': _get_val_and_size(dval),\n 'driver_units': _getdef(meta['units'], default),\n 'model_val': _get_val_and_size(mval),\n 'model_units': _getdef(mod_meta[meta['ivc_source']]['units'], default),\n 'ref': _get_val_and_size(meta['ref'], default),\n 'ref0': _get_val_and_size(meta['ref0'], default),\n 'scaler': _get_val_and_size(scaler, default),\n 'adder': _get_val_and_size(adder, default),\n }\n\n obj_table.append(dct)\n _add_child_rows(dct, mval, dval, scaler=scaler, adder=adder, ref=ref, ref0=ref0,\n inds=meta['indices'])\n\n idx += 1\n\n data = {\n 'title': _getdef(title, ''),\n 'dv_table': dv_table,\n 'con_table': con_table,\n 'obj_table': obj_table,\n 'oflabels': [],\n 'wrtlabels': [],\n 'var_mat_list': [],\n 'linear': {\n 'oflabels': [],\n }\n }\n\n if jac and not driver._problem().model._use_derivatives:\n print(\"\\nCan't display jacobian because derivatives are turned off.\\n\")\n jac = False\n\n if jac:\n # save old totals\n save = driver._total_jac\n driver._total_jac = None\n\n coloring = driver._get_static_coloring()\n if coloring_mod._use_total_sparsity:\n if coloring is None and driver._coloring_info['dynamic']:\n coloring = coloring_mod.dynamic_total_coloring(driver)\n\n # assemble data for jacobian visualization\n data['oflabels'] = driver._get_ordered_nl_responses()\n data['wrtlabels'] = list(dv_vals)\n\n try:\n totals = driver._compute_totals(of=data['oflabels'], wrt=data['wrtlabels'],\n return_format='array')\n finally:\n driver._total_jac = save\n\n data['linear'] = lindata = {}\n lindata['oflabels'] = [n for n, meta in driver._cons.items() if meta['linear']]\n lindata['wrtlabels'] = data['wrtlabels'] # needs to mimic data structure\n\n # check for separation of linear constraints\n if lindata['oflabels']:\n if set(lindata['oflabels']).intersection(data['oflabels']):\n # linear cons are found in data['oflabels'] so they're not separated\n lindata['oflabels'] = []\n lindata['wrtlables'] = []\n\n full_response_vals = con_vals.copy()\n full_response_vals.update(obj_vals)\n response_vals = {n: full_response_vals[n] for n in data['oflabels']}\n\n _compute_jac_view_info(totals, data, dv_vals, response_vals, coloring)\n\n if lindata['oflabels']:\n # prevent reuse of nonlinear totals\n save = driver._total_jac\n driver._total_jac = None\n\n try:\n lintotals = driver._compute_totals(of=lindata['oflabels'], wrt=data['wrtlabels'],\n return_format='array')\n lin_response_vals = {n: full_response_vals[n] for n in lindata['oflabels']}\n finally:\n driver._total_jac = save\n\n _compute_jac_view_info(lintotals, lindata, dv_vals, lin_response_vals, None)\n\n if driver._problem().comm.rank == 0:\n\n viewer = 'scaling_table.html'\n\n code_dir = os.path.dirname(os.path.abspath(__file__))\n libs_dir = os.path.join(os.path.dirname(code_dir), 'common', 'libs')\n style_dir = os.path.join(os.path.dirname(code_dir), 'common', 'style')\n\n with open(os.path.join(code_dir, viewer), \"r\") as f:\n template = f.read()\n\n with open(os.path.join(libs_dir, 'tabulator.min.js'), \"r\") as f:\n tabulator_src = f.read()\n\n with 
open(os.path.join(style_dir, 'tabulator.min.css'), \"r\") as f:\n tabulator_style = f.read()\n\n with open(os.path.join(libs_dir, 'd3.v6.min.js'), \"r\") as f:\n d3_src = f.read()\n\n jsontxt = json.dumps(data, default=default_noraise)\n\n with open(outfile, 'w') as f:\n s = template.replace(\"<tabulator_src>\", tabulator_src)\n s = s.replace(\"<tabulator_style>\", tabulator_style)\n s = s.replace(\"<d3_src>\", d3_src)\n s = s.replace(\"<scaling_data>\", jsontxt)\n f.write(s)\n\n if show_browser:\n webview(outfile)\n\n return data\n\n\ndef _scaling_setup_parser(parser):\n \"\"\"\n Set up the openmdao subparser for the 'openmdao driver_scaling' command.\n\n Parameters\n ----------\n parser : argparse subparser\n The parser we're adding options to.\n \"\"\"\n parser.add_argument('file', nargs=1, help='Python file containing the model.')\n parser.add_argument('-o', default='driver_scaling_report.html', action='store', dest='outfile',\n help='html output file.')\n parser.add_argument('-t', '--title', action='store', dest='title',\n help='title of web page.')\n parser.add_argument('--no_browser', action='store_true', dest='no_browser',\n help=\"don't display in a browser.\")\n parser.add_argument('-p', '--problem', action='store', dest='problem', help='Problem name')\n parser.add_argument('--no-jac', action='store_true', dest='nojac',\n help=\"Don't show jacobian info\")\n\n\n_run_driver_called = False\n_run_model_start = False\n_run_model_done = False\n\n\ndef _exitfunc():\n if not _run_driver_called:\n print(\"\\n\\nNo driver scaling report was generated because run_driver() was not called \"\n \"on the required Problem.\\n\")\n\n\ndef _scaling_cmd(options, user_args):\n \"\"\"\n Return the post_setup hook function for 'openmdao driver_scaling'.\n\n Parameters\n ----------\n options : argparse Namespace\n Command line options.\n user_args : list of str\n Args to be passed to the user script.\n \"\"\"\n def _set_run_driver_flag(problem):\n global _run_driver_called\n _run_driver_called = True\n\n def _set_run_model_start(problem):\n global _run_model_start\n _run_model_start = True\n\n def _set_run_model_done(problem):\n global _run_model_done\n _run_model_done = True\n\n def _scaling_check(problem):\n if _run_driver_called:\n # If run_driver has been called, we know no more user changes are coming.\n if not _run_model_start:\n problem.run_model()\n if _run_model_done:\n _scaling(problem)\n\n def _scaling(problem):\n hooks._unregister_hook('final_setup', 'Problem') # avoid recursive loop\n hooks._unregister_hook('run_driver', 'Problem')\n hooks._unregister_hook('run_model', 'Problem')\n driver = problem.driver\n if options.title:\n title = options.title\n else:\n title = \"Driver scaling for %s\" % os.path.basename(options.file[0])\n view_driver_scaling(driver, outfile=options.outfile, show_browser=not options.no_browser,\n title=title, jac=not options.nojac)\n exit()\n\n # register the hooks\n hooks._register_hook('final_setup', class_name='Problem', inst_id=options.problem,\n post=_scaling_check)\n\n hooks._register_hook('run_model', class_name='Problem', inst_id=options.problem,\n pre=_set_run_model_start, post=_set_run_model_done)\n\n hooks._register_hook('run_driver', class_name='Problem', inst_id=options.problem,\n pre=_set_run_driver_flag)\n\n # register an atexit function to check if scaling report was triggered during the script\n import atexit\n atexit.register(_exitfunc)\n\n ignore_errors(True)\n _load_and_exec(options.file[0], user_args)\n" ]
[ [ "numpy.eye", "numpy.array", "numpy.ones" ], [ "numpy.abs", "numpy.full", "numpy.atleast_1d", "numpy.max", "numpy.any", "numpy.isscalar", "numpy.array2string", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
King-Zach/3D-point-cloud-generation
[ "c06ed9bbe70e4c27d9d6bfc0fef3ac46ae1c8afe", "c06ed9bbe70e4c27d9d6bfc0fef3ac46ae1c8afe" ]
[ "util.py", "render/render_fixed.py" ]
[ "import numpy as np\nimport scipy.misc\nimport tensorflow as tf\nimport os\nimport termcolor\n\n# compute projection from source to target\ndef projection(Vs,Vt):\n\tVsN = tf.shape(Vs)[0]\n\tVtN = tf.shape(Vt)[0]\n\tVt_rep = tf.tile(Vt[None,:,:],[VsN,1,1]) # [VsN,VtN,3]\n\tVs_rep = tf.tile(Vs[:,None,:],[1,VtN,1]) # [VsN,VtN,3]\n\tdiff = Vt_rep-Vs_rep\n\tdist = tf.sqrt(tf.reduce_sum(diff**2,axis=[2])) # [VsN,VtN]\n\tidx = tf.to_int32(tf.argmin(dist,axis=1))\n\tproj = tf.gather_nd(Vt_rep,tf.stack([tf.range(VsN),idx],axis=1))\n\tminDist = tf.gather_nd(dist,tf.stack([tf.range(VsN),idx],axis=1))\n\treturn proj,minDist\n\ndef mkdir(path):\n\tif not os.path.exists(path): os.makedirs(path)\ndef imread(fname):\n\treturn scipy.misc.imread(fname)/255.0\ndef imsave(fname,array):\n\tscipy.misc.toimage(array,cmin=0.0,cmax=1.0).save(fname)\n\n# convert to colored strings\ndef toRed(content): return termcolor.colored(content,\"red\",attrs=[\"bold\"])\ndef toGreen(content): return termcolor.colored(content,\"green\",attrs=[\"bold\"])\ndef toBlue(content): return termcolor.colored(content,\"blue\",attrs=[\"bold\"])\ndef toCyan(content): return termcolor.colored(content,\"cyan\",attrs=[\"bold\"])\ndef toYellow(content): return termcolor.colored(content,\"yellow\",attrs=[\"bold\"])\ndef toMagenta(content): return termcolor.colored(content,\"magenta\",attrs=[\"bold\"])\n\n# make image summary from image batch\ndef imageSummary(opt,tag,image,H,W):\n\tblockSize = opt.visBlockSize\n\timageOne = tf.batch_to_space(image[:blockSize**2],crops=[[0,0],[0,0]],block_size=blockSize)\n\timagePermute = tf.reshape(imageOne,[H,blockSize,W,blockSize,-1])\n\timageTransp = tf.transpose(imagePermute,[1,0,3,2,4])\n\timageBlocks = tf.reshape(imageTransp,[1,H*blockSize,W*blockSize,-1])\n\tsummary = tf.summary.image(tag,imageBlocks)\n\treturn summary\n\n# restore model\ndef restoreModelFromIt(opt,sess,saver,it):\n\tsaver.restore(sess,\"models_{0}/{1}_it{2}.ckpt\".format(opt.group,opt.model,it))\n# restore model\ndef restoreModel(opt,sess,saver):\n\tsaver.restore(sess,\"models_{0}/{1}.ckpt\".format(opt.group,opt.load))\n# save model\ndef saveModel(opt,sess,saver,it):\n\tsaver.save(sess,\"models_{0}/{1}_it{2}.ckpt\".format(opt.group,opt.model,it))\n\n", "import os,sys,time\r\nimport bpy\r\nimport numpy as np\r\nimport shutil\r\nimport scipy.io\r\n\r\ncurpath = os.path.abspath(os.path.dirname(\".\"))\r\nsys.path.insert(0,curpath)\r\nimport util\r\n\r\n# usage: blender blank.blend -b -P render_fixed.py -- SHAPENETPATH CATEGORY MODEL_LIST RESOLUTION FIXED\r\n\r\n# redirect output to log file\r\nlogfile = \"/tmp/blender_render.log\"\r\n\r\nSHAPENETPATH = sys.argv[-5]\r\nCATEGORY = sys.argv[-4]\r\nMODEL_LIST = sys.argv[-3]\r\nRESOLUTION = int(sys.argv[-2])\r\nFIXED = int(sys.argv[-1])\r\n\r\nscene,camera,fo = util.setupBlender(\"buffer_fixed\",RESOLUTION)\r\ncamPosAll = util.getFixedViews(FIXED)\r\n\r\nlistFile = open(MODEL_LIST)\r\nfor line in listFile:\r\n\tMODEL = line.strip()\r\n\ttimeStart = time.time()\r\n\ttrans = []\r\n\r\n\tdepth_path = \"output/{1}_depth_fixed{2}/exr_{0}\".format(MODEL,CATEGORY,FIXED)\r\n\tif not os.path.isdir(depth_path):\r\n\t\tos.makedirs(depth_path)\r\n\r\n\t# suppress output\r\n\topen(logfile,\"a\").close()\r\n\told = os.dup(1)\r\n\tsys.stdout.flush()\r\n\tos.close(1)\r\n\tos.open(logfile,os.O_WRONLY)\r\n\r\n\tshape_file = \"{2}/{0}/{1}/models/model_normalized.obj\".format(CATEGORY,MODEL,SHAPENETPATH)\r\n\tbpy.ops.import_scene.obj(filepath=shape_file)\r\n\r\n\tfor m in 
bpy.data.materials:\r\n\t\tm.use_shadeless = True\r\n\r\n\tfor i in range(FIXED):\r\n\t\ttheta = 0\r\n\t\tcamPos = camPosAll[i]\r\n\t\tq1 = util.camPosToQuaternion(camPos)\r\n\t\tq2 = util.camRotQuaternion(camPos,theta)\r\n\t\tq = util.quaternionProduct(q2,q1)\r\n\r\n\t\tutil.setCameraExtrinsics(camera,camPos,q)\r\n\t\tq_extr,t_extr = util.cameraExtrinsicMatrix(q,camPos)\r\n\r\n\t\t# for ShapeNetCore.v2 all the objects are rotated 90 degrees\r\n\t\t# comment out this block if ShapeNetCore.v1 is used\r\n\t\tif i==0:\r\n\t\t\tfor o in bpy.data.objects:\r\n\t\t\t\tif o==camera: o.select = False\r\n\t\t\t\telse: o.select = True\r\n\t\t\tbpy.ops.transform.rotate(value=-np.pi/2,axis=(0,0,1))\r\n\r\n\t\tbpy.ops.render.render(write_still=False)\r\n\r\n\t\tshutil.copyfile(\"{0}/Z0001.exr\".format(fo.base_path),\r\n\t\t\t\t\t\t\"{0}/{1}.exr\".format(depth_path,i))\r\n\t\ttrans.append(np.array(q_extr))\r\n\r\n\t# show output\r\n\tos.close(1)\r\n\tos.dup(old)\r\n\tos.close(old)\r\n\r\n\t# clean up\r\n\tfor o in bpy.data.objects:\r\n\t\tif o==camera: continue\r\n\t\to.select = True\r\n\tbpy.ops.object.delete()\r\n\tfor m in bpy.data.meshes:\r\n\t\tbpy.data.meshes.remove(m)\r\n\tfor m in bpy.data.materials:\r\n\t m.user_clear()\r\n\t bpy.data.materials.remove(m)\r\n\r\n\tprint(\"{1} done, time={0:.4f} sec\".format(time.time()-timeStart,MODEL))\r\n\r\ntrans = np.array(trans,dtype=np.float32)\r\nnp.save(\"output/trans_fuse{0}.npy\".format(FIXED),trans)\r\n" ]
[ [ "tensorflow.transpose", "tensorflow.range", "tensorflow.shape", "tensorflow.summary.image", "tensorflow.reduce_sum", "tensorflow.reshape", "tensorflow.batch_to_space", "tensorflow.tile", "tensorflow.argmin" ], [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tripathiaakash/OCTIS
[ "1fb85f42020dd53cd3b3c7e5bfab4907ee47d8e8" ]
[ "octis/dashboard/server.py" ]
[ "import argparse\nimport webbrowser\nimport octis.dashboard.frameworkScanner as fs\nimport octis.configuration.defaults as defaults\nfrom multiprocessing import Process, Pool\nimport json\nfrom flask import Flask, render_template, request, send_file\nimport tkinter as tk\nimport pandas as pd\nimport numpy as np\nfrom tkinter import filedialog\nimport os\n\n\napp = Flask(__name__)\nqueueManager = \"\"\n\n\[email protected]('/downloadSingleExp',\n methods=['GET'])\ndef downloadSingleExp():\n \"\"\"\n Download the report of the given experiment\n\n :erturn: file with the report\n :rtype: File\n \"\"\"\n experimentId = request.args.get(\"experimentId\")\n batchId = request.args.get(\"batchId\")\n\n paused = False\n if (experimentId + batchId) == queueManager.running:\n paused = True\n queueManager.pause()\n\n expPath = \"\"\n createdPath = os.path.join(\n queueManager.getExperiment(batchId, experimentId)[\"path\"],\n experimentId,\n experimentId)\n jsonReport = {}\n if os.path.isfile(createdPath+\".json\"):\n expPath = createdPath\n\n with open(expPath+\".json\") as p:\n jsonReport = json.load(p)\n\n info = queueManager.getExperimentInfo(batchId, experimentId)\n\n n_row = info[\"current_iteration\"]\n n_extra_metrics = len(jsonReport[\"extra_metric_names\"])\n\n df = pd.DataFrame()\n df['dataset'] = [jsonReport[\"dataset_name\"]] * n_row\n df['surrogate model'] = [jsonReport[\"surrogate_model\"]] * n_row\n df['acquisition function'] = [jsonReport[\"acq_func\"]] * n_row\n df['num_iteration'] = [i for i in range(n_row)]\n df['time'] = [jsonReport['time_eval'][i] for i in range(n_row)]\n df['Median(model_runs)'] = [np.median(\n jsonReport['dict_model_runs'][jsonReport['metric_name']]['iteration_' + str(i)]) for i in range(n_row)]\n df['Mean(model_runs)'] = [np.mean(\n jsonReport['dict_model_runs'][jsonReport['metric_name']]['iteration_' + str(i)]) for i in range(n_row)]\n df['Standard_Deviation(model_runs)'] = [np.std(\n jsonReport['dict_model_runs'][jsonReport['metric_name']]['iteration_' + str(i)]) for i in range(n_row)]\n\n for hyperparameter in list(jsonReport[\"x_iters\"]):\n df[hyperparameter] = jsonReport[\"x_iters\"][hyperparameter][0:n_row]\n\n for metric, i in zip(jsonReport[\"extra_metric_names\"], range(n_extra_metrics)):\n df[metric + '(median, not optimized)'] = [np.median(\n jsonReport[\"dict_model_runs\"][metric]['iteration_' + str(i)]) for i in range(n_row)]\n\n df[metric + '(Mean, not optimized)'] = [np.mean(\n jsonReport[\"dict_model_runs\"][metric]['iteration_' + str(i)]) for i in range(n_row)]\n\n df[metric + '(Standard_Deviation, not optimized)'] = [np.std(\n jsonReport[\"dict_model_runs\"][metric]['iteration_' + str(i)]) for i in range(n_row)]\n\n name_file = expPath + \".csv\"\n\n df.to_csv(name_file, index=False, na_rep='Unkown')\n\n if paused:\n queueManager.start()\n\n return send_file(expPath+\".csv\",\n mimetype=\"text/csv\",\n attachment_filename=\"report.csv\",\n as_attachment=True)\n\n\n@ app.route(\"/selectPath\", methods=['POST'])\ndef selectPath():\n \"\"\"\n Select a path from the server and return it to the page\n\n :return: path\n :rtype: Dict\n \"\"\"\n window = tk.Tk()\n path = filedialog.askdirectory()\n window.destroy()\n return {\"path\": path}\n\n\n@ app.route(\"/serverClosed\")\ndef serverClosed():\n \"\"\"\n Reroute to the serverClosed page before server shutdown\n\n :return: template\n :rtype: render template\n \"\"\"\n return render_template(\"serverClosed.html\")\n\n\n@ app.route(\"/shutdown\")\ndef shutdown():\n \"\"\"\n Save the state of the 
QueueManager and perform server shutdown\n\n :return: Ack signal\n :rtype: Dict\n \"\"\"\n queueManager.stop()\n shutdown_server()\n return {\"DONE\": \"YES\"}\n\n\n@ app.route('/')\ndef home():\n \"\"\"\n Return the octis landing page\n\n :return: template\n :rtype: render template\n \"\"\"\n return render_template(\"index.html\")\n\n\n@ app.route('/startExperiment', methods=['POST'])\ndef startExperiment():\n \"\"\"\n Add a new experiment to the queue\n\n :return: template\n :rtype: render template\n \"\"\"\n data = request.form.to_dict(flat=False)\n batch = data[\"batchId\"][0]\n experimentId = data[\"expId\"][0]\n if queueManager.getExperiment(batch, experimentId):\n return VisualizeExperiments()\n\n expParams = dict()\n expParams[\"partitioning\"] = (\"partitioning\" in data)\n expParams[\"path\"] = data[\"path\"][0]\n expParams[\"dataset\"] = data[\"dataset\"][0]\n expParams[\"model\"] = {\"name\": data[\"model\"][0]}\n expParams[\"optimization\"] = {\n \"iterations\": typed(data[\"iterations\"][0]),\n \"model_runs\": typed(data[\"runs\"][0]),\n \"surrogate_model\": data[\"surrogateModel\"][0],\n \"n_random_starts\": typed(data[\"n_random_starts\"][0]),\n \"acquisition_function\": data[\"acquisitionFunction\"][0],\n \"search_spaces\": {}\n }\n expParams[\"optimize_metrics\"] = []\n expParams[\"track_metrics\"] = []\n\n model_parameters_to_optimize = []\n\n for key, value in data.items():\n if \"_check\" in key:\n model_parameters_to_optimize.append(key.replace(\"_check\", ''))\n\n for key, value in data.items():\n if \"model.\" in key:\n if any(par in key for par in model_parameters_to_optimize):\n if \"_xminx\" in key:\n name = key.replace(\"_xminx\", '').replace(\"model.\", '')\n if name not in expParams[\"optimization\"][\"search_spaces\"]:\n expParams[\"optimization\"][\"search_spaces\"][name] = {}\n expParams[\"optimization\"][\"search_spaces\"][name][\"low\"] = typed(\n value[0])\n elif \"_xmaxx\" in key:\n name = key.replace(\"_xmaxx\", '').replace(\"model.\", '')\n if name not in expParams[\"optimization\"][\"search_spaces\"]:\n expParams[\"optimization\"][\"search_spaces\"][name] = {}\n expParams[\"optimization\"][\"search_spaces\"][name][\"high\"] = typed(\n value[0])\n elif \"_check\" not in key:\n expParams[\"optimization\"][\"search_spaces\"][key.replace(\n \"model.\", '')] = request.form.getlist(key)\n else:\n if \"name\" in key:\n expParams[\"model\"][key.replace(\"model.\", '')] = value[0]\n else:\n if \"parameters\" not in expParams[\"model\"]:\n expParams[\"model\"][\"parameters\"] = {}\n expParams[\"model\"][\"parameters\"][key.replace(\n \"model.\", '')] = typed(value[0])\n\n if \"metric.\" in key:\n optimize = True\n metric = {\"name\": key.replace(\"metric.\", ''), \"parameters\": {}}\n for singleValue in value:\n\n for key, content in json.loads(singleValue).items():\n if key != \"metric\" and key != \"type\":\n metric[\"parameters\"][key] = typed(content)\n\n if key == \"type\" and content == \"track\":\n optimize = False\n if optimize:\n expParams[\"optimize_metrics\"].append(metric)\n else:\n expParams[\"track_metrics\"].append(metric)\n\n\n\n if expParams[\"optimize_metrics\"][0][\"name\"]==\"F1Score\" and not expParams[\"partitioning\"]:\n return VisualizeExperiments()\n for trackedMetric in expParams[\"track_metrics\"]:\n if trackedMetric[\"name\"] == \"F1Score\" and not expParams[\"partitioning\"]:\n return VisualizeExperiments()\n\n print(expParams)\n\n queueManager.add_experiment(batch, experimentId, expParams)\n return CreateExperiments()\n\n\n@ 
app.route(\"/getBatchExperiments\", methods=['POST'])\ndef getBatchExperiments():\n \"\"\"\n return the information related to the experiments of a batch\n\n :return: informations of the experiment\n :rtype: Dict\n \"\"\"\n data = request.json['data']\n experiments = []\n for key in data:\n batch_experiments = queueManager.getBatchExperiments(key)\n for experiment in batch_experiments:\n new_exp = experiment\n new_exp[\"optimization_data\"] = queueManager.getExperimentInfo(\n experiment[\"batchId\"],\n experiment[\"experimentId\"])\n experiments.append(new_exp)\n return json.dumps(experiments)\n\n\n@ app.route('/CreateExperiments')\ndef CreateExperiments():\n \"\"\"\n Serve the experiment creation page\n\n :return: template\n :rtype: render template\n \"\"\"\n models = defaults.model_hyperparameters\n models_descriptions = defaults.model_descriptions\n datasets = fs.scanDatasets()\n metrics = defaults.metric_parameters\n optimization = defaults.optimization_parameters\n return render_template(\"CreateExperiments.html\", datasets=datasets, models=models, metrics=metrics,\n optimization=optimization, models_descriptions=models_descriptions)\n\n\n@ app.route('/VisualizeExperiments')\ndef VisualizeExperiments():\n \"\"\"\n Serve the experiments visualization page\n\n :return: template\n :rtype: render template\n \"\"\"\n batch_names = queueManager.getBatchNames()\n return render_template(\"VisualizeExperiments.html\",\n batchNames=batch_names)\n\n\n@ app.route('/ManageExperiments')\ndef ManageExperiments():\n \"\"\"\n Serve the ManageExperiments page\n\n :return: template\n :rtype: render template\n \"\"\"\n exp_list = queueManager.getToRun()\n for exp in exp_list:\n exp_info = queueManager.getExperimentInfo(\n exp_list[exp][\"batchId\"], exp_list[exp][\"experimentId\"])\n if exp_info is not None:\n exp_list[exp].update(exp_info)\n order = queueManager.getOrder()\n running = queueManager.getRunning()\n return render_template(\"ManageExperiments.html\", order=order, experiments=exp_list, running=running)\n\n\n@ app.route(\"/pauseExp\", methods=[\"POST\"])\ndef pauseExp():\n \"\"\"\n Pause the current experiment\n\n :return: ack signal\n :rtype: Dict\n \"\"\"\n queueManager.pause()\n return {\"DONE\": \"YES\"}\n\n\n@ app.route(\"/startExp\", methods=[\"POST\"])\ndef startExp():\n \"\"\"\n Start the next experiment in the queue\n\n :return: ack signal\n :rtype: Dict\n \"\"\"\n print(queueManager.getRunning())\n if queueManager.getRunning() == None:\n queueManager.next()\n return {\"DONE\": \"YES\"}\n\n\n@ app.route(\"/deleteExp\", methods=[\"POST\"])\ndef deleteExp():\n \"\"\"\n Delete the selected experiment from the queue\n\n :return: ack signal\n :rtype: Dict\n \"\"\"\n data = request.json['data']\n print(queueManager.getRunning())\n if queueManager.getRunning() is not None and queueManager.getRunning() == data:\n queueManager.pause()\n queueManager.deleteFromOrder(data)\n else:\n queueManager.deleteFromOrder(data)\n return {\"DONE\": \"YES\"}\n\n\n@ app.route(\"/updateOrder\", methods=[\"POST\"])\ndef updateOrder():\n \"\"\"\n Update the order of the experiments in the queue\n\n :return: ack signal\n :rtype: Dict\n \"\"\"\n data = request.json['data']\n queueManager.editOrder(data)\n return {\"DONE\": \"YES\"}\n\n\n@ app.route(\"/getDocPreview\", methods=[\"POST\"])\ndef getDocPreview():\n \"\"\"\n Returns the first 40 words of the selected document\n\n :return: first 40 words of the document\n :rtype: Dict\n \"\"\"\n data = request.json['data']\n return json.dumps({\"doc\": 
fs.getDocPreview(data[\"dataset\"], int(data[\"document\"]))})\n\n\n@ app.route('/SingleExperiment/<batch>/<exp_id>')\ndef SingleExperiment(batch=\"\", exp_id=\"\"):\n \"\"\"\n Serve the single experiment page\n\n :return: template\n :rtype: render template\n \"\"\"\n models = defaults.model_hyperparameters\n output = queueManager.getModel(batch, exp_id, 0, 0)\n global_info = queueManager.getExperimentInfo(batch, exp_id)\n iter_info = queueManager.getExperimentIterationInfo(batch, exp_id, 0)\n exp_info = queueManager.getExperiment(batch, exp_id)\n exp_ids = queueManager.getAllExpIds()\n vocabulary_path = os.path.join(exp_info[\"path\"],\n exp_info[\"experimentId\"],\n \"models\",\n \"vocabulary.json\")\n vocabulary = fs.getVocabulary(vocabulary_path)\n\n return render_template(\"SingleExperiment.html\", batchName=batch, experimentName=exp_id,\n output=output, globalInfo=global_info, iterationInfo=iter_info,\n expInfo=exp_info, expIds=exp_ids, datasetMetadata=fs.getDatasetMetadata(\n exp_info[\"dataset\"]), vocabulary=vocabulary, models=models)\n\n\n@ app.route(\"/getIterationData\", methods=[\"POST\"])\ndef getIterationData():\n \"\"\"\n Return data of a single iteration and model run of an experiment\n\n :return: data of a single iteration and model run of an experiment\n :rtype: Dict\n \"\"\"\n data = request.json['data']\n output = queueManager.getModel(data[\"batchId\"], data[\"experimentId\"],\n int(data[\"iteration\"]), data[\"model_run\"])\n iter_info = queueManager.getExperimentIterationInfo(data[\"batchId\"], data[\"experimentId\"],\n int(data[\"iteration\"]))\n return {\"iterInfo\": iter_info, \"output\": output}\n\n\ndef typed(value):\n \"\"\"\n Handles typing of data\n\n :param value: value to cast\n :type value: *\n\n :raises ValueError: cannot cast data\n\n :return: data with the right type\n :rtype: *\n \"\"\"\n try:\n t = int(value)\n return t\n except ValueError:\n try:\n t = float(value)\n return t\n except ValueError:\n return value\n\n\ndef shutdown_server():\n \"\"\"\n Perform server shutdown\n\n :raise RuntimeError: wrong server environment used\n \"\"\"\n func = request.environ.get('werkzeug.server.shutdown')\n if func is None:\n raise RuntimeError('Not running with the Werkzeug Server')\n func()\n\n\nif __name__ == '__main__':\n \"\"\"\n Initialize the server\n \"\"\"\n from octis.dashboard.queueManager import QueueManager\n\n queueManager = QueueManager()\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--port\", type=int, help=\"port\", default=5000)\n parser.add_argument(\"--host\", type=str, help=\"host\", default='localhost')\n\n args = parser.parse_args()\n\n url = 'http://' + str(args.host) + ':' + str(args.port)\n webbrowser.open_new(url)\n app.run(port=args.port)\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
jensengroup/elementary_step_om
[ "7ae7e5226f6be1f3ace3e3886a0284c4f8923ee9" ]
[ "elementary_step_om/io/io_gaussian.py" ]
[ "import numpy as np\n\ndef read_gaussian_out(content, property='energy'):\n \"\"\"Reads gaussian output file\n \n - quantity = 'structure' - final structure form output.\n - quantity = 'atomic_numbers' - atmoic numbers\n - quantity = 'energy' - final energy from output.\n - quantity = 'frequencies' - frequencies if freq is called\n - quantity = 'intensities' - freqency intensities\n - quantity = 'normal_coordinates' - freqency normal coordinates\n \"\"\"\n \n if property == 'structure':\n return read_structures(content)\n\n if property == 'irc_structure':\n return read_irc_structures(content)\n \n elif property == 'atomic_numbers':\n return read_atomic_numbers(content)\n\n elif property == 'energy':\n return read_energy(content)\n \n elif property == 'energy_external':\n return read_energy_external(content)\n \n elif property == 'frequencies':\n return read_frequencies(content)\n\n elif property == 'intensities':\n return read_intensities(content)\n \n elif property == 'normal_coordinates':\n return read_normal_coordinates(content)\n\n elif property == 'converged':\n return read_converged(content)\n\n\ndef read_energy(content):\n \"\"\"Read electronic energy \"\"\"\n for lines in content.split(\"\\n\"): \n if \"E(\" in lines:\n energy = float(lines.split()[4])\n return energy\n\n\ndef read_energy_external(content):\n \"\"\" Read energy from external \"\"\"\n for line in content.split(\"\\n\"):\n if \"Zero-point correction=\" in line:\n break\n if \"Energy=\" in line and not \"Predicted change in Energy\" in line:\n energy = float(line.split()[1])\n return energy\n\n\ndef read_structures(content):\n \"\"\"Read optimised structure from content. \"\"\"\n\n # only interested in opt structure, hence -1.\n temp_items = content.split('Standard orientation')[1:] \n \n for item_i in temp_items:\n lines = [ line for line in item_i.split('\\n') if len(line) > 0]\n\n #first 5 lines are headers\n del lines[:5]\n \n atom_positions = [] \n for line in lines:\n line = line.strip() \n \n #if only - in line it is the end\n if set(line).issubset(set(['-', ' '])):\n break\n \n tmp_line = line.split()\n if not len(tmp_line) == 6:\n raise RuntimeError('Length of line does not match structure!')\n \n # read atoms and positions:\n try:\n atom_position = list(map(float, tmp_line[3:]))\n except:\n raise ValueError('Expected a line with three integers and three floats.')\n \n atom_positions.append(atom_position)\n \n return np.asarray(atom_positions, dtype=float)\n\n\ndef read_irc_structures(content):\n \"\"\" \"\"\"\n # only interested in opt structure, hence -1.\n temp_items = content.split('Input orientation:')[1:] \n \n for item_i in temp_items:\n lines = [ line for line in item_i.split('\\n') if len(line) > 0]\n\n #first 5 lines are headers\n del lines[:5]\n \n atom_positions = [] \n for line in lines:\n line = line.strip() \n \n #if only - in line it is the end\n if set(line).issubset(set(['-', ' '])):\n break\n \n tmp_line = line.split()\n if not len(tmp_line) == 6:\n raise RuntimeError('Length of line does not match structure!')\n \n # read atoms and positions:\n try:\n atom_position = list(map(float, tmp_line[3:]))\n except:\n raise ValueError('Expected a line with three integers and three floats.')\n \n atom_positions.append(atom_position)\n \n return np.asarray(atom_positions, dtype=float)\n\n\ndef read_atomic_numbers(content):\n \"\"\"Read optimised structure from content. 
\"\"\"\n\n # only interested in opt structure, hence -1.\n temp_items = content.split('Standard orientation')[1:] \n \n for item_i in temp_items:\n lines = [ line for line in item_i.split('\\n') if len(line) > 0]\n \n #first 5 lines are headers\n del lines[:5]\n \n atom_nums = []\n\n for line in lines:\n line = line.strip() \n \n #if only - in line it is the end\n if set(line).issubset(set(['-', ' '])):\n break\n \n tmp_line = line.split()\n if not len(tmp_line) == 6:\n raise RuntimeError('Length of line does not match structure!')\n \n atom_n = int(tmp_line[1])\n \n atom_nums.append(atom_n)\n \n return atom_nums\n\n\ndef read_frequencies(content):\n \"\"\"Read frequencies and IR intensities\"\"\"\n \n frequencies = []\n\n freq_block = content.split('and normal coordinates:')[-1]\n freq_block = [ line for line in freq_block.split('\\n') if len(line) > 0]\n \n for line in freq_block:\n line = line.strip() \n\n #if only - in line it is the end\n if set(line).issubset(set('-')):\n break\n \n if 'Frequencies' in line:\n frequencies += list(map(float, line.split()[2:]))\n \n return frequencies\n\n\ndef read_intensities(content):\n \"\"\"Read frequencies and IR intensities\"\"\"\n \n intensities = []\n\n freq_block = content.split('and normal coordinates:')[-1]\n freq_block = [ line for line in freq_block.split('\\n') if len(line) > 0]\n \n for line in freq_block:\n line = line.strip() \n\n #if only - in line it is the end\n if set(line).issubset(set('-')):\n break\n \n if 'IR Inten' in line:\n intensities += list(map(float, line.split()[3:]))\n \n return intensities\n\ndef read_normal_coordinates(content):\n \"\"\"Read normal coordinates from frequency calculation.\"\"\"\n \n normal_coordinates = []\n \n # two freq blocks, but only need last one.\n freq_block = content.split('and normal coordinates:')[-1] \n temp_freq_blocks = freq_block.split('Frequencies --')[1:]\n\n for block in temp_freq_blocks:\n lines = [ line for line in block.split('\\n') if len(line) > 0]\n \n #first 5 lines are headers\n del lines[:5]\n \n # 3 normal coordinates per line. \n freq1_coords = []\n freq2_coords = []\n freq3_coords = []\n\n for line in lines:\n line = line.strip().split()\n \n # if not at least 3 normal coords (min len 9) then end.\n if len(line) < 5:\n break\n\n freq1_coords.append(line[2:5])\n freq2_coords.append(line[5:8])\n freq3_coords.append(line[8:11])\n \n normal_coordinates += [freq1_coords, freq2_coords, freq3_coords]\n \n return normal_coordinates\n\n\ndef read_converged(content):\n \"\"\"Check if calculation terminated correctly\"\"\"\n if \"Normal termination of Gaussian\" in content.strip().split(\"\\n\")[-1]:\n return True\n return False\n\n\nif __name__ == \"__main__\":\n import sys\n\n with open(sys.argv[1], 'r') as t:\n content = t.read()\n \n print(read_irc_structures(content))" ]
[ [ "numpy.asarray" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
smartguo/mars
[ "5fdd6e2d520fcdc3b7441379e0abaf0e07c6212a" ]
[ "mars/learn/cluster/_k_means_init.py" ]
[ "# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nfrom ... import opcodes\nfrom ... import tensor as mt\nfrom ...operands import OutputType, OperandStage\nfrom ...serialize import KeyField, Int32Field, TupleField\nfrom ...tensor.array_utils import as_same_device, device\nfrom ...tensor.core import TensorOrder\nfrom ...tensor.random.core import _on_serialize_random_state, \\\n _on_deserialize_random_state\nfrom ...tiles import TilesError\nfrom ...utils import recursive_tile, check_chunks_unknown_shape\nfrom ..metrics import euclidean_distances\nfrom ..operands import LearnOperand, LearnOperandMixin\n\n\ndef _kmeans_plus_plus_init(X,\n x_squared_norms,\n random_state,\n n_clusters: int,\n n_local_trials: int = None):\n n_samples, n_features = X.shape\n\n centers = mt.empty((n_clusters, n_features), dtype=X.dtype)\n\n assert x_squared_norms is not None, 'x_squared_norms None in _k_init'\n\n # Set the number of local seeding trials if none is given\n if n_local_trials is None:\n # This is what Arthur/Vassilvitskii tried, but did not report\n # specific results for other than mentioning in the conclusion\n # that it helped.\n n_local_trials = 2 + int(np.log(n_clusters))\n\n # Pick first center randomly\n center_id = random_state.randint(n_samples)\n if X.issparse(): # pragma: no cover\n centers[0] = X[center_id].todense()\n else:\n centers[0] = X[center_id]\n\n # Initialize list of closest distances and calculate current potential\n closest_dist_sq = euclidean_distances(\n centers[0, mt.newaxis], X, Y_norm_squared=x_squared_norms,\n squared=True)\n current_pot = closest_dist_sq.sum()\n\n # Pick the remaining n_clusters-1 points\n for c in range(1, n_clusters):\n # Choose center candidates by sampling with probability proportional\n # to the squared distance to the closest existing center\n rand_vals = random_state.random_sample(n_local_trials) * current_pot\n candidate_ids = mt.searchsorted(closest_dist_sq.cumsum(),\n rand_vals)\n # XXX: numerical imprecision can result in a candidate_id out of range\n candidate_ids = mt.clip(candidate_ids, None, closest_dist_sq.size - 1)\n\n # Compute distances to center candidates\n distance_to_candidates = euclidean_distances(\n X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)\n\n # update closest distances squared and potential for each candidate\n distance_to_candidates = mt.minimum(closest_dist_sq, distance_to_candidates)\n\n candidates_pot = distance_to_candidates.sum(axis=1)\n\n # Decide which candidate is the best\n best_candidate = mt.argmin(candidates_pot)\n current_pot = candidates_pot[best_candidate]\n closest_dist_sq = distance_to_candidates[best_candidate]\n best_candidate = candidate_ids[best_candidate]\n\n # Permanently add best center candidate found in local tries\n if X.issparse(): # pragma: no cover\n c_center = X[best_candidate].todense()\n else:\n c_center = X[best_candidate]\n\n centers[c] = c_center\n\n return centers\n\n\nclass 
KMeansPlusPlusInit(LearnOperand, LearnOperandMixin):\n _op_type_ = opcodes.KMEANS_PLUS_PLUS_INIT\n\n _x = KeyField('x')\n _n_clusters = Int32Field('n_clusters')\n _x_squared_norms = KeyField('x_squared_norms')\n _state = TupleField('state', on_serialize=_on_serialize_random_state,\n on_deserialize=_on_deserialize_random_state)\n _n_local_trials = Int32Field('n_local_trials')\n\n def __init__(self, x=None, n_clusters=None, x_squared_norms=None,\n state=None, n_local_trials=None, output_types=None, **kw):\n super().__init__(_x=x, _n_clusters=n_clusters, _x_squared_norms=x_squared_norms,\n _state=state, _n_local_trials=n_local_trials,\n _output_types=output_types, **kw)\n if self._output_types is None:\n self._output_types = [OutputType.tensor]\n\n @property\n def x(self):\n return self._x\n\n @property\n def n_clusters(self):\n return self._n_clusters\n\n @property\n def x_squared_norms(self):\n return self._x_squared_norms\n\n @property\n def state(self):\n return self._state\n\n @property\n def n_local_trials(self):\n return self._n_local_trials\n\n def _set_inputs(self, inputs):\n super()._set_inputs(inputs)\n self._x = self._inputs[0]\n self._x_squared_norms = self._inputs[-1]\n\n def __call__(self):\n inputs = [self._x, self._x_squared_norms]\n kw = {\n 'shape': (self._n_clusters, self._x.shape[1]),\n 'dtype': self._x.dtype,\n 'order': TensorOrder.C_ORDER\n }\n return self.new_tileable(inputs, kws=[kw])\n\n @classmethod\n def _tile_one_chunk(cls, op: \"KMeansPlusPlusInit\"):\n out = op.outputs[0]\n\n chunk_op = op.copy().reset_key()\n chunk_kw = out.params.copy()\n chunk_kw['index'] = (0, 0)\n chunk_inputs = [op.x.chunks[0], op.x_squared_norms.chunks[0]]\n chunk = chunk_op.new_chunk(chunk_inputs, kws=[chunk_kw])\n\n kw = out.params\n kw['chunks'] = [chunk]\n kw['nsplits'] = tuple((s,) for s in out.shape)\n new_op = op.copy()\n return new_op.new_tileables(op.inputs, kws=[kw])\n\n @classmethod\n def tile(cls, op: \"KMeansPlusPlusInit\"):\n if len(op.x.chunks) == 1:\n assert len(op.x_squared_norms.chunks) == 1\n return cls._tile_one_chunk(op)\n else:\n return cls._tile_k_init(op)\n\n @classmethod\n def _tile_k_init(cls, op: \"KMeansPlusPlusInit\"):\n X = op.x\n n_clusters = op.n_clusters\n x_squared_norms = op.x_squared_norms\n random_state = op.state\n n_local_trials = op.n_local_trials\n\n centers = _kmeans_plus_plus_init(X, x_squared_norms, random_state,\n n_clusters, n_local_trials)\n return recursive_tile(centers)\n\n @classmethod\n def execute(cls, ctx, op: \"KMeansPlusPlusInit\"):\n try:\n from sklearn.cluster._kmeans import _kmeans_plusplus\n except ImportError: # pragma: no cover\n try:\n from sklearn.cluster._kmeans import _k_init\n except ImportError:\n from sklearn.cluster.k_means_ import _k_init\n\n def _kmeans_plusplus(*args, **kwargs):\n return _k_init(*args, **kwargs), None\n\n (x, x_squared_norms), device_id, _ = as_same_device(\n [ctx[inp.key] for inp in op.inputs], device=op.device, ret_extra=True)\n\n with device(device_id):\n ctx[op.outputs[0].key] = _kmeans_plusplus(\n x, op.n_clusters, x_squared_norms=x_squared_norms, random_state=op.state,\n n_local_trials=op.n_local_trials)[0]\n\n\n###############################################################################\n# Initialization heuristic\n\n\ndef _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None):\n \"\"\"Init n_clusters seeds according to k-means++\n\n Parameters\n ----------\n X : array or sparse matrix, shape (n_samples, n_features)\n The data to pick seeds for. 
To avoid memory copy, the input data\n should be double precision (dtype=np.float64).\n\n n_clusters : integer\n The number of seeds to choose\n\n x_squared_norms : array, shape (n_samples,)\n Squared Euclidean norm of each data point.\n\n random_state : int, RandomState instance\n The generator used to initialize the centers. Use an int to make the\n randomness deterministic.\n See :term:`Glossary <random_state>`.\n\n n_local_trials : integer, optional\n The number of seeding trials for each center (except the first),\n of which the one reducing inertia the most is greedily chosen.\n Set to None to make the number of trials depend logarithmically\n on the number of seeds (2+log(k)); this is the default.\n\n Notes\n -----\n Selects initial cluster centers for k-mean clustering in a smart way\n to speed up convergence. see: Arthur, D. and Vassilvitskii, S.\n \"k-means++: the advantages of careful seeding\". ACM-SIAM symposium\n on Discrete algorithms. 2007\n\n Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip,\n which is the implementation used in the aforementioned paper.\n \"\"\"\n op = KMeansPlusPlusInit(x=X, n_clusters=n_clusters, x_squared_norms=x_squared_norms,\n state=random_state, n_local_trials=n_local_trials)\n return op()\n\n\nclass KMeansScalablePlusPlusInit(LearnOperand, LearnOperandMixin):\n _op_type_ = opcodes.KMEANS_SCALABLE_PLUS_PLUS_INIT\n\n _x = KeyField('x')\n _n_clusters = Int32Field('n_clusters')\n _x_squared_norms = KeyField('x_squared_norms')\n _state = TupleField('state', on_serialize=_on_serialize_random_state,\n on_deserialize=_on_deserialize_random_state)\n _init_iter = Int32Field('init_iter')\n _oversampling_factor = Int32Field('oversampling_factor')\n\n def __init__(self, x=None, n_clusters=None, x_squared_norms=None,\n state=None, init_iter=None, oversampling_factor=None,\n output_types=None, stage=None, **kw):\n super().__init__(_x=x, _n_clusters=n_clusters, _x_squared_norms=x_squared_norms,\n _state=state, _init_iter=init_iter,\n _oversampling_factor=oversampling_factor,\n _stage=stage, _output_types=output_types, **kw)\n if self._output_types is None:\n self._output_types = [OutputType.tensor]\n\n @property\n def x(self):\n return self._x\n\n @property\n def n_clusters(self):\n return self._n_clusters\n\n @property\n def x_squared_norms(self):\n return self._x_squared_norms\n\n @property\n def state(self):\n return self._state\n\n @property\n def init_iter(self):\n return self._init_iter\n\n @property\n def oversampling_factor(self):\n return self._oversampling_factor\n\n def _set_inputs(self, inputs):\n super()._set_inputs(inputs)\n if self._x is not None:\n self._x = self._inputs[0]\n if self._x_squared_norms is not None:\n self._x_squared_norms = self._inputs[-1]\n\n def __call__(self):\n inputs = [self._x, self._x_squared_norms]\n kw = {\n 'shape': (self._n_clusters, self._x.shape[1]),\n 'dtype': self._x.dtype,\n 'order': TensorOrder.C_ORDER\n }\n return self.new_tileable(inputs, kws=[kw])\n\n @classmethod\n def tile(cls, op: \"KMeansScalablePlusPlusInit\"):\n check_chunks_unknown_shape(op.inputs, TilesError)\n\n x = mt.tensor(op.x)\n x_squared_norms = mt.atleast_2d(op.x_squared_norms)\n out = op.outputs[0]\n\n random_state = op.state\n rs = mt.random.RandomState.from_numpy(random_state)\n\n n_samples, n_features = x.shape\n n_clusters = op.n_clusters\n\n # step 1, sample a centroid\n centers = x[random_state.randint(n_samples, size=1)]\n\n for _ in range(op.init_iter):\n distances = euclidean_distances(\n x, centers, 
X_norm_squared=x_squared_norms, squared=True)\n\n # calculate the cost of data with respect to current centers\n cost = mt.sum(mt.min(distances, axis=1))\n\n # calculate the distribution to sample new centers\n distribution = mt.full(len(distances), 1 / len(distances))\n mt.true_divide(mt.min(distances, axis=1), cost,\n where=cost != 0, out=distribution)\n\n # pick new centers\n new_centers_size = op.oversampling_factor * n_clusters\n new_centers = x[rs.choice(n_samples, new_centers_size, p=distribution)]\n\n centers = mt.concatenate([centers, new_centers])\n\n # rechunk centers into one chunk\n centers = recursive_tile(centers).rechunk(centers.shape)\n\n distances = recursive_tile(euclidean_distances(\n x, centers, X_norm_squared=x_squared_norms, squared=True))\n\n map_index_to_chunks = {}\n # calculate weight for each chunk\n for c in distances.chunks:\n map_chunk_op = KMeansScalablePlusPlusInit(stage=OperandStage.map)\n map_chunk_kw = {\n 'shape': (len(centers),),\n 'dtype': np.dtype(np.int64),\n 'order': TensorOrder.C_ORDER,\n 'index': c.index\n }\n map_chunk = map_chunk_op.new_chunk([c], kws=[map_chunk_kw])\n map_index_to_chunks[c.index] = map_chunk\n\n combine_chunks = []\n for i in range(distances.chunk_shape[0]):\n map_chunks = [map_index_to_chunks[i, j]\n for j in range(distances.chunk_shape[1])]\n combine_chunk_op = KMeansScalablePlusPlusInit(stage=OperandStage.combine)\n combine_chunk_kw = {\n 'shape': (len(centers),),\n 'dtype': np.dtype(np.int64),\n 'order': TensorOrder.C_ORDER,\n 'index': (i,)\n }\n combine_chunk = combine_chunk_op.new_chunk(\n map_chunks, kws=[combine_chunk_kw])\n combine_chunks.append(combine_chunk)\n\n reduce_chunk_op = KMeansScalablePlusPlusInit(n_clusters=op.n_clusters,\n state=random_state,\n stage=OperandStage.reduce)\n reduce_chunk_kw = out.params\n reduce_chunk_kw['index'] = (0, 0)\n reduce_chunk = reduce_chunk_op.new_chunk([centers.chunks[0]] + combine_chunks,\n kws=[reduce_chunk_kw])\n\n new_op = op.copy()\n kw = out.params\n kw['chunks'] = [reduce_chunk]\n kw['nsplits'] = tuple((s,) for s in out.shape)\n return new_op.new_tileables(op.inputs, kws=[kw])\n\n @classmethod\n def _execute_map(cls, ctx, op: \"KMeansScalablePlusPlusInit\"):\n distances = ctx[op.inputs[0].key]\n min_distance_ids = np.argmin(distances, axis=1)\n min_distances = distances[range(len(distances)), min_distance_ids]\n ctx[op.outputs[0].key] = (min_distances, min_distance_ids)\n\n @classmethod\n def _execute_combine(cls, ctx, op: \"KMeansScalablePlusPlusInit\"):\n out = op.outputs[0]\n all_distances, all_min_distance_ids = tuple(zip(*(ctx[inp.key] for inp in op.inputs)))\n distances = np.stack(all_distances).T\n min_distance_ids = np.stack(all_min_distance_ids).T\n\n combined_min_distance_id = np.argmin(distances, axis=1)\n min_distance_ids = min_distance_ids[range(len(distances)), combined_min_distance_id]\n count = np.bincount(min_distance_ids)\n result = np.zeros(out.shape[0], dtype=np.int64)\n result[:len(count)] = count\n ctx[out.key] = result\n\n @classmethod\n def _execute_reduce(cls, ctx, op: \"KMeansScalablePlusPlusInit\"):\n from sklearn.cluster import KMeans\n\n inputs = [ctx[inp.key] for inp in op.inputs]\n\n count = np.zeros(inputs[1].shape[0], dtype=np.int64)\n for inp in inputs[1:]:\n count += inp\n weight = count / count.sum()\n\n centers = inputs[0]\n\n kmeans = KMeans(n_clusters=op.n_clusters, n_init=1,\n random_state=op.state)\n kmeans.fit(centers, sample_weight=weight)\n ctx[op.outputs[0].key] = kmeans.cluster_centers_\n\n @classmethod\n def execute(cls, ctx, 
op: \"KMeansScalablePlusPlusInit\"):\n if op.stage == OperandStage.map:\n return cls._execute_map(ctx, op)\n elif op.stage == OperandStage.combine:\n return cls._execute_combine(ctx, op)\n else:\n return cls._execute_reduce(ctx, op)\n\n\ndef _scalable_k_init(X, n_clusters, x_squared_norms, random_state,\n oversampling_factor=2, init_iter=5):\n op = KMeansScalablePlusPlusInit(x=X, n_clusters=n_clusters,\n x_squared_norms=x_squared_norms,\n state=random_state, init_iter=init_iter,\n oversampling_factor=oversampling_factor)\n return op()\n" ]
[ [ "numpy.log", "sklearn.cluster.KMeans", "numpy.stack", "numpy.dtype", "sklearn.cluster.k_means_._k_init", "numpy.argmin", "numpy.bincount", "numpy.zeros", "sklearn.cluster._kmeans._kmeans_plusplus" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sammosummo/sammosummo.github.io
[ "afecf92aadccf5a0ee1eda835e32a8dbbff35c7c", "afecf92aadccf5a0ee1eda835e32a8dbbff35c7c" ]
[ "assets/scripts/bsem2.py", "assets/scripts/neals-funnel-a.py" ]
[ "\"\"\"Example of Bayesian confirmatory factor analysis in PyMC3 in which the latent\nvariables are estimated.\n\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport pymc3 as pm\nimport theano.tensor as tt\nimport matplotlib.pyplot as plt\nfrom os.path import exists\n\nfrom matplotlib import rcParams\nfrom pymc3.math import matrix_dot, matrix_inverse\nfrom tabulate import tabulate\n\n\ndef bsem(\n items,\n factors,\n paths,\n beta=0,\n nu_sd=2.5,\n alpha_sd=2.5,\n d_beta=2.5,\n corr_items=False,\n corr_factors=False,\n g_eta=100,\n l_eta=1,\n beta_beta=1,\n):\n r\"\"\"Constructs Bayesian SEM.\n\n Args:\n items (np.array): Array of item data.\n factors (np.array): Factor design.\n paths (np.array): Array of directed factor paths.\n beta (:obj:`float` or `'estimate'`, optional): Standard deviation of normal\n prior on cross loadings. If `'estimate'`, beta is estimated from the data.\n nu_sd (:obj:`float`, optional): Standard deviation of normal prior on item\n intercepts.\n alpha_sd (:obj:`float`, optional): Standard deviation of normal prior on factor\n intercepts.\n d_beta (:obj:`float`, optional): Scale parameter of half-Cauchy prior on factor\n standard deviation.\n corr_factors (:obj:`bool`, optional): Allow correlated factors.\n corr_items (:obj:`bool`, optional): Allow correlated items.\n g_eta (:obj:`float`, optional): Shape parameter of LKJ prior on residual item\n correlation matrix.\n l_eta (:obj:`float`, optional): Shape parameter of LKJ prior on factor\n correlation matrix.\n beta_beta (:obj:`float`, optional): Beta parameter of beta prior on beta.\n\n Returns:\n\n None: Places model in context.\n\n \"\"\"\n # get numbers of cases, items, and factors\n n, p = items.shape\n p_, m = factors.shape\n assert p == p_, \"Mismatch between data and factor-loading matrices\"\n assert paths.shape == (m, m), \"Paths matrix has wrong shape\"\n I = tt.eye(m, m)\n\n # place priors on item and factor intercepts\n nu = pm.Normal(name=r\"$\\nu$\", mu=0, sd=nu_sd, shape=p, testval=items.mean(axis=0))\n alpha = pm.Normal(name=r\"$\\alpha$\", mu=0, sd=alpha_sd, shape=m, testval=np.zeros(m))\n\n # place priors on unscaled factor loadings\n Phi = pm.Normal(name=r\"$\\Phi$\", mu=0, sd=1, shape=factors.shape, testval=factors)\n\n # place priors on paths\n B = tt.zeros(paths.shape)\n npths = np.sum(paths, axis=None)\n print(npths)\n if npths > 0:\n b = pm.Normal(name=r\"$b$\", mu=0, sd=1, shape=npths, testval=np.ones(npths))\n # create the paths matrix\n k = 0\n for i in range(m):\n for j in range(m):\n if paths[i, j] == 1:\n B = tt.set_subtensor(B[i, j], b[k])\n k += 1\n Gamma = pm.Deterministic(\"$\\Gamma$\", B)\n\n # create masking matrix for factor loadings\n if isinstance(beta, str):\n assert beta == \"estimate\", f\"Don't know what to do with '{beta}'\"\n beta = pm.Beta(name=r\"$\\beta$\", alpha=1, beta=beta_beta, testval=0.1)\n M = (1 - np.asarray(factors)) * beta + np.asarray(factors)\n\n # create scaled factor loadings\n Lambda = pm.Deterministic(r\"$\\Lambda$\", Phi * M)\n\n # determine item means\n mu = nu + matrix_dot(Lambda, alpha)\n\n # place priors on item standard deviations\n D = pm.HalfCauchy(name=r\"$D$\", beta=d_beta, shape=p, testval=items.std(axis=0))\n\n # place priors on item correlations\n f = pm.Lognormal.dist(sd=0.25)\n if not corr_items:\n Omega = np.eye(p)\n else:\n G = pm.LKJCholeskyCov(name=r\"$G$\", eta=g_eta, n=p, sd_dist=f)\n ch1 = pm.expand_packed_triangular(p, G, lower=True)\n K = tt.dot(ch1, ch1.T)\n sd1 = tt.sqrt(tt.diag(K))\n Omega = 
pm.Deterministic(r\"$\\Omega$\", K / sd1[:, None] / sd1[None, :])\n\n # determine residual item variances and covariances\n Theta = pm.Deterministic(r\"$\\Theta$\", D[None, :] * Omega * D[:, None])\n\n # place priors on factor correlations\n if not corr_factors:\n Psi = np.eye(m)\n else:\n L = pm.LKJCholeskyCov(name=r\"$L$\", eta=l_eta, n=m, sd_dist=f)\n ch = pm.expand_packed_triangular(m, L, lower=True)\n Gamma = tt.dot(ch, ch.T)\n sd = tt.sqrt(tt.diag(Gamma))\n Psi = pm.Deterministic(r\"$\\Psi$\", Gamma / sd[:, None] / sd[None, :])\n\n # determine variances and covariances of items\n A = matrix_inverse(I - Gamma)\n C = matrix_inverse(I - Gamma.T)\n Sigma = matrix_dot(Lambda, A, Psi, C, Lambda.T) + Theta\n\n # place priors on observations\n pm.MvNormal(name=\"$Y$\", mu=mu, cov=Sigma, observed=items, shape=items.shape)\n\n\ndef main():\n\n # load the data\n df = pd.read_csv(\"../../assets/data/HS.csv\", index_col=0)\n\n # define items to keep\n item_names = [\n \"visual\",\n \"cubes\",\n \"paper\",\n \"flags\",\n \"general\",\n \"paragrap\",\n \"sentence\",\n \"wordc\",\n \"wordm\",\n \"addition\",\n \"code\",\n \"counting\",\n \"straight\",\n \"wordr\",\n \"numberr\",\n \"figurer\",\n \"object\",\n \"numberf\",\n \"figurew\",\n ]\n\n # define the factor structure\n factors = np.array(\n [\n [1, 0, 0, 0, 0],\n [1, 0, 0, 0, 0],\n [1, 0, 0, 0, 0],\n [1, 0, 0, 0, 0],\n [0, 1, 0, 0, 0],\n [0, 1, 0, 0, 0],\n [0, 1, 0, 0, 0],\n [0, 1, 0, 0, 0],\n [0, 1, 0, 0, 0],\n [0, 0, 1, 0, 0],\n [0, 0, 1, 0, 0],\n [0, 0, 1, 0, 0],\n [0, 0, 1, 0, 0],\n [0, 0, 0, 1, 0],\n [0, 0, 0, 1, 0],\n [0, 0, 0, 1, 0],\n [0, 0, 0, 1, 0],\n [0, 0, 0, 1, 0],\n [0, 0, 0, 1, 0],\n ]\n )\n\n paths = np.array(\n [\n [0, 0, 0, 0, 1],\n [0, 0, 0, 0, 1],\n [0, 0, 0, 0, 1],\n [0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0],\n ]\n )\n\n # iterate over the two schools\n for school, sdf in df.groupby(\"school\"):\n\n # define the path to save results\n f = f\"../data/BSEM examples/{school}\"\n\n # select the 19 commonly used variables\n items = sdf[item_names]\n\n # for numerical convenience, standardize the data\n items = (items - items.mean()) / items.std()\n\n with pm.Model():\n\n # construct the model\n bsem(items, factors, paths)\n\n if not exists(f):\n\n # sample and save\n trace = pm.sample(chains=2) # 19000, tune=1000,\n pm.save_trace(trace, f)\n\n else:\n\n trace = pm.load_trace(f)\n\n pm.traceplot(trace, compact=True)\n rcParams[\"font.size\"] = 14\n plt.savefig(f\"{f}/traceplot.png\")\n\n # create a nice summary table\n loadings = pd.DataFrame(\n trace[r\"$\\Lambda$\"].mean(axis=0).round(3),\n index=[v.title() for v in item_names],\n columns=[\"Spatial\", \"Verbal\", \"Speed\", \"Memory\", \"g\"],\n )\n loadings.to_csv(f\"{f}/loadings.csv\")\n print(tabulate(loadings, tablefmt=\"pipe\", headers=\"keys\"))\n #\n # # correlations = pd.DataFrame(\n # # trace[r\"$\\Psi$\"].mean(axis=0).round(3),\n # # index=[\"Spatial\", \"Verbal\", \"Speed\", \"Memory\", \"g\"],\n # # columns=[\"Spatial\", \"Verbal\", \"Speed\", \"Memory\", \"g\"],\n # # )\n # # correlations.to_csv(f\"{f}/factor_correlations.csv\")\n #\n _paths = pd.DataFrame(\n trace[r\"$\\Gamma$\"].mean(axis=0).round(3),\n index=[\"Spatial\", \"Verbal\", \"Speed\", \"Memory\", \"g\"],\n columns=[\"Spatial\", \"Verbal\", \"Speed\", \"Memory\", \"g\"],\n )\n _paths.to_csv(f\"{f}/factor_paths.csv\")\n print(tabulate(_paths, tablefmt=\"pipe\", headers=\"keys\"))\n #\n # correlations = pd.DataFrame(\n # trace[r\"$\\Omega$\"].mean(axis=0).round(3),\n # index=item_names,\n # 
columns=item_names,\n # )\n # correlations.to_csv(f\"{f}/item_correlations.csv\")\n\n\nif __name__ == \"__main__\":\n main()\n", "\"\"\"Generate data from Neal's funnel distribution.\n\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import rcParams\nfrom scipy.stats import norm\n\n\ndef main():\n\n # set up figure\n fs = rcParams[\"figure.figsize\"]\n rcParams[\"figure.figsize\"] = (fs[0], fs[0] / 2)\n rcParams[\"lines.linewidth\"] = 2\n rcParams[\"font.size\"] = 14\n\n # generate data\n np.random.seed(0)\n k = 9\n n = 10000\n v = norm.rvs(0, 3, n)\n x = norm.rvs(0, np.exp(v / 2), (k, n))\n\n # plot data\n fig, axes = plt.subplots(1, 2, constrained_layout=True)\n ax = axes[0]\n ax.scatter(x[0], v, marker=\".\", alpha=0.05, rasterized=True)\n ax.set_xlim(-20, 20)\n ax.set_ylim(-9, 9)\n ax.set_xlabel(\"$x_0$\")\n ax.set_ylabel(\"$v$\")\n\n # plot analytic log-likelihood\n ax = axes[1]\n r = 500\n x, v = np.meshgrid(np.linspace(-20, 20, r), np.linspace(-9, 9, r))\n logp = norm.logpdf(v, 0, 3) + norm.logpdf(x, 0, np.exp(v / 2))\n ax.imshow(logp, vmin=-7.5, vmax=-2.5, cmap=\"viridis\", origin=\"lower\")\n ax.set_yticks([])\n ax.set_yticklabels([])\n ax.set_xticks(np.linspace(0, 499, 5))\n ax.set_xticklabels(np.linspace(-20, 20, 5).astype(int))\n ax.set_xlabel(\"$x_0$\")\n\n # save\n plt.savefig(\"../images/neals-funnel-a.svg\", bbox_inches=0, transparent=True)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "pandas.read_csv", "numpy.asarray", "numpy.eye", "matplotlib.pyplot.savefig", "numpy.ones", "numpy.array", "numpy.zeros", "numpy.sum" ], [ "numpy.random.seed", "numpy.linspace", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig", "scipy.stats.norm.logpdf", "scipy.stats.norm.rvs", "numpy.exp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
DrJones142/Sepsis2019
[ "c11344e3ee311ff15d9f752de1606b833ebdca3d", "c11344e3ee311ff15d9f752de1606b833ebdca3d" ]
[ "evaluation-2019-master/evaluate_sepsis_score.py", "get_sepsis_score.py" ]
[ "#!/usr/bin/env python\n\n# This file contains functions for evaluating algorithms for the 2019 PhysioNet/\n# CinC Challenge. You can run it as follows:\n#\n# python evaluate_sepsis_score.py labels predictions scores.psv\n#\n# where 'labels' is a directory containing files with labels, 'predictions' is a\n# directory containing files with predictions, and 'scores.psv' (optional) is a\n# collection of scores for the predictions.\n\n################################################################################\n\n# The evaluate_scores function computes a normalized utility score for a cohort\n# of patients along with several traditional scoring metrics.\n#\n# Inputs:\n# 'label_directory' is a directory of pipe-delimited text files containing a\n# binary vector of labels for whether a patient is not septic (0) or septic\n# (1) for each time interval.\n#\n# 'prediction_directory' is a directory of pipe-delimited text files, where\n# the first column of the file gives the predicted probability that the\n# patient is septic at each time, and the second column of the file is a\n# binarized version of this vector. Note that there must be a prediction for\n# every label.\n#\n# Outputs:\n# 'auroc' is the area under the receiver operating characteristic curve\n# (AUROC).\n#\n# 'auprc' is the area under the precision recall curve (AUPRC).\n#\n# 'accuracy' is accuracy.\n#\n# 'f_measure' is F-measure.\n#\n# 'normalized_observed_utility' is a normalized utility-based measure that we\n# created for the Challenge. This score is normalized so that a perfect score\n# is 1 and no positive predictions is 0.\n#\n# Example:\n# Omitted due to length. See the below examples.\n\nimport numpy as np, os, os.path, sys, warnings\n\ndef evaluate_sepsis_score(label_directory, prediction_directory):\n # Set parameters.\n label_header = 'SepsisLabel'\n prediction_header = 'PredictedLabel'\n probability_header = 'PredictedProbability'\n\n dt_early = -12\n dt_optimal = -6\n dt_late = 3\n\n max_u_tp = 1\n min_u_fn = -2\n u_fp = -0.05\n u_tn = 0\n\n # Find label and prediction files.\n label_files = []\n for f in os.listdir(label_directory):\n g = os.path.join(label_directory, f)\n if os.path.isfile(g) and not f.lower().startswith('.') and f.lower().endswith('psv'):\n label_files.append(g)\n label_files = sorted(label_files)\n\n prediction_files = []\n for f in os.listdir(prediction_directory):\n g = os.path.join(prediction_directory, f)\n if os.path.isfile(g) and not f.lower().startswith('.') and f.lower().endswith('psv'):\n prediction_files.append(g)\n prediction_files = sorted(prediction_files)\n\n if len(label_files) != len(prediction_files):\n raise Exception('Numbers of label and prediction files must be the same.')\n\n # Load labels and predictions.\n num_files = len(label_files)\n cohort_labels = []\n cohort_predictions = []\n cohort_probabilities = []\n\n for k in range(num_files):\n labels = load_column(label_files[k], label_header, '|')\n predictions = load_column(prediction_files[k], prediction_header, '|')\n probabilities = load_column(prediction_files[k], probability_header, '|')\n\n # Check labels and predictions for errors.\n if not (len(labels) == len(predictions) and len(predictions) == len(probabilities)):\n raise Exception('Numbers of labels and predictions for a file must be the same.')\n\n num_rows = len(labels)\n\n for i in range(num_rows):\n if labels[i] not in (0, 1):\n raise Exception('Labels must satisfy label == 0 or label == 1.')\n\n if predictions[i] not in (0, 1):\n raise 
Exception('Predictions must satisfy prediction == 0 or prediction == 1.')\n\n if not 0 <= probabilities[i] <= 1:\n warnings.warn('Probabilities do not satisfy 0 <= probability <= 1.')\n\n if 0 < np.sum(predictions) < num_rows:\n min_probability_positive = np.min(probabilities[predictions == 1])\n max_probability_negative = np.max(probabilities[predictions == 0])\n\n if min_probability_positive <= max_probability_negative:\n warnings.warn('Predictions are inconsistent with probabilities, i.e., a positive prediction has a lower (or equal) probability than a negative prediction.')\n\n # Record labels and predictions.\n cohort_labels.append(labels)\n cohort_predictions.append(predictions)\n cohort_probabilities.append(probabilities)\n\n # Compute AUC, accuracy, and F-measure.\n labels = np.concatenate(cohort_labels)\n predictions = np.concatenate(cohort_predictions)\n probabilities = np.concatenate(cohort_probabilities)\n\n auroc, auprc = compute_auc(labels, probabilities)\n accuracy, f_measure = compute_accuracy_f_measure(labels, predictions)\n\n # Compute utility.\n observed_utilities = np.zeros(num_files)\n best_utilities = np.zeros(num_files)\n worst_utilities = np.zeros(num_files)\n inaction_utilities = np.zeros(num_files)\n\n for k in range(num_files):\n labels = cohort_labels[k]\n num_rows = len(labels)\n observed_predictions = cohort_predictions[k]\n best_predictions = np.zeros(num_rows)\n worst_predictions = np.zeros(num_rows)\n inaction_predictions = np.zeros(num_rows)\n\n if np.any(labels):\n t_sepsis = np.argmax(labels) - dt_optimal\n best_predictions[max(0, t_sepsis + dt_early) : min(t_sepsis + dt_late + 1, num_rows)] = 1\n worst_predictions = 1 - best_predictions\n\n observed_utilities[k] = compute_prediction_utility(labels, observed_predictions, dt_early, dt_optimal, dt_late, max_u_tp, min_u_fn, u_fp, u_tn)\n best_utilities[k] = compute_prediction_utility(labels, best_predictions, dt_early, dt_optimal, dt_late, max_u_tp, min_u_fn, u_fp, u_tn)\n worst_utilities[k] = compute_prediction_utility(labels, worst_predictions, dt_early, dt_optimal, dt_late, max_u_tp, min_u_fn, u_fp, u_tn)\n inaction_utilities[k] = compute_prediction_utility(labels, inaction_predictions, dt_early, dt_optimal, dt_late, max_u_tp, min_u_fn, u_fp, u_tn)\n\n unnormalized_observed_utility = np.sum(observed_utilities)\n unnormalized_best_utility = np.sum(best_utilities)\n unnormalized_worst_utility = np.sum(worst_utilities)\n unnormalized_inaction_utility = np.sum(inaction_utilities)\n\n normalized_observed_utility = (unnormalized_observed_utility - unnormalized_inaction_utility) / (unnormalized_best_utility - unnormalized_inaction_utility)\n\n return auroc, auprc, accuracy, f_measure, normalized_observed_utility\n\n# The load_column function loads a column from a table.\n#\n# Inputs:\n# 'filename' is a string containing a filename.\n#\n# 'header' is a string containing a header.\n#\n# Outputs:\n# 'column' is a vector containing a column from the file with the given\n# header.\n#\n# Example:\n# Omitted.\n\ndef load_column(filename, header, delimiter):\n column = []\n with open(filename, 'r') as f:\n for i, l in enumerate(f):\n arrs = l.strip().split(delimiter)\n if i == 0:\n try:\n j = arrs.index(header)\n except:\n raise Exception('{} must contain column with header {} containing numerical entries.'.format(filename, header))\n else:\n if len(arrs[j]):\n column.append(float(arrs[j]))\n return np.array(column)\n\n# The compute_auc function computes AUROC and AUPRC as well as other summary\n# statistics (TP, FP, 
FN, TN, TPR, TNR, PPV, NPV, etc.) that can be exposed\n# from this function.\n#\n# Inputs:\n# 'labels' is a binary vector, where labels[i] == 0 if the patient is not\n# labeled as septic at time i and labels[i] == 1 if the patient is labeled as\n# septic at time i.\n#\n# 'predictions' is a probability vector, where predictions[i] gives the\n# predicted probability that the patient is septic at time i. Note that there\n# must be a prediction for every label, i.e, len(labels) ==\n# len(predictions).\n#\n# Outputs:\n# 'auroc' is a scalar that gives the AUROC of the algorithm using its\n# predicted probabilities, where specificity is interpolated for intermediate\n# sensitivity values.\n#\n# 'auprc' is a scalar that gives the AUPRC of the algorithm using its\n# predicted probabilities, where precision is a piecewise constant function of\n# recall.\n#\n# Example:\n# In [1]: labels = [0, 0, 0, 0, 1, 1]\n# In [2]: predictions = [0.3, 0.4, 0.6, 0.7, 0.8, 0.8]\n# In [3]: auroc, auprc = compute_auc(labels, predictions)\n# In [4]: auroc\n# Out[4]: 1.0\n# In [5]: auprc\n# Out[5]: 1.0\n\ndef compute_auc(labels, predictions, check_errors=True):\n # Check inputs for errors.\n if check_errors:\n if len(predictions) != len(labels):\n raise Exception('Numbers of predictions and labels must be the same.')\n\n for label in labels:\n if not label in (0, 1):\n raise Exception('Labels must satisfy label == 0 or label == 1.')\n\n for prediction in predictions:\n if not 0 <= prediction <= 1:\n warnings.warn('Predictions do not satisfy 0 <= prediction <= 1.')\n\n # Find prediction thresholds.\n thresholds = np.unique(predictions)[::-1]\n if thresholds[0] != 1:\n thresholds = np.insert(thresholds, 0, 1)\n if thresholds[-1] == 0:\n thresholds = thresholds[:-1]\n\n n = len(labels)\n m = len(thresholds)\n\n # Populate contingency table across prediction thresholds.\n tp = np.zeros(m)\n fp = np.zeros(m)\n fn = np.zeros(m)\n tn = np.zeros(m)\n\n # Find indices that sort the predicted probabilities from largest to\n # smallest.\n idx = np.argsort(predictions)[::-1]\n\n i = 0\n for j in range(m):\n # Initialize contingency table for j-th prediction threshold.\n if j == 0:\n tp[j] = 0\n fp[j] = 0\n fn[j] = np.sum(labels)\n tn[j] = n - fn[j]\n else:\n tp[j] = tp[j - 1]\n fp[j] = fp[j - 1]\n fn[j] = fn[j - 1]\n tn[j] = tn[j - 1]\n\n # Update contingency table for i-th largest predicted probability.\n while i < n and predictions[idx[i]] >= thresholds[j]:\n if labels[idx[i]]:\n tp[j] += 1\n fn[j] -= 1\n else:\n fp[j] += 1\n tn[j] -= 1\n i += 1\n\n # Summarize contingency table.\n tpr = np.zeros(m)\n tnr = np.zeros(m)\n ppv = np.zeros(m)\n npv = np.zeros(m)\n\n for j in range(m):\n if tp[j] + fn[j]:\n tpr[j] = tp[j] / (tp[j] + fn[j])\n else:\n tpr[j] = 1\n if fp[j] + tn[j]:\n tnr[j] = tn[j] / (fp[j] + tn[j])\n else:\n tnr[j] = 1\n if tp[j] + fp[j]:\n ppv[j] = tp[j] / (tp[j] + fp[j])\n else:\n ppv[j] = 1\n if fn[j] + tn[j]:\n npv[j] = tn[j] / (fn[j] + tn[j])\n else:\n npv[j] = 1\n\n # Compute AUROC as the area under a piecewise linear function with TPR /\n # sensitivity (x-axis) and TNR / specificity (y-axis) and AUPRC as the area\n # under a piecewise constant with TPR / recall (x-axis) and PPV / precision\n # (y-axis).\n auroc = 0\n auprc = 0\n for j in range(m-1):\n auroc += 0.5 * (tpr[j + 1] - tpr[j]) * (tnr[j + 1] + tnr[j])\n auprc += (tpr[j + 1] - tpr[j]) * ppv[j + 1]\n\n return auroc, auprc\n\n# The compute_accuracy_f_measure function computes the accuracy and F-measure\n# for a patient.\n#\n# Inputs:\n# 'labels' is a 
binary vector, where labels[i] == 0 if the patient is not\n# labeled as septic at time i and labels[i] == 1 if the patient is labeled as\n# septic at time i.\n#\n# 'predictions' is a binary vector, where predictions[i] == 0 if the patient\n# is not predicted to be septic at time i and predictions[i] == 1 if the\n# patient is predicted to be septic at time i. Note that there must be a\n# prediction for every label, i.e, len(labels) == len(predictions).\n#\n# Output:\n# 'accuracy' is a scalar that gives the accuracy of the predictions using its\n# binarized predictions.\n#\n# 'f_measure' is a scalar that gives the F-measure of the predictions using its\n# binarized predictions.\n#\n# Example:\n# In [1]: labels = [0, 0, 0, 0, 1, 1]\n# In [2]: predictions = [0, 0, 1, 1, 1, 1]\n# In [3]: accuracy, f_measure = compute_accuracy_f_measure(labels, predictions)\n# In [4]: accuracy\n# Out[4]: 0.666666666667\n# In [5]: f_measure\n# Out[5]: 0.666666666667\n\ndef compute_accuracy_f_measure(labels, predictions, check_errors=True):\n # Check inputs for errors.\n if check_errors:\n if len(predictions) != len(labels):\n raise Exception('Numbers of predictions and labels must be the same.')\n\n for label in labels:\n if not label in (0, 1):\n raise Exception('Labels must satisfy label == 0 or label == 1.')\n\n for prediction in predictions:\n if not prediction in (0, 1):\n raise Exception('Predictions must satisfy prediction == 0 or prediction == 1.')\n\n # Populate contingency table.\n n = len(labels)\n tp = 0\n fp = 0\n fn = 0\n tn = 0\n\n for i in range(n):\n if labels[i] and predictions[i]:\n tp += 1\n elif not labels[i] and predictions[i]:\n fp += 1\n elif labels[i] and not predictions[i]:\n fn += 1\n elif not labels[i] and not predictions[i]:\n tn += 1\n\n # Summarize contingency table.\n if tp + fp + fn + tn:\n accuracy = float(tp + tn) / float(tp + fp + fn + tn)\n else:\n accuracy = 1.0\n\n if 2 * tp + fp + fn:\n f_measure = float(2 * tp) / float(2 * tp + fp + fn)\n else:\n f_measure = 1.0\n\n return accuracy, f_measure\n\n# The compute_prediction_utility function computes the total time-dependent\n# utility for a patient.\n#\n# Inputs:\n# 'labels' is a binary vector, where labels[i] == 0 if the patient is not\n# labeled as septic at time i and labels[i] == 1 if the patient is labeled as\n# septic at time i.\n#\n# 'predictions' is a binary vector, where predictions[i] == 0 if the patient\n# is not predicted to be septic at time i and predictions[i] == 1 if the\n# patient is predicted to be septic at time i. 
Note that there must be a\n# prediction for every label, i.e, len(labels) == len(predictions).\n#\n# Output:\n# 'utility' is a scalar that gives the total time-dependent utility of the\n# algorithm using its binarized predictions.\n#\n# Example:\n# In [1]: labels = [0, 0, 0, 0, 1, 1]\n# In [2]: predictions = [0, 0, 1, 1, 1, 1]\n# In [3]: utility = compute_prediction_utility(labels, predictions)\n# In [4]: utility\n# Out[4]: 3.388888888888889\n\ndef compute_prediction_utility(labels, predictions, dt_early=-12, dt_optimal=-6, dt_late=3.0, max_u_tp=1, min_u_fn=-2, u_fp=-0.05, u_tn=0, check_errors=True):\n # Check inputs for errors.\n if check_errors:\n if len(predictions) != len(labels):\n raise Exception('Numbers of predictions and labels must be the same.')\n\n for label in labels:\n if not label in (0, 1):\n raise Exception('Labels must satisfy label == 0 or label == 1.')\n\n for prediction in predictions:\n if not prediction in (0, 1):\n raise Exception('Predictions must satisfy prediction == 0 or prediction == 1.')\n\n if dt_early >= dt_optimal:\n raise Exception('The earliest beneficial time for predictions must be before the optimal time.')\n\n if dt_optimal >= dt_late:\n raise Exception('The optimal time for predictions must be before the latest beneficial time.')\n\n # Does the patient eventually have sepsis?\n if np.any(labels):\n is_septic = True\n t_sepsis = np.argmax(labels) - dt_optimal\n else:\n is_septic = False\n t_sepsis = float('inf')\n\n n = len(labels)\n\n # Define slopes and intercept points for utility functions of the form\n # u = m * t + b.\n m_1 = float(max_u_tp) / float(dt_optimal - dt_early)\n b_1 = -m_1 * dt_early\n m_2 = float(-max_u_tp) / float(dt_late - dt_optimal)\n b_2 = -m_2 * dt_late\n m_3 = float(min_u_fn) / float(dt_late - dt_optimal)\n b_3 = -m_3 * dt_optimal\n\n # Compare predicted and true conditions.\n u = np.zeros(n)\n for t in range(n):\n if t <= t_sepsis + dt_late:\n # TP\n if is_septic and predictions[t]:\n if t <= t_sepsis + dt_optimal:\n u[t] = max(m_1 * (t - t_sepsis) + b_1, u_fp)\n elif t <= t_sepsis + dt_late:\n u[t] = m_2 * (t - t_sepsis) + b_2\n # FP\n elif not is_septic and predictions[t]:\n u[t] = u_fp\n # FN\n elif is_septic and not predictions[t]:\n if t <= t_sepsis + dt_optimal:\n u[t] = 0\n elif t <= t_sepsis + dt_late:\n u[t] = m_3 * (t - t_sepsis) + b_3\n # TN\n elif not is_septic and not predictions[t]:\n u[t] = u_tn\n\n # Find total utility for patient.\n return np.sum(u)\n\nif __name__ == '__main__':\n auroc, auprc, accuracy, f_measure, utility = evaluate_sepsis_score(sys.argv[1], sys.argv[2])\n\n output_string = 'AUROC|AUPRC|Accuracy|F-measure|Utility\\n{}|{}|{}|{}|{}'.format(auroc, auprc, accuracy, f_measure, utility)\n if len(sys.argv) > 3:\n with open(sys.argv[3], 'w') as f:\n f.write(output_string)\n else:\n print(output_string)\n", "#new version as of June 10 2019\r\nfrom sklearn.decomposition import PCA\r\nfrom tensorflow import keras\r\nimport numpy as np\r\nimport functions\r\n\r\ndef load_sepsis_model():\r\n stacked_train1 = np.load(\"stacked_train1.npy\")\r\n stacked_train2 = np.load(\"stacked_train2.npy\")\r\n stacked_train3 = np.load(\"stacked_train3.npy\")\r\n stacked_train4 = np.load(\"stacked_train4.npy\")\r\n #stack the four saved training blocks once each before fitting the PCA\r\n stacked_train = np.vstack((stacked_train1, stacked_train2))\r\n stacked_train = np.vstack((stacked_train, stacked_train3))\r\n stacked_train = np.vstack((stacked_train, stacked_train4))\r\n \r\n pca = PCA(n_components=10)\r\n 
pca.fit(stacked_train)\r\n \r\n model1 = keras.models.load_model('my_model.h5')\r\n model = (model1, pca)\r\n return(model)\r\n \r\ndef get_sepsis_score(current_data, model):\r\n\r\n model1 = model[0]\r\n pca = model[1]\r\n \r\n test_patient, QSOFA = functions.hour_by_hour(current_data)\r\n \r\n #PCA requires a 2D array. This bit ensures that if it is the first hour, then the patient will have 2D\r\n if test_patient.size == 40:\r\n test_patient = np.vstack((test_patient, test_patient))\r\n QSOFA = np.vstack((QSOFA, QSOFA))\r\n pca_test = pca.transform(test_patient)\r\n pca_test = np.hstack((pca_test, QSOFA))\r\n else:\r\n pca_test = pca.transform(test_patient)\r\n pca_test = np.hstack((pca_test, QSOFA))\r\n \r\n output=model1.predict(pca_test[-2:-1,:])\r\n \r\n if output[-1,1] >= .1:\r\n current_score = output[-1,1]\r\n current_label = 1\r\n else:\r\n current_score = output[-1,1]\r\n current_label = 0\r\n \r\n return(current_score, current_label)\r\n\r\n" ]
[ [ "numpy.unique", "numpy.min", "numpy.concatenate", "numpy.max", "numpy.argmax", "numpy.any", "numpy.insert", "numpy.argsort", "numpy.array", "numpy.zeros", "numpy.sum" ], [ "tensorflow.keras.models.load_model", "numpy.hstack", "numpy.load", "sklearn.decomposition.PCA", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] } ]
emeraldsrs/tensorflow-for-poets-2
[ "355578ac26097e5cee5873ef2bcea165ed4539b3" ]
[ "scripts/label_image.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport sys\nimport time\n\nimport numpy as np\nimport tensorflow as tf\n\ndef load_graph(model_file):\n graph = tf.Graph()\n graph_def = tf.GraphDef()\n\n with open(model_file, \"rb\") as f:\n graph_def.ParseFromString(f.read())\n with graph.as_default():\n tf.import_graph_def(graph_def)\n\n return graph\n\ndef read_tensor_from_image_file(file_name, input_height=299, input_width=299,\n\t\t\t\tinput_mean=0, input_std=255):\n input_name = \"file_reader\"\n output_name = \"normalized\"\n file_reader = tf.read_file(file_name, input_name)\n if file_name.endswith(\".png\"):\n image_reader = tf.image.decode_png(file_reader, channels = 3,\n name='png_reader')\n elif file_name.endswith(\".gif\"):\n image_reader = tf.squeeze(tf.image.decode_gif(file_reader,\n name='gif_reader'))\n elif file_name.endswith(\".bmp\"):\n image_reader = tf.image.decode_bmp(file_reader, name='bmp_reader')\n else:\n image_reader = tf.image.decode_jpeg(file_reader, channels = 3,\n name='jpeg_reader')\n float_caster = tf.cast(image_reader, tf.float32)\n dims_expander = tf.expand_dims(float_caster, 0);\n resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])\n normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])\n sess = tf.Session()\n result = sess.run(normalized)\n\n return result\n\ndef load_labels(label_file):\n label = []\n proto_as_ascii_lines = tf.gfile.GFile(label_file).readlines()\n for l in proto_as_ascii_lines:\n label.append(l.rstrip())\n return label\n\nif __name__ == \"__main__\":\n file_name = \"tf_files/flower_photos/daisy/3475870145_685a19116d.jpg\"\n model_file = \"tf_files/retrained_graph.pb\"\n label_file = \"tf_files/retrained_labels.txt\"\n input_height = 299\n input_width = 299\n input_mean = 0\n input_std = 255\n input_layer = \"Mul\"\n output_layer = \"final_result\"\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--image\", help=\"image to be processed\")\n parser.add_argument(\"--graph\", help=\"graph/model to be executed\")\n parser.add_argument(\"--labels\", help=\"name of file containing labels\")\n parser.add_argument(\"--input_height\", type=int, help=\"input height\")\n parser.add_argument(\"--input_width\", type=int, help=\"input width\")\n parser.add_argument(\"--input_mean\", type=int, help=\"input mean\")\n parser.add_argument(\"--input_std\", type=int, help=\"input std\")\n parser.add_argument(\"--input_layer\", help=\"name of input layer\")\n parser.add_argument(\"--output_layer\", help=\"name of output layer\")\n args = parser.parse_args()\n\n if args.graph:\n model_file = args.graph\n if args.image:\n file_name = args.image\n if args.labels:\n label_file = args.labels\n if args.input_height:\n input_height 
= args.input_height\n if args.input_width:\n input_width = args.input_width\n if args.input_mean:\n input_mean = args.input_mean\n if args.input_std:\n input_std = args.input_std\n if args.input_layer:\n input_layer = args.input_layer\n if args.output_layer:\n output_layer = args.output_layer\n\n graph = load_graph(model_file)\n t = read_tensor_from_image_file(file_name,\n input_height=input_height,\n input_width=input_width,\n input_mean=input_mean,\n input_std=input_std)\n\n input_name = \"import/\" + input_layer\n output_name = \"import/\" + output_layer\n input_operation = graph.get_operation_by_name(input_name);\n output_operation = graph.get_operation_by_name(output_name);\n\n with tf.Session(graph=graph) as sess:\n start = time.time()\n results = sess.run(output_operation.outputs[0],\n {input_operation.outputs[0]: t})\n end=time.time()\n results = np.squeeze(results)\n\n top_k = results.argsort()[-5:][::-1]\n labels = load_labels(label_file)\n\n print('\\nEvaluation time (1-image): {:.3f}s\\n'.format(end-start))\n template = \"{} (score={:0.5f})\"\n for i in top_k:\n print(template.format(labels[i], results[i]))\n" ]
[ [ "tensorflow.Graph", "tensorflow.image.resize_bilinear", "tensorflow.import_graph_def", "tensorflow.read_file", "tensorflow.gfile.GFile", "numpy.squeeze", "tensorflow.cast", "tensorflow.image.decode_png", "tensorflow.expand_dims", "tensorflow.image.decode_bmp", "tensorflow.subtract", "tensorflow.image.decode_gif", "tensorflow.Session", "tensorflow.GraphDef", "tensorflow.image.decode_jpeg" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
Zhendong-Wang/arsm_image_captioning
[ "2282b76ab03b53952269d94d6c4b19ab98636ca5" ]
[ "misc/loss_wrapper.py" ]
[ "import torch\nimport misc.utils as utils\nimport numpy as np\nfrom misc.rewards import init_scorer, get_self_critical_reward, get_arsk_loss_cuda\n\nclass LossWrapper(torch.nn.Module):\n def __init__(self, model, opt):\n super(LossWrapper, self).__init__()\n self.opt = opt\n self.model = model\n if opt.label_smoothing > 0:\n self.crit = utils.LabelSmoothing(smoothing=opt.label_smoothing)\n else:\n self.crit = utils.LanguageModelCriterion()\n self.rl_crit = utils.RewardCriterion()\n\n def forward(self, fc_feats, att_feats, labels, masks, att_masks, gts, gt_indices,\n sc_flag, use_arm=False):\n out = {}\n if use_arm:\n gts = [gts[_] for _ in gt_indices.tolist()]\n loss = get_arsk_loss_cuda(self.model, fc_feats, att_feats, labels, att_masks, gts)\n #out['reward'] = torch.ones(4).mean()\n elif not sc_flag:\n loss = self.crit(self.model(fc_feats, att_feats, labels, att_masks), labels[:,1:], masks[:,1:])\n else:\n gen_result, sample_logprobs = self.model(fc_feats, att_feats, att_masks, opt={'sample_max':0}, mode='sample')\n gts = [gts[_] for _ in gt_indices.tolist()]\n reward = get_self_critical_reward(self.model, fc_feats, att_feats, att_masks, gts, gen_result, self.opt)\n reward = torch.from_numpy(reward).float().to(gen_result.device)\n loss = self.rl_crit(sample_logprobs, gen_result.data, reward)\n out['reward'] = reward[:,0].mean()\n out['loss'] = loss\n\n return out\n" ]
[ [ "torch.from_numpy" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JonasFrey96/RPOSE
[ "7da77499ab777ce7ee37b731541982870da8d40b" ]
[ "src/common/visu/visualizer.py" ]
[ "import os\nimport random\nimport numpy as np\nimport torch\nimport matplotlib.pyplot as plt\n\nfrom PIL import Image, ImageDraw\nfrom scipy.spatial.transform import Rotation as R\nimport copy\nimport cv2\nimport io\nfrom matplotlib import cm\n\nimport math\nfrom math import pi\nimport imageio\nfrom skimage.morphology import convex_hull_image\nfrom PIL import Image, ImageDraw\nfrom skimage import measure\nfrom skimage.segmentation import mark_boundaries\n\n\nfrom src.common.ycb.ycb_helper import BoundingBox\nfrom .flow_viz import flow_to_image\n\n__all__ = [\"Visualizer\"]\n\n\ndef image_functionality(func):\n def wrap(*args, **kwargs):\n log = False\n if kwargs.get(\"method\", \"def\") == \"def\":\n img = func(*args, **kwargs)\n log = True\n elif kwargs.get(\"method\", \"def\") == \"left\":\n kwargs_clone = copy.deepcopy(kwargs)\n kwargs_clone[\"store\"] = False\n kwargs_clone[\"jupyter\"] = False\n res = func(*args, **kwargs_clone)\n args[0]._storage_left = res\n elif kwargs.get(\"method\", \"def\") == \"right\":\n kwargs_clone = copy.deepcopy(kwargs)\n kwargs_clone[\"store\"] = False\n kwargs_clone[\"jupyter\"] = False\n res = func(*args, **kwargs_clone)\n args[0]._storage_right = res\n\n if args[0]._storage_right is not None and args[0]._storage_left is not None:\n img = np.concatenate([args[0]._storage_left, args[0]._storage_right], axis=1)\n args[0]._storage_left = None\n args[0]._storage_right = None\n log = True\n\n log *= not kwargs.get(\"not_log\", False)\n\n if log:\n log_exp = args[0].logger is not None\n tag = kwargs.get(\"tag\", \"TagNotDefined\")\n jupyter = kwargs.get(\"jupyter\", False)\n # Each logging call is able to override the setting that is stored in the visualizer\n if kwargs.get(\"store\", None) is not None:\n store = kwargs[\"store\"]\n else:\n store = args[0]._store\n\n if kwargs.get(\"epoch\", None) is not None:\n epoch = kwargs[\"epoch\"]\n else:\n epoch = args[0]._epoch\n\n # Store & Log & Display in Jupyter\n if store:\n p = os.path.join(args[0].p_visu, f\"{epoch:06d}_{tag}.png\")\n imageio.imwrite(p, img)\n\n if log_exp:\n H, W, C = img.shape\n ds = cv2.resize(\n img, dsize=(int(W / 2), int(H / 2)), interpolation=cv2.INTER_CUBIC\n )\n if args[0].logger is not None:\n try:\n # logger == neptuneai\n args[0].logger.log_image(\n log_name=tag, image=np.float32(ds) / 255, step=epoch\n )\n except:\n try:\n from neptune.new.types import File\n\n args[0].logger[tag].log(File.as_image(np.float32(ds) / 255))\n except:\n try:\n # logger == tensorboard\n args[0].logger.experiment.add_image(\n tag=tag, img_tensor=ds, global_step=epoch, dataformats=\"HWC\"\n )\n except:\n print(\"Tensorboard Logging and Neptune Logging failed !!!\")\n pass\n\n if jupyter:\n display(Image.fromarray(img))\n\n return func(*args, **kwargs)\n\n return wrap\n\n\ndef backproject_points(p, fx, fy, cx, cy):\n \"\"\"\n p.shape = (nr_points,xyz)\n \"\"\"\n # true_divide\n u = torch.round((torch.div(p[:, 0], p[:, 2]) * fx) + cx)\n v = torch.round((torch.div(p[:, 1], p[:, 2]) * fy) + cy)\n\n if torch.isnan(u).any() or torch.isnan(v).any():\n u = torch.tensor(cx).unsqueeze(0)\n v = torch.tensor(cy).unsqueeze(0)\n print(\"Predicted z=0 for translation. 
u=cx, v=cy\")\n # raise Exception\n\n return torch.stack([v, u]).T\n\n\ndef get_img_from_fig(fig, dpi=180):\n buf = io.BytesIO()\n fig.savefig(buf, format=\"png\", dpi=dpi)\n buf.seek(0)\n img_arr = np.frombuffer(buf.getvalue(), dtype=np.uint8)\n buf.close()\n img = cv2.imdecode(img_arr, 1)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n return img\n\n\nclass Visualizer:\n def __init__(self, p_visu, logger=None, num_classes=20, epoch=0, store=True):\n self.p_visu = p_visu\n self.logger = logger\n\n if not os.path.exists(self.p_visu):\n os.makedirs(self.p_visu)\n\n self._epoch = epoch\n self._store = store\n\n jet = cm.get_cmap(\"jet\")\n self.SEG_COLORS = (\n np.stack([jet(v) for v in np.linspace(0, 1, num_classes)]) * 255\n ).astype(np.uint8)\n self.SEG_COLORS_BINARY = (\n np.stack([jet(v) for v in np.linspace(0, 1, 2)]) * 255\n ).astype(np.uint8)\n\n self._flow_scale = 1000\n Nc = int(np.math.pi * 2 * self._flow_scale)\n cmap = plt.cm.get_cmap(\"hsv\", Nc)\n self._flow_cmap = [cmap(i) for i in range(cmap.N)]\n\n self._storage_left = None\n self._storage_right = None\n\n class DotDict(dict):\n \"\"\"dot.notation access to dictionary attributes\"\"\"\n\n __getattr__ = dict.get\n __setattr__ = dict.__setitem__\n __delattr__ = dict.__delitem__\n\n self._meta_data = {\n \"stuff_classes\": [\"Invalid\", \"Valid\"],\n \"stuff_colors\": [[0, 166, 237], [127, 184, 0]],\n }\n self._meta_data = DotDict(self._meta_data)\n\n @property\n def epoch(self):\n return self._epoch\n\n @epoch.setter\n def epoch(self, epoch):\n self._epoch = epoch\n\n @property\n def store(self):\n return self._store\n\n @store.setter\n def store(self, store):\n self._store = store\n\n @image_functionality\n def plot_detectron(self, img, label, text_off=False, **kwargs):\n img = self.plot_image(img, not_log=True)\n try:\n label = label.clone().cpu().numpy()\n except:\n pass\n label = label.astype(np.long)\n\n H, W, C = img.shape\n uni = np.unique(label)\n overlay = np.zeros_like(img)\n\n centers = []\n for u in uni:\n m = label == u\n col = self._meta_data[\"stuff_colors\"][u]\n overlay[m] = col\n labels_mask = measure.label(m)\n regions = measure.regionprops(labels_mask)\n regions.sort(key=lambda x: x.area, reverse=True)\n cen = np.mean(regions[0].coords, axis=0).astype(np.uint32)\n centers.append((self._meta_data[\"stuff_classes\"][u], cen))\n\n back = np.zeros((H, W, 4))\n back[:, :, :3] = img\n back[:, :, 3] = 255\n fore = np.zeros((H, W, 4))\n fore[:, :, :3] = overlay\n fore[:, :, 3] = 100\n img_new = Image.alpha_composite(\n Image.fromarray(np.uint8(back)), Image.fromarray(np.uint8(fore))\n )\n if not text_off:\n draw = ImageDraw.Draw(img_new)\n for i in centers:\n draw.text(tuple(i[1]), str(i[0]), fill=(255, 255, 255, 128))\n\n img_new = img_new.convert(\"RGB\")\n mask = mark_boundaries(img_new, label, color=(255, 255, 255))\n mask = mask.sum(axis=2)\n m = mask == mask.max()\n img_new = np.array(img_new)\n img_new[m] = (255, 255, 255)\n return np.uint8(img_new)\n\n # @image_functionality\n # def plot_detectron(self, img, label, **kwargs):\n # # use image function to get imagae is np.array uint8\n # img = self.plot_image( img, not_log=True )\n # try:\n # label = label.clone().cpu().numpy()\n # except:\n # pass\n # label = label.astype(np.long)\n # detectronVisualizer = DetectronVisu( torch.from_numpy(img).type(torch.uint8), self._meta_data, scale=1)\n # out = detectronVisualizer.draw_sem_seg( label, area_threshold=None, alpha=kwargs.get('alpha',0.5)).get_image()\n # return out\n\n @image_functionality\n def 
plot_image(self, img, **kwargs):\n try:\n img = img.clone().cpu().numpy()\n except:\n pass\n if img.shape[2] == 3 or img.shape[2] == 4:\n pass\n elif img.shape[0] == 3 or img.shape[0] == 4:\n img = np.moveaxis(img, [0, 1, 2], [2, 0, 1])\n else:\n raise Exception(\"Invalid Shape\")\n if img.max() <= 1:\n img = img * 255\n img = np.uint8(img)\n return img\n\n @image_functionality\n def flow_to_gradient(\n self, img, flow, mask, tl=[0, 0], br=[479, 639], *args, **kwargs\n ):\n \"\"\"\n img torch.tensor(h,w,3)\n flow torch.tensor(h,w,2)\n mask torch.tensor(h,w) BOOL\n call with either:\n \"\"\"\n amp = torch.norm(flow, p=2, dim=2)\n amp = amp / (torch.max(amp) + 1.0e-6) # normalize the amplitude\n dir_bin = torch.atan2(flow[:, :, 0], flow[:, :, 1])\n dir_bin[dir_bin < 0] += 2 * np.math.pi\n\n dir_bin *= self._flow_scale\n dir_bin = dir_bin.type(torch.long)\n\n h, w = 480, 640\n arr = np.zeros((h, w, 4), dtype=np.uint8)\n arr2 = np.zeros((h, w, 4), dtype=np.uint8)\n arr_img = np.ones((h, w, 4), dtype=np.uint8) * 255\n arr_img[:, :, :3] = img\n\n u_list = (\n np.uint32(np.linspace(float(tl[0]), float(br[0]), num=h))[:, None]\n .repeat(640, 1)\n .flatten()\n )\n v_list = (\n np.uint32(np.linspace(float(tl[1]), float(br[1]), num=w))[None, :]\n .repeat(480, 0)\n .flatten()\n )\n\n u_org = np.uint32(np.linspace(0, h - 1, num=h))[:, None].repeat(640, 1).flatten()\n v_org = np.uint32(np.linspace(0, w - 1, num=w))[None, :].repeat(480, 0).flatten()\n sel1 = dir_bin.numpy()[u_org, v_org]\n sel1[sel1 > (len(self._flow_cmap) - 1)] = len(self._flow_cmap) - 1\n\n m1 = (u_list < 480) * (u_list > 0) * (v_list < 640) * (u_list > 0)\n u_list = u_list[m1]\n v_list = v_list[m1]\n sel1 = sel1[m1]\n\n arr2[u_list, v_list] = np.uint8((np.array(self._flow_cmap) * 255)[sel1])\n arr = arr2\n mask = mask[:, :, None].repeat(1, 1, 4).type(torch.bool).numpy()\n arr_img[mask] = arr[mask]\n return arr_img[:, :, :3]\n\n def plot_flow(self, flow, **kwargs):\n # flow input HxWx2 or 2xHxW tensor or array, dtype float\n if type(flow) == torch.Tensor:\n if flow.device != \"cpu\":\n flow = flow.cpu()\n flow = flow.numpy()\n\n if flow.shape[0] == 2:\n flow = np.moveaxis(flow, [0, 1, 2], [2, 1, 0])\n flow = flow.astype(np.float32)\n\n img = flow_to_image(flow)\n return self.plot_image(img=img, **kwargs)\n\n @image_functionality\n def plot_translations(self, img, flow, mask, min_points=50, *args, **kwargs):\n \"\"\"\n img torch.tensor(h,w,3)\n flow torch.tensor(h,w,2)\n mask torch.tensor(h,w) BOOL\n \"\"\"\n flow = flow * mask.type(torch.float32)[:, :, None].repeat(1, 1, 2)\n # flow '[+down/up-], [+right/left-]'\n\n def bin_dir_amplitude(flow):\n amp = torch.norm(flow, p=2, dim=2)\n amp = amp / (torch.max(amp) + 1.0e-6) # normalize the amplitude\n dir_bin = torch.atan2(flow[:, :, 0], flow[:, :, 1])\n nr_bins = 8\n bin_rad = 2 * pi / nr_bins\n dir_bin = torch.round(dir_bin / bin_rad) * bin_rad\n return dir_bin, amp\n\n rot_bin, amp = bin_dir_amplitude(flow)\n s = 20\n\n while torch.sum(mask[::s, ::s]) < min_points and s > 1:\n s -= 1\n\n a = 2 if s > 15 else 1\n pil_img = Image.fromarray(img.numpy().astype(np.uint8), \"RGB\")\n draw = ImageDraw.Draw(pil_img)\n txt = f\"\"\"Horizontal, pos right | neg left:\nmax = {torch.max(flow[mask][:,0])}\nmin = {torch.min(flow[mask][:,0])}\nmean = {torch.mean(flow[mask][:,0])}\nVertical, pos down | neg up:\nmax = {torch.max(flow[mask][:,1])}\nmin = {torch.min(flow[mask][:,1])}\nmean = {torch.mean(flow[mask][:,1])}\"\"\"\n draw.text((10, 60), txt, fill=(201, 45, 136, 255))\n col = (0, 255, 0)\n grey 
= (207, 207, 207)\n for u in range(int(flow.shape[0] / s) - 2):\n u = int(u * s)\n for v in range(int(flow.shape[1] / s) - 2):\n v = int(v * s)\n if mask[u, v] == True:\n du = round(math.cos(rot_bin[u, v])) * s / 2 * amp[u, v]\n dv = round(math.sin(rot_bin[u, v])) * s / 2 * amp[u, v]\n try:\n draw.line([(v, u), (v + dv, u + du)], fill=col, width=2)\n draw.ellipse(\n [(v - a, u - a), (v + a, u + a)], outline=grey, fill=grey, width=2\n )\n except:\n pass\n return np.array(pil_img).astype(np.uint8)\n\n @image_functionality\n def plot_contour(\n self,\n img,\n points,\n K=None,\n H=None,\n cam_cx=0,\n cam_cy=0,\n cam_fx=0,\n cam_fy=0,\n trans=[[0, 0, 0]],\n rot_mat=[[1, 0, 0], [0, 1, 0], [0, 0, 1]],\n thickness=2,\n color=(0, 255, 0),\n *args,\n **kwargs,\n ):\n \"\"\"\n path := != None creats the path and store to it path/tag.png\n img:= original_image, [widht,height,RGB], torch\n points:= points of the object model [length,x,y,z]\n trans: [1,3]\n rot: [3,3]\n \"\"\"\n if K is not None:\n cam_cx = K[0, 2]\n cam_cy = K[1, 2]\n cam_fx = K[0, 0]\n cam_fy = K[1, 1]\n if H is not None:\n rot_mat = H[:3, :3]\n trans = H[:3, 3][None, :]\n if H[3, 3] != 1:\n raise Exception\n if H[3, 0] != 0 or H[3, 1] != 0 or H[3, 2] != 0:\n raise Exception\n\n rot_mat = np.array(rot_mat)\n trans = np.array(trans)\n img_f = copy.deepcopy(img).astype(np.uint8)\n points = np.dot(points, rot_mat.T)\n points = np.add(points, trans[0, :])\n h = img_f.shape[0]\n w = img_f.shape[1]\n acc_array = np.zeros((h, w, 1), dtype=np.uint8)\n\n # project pointcloud onto image\n for i in range(0, points.shape[0]):\n p_x = points[i, 0]\n p_y = points[i, 1]\n p_z = points[i, 2]\n if p_z < 1.0e-4:\n continue\n u = int(((p_x / p_z) * cam_fx) + cam_cx)\n v = int(((p_y / p_z) * cam_fy) + cam_cy)\n try:\n a = 10\n acc_array[v - a : v + a + 1, u - a : u + a + 1, 0] = 1\n except:\n pass\n\n kernel = np.ones((a * 2, a * 2, 1), np.uint8)\n erosion = cv2.erode(acc_array, kernel, iterations=1)\n\n try: # problem cause by different cv2 version > 4.0\n contours, hierarchy = cv2.findContours(\n np.expand_dims(erosion, 2), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE\n )\n except: # version < 4.0\n _, contours, hierarchy = cv2.findContours(\n np.expand_dims(erosion, 2), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE\n )\n out = np.zeros((h, w, 3), dtype=np.uint8)\n cv2.drawContours(out, contours, -1, (0, 255, 0), 3)\n\n for i in range(h):\n for j in range(w):\n if out[i, j, 1] == 255:\n img_f[i, j, :] = out[i, j, :]\n\n return img_f.astype(np.uint8)\n\n @image_functionality\n def plot_segmentation(self, label, *args, **kwargs):\n try:\n label = label.clone().cpu().numpy()\n except:\n pass\n\n if label.dtype == np.bool:\n col_map = self.SEG_COLORS_BINARY\n else:\n col_map = self.SEG_COLORS\n label = label.round()\n\n H, W = label.shape[:2]\n img = np.zeros((H, W, 3), dtype=np.uint8)\n for i, color in enumerate(col_map):\n img[label == i] = color[:3]\n\n return img\n\n @image_functionality\n def plot_convex_hull(\n self, img, points, K, H, color=(0, 255, 0, 255), *args, **kwargs\n ):\n \"\"\"\n img:= original_image, [widht,height,RGB]\n points:= points of the object model [length,x,y,z]\n trans: [1,3]\n rot: [3,3]\n \"\"\"\n try:\n points = points.clone().cpu().numpy()\n except:\n pass\n try:\n H = H.clone().cpu().numpy()\n except:\n pass\n try:\n K = K.clone().cpu().numpy()\n except:\n pass\n\n base_layer = Image.fromarray(copy.deepcopy(img)).convert(\"RGBA\")\n color_layer = Image.new(\"RGBA\", base_layer.size, color=tuple(color[:3]))\n alpha_mask = Image.new(\"L\", 
base_layer.size, 0)\n\n alpha_mask_draw = ImageDraw.Draw(alpha_mask)\n\n target = points @ H[:3, :3].T + H[:3, 3]\n pixels = np.round(\n ((K @ target.T)[:2, :] / (K @ target.T)[2, :][None, :].repeat(2, 0)).T\n ).astype(np.long)\n # pixel margin for clipping projected points; read from kwargs with a\n # default of 2, since it is not a named parameter of this method\n w = kwargs.get(\"w\", 2)\n _h, _w, _ = img.shape\n m = (\n (pixels[:, 0] >= w)\n * (pixels[:, 1] >= w)\n * (pixels[:, 1] < (_h - w - 1))\n * (pixels[:, 0] < (_w - w - 1))\n )\n pixels = pixels[m]\n arr = np.zeros(img.shape[:2], dtype=np.uint8)\n arr[pixels[:, 1], pixels[:, 0]] = 255\n convex_mask = np.uint8(convex_hull_image(arr)) * color[3]\n alpha_mask = Image.fromarray(convex_mask, mode=\"L\")\n base_layer = np.array(Image.composite(color_layer, base_layer, alpha_mask))\n return base_layer.astype(np.uint8)\n\n @image_functionality\n def plot_estimated_pose(\n self, img, points, K, H, w=2, color=(0, 255, 0, 255), *args, **kwargs\n ):\n \"\"\"\n img:= original_image, [widht,height,RGB]\n points:= points of the object model [length,x,y,z]\n trans: [1,3]\n rot: [3,3]\n \"\"\"\n try:\n points = points.clone().cpu().numpy()\n except:\n pass\n try:\n H = H.clone().cpu().numpy()\n except:\n pass\n try:\n K = K.clone().cpu().numpy()\n except:\n pass\n\n base_layer = Image.fromarray(copy.deepcopy(img)).convert(\"RGBA\")\n color_layer = Image.new(\"RGBA\", base_layer.size, color=tuple(color[:3]))\n alpha_mask = Image.new(\"L\", base_layer.size, 0)\n alpha_mask_draw = ImageDraw.Draw(alpha_mask)\n\n target = points @ H[:3, :3].T + H[:3, 3]\n pixels = np.round(\n ((K @ target.T)[:2, :] / (K @ target.T)[2, :][None, :].repeat(2, 0)).T\n )\n _h, _w, _ = img.shape\n m = (\n (pixels[:, 0] >= w)\n * (pixels[:, 1] >= w)\n * (pixels[:, 1] < (_h - w - 1))\n * (pixels[:, 0] < (_w - w - 1))\n )\n pixels = pixels[m]\n\n for u, v in pixels.tolist():\n alpha_mask_draw.ellipse([(u - w, v - w), (u + w + 1, v + w + 1)], color[3])\n base_layer = np.array(Image.composite(color_layer, base_layer, alpha_mask))\n return base_layer.astype(np.uint8)\n\n @image_functionality\n def plot_estimated_pose_on_bb(\n self,\n img,\n points,\n tl,\n br,\n trans=[[0, 0, 0]],\n rot_mat=[[1, 0, 0], [0, 1, 0], [0, 0, 1]],\n cam_cx=0,\n cam_cy=0,\n cam_fx=0,\n cam_fy=0,\n w=2,\n K=None,\n H=None,\n color_code_depth=False,\n max_val=2,\n *args,\n **kwargs,\n ):\n \"\"\"\n img:= original_image, [widht,height,RGB]\n points:= points of the object model [length,x,y,z]\n trans: [1,3]\n rot: [3,3]\n \"\"\"\n if K is not None:\n cam_cx = K[0, 2]\n cam_cy = K[1, 2]\n cam_fx = K[0, 0]\n cam_fy = K[1, 1]\n if H is not None:\n rot_mat = H[:3, :3]\n trans = H[:3, 3][None, :]\n if H[3, 3] != 1:\n raise Exception\n if H[3, 0] != 0 or H[3, 1] != 0 or H[3, 2] != 0:\n raise Exception\n\n if type(rot_mat) == list:\n rot_mat = np.array(rot_mat)\n if type(trans) == list:\n trans = np.array(trans)\n\n img_d = copy.deepcopy(img)\n points = np.dot(points, rot_mat.T)\n points = np.add(points, trans[0, :])\n width = int(br[1] - tl[1])\n height = int(br[0] - tl[0])\n off_h = int(tl[0])\n off_w = int(tl[1])\n\n for i in range(0, points.shape[0]):\n p_x = points[i, 0]\n p_y = points[i, 1]\n p_z = points[i, 2]\n\n u = int((int(((p_x / p_z) * cam_fx) + cam_cx) - off_w) / width * 640)\n v = int((int(((p_y / p_z) * cam_fy) + cam_cy) - off_h) / height * 480)\n\n try:\n if color_code_depth:\n z = min(max(0, points[i, 2]), max_val) / max_val\n turbo = cm.get_cmap(\"turbo\", 256)\n rgba = turbo(float(z))\n img_d[v - w : v + w + 1, u - w : u + w + 1, 0] = int(rgba[0] * 255)\n img_d[v - w : v + w + 1, u - w : u + w + 1, 1] = int(rgba[1] * 255)\n img_d[v - w : v + w + 1, u - w : 
u + w + 1, 2] = int(rgba[2] * 255)\n else:\n img_d[v - w : v + w + 1, u - w : u + w + 1, 0] = 0\n img_d[v - w : v + w + 1, u - w : u + w + 1, 1] = 255\n img_d[v - w : v + w + 1, u - w : u + w + 1, 2] = 0\n except:\n # print(\"out of bounce\")\n pass\n\n return img_d.astype(np.uint8)\n\n @image_functionality\n def plot_bounding_box(\n self, img, rmin=0, rmax=0, cmin=0, cmax=0, str_width=2, b=None, *args, **kwargs\n ):\n \"\"\"\n img:= original_image, [widht,height,RGB]\n \"\"\"\n\n if isinstance(b, dict):\n rmin = b[\"rmin\"]\n rmax = b[\"rmax\"]\n cmin = b[\"cmin\"]\n cmax = b[\"cmax\"]\n\n # ToDo check Input data\n img_d = np.array(copy.deepcopy(img))\n\n c = [0, 0, 255]\n rmin_mi = max(0, rmin - str_width)\n rmin_ma = min(img_d.shape[0], rmin + str_width)\n\n rmax_mi = max(0, rmax - str_width)\n rmax_ma = min(img_d.shape[0], rmax + str_width)\n\n cmin_mi = max(0, cmin - str_width)\n cmin_ma = min(img_d.shape[1], cmin + str_width)\n\n cmax_mi = max(0, cmax - str_width)\n cmax_ma = min(img_d.shape[1], cmax + str_width)\n\n img_d[rmin_mi:rmin_ma, cmin:cmax, :] = c\n img_d[rmax_mi:rmax_ma, cmin:cmax, :] = c\n img_d[rmin:rmax, cmin_mi:cmin_ma, :] = c\n img_d[rmin:rmax, cmax_mi:cmax_ma, :] = c\n img_d = img_d.astype(np.uint8)\n return img_d.astype(np.uint8)\n\n @image_functionality\n def plot_batch_projection(self, images, target, cam, max_images=10, *args, **kwargs):\n\n num = min(max_images, target.shape[0])\n fig = plt.figure(figsize=(7, num * 3.5))\n for i in range(num):\n masked_idx = backproject_points(\n target[i], fx=cam[i, 2], fy=cam[i, 3], cx=cam[i, 0], cy=cam[i, 1]\n )\n\n for j in range(masked_idx.shape[0]):\n try:\n images[i, int(masked_idx[j, 0]), int(masked_idx[j, 1]), 0] = 0\n images[i, int(masked_idx[j, 0]), int(masked_idx[j, 1]), 1] = 255\n images[i, int(masked_idx[j, 0]), int(masked_idx[j, 1]), 2] = 0\n except:\n pass\n\n min1 = torch.min(masked_idx[:, 0])\n max1 = torch.max(masked_idx[:, 0])\n max2 = torch.max(masked_idx[:, 1])\n min2 = torch.min(masked_idx[:, 1])\n\n bb = BoundingBox(p1=torch.stack([min1, min2]), p2=torch.stack([max1, max2]))\n\n bb_img = bb.plot(images[i, :, :, :3].cpu().numpy().astype(np.uint8))\n fig.add_subplot(num, 2, i * 2 + 1)\n plt.imshow(bb_img)\n\n fig.add_subplot(num, 2, i * 2 + 2)\n real = images[i, :, :, :3].cpu().numpy().astype(np.uint8)\n plt.imshow(real)\n\n img = get_img_from_fig(fig).astype(np.uint8)\n plt.close()\n return img\n\n @image_functionality\n def visu_network_input(self, data, max_images=10, *args, **kwargs):\n num = min(max_images, data.shape[0])\n fig = plt.figure(figsize=(7, num * 3.5))\n\n for i in range(num):\n\n n_render = f\"batch{i}_render.png\"\n n_real = f\"batch{i}_real.png\"\n real = np.transpose(data[i, :3, :, :].cpu().numpy().astype(np.uint8), (1, 2, 0))\n render = np.transpose(data[i, 3:, :, :].cpu().numpy().astype(np.uint8), (1, 2, 0))\n\n # plt_img(real, name=n_real, folder=folder)\n # plt_img(render, name=n_render, folder=folder)\n\n fig.add_subplot(num, 2, i * 2 + 1)\n plt.imshow(real)\n plt.tight_layout()\n fig.add_subplot(num, 2, i * 2 + 2)\n plt.imshow(render)\n plt.tight_layout()\n\n img = get_img_from_fig(fig).astype(np.uint8)\n plt.close()\n return img\n\n @image_functionality\n def plot_depthmap(self, depth, fix_max=2, *args, **kwargs):\n arr = depth.clone().cpu().numpy()\n\n arr[0, 0] = 0\n arr[0, 1] = fix_max\n\n w = depth.shape[0]\n h = depth.shape[1]\n\n fig = plt.figure(figsize=(6, float(w / h * 6)))\n ax = []\n ax.append(fig.add_subplot(1, 1, 1))\n ax[-1].get_xaxis().set_visible(False)\n 
ax[-1].get_yaxis().set_visible(False)\n pos = ax[-1].imshow(arr, cmap=\"turbo\")\n fig.colorbar(pos, ax=ax[-1])\n\n img = get_img_from_fig(fig).astype(np.uint8)\n plt.close()\n return img\n\n @image_functionality\n def plot_corrospondence(\n self,\n u_map,\n v_map,\n flow_mask,\n real_img,\n render_img,\n col=(0, 255, 0),\n colorful=False,\n text=False,\n res_h=30,\n res_w=30,\n min_points=50,\n *args,\n **kwargs,\n ):\n \"\"\"Plot Matching Points on Real and Render Image\n Args:\n u_map (torch.tensor dtype float): H,W\n v_map (torch.tensor dtype float): H,W\n flow_mask (torch.tensor dtype bool): H,W\n real_img (torch.tensor dtype float): H,W,3\n render_img (torch.tensor dtype float): H,W,3\n \"\"\"\n cropped_comp = np.concatenate(\n [real_img.cpu().numpy(), render_img.cpu().numpy()], axis=1\n ).astype(np.uint8)\n cropped_comp_img = Image.fromarray(cropped_comp)\n draw = ImageDraw.Draw(cropped_comp_img)\n\n m = flow_mask != 0\n if text:\n txt = f\"\"\"Flow in Height:\nmax = {torch.max(u_map[m].type(torch.float32))}\nmin = {torch.min(u_map[m].type(torch.float32))}\nmean = {torch.mean(u_map[m].type(torch.float32))}\nFlow in Vertical:\nmax = {torch.max(v_map[m].type(torch.float32))}\nmin = {torch.min(v_map[m].type(torch.float32))}\nmean = {torch.mean(v_map[m].type(torch.float32))}\"\"\"\n draw.text((10, 60), txt, fill=(201, 45, 136, 255))\n\n Nc = 20\n cmap = plt.cm.get_cmap(\"gist_rainbow\", Nc)\n cmaplist = [cmap(i) for i in range(cmap.N)]\n\n h, w = u_map.shape\n\n while torch.sum(flow_mask[::res_h, ::res_w]) < min_points and res_h > 1:\n res_w -= 1\n res_h -= 1\n\n for _w in range(0, w, res_w):\n for _h in range(0, h, res_h):\n\n if flow_mask[_h, _w] != 0:\n try:\n delta_h = u_map[_h, _w]\n delta_w = v_map[_h, _w]\n if colorful:\n col = random.choice(cmaplist)[:3]\n col = (int(col[0] * 255), int(col[1] * 255), int(col[2] * 255))\n draw.line(\n [(int(_w), int(_h)), (int(_w + w - delta_w), int(_h - delta_h))],\n fill=col,\n width=4,\n )\n except:\n print(\"failed\")\n\n return np.array(cropped_comp_img).astype(np.uint8)\n\n @image_functionality\n def visu_network_input_pred(\n self, data, images, target, cam, max_images=10, *args, **kwargs\n ):\n num = min(max_images, data.shape[0])\n fig = plt.figure(figsize=(10.5, num * 3.5))\n\n for i in range(num):\n # real render input\n n_render = f\"batch{i}_render.png\"\n n_real = f\"batch{i}_real.png\"\n real = np.transpose(data[i, :3, :, :].cpu().numpy().astype(np.uint8), (1, 2, 0))\n render = np.transpose(data[i, 3:, :, :].cpu().numpy().astype(np.uint8), (1, 2, 0))\n fig.add_subplot(num, 3, i * 3 + 1)\n plt.imshow(real)\n plt.tight_layout()\n fig.add_subplot(num, 3, i * 3 + 2)\n plt.imshow(render)\n plt.tight_layout()\n\n # prediction\n masked_idx = backproject_points(\n target[i], fx=cam[i, 2], fy=cam[i, 3], cx=cam[i, 0], cy=cam[i, 1]\n )\n for j in range(masked_idx.shape[0]):\n try:\n images[i, int(masked_idx[j, 0]), int(masked_idx[j, 1]), 0] = 0\n images[i, int(masked_idx[j, 0]), int(masked_idx[j, 1]), 1] = 255\n images[i, int(masked_idx[j, 0]), int(masked_idx[j, 1]), 2] = 0\n except:\n pass\n min1 = torch.min(masked_idx[:, 0])\n max1 = torch.max(masked_idx[:, 0])\n max2 = torch.max(masked_idx[:, 1])\n min2 = torch.min(masked_idx[:, 1])\n bb = BoundingBox(p1=torch.stack([min1, min2]), p2=torch.stack([max1, max2]))\n bb_img = bb.plot(images[i, :, :, :3].cpu().numpy().astype(np.uint8))\n fig.add_subplot(num, 3, i * 3 + 3)\n plt.imshow(bb_img)\n # fig.add_subplot(num, 2, i * 2 + 4)\n # real = images[i, :, :, :3].cpu().numpy().astype(np.uint8)\n # 
plt.imshow(real)\n\n img = get_img_from_fig(fig).astype(np.uint8)\n plt.close()\n return img\n" ]
[ [ "numpy.dot", "matplotlib.pyplot.imshow", "torch.mean", "numpy.expand_dims", "torch.max", "numpy.linspace", "torch.sum", "numpy.concatenate", "numpy.zeros_like", "numpy.mean", "numpy.moveaxis", "torch.norm", "matplotlib.pyplot.tight_layout", "numpy.unique", "numpy.uint8", "torch.round", "torch.tensor", "matplotlib.pyplot.close", "numpy.float32", "numpy.zeros", "matplotlib.pyplot.figure", "torch.div", "matplotlib.pyplot.cm.get_cmap", "torch.min", "torch.stack", "numpy.array", "torch.atan2", "torch.isnan", "numpy.ones", "matplotlib.cm.get_cmap", "numpy.add" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
xyzhu8/mmocr
[ "c745667cd1af91dbff2385dcf2f2b80b9a40adb6", "f62b4513f5411bde9f24e1902b1cb1945340022a", "f62b4513f5411bde9f24e1902b1cb1945340022a", "f62b4513f5411bde9f24e1902b1cb1945340022a", "f62b4513f5411bde9f24e1902b1cb1945340022a" ]
[ "tests/test_dataset/test_ner_dataset.py", "mmocr/datasets/pipelines/test_time_aug.py", "docs_zh_CN/stats.py", "mmocr/models/textrecog/recognizer/base.py", "mmocr/models/textdet/necks/fpn_unet.py" ]
[ "import json\nimport os.path as osp\nimport tempfile\n\nimport torch\n\nfrom mmocr.datasets.ner_dataset import NerDataset\nfrom mmocr.models.ner.convertors.ner_convertor import NerConvertor\n\n\ndef _create_dummy_ann_file(ann_file):\n data = {\n 'text': '彭小军认为,国内银行现在走的是台湾的发卡模式',\n 'label': {\n 'address': {\n '台湾': [[15, 16]]\n },\n 'name': {\n '彭小军': [[0, 2]]\n }\n }\n }\n\n with open(ann_file, 'w') as fw:\n fw.write(json.dumps(data, ensure_ascii=False) + '\\n')\n\n\ndef _create_dummy_vocab_file(vocab_file):\n with open(vocab_file, 'w') as fw:\n for char in list(map(chr, range(ord('a'), ord('z') + 1))):\n fw.write(char + '\\n')\n\n\ndef _create_dummy_loader():\n loader = dict(\n type='HardDiskLoader',\n repeat=1,\n parser=dict(type='LineJsonParser', keys=['text', 'label']))\n return loader\n\n\ndef test_ner_dataset():\n # test initialization\n loader = _create_dummy_loader()\n categories = [\n 'address', 'book', 'company', 'game', 'government', 'movie', 'name',\n 'organization', 'position', 'scene'\n ]\n\n # create dummy data\n tmp_dir = tempfile.TemporaryDirectory()\n ann_file = osp.join(tmp_dir.name, 'fake_data.txt')\n vocab_file = osp.join(tmp_dir.name, 'fake_vocab.txt')\n _create_dummy_ann_file(ann_file)\n _create_dummy_vocab_file(vocab_file)\n\n max_len = 128\n ner_convertor = dict(\n type='NerConvertor',\n annotation_type='bio',\n vocab_file=vocab_file,\n categories=categories,\n max_len=max_len)\n\n test_pipeline = [\n dict(\n type='NerTransform',\n label_convertor=ner_convertor,\n max_len=max_len),\n dict(type='ToTensorNER')\n ]\n dataset = NerDataset(ann_file, loader, pipeline=test_pipeline)\n\n # test pre_pipeline\n img_info = dataset.data_infos[0]\n results = dict(img_info=img_info)\n dataset.pre_pipeline(results)\n\n # test prepare_train_img\n dataset.prepare_train_img(0)\n\n # test evaluation\n result = [[['address', 15, 16], ['name', 0, 2]]]\n\n dataset.evaluate(result)\n\n # test pred convert2entity function\n pred = [\n 21, 7, 17, 17, 21, 21, 21, 21, 21, 21, 13, 21, 21, 21, 21, 21, 1, 11,\n 21, 21, 7, 17, 17, 21, 21, 21, 21, 21, 21, 13, 21, 21, 21, 21, 21, 1,\n 11, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,\n 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,\n 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,\n 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,\n 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,\n 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 1, 21, 21, 21, 21, 21,\n 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 1, 21, 21, 21, 21,\n 21, 21\n ]\n preds = [pred[:128]]\n mask = [0] * 128\n for i in range(10):\n mask[i] = 1\n assert len(preds[0]) == len(mask)\n masks = torch.tensor([mask])\n convertor = NerConvertor(\n annotation_type='bio',\n vocab_file=vocab_file,\n categories=categories,\n max_len=128)\n all_entities = convertor.convert_pred2entities(preds=preds, masks=masks)\n assert len(all_entities[0][0]) == 3\n\n tmp_dir.cleanup()\n", "import mmcv\nimport numpy as np\nfrom mmdet.datasets.builder import PIPELINES\nfrom mmdet.datasets.pipelines.compose import Compose\n\n\[email protected]_module()\nclass MultiRotateAugOCR:\n \"\"\"Test-time augmentation with multiple rotations in the case that\n img_height > img_width.\n\n An example configuration is as follows:\n\n .. 
code-block::\n\n rotate_degrees=[0, 90, 270],\n transforms=[\n dict(\n type='ResizeOCR',\n height=32,\n min_width=32,\n max_width=160,\n keep_aspect_ratio=True),\n dict(type='ToTensorOCR'),\n dict(type='NormalizeOCR', **img_norm_cfg),\n dict(\n type='Collect',\n keys=['img'],\n meta_keys=[\n 'filename', 'ori_shape', 'img_shape', 'valid_ratio'\n ]),\n ]\n\n After MultiRotateAugOCR with above configuration, the results are wrapped\n into lists of the same length as follows:\n\n .. code-block::\n\n dict(\n img=[...],\n img_shape=[...]\n ...\n )\n\n Args:\n transforms (list[dict]): Transformation applied for each augmentation.\n rotate_degrees (list[int] | None): Degrees of anti-clockwise rotation.\n force_rotate (bool): If True, rotate image by 'rotate_degrees'\n while ignore image aspect ratio.\n \"\"\"\n\n def __init__(self, transforms, rotate_degrees=None, force_rotate=False):\n self.transforms = Compose(transforms)\n self.force_rotate = force_rotate\n if rotate_degrees is not None:\n self.rotate_degrees = rotate_degrees if isinstance(\n rotate_degrees, list) else [rotate_degrees]\n assert mmcv.is_list_of(self.rotate_degrees, int)\n for degree in self.rotate_degrees:\n assert 0 <= degree < 360\n assert degree % 90 == 0\n if 0 not in self.rotate_degrees:\n self.rotate_degrees.append(0)\n else:\n self.rotate_degrees = [0]\n\n def __call__(self, results):\n \"\"\"Call function to apply test time augment transformation to results.\n\n Args:\n results (dict): Result dict contains the data to be transformed.\n\n Returns:\n dict[str: list]: The augmented data, where each value is wrapped\n into a list.\n \"\"\"\n img_shape = results['img_shape']\n ori_height, ori_width = img_shape[:2]\n if not self.force_rotate and ori_height <= ori_width:\n rotate_degrees = [0]\n else:\n rotate_degrees = self.rotate_degrees\n aug_data = []\n for degree in set(rotate_degrees):\n _results = results.copy()\n if degree == 0:\n pass\n elif degree == 90:\n _results['img'] = np.rot90(_results['img'], 1)\n elif degree == 180:\n _results['img'] = np.rot90(_results['img'], 2)\n elif degree == 270:\n _results['img'] = np.rot90(_results['img'], 3)\n data = self.transforms(_results)\n aug_data.append(data)\n # list of dict to dict of list\n aug_data_dict = {key: [] for key in aug_data[0]}\n for data in aug_data:\n for key, val in data.items():\n aug_data_dict[key].append(val)\n return aug_data_dict\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f'(transforms={self.transforms}, '\n repr_str += f'rotate_degrees={self.rotate_degrees})'\n return repr_str\n", "#!/usr/bin/env python\nimport functools as func\nimport glob\nimport re\nfrom os.path import basename, splitext\n\nimport numpy as np\nimport titlecase\n\n\ndef anchor(name):\n return re.sub(r'-+', '-', re.sub(r'[^a-zA-Z0-9]', '-',\n name.strip().lower())).strip('-')\n\n\n# Count algorithms\n\nfiles = sorted(glob.glob('*_models.md'))\n# files = sorted(glob.glob('docs/*_models.md'))\n\nstats = []\n\nfor f in files:\n with open(f, 'r') as content_file:\n content = content_file.read()\n\n # title\n title = content.split('\\n')[0].replace('#', '')\n\n # count papers\n papers = set((papertype, titlecase.titlecase(paper.lower().strip()))\n for (papertype, paper) in re.findall(\n r'\\n\\s*\\[([A-Z]+?)\\]\\s*\\n.*?\\btitle\\s*=\\s*{(.*?)}',\n content, re.DOTALL))\n # paper links\n revcontent = '\\n'.join(list(reversed(content.splitlines())))\n paperlinks = {}\n for _, p in papers:\n print(p)\n q = p.replace('\\\\', '\\\\\\\\').replace('?', '\\\\?')\n 
paperlinks[p] = ' '.join(\n (f'[⇨]({splitext(basename(f))[0]}.html#{anchor(paperlink)})'\n for paperlink in re.findall(\n rf'\\btitle\\s*=\\s*{{\\s*{q}\\s*}}.*?\\n## (.*?)\\s*[,;]?\\s*\\n',\n revcontent, re.DOTALL | re.IGNORECASE)))\n print(' ', paperlinks[p])\n paperlist = '\\n'.join(\n sorted(f' - [{t}] {x} ({paperlinks[x]})' for t, x in papers))\n # count configs\n configs = set(x.lower().strip()\n for x in re.findall(r'https.*configs/.*\\.py', content))\n\n # count ckpts\n ckpts = set(x.lower().strip()\n for x in re.findall(r'https://download.*\\.pth', content)\n if 'mmocr' in x)\n\n statsmsg = f\"\"\"\n## [{title}]({f})\n\n* 模型权重文件数量: {len(ckpts)}\n* 配置文件数量: {len(configs)}\n* 论文数量: {len(papers)}\n{paperlist}\n\n \"\"\"\n\n stats.append((papers, configs, ckpts, statsmsg))\n\nallpapers = func.reduce(lambda a, b: a.union(b), [p for p, _, _, _ in stats])\nallconfigs = func.reduce(lambda a, b: a.union(b), [c for _, c, _, _ in stats])\nallckpts = func.reduce(lambda a, b: a.union(b), [c for _, _, c, _ in stats])\nmsglist = '\\n'.join(x for _, _, _, x in stats)\n\npapertypes, papercounts = np.unique([t for t, _ in allpapers],\n return_counts=True)\ncountstr = '\\n'.join(\n [f' - {t}: {c}' for t, c in zip(papertypes, papercounts)])\n\nmodelzoo = f\"\"\"\n# Overview\n\n* Number of checkpoints: {len(allckpts)}\n* Number of configs: {len(allconfigs)}\n* Number of papers: {len(allpapers)}\n{countstr}\n\nFor supported datasets, see [datasets overview](datasets.md).\n\n{msglist}\n\"\"\"\n\nwith open('modelzoo.md', 'w') as f:\n f.write(modelzoo)\n", "import warnings\nfrom abc import ABCMeta, abstractmethod\nfrom collections import OrderedDict\n\nimport mmcv\nimport torch\nimport torch.distributed as dist\nfrom mmcv.runner import BaseModule, auto_fp16\n\nfrom mmocr.core import imshow_text_label\n\n\nclass BaseRecognizer(BaseModule, metaclass=ABCMeta):\n \"\"\"Base class for text recognition.\"\"\"\n\n def __init__(self, init_cfg=None):\n super().__init__(init_cfg=init_cfg)\n self.fp16_enabled = False\n\n @abstractmethod\n def extract_feat(self, imgs):\n \"\"\"Extract features from images.\"\"\"\n pass\n\n @abstractmethod\n def forward_train(self, imgs, img_metas, **kwargs):\n \"\"\"\n Args:\n img (tensor): tensors with shape (N, C, H, W).\n Typically should be mean centered and std scaled.\n img_metas (list[dict]): List of image info dict where each dict\n has: 'img_shape', 'scale_factor', 'flip', and may also contain\n 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n For details of the values of these keys, see\n :class:`mmdet.datasets.pipelines.Collect`.\n kwargs (keyword arguments): Specific to concrete implementation.\n \"\"\"\n pass\n\n @abstractmethod\n def simple_test(self, img, img_metas, **kwargs):\n pass\n\n @abstractmethod\n def aug_test(self, imgs, img_metas, **kwargs):\n \"\"\"Test function with test time augmentation.\n\n Args:\n imgs (list[tensor]): Tensor should have shape NxCxHxW,\n which contains all images in the batch.\n img_metas (list[list[dict]]): The metadata of images.\n \"\"\"\n pass\n\n def forward_test(self, imgs, img_metas, **kwargs):\n \"\"\"\n Args:\n imgs (tensor | list[tensor]): Tensor should have shape NxCxHxW,\n which contains all images in the batch.\n img_metas (list[dict] | list[list[dict]]):\n The outer list indicates images in a batch.\n \"\"\"\n if isinstance(imgs, list):\n assert len(imgs) > 0\n assert imgs[0].size(0) == 1, ('aug test does not support '\n f'inference with batch size '\n f'{imgs[0].size(0)}')\n assert len(imgs) == len(img_metas)\n 
return self.aug_test(imgs, img_metas, **kwargs)\n\n return self.simple_test(imgs, img_metas, **kwargs)\n\n @auto_fp16(apply_to=('img', ))\n def forward(self, img, img_metas, return_loss=True, **kwargs):\n \"\"\"Calls either :func:`forward_train` or :func:`forward_test` depending\n on whether ``return_loss`` is ``True``.\n\n Note that img and img_meta are single-nested (i.e. tensor and\n list[dict]).\n \"\"\"\n\n if return_loss:\n return self.forward_train(img, img_metas, **kwargs)\n\n if isinstance(img, list):\n for idx, each_img in enumerate(img):\n if each_img.dim() == 3:\n img[idx] = each_img.unsqueeze(0)\n else:\n if len(img_metas) == 1 and isinstance(img_metas[0], list):\n img_metas = img_metas[0]\n\n return self.forward_test(img, img_metas, **kwargs)\n\n def _parse_losses(self, losses):\n \"\"\"Parse the raw outputs (losses) of the network.\n\n Args:\n losses (dict): Raw outputs of the network, which usually contain\n losses and other necessary infomation.\n\n Returns:\n tuple[tensor, dict]: (loss, log_vars), loss is the loss tensor\n which may be a weighted sum of all losses, log_vars contains\n all the variables to be sent to the logger.\n \"\"\"\n log_vars = OrderedDict()\n for loss_name, loss_value in losses.items():\n if isinstance(loss_value, torch.Tensor):\n log_vars[loss_name] = loss_value.mean()\n elif isinstance(loss_value, list):\n log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)\n else:\n raise TypeError(\n f'{loss_name} is not a tensor or list of tensors')\n\n loss = sum(_value for _key, _value in log_vars.items()\n if 'loss' in _key)\n\n log_vars['loss'] = loss\n for loss_name, loss_value in log_vars.items():\n # reduce loss when distributed training\n if dist.is_available() and dist.is_initialized():\n loss_value = loss_value.data.clone()\n dist.all_reduce(loss_value.div_(dist.get_world_size()))\n log_vars[loss_name] = loss_value.item()\n\n return loss, log_vars\n\n def train_step(self, data, optimizer):\n \"\"\"The iteration step during training.\n\n This method defines an iteration step during training, except for the\n back propagation and optimizer update, which are done by an optimizer\n hook. Note that in some complicated cases or models (e.g. GAN),\n the whole process (including the back propagation and optimizer update)\n is also defined by this method.\n\n Args:\n data (dict): The outputs of dataloader.\n optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of\n runner is passed to ``train_step()``. This argument is unused\n and reserved.\n\n Returns:\n dict: It should contain at least 3 keys: ``loss``, ``log_vars``,\n ``num_samples``.\n\n - ``loss`` is a tensor for back propagation, which is a\n weighted sum of multiple losses.\n - ``log_vars`` contains all the variables to be sent to the\n logger.\n - ``num_samples`` indicates the batch size used for\n averaging the logs (Note: for the\n DDP model, num_samples refers to the batch size for each GPU).\n \"\"\"\n losses = self(**data)\n loss, log_vars = self._parse_losses(losses)\n\n outputs = dict(\n loss=loss, log_vars=log_vars, num_samples=len(data['img_metas']))\n\n return outputs\n\n def val_step(self, data, optimizer):\n \"\"\"The iteration step during validation.\n\n This method shares the same signature as :func:`train_step`, but is\n used during val epochs. 
Note that the evaluation after training epochs\n is not implemented by this method, but by an evaluation hook.\n \"\"\"\n losses = self(**data)\n loss, log_vars = self._parse_losses(losses)\n\n outputs = dict(\n loss=loss, log_vars=log_vars, num_samples=len(data['img_metas']))\n\n return outputs\n\n def show_result(self,\n img,\n result,\n gt_label='',\n win_name='',\n show=False,\n wait_time=0,\n out_file=None,\n **kwargs):\n \"\"\"Draw `result` on `img`.\n\n Args:\n img (str or tensor): The image to be displayed.\n result (dict): The results to draw on `img`.\n gt_label (str): Ground truth label of img.\n win_name (str): The window name.\n wait_time (int): Value of waitKey param.\n Default: 0.\n show (bool): Whether to show the image.\n Default: False.\n out_file (str or None): The output filename.\n Default: None.\n\n Returns:\n img (tensor): Only if not `show` or `out_file`.\n \"\"\"\n img = mmcv.imread(img)\n img = img.copy()\n pred_label = None\n if 'text' in result.keys():\n pred_label = result['text']\n\n # if out_file specified, do not show image in window\n if out_file is not None:\n show = False\n # draw text label\n if pred_label is not None:\n img = imshow_text_label(\n img,\n pred_label,\n gt_label,\n show=show,\n win_name=win_name,\n wait_time=wait_time,\n out_file=out_file)\n\n if not (show or out_file):\n warnings.warn('show==False and out_file is not specified, only '\n 'result image will be returned')\n return img\n\n return img\n", "import torch\nimport torch.nn.functional as F\n# from mmcv.cnn import xavier_init\nfrom mmcv.runner import BaseModule\nfrom mmdet.models.builder import NECKS\nfrom torch import nn\n\n\nclass UpBlock(BaseModule):\n \"\"\"Upsample block for DRRG and TextSnake.\"\"\"\n\n def __init__(self, in_channels, out_channels, init_cfg=None):\n super().__init__(init_cfg=init_cfg)\n\n assert isinstance(in_channels, int)\n assert isinstance(out_channels, int)\n\n self.conv1x1 = nn.Conv2d(\n in_channels, in_channels, kernel_size=1, stride=1, padding=0)\n self.conv3x3 = nn.Conv2d(\n in_channels, out_channels, kernel_size=3, stride=1, padding=1)\n self.deconv = nn.ConvTranspose2d(\n out_channels, out_channels, kernel_size=4, stride=2, padding=1)\n\n def forward(self, x):\n x = F.relu(self.conv1x1(x))\n x = F.relu(self.conv3x3(x))\n x = self.deconv(x)\n return x\n\n\[email protected]_module()\nclass FPN_UNet(BaseModule):\n \"\"\"The class for implementing DRRG and TextSnake U-Net-like FPN.\n\n DRRG: Deep Relational Reasoning Graph Network for Arbitrary Shape\n Text Detection [https://arxiv.org/abs/2003.07493].\n TextSnake: A Flexible Representation for Detecting Text of Arbitrary Shapes\n [https://arxiv.org/abs/1807.01544].\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n init_cfg=dict(\n type='Xavier',\n layer=['Conv2d', 'ConvTranspose2d'],\n distribution='uniform')):\n super().__init__(init_cfg=init_cfg)\n\n assert len(in_channels) == 4\n assert isinstance(out_channels, int)\n\n blocks_out_channels = [out_channels] + [\n min(out_channels * 2**i, 256) for i in range(4)\n ]\n blocks_in_channels = [blocks_out_channels[1]] + [\n in_channels[i] + blocks_out_channels[i + 2] for i in range(3)\n ] + [in_channels[3]]\n\n self.up4 = nn.ConvTranspose2d(\n blocks_in_channels[4],\n blocks_out_channels[4],\n kernel_size=4,\n stride=2,\n padding=1)\n self.up_block3 = UpBlock(blocks_in_channels[3], blocks_out_channels[3])\n self.up_block2 = UpBlock(blocks_in_channels[2], blocks_out_channels[2])\n self.up_block1 = UpBlock(blocks_in_channels[1], 
blocks_out_channels[1])\n self.up_block0 = UpBlock(blocks_in_channels[0], blocks_out_channels[0])\n\n def forward(self, x):\n c2, c3, c4, c5 = x\n\n x = F.relu(self.up4(c5))\n\n x = torch.cat([x, c4], dim=1)\n x = F.relu(self.up_block3(x))\n\n x = torch.cat([x, c3], dim=1)\n x = F.relu(self.up_block2(x))\n\n x = torch.cat([x, c2], dim=1)\n x = F.relu(self.up_block1(x))\n\n x = self.up_block0(x)\n # the output should be of the same height and width as backbone input\n return x\n" ]
[ [ "torch.tensor" ], [ "numpy.rot90" ], [ "numpy.unique" ], [ "torch.distributed.get_world_size", "torch.distributed.is_available", "torch.distributed.is_initialized" ], [ "torch.nn.Conv2d", "torch.nn.ConvTranspose2d", "torch.cat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MetaCell/pygeppetto-django
[ "2222228af89ad3edb68ab4ff41c85ffd6c81af9c" ]
[ "pygeppetto_gateway/base.py" ]
[ "import copy\nimport json\nimport logging\nimport os\nimport pathlib\nimport typing as t\nimport zlib\n\nimport enforce\nimport quantities as pq\nimport requests\nimport websocket\nimport numpy as np\nfrom django.conf import settings\n\nfrom pygeppetto_gateway import helpers\nfrom pygeppetto_gateway.interpreters import core, helpers as pgi_helpers\nfrom scidash.general import helpers as general_hlp\nfrom scidash.general.backends import ScidashCacheBackend\n\ndb_logger = logging.getLogger('db')\nenforce.config({'mode': 'covariant'})\n\n\[email protected]_validation\nclass GeppettoServletManager():\n \"\"\" Base class for communication with Java Geppetto server \"\"\"\n\n DEFAULT_HOST = 'ws://localhost:8080'\n\n cookies = None\n ws = None\n\n instances = {}\n\n @classmethod\n def get_instance(cls, instance_name):\n if cls.instances.get(instance_name, None) is None:\n cls.instances[instance_name] = GeppettoServletManager()\n\n return cls.instances.get(instance_name)\n\n def __init__(self) -> None:\n\n if hasattr(settings, 'GEPPETTO_SERVLET_URL'):\n self.host = settings.GEPPETTO_SERVLET_URL\n else:\n self.host = self.DEFAULT_HOST\n\n db_logger.info(f\"Servlet manager init with {self.host} GEPPETTO url\")\n\n self._say_hello_geppetto()\n self._connect()\n\n def _connect(self) -> websocket.WebSocket:\n self.ws = websocket.WebSocket()\n self.ws.timeout = 120\n\n self.ws.connect(self.host, cookie=self.cookies)\n\n return self.ws\n\n def close(self) -> None:\n db_logger.info(\"Connection closed\")\n\n self.ws.close()\n\n def _say_hello_geppetto(self) -> None:\n \"\"\"_say_hello_geppetto\n\n Here we have to do a GET request to base Geppetto page, and then put\n session cookies into websocket connection, or it will not work.\n \"\"\"\n\n http_response = requests.get(settings.GEPPETTO_BASE_URL)\n\n self.cookies = \";\".join(\n [\n \"{}={}\".format(x, y)\n for x, y in http_response.cookies.iteritems()\n ]\n )\n\n def _send(self, payload: str) -> None:\n \"\"\"_send\n\n sending data in payload to websocket\n \"\"\"\n\n if self.cookies is None:\n raise Exception(\n \"You forgot to say hello to geppetto\"\n \"(self._say_hello_geppetto())\"\n )\n\n self.ws.send(payload)\n\n def handle(\n self, _type: str, data: t.Union[dict, str], request_id=\"pg-request\"\n ):\n payload = json.dumps(\n {\n 'requestID': request_id,\n 'type': _type,\n 'data': data\n }\n )\n\n db_logger.info(\n f\"Sending '{_type}' request to GEPPETTO. 
Payload {payload}\"\n )\n\n self._send(payload)\n\n def read(self) -> str:\n result = self.ws.recv()\n\n if isinstance(result, bytes):\n result_bytes = bytearray(result)[1:]\n\n result = zlib.decompress(result_bytes, 15 + 32).decode()\n\n return result\n\n\n# TODO: convert this class to\n# TODO: GeppettoManager, move all building functionality to interpreters\[email protected]_validation\nclass GeppettoProjectBuilder():\n def __init__(\n self,\n interpreter: core.BaseModelInterpreter,\n score=None,\n model_file_url: t.Union[str, dict] = None,\n **options: dict\n ) -> None:\n \"\"\"__init__\n\n :**options: not required\n project_location: location where project file will be saved\n after replacing all values\n model_file_location: location where nml file will be saved\n after downloading\n watched_variables: list of variables, extracted from model\n timestep: timestep, as you see\n length: time model will be simulating\n project_name: obviously a project name\n \"\"\"\n\n self.no_score = score is None\n\n if not self.no_score:\n self.score = score\n else:\n db_logger.info(\n f'Score is not presented, working with url {model_file_url}'\n )\n self.model_file_url = model_file_url if self.no_score else self.score.model_instance.url # noqa: E501\n self.interpreter = interpreter(self.model_file_url)\n self.xmi_template = self.interpreter.get_model_template()\n self.project_template = self.interpreter.get_project_template()\n self.model_name = options.get('model_name', 'defaultModel')\n\n self.built_xmi_location = options.get('xmi_location', '/tmp/model.xmi')\n\n self.built_project_location = options.get(\n 'project_location', '/tmp/project.json'\n )\n\n self.model_file_location = options.get(\n 'model_file_location', '/tmp/model.nml'\n )\n\n for _dir in [\n 'built_xmi_location', 'built_project_location',\n 'model_file_location'\n ]:\n self.create_dir_if_not_exists(getattr(self, _dir))\n\n self.base_project_files_host = getattr(\n settings, 'BASE_PROJECT_FILES_HOST',\n 'http://localhost:8000/static/projects/'\n )\n\n if self.no_score:\n self.watched_variables = json.dumps(\n options.get('watched_variables', [])\n )\n else:\n self.watched_variables = json.dumps(\n self.score.model_instance.run_params.get(\n 'watchedVariables', []\n )\n )\n\n self.timestep = options.get('timestep', 0.00025)\n self.duration = options.get('duration', 0.800025)\n self.project_name = options.get('project_name', 'defaultProject')\n\n def setup_protocol(self, score_instance) -> str:\n model_class = general_hlp.import_class(\n score_instance.model_instance.model_class.import_path\n )\n model_instance = model_class(\n self.model_file_location, backend=ScidashCacheBackend.name\n )\n test_class = general_hlp.import_class(\n score_instance.test_instance.test_class.import_path\n )\n\n observation = copy.deepcopy(score_instance.test_instance.observation)\n params = copy.deepcopy(score_instance.test_instance.params)\n\n try:\n destructured = json.loads(\n score_instance.test_instance.test_class.units\n )\n except json.JSONDecodeError:\n units = general_hlp.import_class(\n score_instance.test_instance.test_class.units\n )\n else:\n if destructured.get('name', False):\n base_unit = general_hlp.import_class(\n destructured.get('base').get('quantity')\n )\n units = pq.UnitQuantity(\n destructured.get('name'),\n base_unit * destructured.get('base').get('coefficient'),\n destructured.get('symbol')\n )\n else:\n units = destructured\n\n for key in observation:\n if isinstance(units, dict):\n try:\n parsed_observation = 
json.loads(observation[key])\n np_observation = np.array(parsed_observation\n ) * general_hlp.import_class(\n units[key]\n )\n observation[key] = np_observation\n except json.JSONDecodeError:\n observation[key] = int(\n observation[key]\n ) * units[key] if key != 'n' else int(observation[key])\n else:\n try:\n parsed_observation = json.loads(observation[key])\n np_observation = np.array(parsed_observation) * units\n observation[key] = np_observation\n except json.JSONDecodeError:\n observation[key] = int(observation[key]\n ) * units if key != 'n' else int(\n observation[key]\n )\n\n params_units = score_instance.test_instance.test_class.params_units\n\n for key in params_units:\n params_units[key] = general_hlp.import_class(params_units[key])\n\n processed_params = {}\n\n for key in params:\n if params[key] is not None:\n processed_params[key] = float(params[key]) * params_units[key]\n\n test_instance = test_class(observation=observation)\n\n test_instance.setup_protocol(model_instance)\n\n nml_paths = model_instance.get_nml_paths()\n\n project_files_dir = self.model_file_location.replace(\n os.path.basename(self.model_file_location), ''\n )\n\n for path in nml_paths:\n db_logger.info(f\"Rewriting model file {path}\")\n\n with open(path, 'r') as f:\n file_name = os.path.basename(path)\n model_file_content = f.read()\n\n file_path = os.path.join(project_files_dir, file_name)\n\n with open(file_path, 'w+') as nf:\n nf.write(model_file_content)\n\n # embarassing\n lems_file_path = model_instance.lems_file_path\n\n with open(lems_file_path, 'r') as lf:\n content = lf.read()\n\n with open(\n os.path.join(project_files_dir, os.path.basename(\n lems_file_path\n )), 'w+'\n ) as nlf:\n nlf.write(content)\n\n self.interpreter = general_hlp.import_class(\n pgi_helpers.interpreter_detector(\n os.path.basename(lems_file_path)\n )\n )\n\n model_file_location = os.path.join(\n project_files_dir, os.path.basename(lems_file_path)\n )\n\n self.interpreter = self.interpreter(model_file_location)\n\n return model_file_location\n\n def get_file_path_tail(self, path: str):\n base_dir = getattr(settings, 'BASE_DIR', None)\n\n if base_dir is None:\n raise Exception(\n 'You should set BASE_DIR to project in settings.py'\n )\n\n base_project_files_dir = os.path.join(base_dir, 'static', 'projects')\n\n return path.replace(f'{base_project_files_dir}/', '')\n\n def create_dir_if_not_exists(self, path: str):\n dir_path = pathlib.Path(pathlib.Path(path).parents[0])\n\n if not dir_path.is_dir():\n dir_path.mkdir(parents=True, exist_ok=True)\n\n def build_url(self, path: str):\n return f'{self.base_project_files_host}{self.get_file_path_tail(path)}'\n\n def write_model_to_file(self) -> str:\n \"\"\"write_model_to_file\n\n Writes model file from `self.model_file_url`,\n saves it to `self.downloaded_nml_location`\n \"\"\"\n\n model_file_content = self.interpreter.get_model_file_content()\n\n db_logger.info(f\"Writing {self.model_file_url}\")\n\n with open(self.model_file_location, 'w') as model:\n model.write(model_file_content)\n\n file_name = os.path.basename(self.model_file_location)\n project_dir = self.model_file_location.replace(file_name, '')\n\n helpers.process_includes(\n self.model_file_url, project_dir, self.interpreter\n )\n\n if not self.no_score:\n self.model_file_location = self.setup_protocol(self.score)\n\n return self.model_file_location\n\n def build_xmi(self) -> str:\n \"\"\"build_xmi\n\n Builds xmi Geppetto model file from downloaded nml, saves to\n `self._built_xmi_location`\n \"\"\"\n\n with 
open(self.built_xmi_location, 'w') as xt:\n xt.write(\n self.xmi_template.format(\n name=self.model_name,\n target=self.interpreter.extract_target(),\n url=self.build_url(self.model_file_location)\n )\n )\n\n return self.build_url(self.built_xmi_location)\n\n def build_project(self) -> str:\n \"\"\"build_project\n :returns: path to project file\n\n \"\"\"\n\n self.write_model_to_file()\n self.build_xmi()\n\n with open(self.built_project_location, 'w+') as project:\n project.write(\n self.project_template.format(\n project_name=self.project_name,\n instance=self.interpreter.extract_instance(),\n target=self.interpreter.extract_target(),\n watched_variables=self.watched_variables,\n url=self.build_url(self.built_xmi_location),\n score_id=\"NULL\" if self.no_score else self.score.pk\n )\n )\n\n return self.build_url(self.built_project_location)\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shakeelDS/ml_python
[ "d5e9e954ddcff2202532fc1aafdcffe0cd139331" ]
[ "exercise_answers/dim_red_function.py" ]
[ "from sklearn.decomposition import PCA \r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import f1_score\r\n\r\n\r\ndef PCA_train_predict_score(X, y, k, random_state=42):\r\n \"\"\"\r\n Function to perform PCA and generate model to check performance of given k components.\r\n \r\n Args:\r\n X (np.array (n,p)): Feature data\r\n y (np.array (n,)): Target data\r\n k (int): number of components for PCA to return\r\n random_state (int): random seed for train_test_split\r\n \r\n Returns:\r\n score (num): f1_score result of model trained.\r\n \"\"\"\r\n # Split the data\r\n X_train, X_test, y_train, y_test = train_test_split(X, \r\n y, \r\n test_size=0.33, \r\n random_state=random_state)\r\n # Fit the PCA model\r\n pca = PCA(n_components=k).fit(X_train)\r\n \r\n # Our test data needs to be PCA'd on the original PCA model fit\r\n X_train = pca.transform(X_train)\r\n X_test = pca.transform(X_test)\r\n \r\n # Train the model \r\n model = LogisticRegression().fit(X_train, y_train)\r\n \r\n # Generate predictions and score the predictions\r\n y_pred = model.predict(X_test)\r\n score = f1_score(y_test, y_pred)\r\n \r\n return score\r\n" ]
[ [ "sklearn.metrics.f1_score", "sklearn.decomposition.PCA", "sklearn.model_selection.train_test_split", "sklearn.linear_model.LogisticRegression" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
suzannastep/eulers
[ "886da24546a490a11bc31ace4fbfa71536b129bf" ]
[ "solver/__init__.py" ]
[ "import matplotlib\nimport os\n\nif os.system != \"nt\": #pragma: no cover\n matplotlib.use(\"Agg\")\n" ]
[ [ "matplotlib.use" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cshyundev/LW-PSMNet
[ "d80d3b12c55ba30c781a7578a4728a2cd6321866" ]
[ "lightmodels/channel_compression/stackhourglass.py" ]
[ "from __future__ import print_function\nimport torch\nimport torch.nn as nn\nimport torch.utils.data\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport math\nfrom .submodule import *\n\nclass hourglass(nn.Module):\n def __init__(self, inplanes):\n super(hourglass, self).__init__()\n\n self.conv1 = nn.Sequential(convbn_3d(inplanes, inplanes*2, kernel_size=3, stride=2, pad=1),\n nn.ReLU(inplace=True))\n\n self.conv2 = convbn_3d(inplanes*2, inplanes*2,\n kernel_size=3, stride=1, pad=1)\n\n self.conv3 = nn.Sequential(convbn_3d(inplanes*2, inplanes*2, kernel_size=3, stride=2, pad=1),\n nn.ReLU(inplace=True))\n\n self.conv4 = nn.Sequential(convbn_3d(inplanes*2, inplanes*2, kernel_size=3, stride=1, pad=1),\n nn.ReLU(inplace=True))\n\n self.conv5 = nn.Sequential(nn.ConvTranspose3d(inplanes*2, inplanes*2, kernel_size=3, padding=1, output_padding=1, stride=2, bias=False),\n nn.BatchNorm3d(inplanes*2)) # +conv2\n\n self.conv6 = nn.Sequential(nn.ConvTranspose3d(inplanes*2, inplanes, kernel_size=3, padding=1, output_padding=1, stride=2, bias=False),\n nn.BatchNorm3d(inplanes)) # +x\n\n\n def forward(self, x, presqu, postsqu):\n\n out = self.conv1(x) # in:1/4 out:1/8\n pre = self.conv2(out) # in:1/8 out:1/8\n if postsqu is not None:\n pre = F.relu(pre + postsqu, inplace=True)\n else:\n pre = F.relu(pre, inplace=True)\n\n out = self.conv3(pre) # in:1/8 out:1/16\n out = self.conv4(out) # in:1/16 out:1/16\n\n if presqu is not None:\n post = F.relu(self.conv5(out)+presqu,\n inplace=True) # in:1/16 out:1/8\n else:\n post = F.relu(self.conv5(out)+pre, inplace=True)\n\n out = self.conv6(post) # in:1/8 out:1/4\n\n return out, pre, post\n\n\nclass PSMNet(nn.Module):\n def __init__(self, maxdisp):\n super(PSMNet, self).__init__()\n self.maxdisp = maxdisp\n\n # With new submodule.py\n # params: 3339552 -> 1008528\n self.feature_extraction = feature_extraction()\n\n # params: 83072 -> 20800\n self.dres0 = nn.Sequential(convbn_3d(32, 16, 3, 1, 1), # convbn_3d(64, 32, 3, 1, 1),\n nn.ReLU(inplace=True),\n convbn_3d(16, 16, 3, 1, 1), # convbn_3d(32, 32, 3, 1, 1),\n nn.ReLU(inplace=True))\n\n # params: 55424 -> 13888\n self.dres1 = nn.Sequential(convbn_3d(16, 16, 3, 1, 1), # convbn_3d(32, 32, 3, 1, 1),\n nn.ReLU(inplace=True),\n convbn_3d(16, 16, 3, 1, 1)) # convbn_3d(32, 32, 3, 1, 1))\n\n # params: 553664 -> 138592\n self.dres2 = hourglass(16) # self.dres2 = hourglass(32)\n\n # params: 553664 -> 138592\n self.dres3 = hourglass(16) # self.dres3 = hourglass(32)\n\n # params: 553664 -> 138592\n self.dres4 = hourglass(16) # self.dres4 = hourglass(32)\n\n # params: 28576 -> 7376\n self.classif1 = nn.Sequential(convbn_3d(16, 16, 3, 1, 1), # convbn_3d(32, 32, 3, 1, 1),\n nn.ReLU(inplace=True),\n # nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))\n nn.Conv3d(16, 1, kernel_size=3, padding=1, stride=1, bias=False))\n\n # params: 28576 -> 7376\n self.classif2 = nn.Sequential(convbn_3d(16, 16, 3, 1, 1), # convbn_3d(32, 32, 3, 1, 1),\n nn.ReLU(inplace=True),\n #nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))\n nn.Conv3d(16, 1, kernel_size=3, padding=1, stride=1, bias=False))\n\n # params: 28576 -> 7376\n self.classif3 = nn.Sequential(convbn_3d(16, 16, 3, 1, 1), # convbn_3d(32, 32, 3, 1, 1),\n nn.ReLU(inplace=True),\n #nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))\n nn.Conv3d(16, 1, kernel_size=3, padding=1, stride=1, bias=False))\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n 
m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.Conv3d):\n n = m.kernel_size[0] * m.kernel_size[1] * \\\n m.kernel_size[2] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm3d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.bias.data.zero_()\n\n def forward(self, left, right):\n\n refimg_fea = self.feature_extraction(left)\n targetimg_fea = self.feature_extraction(right)\n \n # matching\n cost = Variable(torch.FloatTensor(refimg_fea.size()[0], refimg_fea.size()[\n 1]*2, self.maxdisp//4, refimg_fea.size()[2], refimg_fea.size()[3]).zero_()).cuda()\n \n for i in range(self.maxdisp//4):\n if i > 0:\n cost[:, :refimg_fea.size()[1], i, :, i:] = refimg_fea[:, :, :, i:]\n cost[:, refimg_fea.size()[1]:, i, :,\n i:] = targetimg_fea[:, :, :, :-i]\n else:\n cost[:, :refimg_fea.size()[1], i, :, :] = refimg_fea\n cost[:, refimg_fea.size()[1]:, i, :, :] = targetimg_fea\n cost = cost.contiguous()\n \n cost0 = self.dres0(cost)\n \n cost0 = self.dres1(cost0) + cost0\n \n out1, pre1, post1 = self.dres2(cost0, None, None)\n out1 = out1+cost0\n \n out2, pre2, post2 = self.dres3(out1, pre1, post1)\n out2 = out2+cost0\n \n out3, pre3, post3 = self.dres4(out2, pre1, post2)\n out3 = out3+cost0\n \n cost1 = self.classif1(out1)\n cost2 = self.classif2(out2) + cost1\n cost3 = self.classif3(out3) + cost2\n \n if self.training:\n cost1 = F.upsample(cost1, [self.maxdisp, left.size()[\n 2], left.size()[3]], mode='trilinear')\n cost2 = F.upsample(cost2, [self.maxdisp, left.size()[\n 2], left.size()[3]], mode='trilinear')\n\n cost1 = torch.squeeze(cost1, 1)\n pred1 = F.softmax(cost1, dim=1)\n pred1 = disparityregression(self.maxdisp)(pred1)\n\n cost2 = torch.squeeze(cost2, 1)\n pred2 = F.softmax(cost2, dim=1)\n pred2 = disparityregression(self.maxdisp)(pred2)\n\n cost3 = F.upsample(cost3, [self.maxdisp, left.size()[\n 2], left.size()[3]], mode='trilinear')\n cost3 = torch.squeeze(cost3, 1)\n pred3 = F.softmax(cost3, dim=1)\n \n # For your information: This formulation 'softmax(c)' learned \"similarity\"\n # while 'softmax(-c)' learned 'matching cost' as mentioned in the paper.\n # However, 'c' or '-c' do not affect the performance because feature-based cost volume provided flexibility.\n pred3 = disparityregression(self.maxdisp)(pred3)\n \n if self.training:\n return pred1, pred2, pred3\n else:\n return pred3\n \n" ]
[ [ "torch.nn.functional.softmax", "torch.nn.ConvTranspose3d", "torch.nn.Conv3d", "torch.nn.functional.relu", "torch.nn.ReLU", "torch.nn.BatchNorm3d", "torch.squeeze" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
smilence86/A-Light-and-Fast-Face-Detector-for-Edge-Devices
[ "cea550ae7999394da9c02ec15cb58b44c154e306" ]
[ "license_plate_detection/accuracy_evaluation/predict.py" ]
[ "# coding: utf-8\nimport sys\nimport os\nimport numpy\nimport cv2\n\n# empty data batch class for dynamical properties\nclass DataBatch:\n pass\n\n\ndef NMS(boxes, overlap_threshold):\n '''\n\n :param boxes: numpy nx5, n is the number of boxes, 0:4->x1, y1, x2, y2, 4->score\n :param overlap_threshold:\n :return:\n '''\n if boxes.shape[0] == 0:\n return boxes\n\n # if the bounding boxes integers, convert them to floats --\n # this is important since we'll be doing a bunch of divisions\n if boxes.dtype != numpy.float32:\n boxes = boxes.astype(numpy.float32)\n\n # initialize the list of picked indexes\n pick = []\n # grab the coordinates of the bounding boxes\n x1 = boxes[:, 0]\n y1 = boxes[:, 1]\n x2 = boxes[:, 2]\n y2 = boxes[:, 3]\n sc = boxes[:, 4]\n widths = x2 - x1\n heights = y2 - y1\n\n # compute the area of the bounding boxes and sort the bounding\n # boxes by the bottom-right y-coordinate of the bounding box\n area = heights * widths\n idxs = numpy.argsort(sc) # 从小到大排序\n\n # keep looping while some indexes still remain in the indexes list\n while len(idxs) > 0:\n # grab the last index in the indexes list and add the\n # index value to the list of picked indexes\n last = len(idxs) - 1\n i = idxs[last]\n pick.append(i)\n\n # compare secend highest score boxes\n xx1 = numpy.maximum(x1[i], x1[idxs[:last]])\n yy1 = numpy.maximum(y1[i], y1[idxs[:last]])\n xx2 = numpy.minimum(x2[i], x2[idxs[:last]])\n yy2 = numpy.minimum(y2[i], y2[idxs[:last]])\n\n # compute the width and height of the bo( box\n w = numpy.maximum(0, xx2 - xx1 + 1)\n h = numpy.maximum(0, yy2 - yy1 + 1)\n\n # compute the ratio of overlap\n overlap = (w * h) / area[idxs[:last]]\n\n # delete all indexes from the index list that have\n idxs = numpy.delete(idxs, numpy.concatenate(([last], numpy.where(overlap > overlap_threshold)[0])))\n\n # return only the bounding boxes that were picked using the\n # integer data type\n return boxes[pick]\n\n\nclass Predict(object):\n\n def __init__(self,\n mxnet,\n symbol_file_path,\n model_file_path,\n ctx,\n receptive_field_list,\n receptive_field_stride,\n bbox_small_list,\n bbox_large_list,\n receptive_field_center_start,\n num_output_scales\n ):\n self.mxnet = mxnet\n self.symbol_file_path = symbol_file_path\n self.model_file_path = model_file_path\n self.ctx = ctx\n\n self.receptive_field_list = receptive_field_list\n self.receptive_field_stride = receptive_field_stride\n self.bbox_small_list = bbox_small_list\n self.bbox_large_list = bbox_large_list\n self.receptive_field_center_start = receptive_field_center_start\n self.num_output_scales = num_output_scales\n self.constant = [i / 2.0 for i in self.receptive_field_list]\n self.input_height = 480\n self.input_width = 640\n self.__load_model()\n\n def __load_model(self):\n # load symbol and parameters\n print('----> load symbol file: %s\\n----> load model file: %s' % (self.symbol_file_path, self.model_file_path))\n if not os.path.exists(self.symbol_file_path):\n print('The symbol file does not exist!!!!')\n sys.exit(1)\n if not os.path.exists(self.model_file_path):\n print('The model file does not exist!!!!')\n sys.exit(1)\n self.symbol_net = self.mxnet.symbol.load(self.symbol_file_path)\n data_name = 'data'\n data_name_shape = (data_name, (1, 3, self.input_height, self.input_width))\n self.module = self.mxnet.module.Module(symbol=self.symbol_net,\n data_names=[data_name],\n label_names=None,\n context=self.ctx,\n work_load_list=None)\n self.module.bind(data_shapes=[data_name_shape],\n for_training=False)\n\n save_dict = 
self.mxnet.nd.load(self.model_file_path)\n self.arg_name_arrays = dict()\n self.arg_name_arrays['data'] = self.mxnet.nd.zeros((1, 3, self.input_height, self.input_width), self.ctx)\n self.aux_name_arrays = {}\n for k, v in save_dict.items():\n tp, name = k.split(':', 1)\n if tp == 'arg':\n self.arg_name_arrays.update({name: v.as_in_context(self.ctx)})\n if tp == 'aux':\n self.aux_name_arrays.update({name: v.as_in_context(self.ctx)})\n self.module.init_params(arg_params=self.arg_name_arrays,\n aux_params=self.aux_name_arrays,\n allow_missing=True)\n print('----> Model is loaded successfully.')\n\n def predict(self, image, resize_scale=1., score_threshold=0.8, top_k=100, NMS_threshold=0.3, NMS_flag=True, skip_scale_branch_list=[]):\n\n if image.ndim != 3 or image.shape[2] != 3:\n print('Only RGB images are supported.')\n return None\n\n bbox_collection = []\n\n shorter_side = min(image.shape[:2])\n if shorter_side * resize_scale < 128:\n resize_scale = float(128) / shorter_side\n\n input_image = cv2.resize(image, (0, 0), fx=resize_scale, fy=resize_scale)\n\n input_image = input_image.astype(dtype=numpy.float32)\n input_image = input_image[:, :, :, numpy.newaxis]\n input_image = input_image.transpose([3, 2, 0, 1])\n\n data_batch = DataBatch()\n data_batch.data = [self.mxnet.ndarray.array(input_image, self.ctx)]\n\n self.module.forward(data_batch=data_batch, is_train=False)\n results = self.module.get_outputs()\n outputs = []\n for output in results:\n outputs.append(output.asnumpy())\n\n for i in range(self.num_output_scales):\n if i in skip_scale_branch_list:\n continue\n\n score_map = numpy.squeeze(outputs[i * 2], (0, 1))\n\n score_map_show = score_map * 255\n score_map_show[score_map_show < 0] = 0\n score_map_show[score_map_show > 255] = 255\n cv2.imshow('score_map' + str(i), cv2.resize(score_map_show.astype(dtype=numpy.uint8), (0, 0), fx=2, fy=2))\n cv2.waitKey()\n\n bbox_map = numpy.squeeze(outputs[i * 2 + 1], 0)\n\n RF_center_Xs = numpy.array([self.receptive_field_center_start[i] + self.receptive_field_stride[i] * x for x in range(score_map.shape[1])])\n RF_center_Xs_mat = numpy.tile(RF_center_Xs, [score_map.shape[0], 1])\n RF_center_Ys = numpy.array([self.receptive_field_center_start[i] + self.receptive_field_stride[i] * y for y in range(score_map.shape[0])])\n RF_center_Ys_mat = numpy.tile(RF_center_Ys, [score_map.shape[1], 1]).T\n\n x_lt_mat = RF_center_Xs_mat - bbox_map[0, :, :] * self.constant[i]\n y_lt_mat = RF_center_Ys_mat - bbox_map[1, :, :] * self.constant[i]\n x_rb_mat = RF_center_Xs_mat - bbox_map[2, :, :] * self.constant[i]\n y_rb_mat = RF_center_Ys_mat - bbox_map[3, :, :] * self.constant[i]\n\n x_lt_mat = x_lt_mat / resize_scale\n x_lt_mat[x_lt_mat < 0] = 0\n y_lt_mat = y_lt_mat / resize_scale\n y_lt_mat[y_lt_mat < 0] = 0\n x_rb_mat = x_rb_mat / resize_scale\n x_rb_mat[x_rb_mat > image.shape[1]] = image.shape[1]\n y_rb_mat = y_rb_mat / resize_scale\n y_rb_mat[y_rb_mat > image.shape[0]] = image.shape[0]\n\n select_index = numpy.where(score_map > score_threshold)\n for idx in range(select_index[0].size):\n bbox_collection.append((x_lt_mat[select_index[0][idx], select_index[1][idx]],\n y_lt_mat[select_index[0][idx], select_index[1][idx]],\n x_rb_mat[select_index[0][idx], select_index[1][idx]],\n y_rb_mat[select_index[0][idx], select_index[1][idx]],\n score_map[select_index[0][idx], select_index[1][idx]]))\n\n # NMS\n bbox_collection = sorted(bbox_collection, key=lambda item: item[-1], reverse=True)\n if len(bbox_collection) > top_k:\n bbox_collection = 
bbox_collection[0:top_k]\n bbox_collection_numpy = numpy.array(bbox_collection, dtype=numpy.float32)\n\n if NMS_flag:\n final_bboxes = NMS(bbox_collection_numpy, NMS_threshold)\n final_bboxes_ = []\n for i in range(final_bboxes.shape[0]):\n final_bboxes_.append((final_bboxes[i, 0], final_bboxes[i, 1], final_bboxes[i, 2], final_bboxes[i, 3], final_bboxes[i, 4]))\n\n return final_bboxes_\n else:\n return bbox_collection_numpy\n\n\ndef run_prediction_pickle():\n from config_farm import configuration_64_512_16L_3scales_v1 as cfg\n import mxnet\n\n data_pickle_file_path = '../data_provider_farm/data_folder/data_list_CCPD_train_debug.pkl'\n from data_provider_farm.pickle_provider import PickleProvider\n pickle_provider = PickleProvider(data_pickle_file_path)\n positive_index = pickle_provider.positive_index\n negative_index = pickle_provider.negative_index\n all_index = positive_index #+negative_index\n print(\"num of positive: %d\\nnum of negative: %d\" % (len(positive_index), len(negative_index)))\n import random\n random.shuffle(all_index)\n\n symbol_file_path = '../symbol_farm/symbol_64_512_16L_3scales_v1_deploy.json'\n model_file_path = '../saved_model/configuration_64_512_16L_3scales_v1_2019-09-29-13-41-44/train_64_512_16L_3scales_v1_iter_600000.params'\n my_predictor = Predict(mxnet=mxnet,\n symbol_file_path=symbol_file_path,\n model_file_path=model_file_path,\n ctx=mxnet.gpu(0),\n receptive_field_list=cfg.param_receptive_field_list,\n receptive_field_stride=cfg.param_receptive_field_stride,\n bbox_small_list=cfg.param_bbox_small_list,\n bbox_large_list=cfg.param_bbox_large_list,\n receptive_field_center_start=cfg.param_receptive_field_center_start,\n num_output_scales=cfg.param_num_output_scales)\n\n for idx in all_index:\n im, _, bboxes_gt = pickle_provider.read_by_index(idx)\n\n bboxes = my_predictor.predict(im, resize_scale=1, score_threshold=0.5, top_k=10000, NMS_threshold=0.5)\n for bbox in bboxes:\n cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 0), 2)\n\n cv2.imshow('im', im)\n cv2.waitKey()\n\n\ndef run_prediction_folder():\n from config_farm import configuration_64_512_16L_3scales_v1 as cfg\n import mxnet\n\n test_folder = './test_images'\n file_name_list = [file_name for file_name in os.listdir(test_folder) if file_name.lower().endswith('jpg') or file_name.lower().endswith('png')]\n\n symbol_file_path = '../symbol_farm/symbol_64_512_16L_3scales_v1_deploy.json'\n model_file_path = '../saved_model/configuration_64_512_16L_3scales_v1_2019-09-29-13-41-44/train_64_512_16L_3scales_v1_iter_1000000.params'\n my_predictor = Predict(mxnet=mxnet,\n symbol_file_path=symbol_file_path,\n model_file_path=model_file_path,\n ctx=mxnet.gpu(0),\n receptive_field_list=cfg.param_receptive_field_list,\n receptive_field_stride=cfg.param_receptive_field_stride,\n bbox_small_list=cfg.param_bbox_small_list,\n bbox_large_list=cfg.param_bbox_large_list,\n receptive_field_center_start=cfg.param_receptive_field_center_start,\n num_output_scales=cfg.param_num_output_scales)\n\n for file_name in file_name_list:\n im = cv2.imread(os.path.join(test_folder, file_name))\n\n bboxes = my_predictor.predict(im, resize_scale=1, score_threshold=0.5, top_k=10000, NMS_threshold=0.4, NMS_flag=True, skip_scale_branch_list=[])\n for bbox in bboxes:\n cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 0), 2)\n\n if max(im.shape[:2]) > 1440:\n scale = 1440/max(im.shape[:2])\n im = cv2.resize(im, (0, 0), fx=scale, fy=scale)\n cv2.imshow('im', im)\n cv2.waitKey()\n 
cv2.imwrite('./test_images/'+file_name+'_result.jpg', im)\n\n\nif __name__ == '__main__':\n # run_prediction_pickle()\n run_prediction_folder()\n" ]
[ [ "numpy.maximum", "numpy.minimum", "numpy.squeeze", "numpy.tile", "numpy.argsort", "numpy.array", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
arunbonagiri190/Cat-Dog-Classifier
[ "fe87a6547f423a6082366a07245b9c8b6bd288a1" ]
[ "app.py" ]
[ "import torch\nimport torchvision.transforms as transforms\nimport model\nfrom PIL import Image\nimport sys\n\nDIR=\"data/models/\"\nMODEL=\"model-100-epochs-adam-0003-lr-cpu.pth\"\n\n\ndef get_model(PATH, model):\n device = torch.device('cpu')\n model.load_state_dict(torch.load(PATH, map_location=device))\n model.eval()\n return model\n\ndef load_img(PATH):\n img = Image.open(PATH)\n img.load()\n return img\n\ndef load_apply_preprocessing(PATH):\n\n test_transforms = transforms.Compose([\n\n transforms.Resize((224,224)),\n transforms.ToTensor(),\n transforms.Normalize([0.5, 0.5, 0.5],\n [0.5, 0.5, 0.5])])\n img = load_img(PATH)\n img = test_transforms(img)\n img = torch.unsqueeze(img, 0)\n return img\n\ndef predict(model, img):\n with torch.no_grad():\n pred = model(img)\n \n idx = pred.argmax()\n prob = torch.nn.functional.softmax(pred, dim=1)[0][idx].item()\n res = (f\"Cat {prob}%\") if pred.argmax()==0 else (f\"Dog {prob}%\")\n return res\n\nif __name__ == \"__main__\":\n\n sample_img = sys.argv[1] #\"data/cat.jpg\"\n model = model.Classifier()\n model = get_model(DIR+MODEL, model)\n \n img = load_apply_preprocessing(sample_img)\n result = predict(model, img)\n print(\"Image:\",sample_img,\" ---> \",result)" ]
[ [ "torch.nn.functional.softmax", "torch.load", "torch.unsqueeze", "torch.no_grad", "torch.device" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sdss/astra_thecannon
[ "3062025aa2ac3b8af257490be63201587b23762d" ]
[ "python/astra_thecannon/continuum.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nContinuum-normalization.\n\"\"\"\n\nfrom __future__ import (division, print_function, absolute_import,\n unicode_literals)\n\n__all__ = [\"normalize\", \"sines_and_cosines\"]\n\nimport numpy as np\nimport os\nfrom warnings import warn\n\n\ndef _continuum_design_matrix(dispersion, L, order):\n \"\"\"\n Build a design matrix for the continuum determination, using sines and\n cosines.\n\n :param dispersion:\n An array of dispersion points.\n\n :param L:\n The length-scale for the sine and cosine functions.\n\n :param order:\n The number of sines and cosines to use in the fit.\n \"\"\"\n\n L, dispersion = float(L), np.array(dispersion)\n scale = 2 * (np.pi / L)\n return np.vstack([\n np.ones_like(dispersion).reshape((1, -1)), \n np.array([\n [np.cos(o * scale * dispersion), np.sin(o * scale * dispersion)] \\\n for o in range(1, order + 1)]).reshape((2 * order, dispersion.size))\n ])\n\n\ndef sines_and_cosines(dispersion, flux, ivar, continuum_pixels, L=1400, order=3, \n regions=None, fill_value=1.0, **kwargs):\n \"\"\"\n Fit the flux values of pre-defined continuum pixels using a sum of sine and\n cosine functions.\n\n :param dispersion:\n The dispersion values.\n\n :param flux:\n The flux values for all pixels, as they correspond to the `dispersion`\n array.\n\n :param ivar:\n The inverse variances for all pixels, as they correspond to the\n `dispersion` array.\n\n :param continuum_pixels:\n A mask that selects pixels that should be considered as 'continuum'.\n\n :param L: [optional]\n The length scale for the sines and cosines.\n\n :param order: [optional]\n The number of sine/cosine functions to use in the fit.\n\n :param regions: [optional]\n Specify sections of the spectra that should be fitted separately in each\n star. This may be due to gaps between CCDs, or some other physically-\n motivated reason. These values should be specified in the same units as\n the `dispersion`, and should be given as a list of `[(start, end), ...]`\n values. 
For example, APOGEE spectra have gaps near the following\n wavelengths which could be used as `regions`:\n\n >> regions = ([15090, 15822], [15823, 16451], [16452, 16971])\n\n :param fill_value: [optional]\n The continuum value to use for when no continuum was calculated for that\n particular pixel (e.g., the pixel is outside of the `regions`).\n\n :param full_output: [optional]\n If set as True, then a metadata dictionary will also be returned.\n\n :returns:\n The continuum values for all pixels, and a dictionary that contains \n metadata about the fit.\n \"\"\"\n\n scalar = kwargs.pop(\"__magic_scalar\", 1e-6) # MAGIC\n flux, ivar = np.atleast_2d(flux), np.atleast_2d(ivar)\n\n if regions is None:\n regions = [(dispersion[0], dispersion[-1])]\n\n region_masks = []\n region_matrices = []\n continuum_masks = []\n continuum_matrices = []\n pixel_included_in_regions = np.zeros_like(flux).astype(int)\n for i, (start, end) in enumerate(regions):\n\n # Build the masks for this region.\n si, ei = np.searchsorted(dispersion, (start, end))\n region_mask = (end >= dispersion) * (dispersion >= start)\n region_masks.append(region_mask)\n pixel_included_in_regions[:, region_mask] += 1\n\n continuum_masks.append(continuum_pixels[\n (ei >= continuum_pixels) * (continuum_pixels >= si)])\n\n # Build the design matrices for this region.\n region_matrices.append(\n _continuum_design_matrix(dispersion[region_masks[-1]], L, order))\n continuum_matrices.append(\n _continuum_design_matrix(dispersion[continuum_masks[-1]], L, order))\n\n # TODO: ISSUE: Check for overlapping regions and raise an warning.\n\n # Check for non-zero pixels (e.g. ivar > 0) that are not included in a\n # region. We should warn about this very loudly!\n warn_on_pixels = (pixel_included_in_regions == 0) * (ivar > 0)\n\n metadata = []\n continuum = np.ones_like(flux) * fill_value\n for i in range(flux.shape[0]):\n\n warn_indices = np.where(warn_on_pixels[i])[0]\n if any(warn_indices):\n # Split by deltas so that we give useful warning messages.\n segment_indices = np.where(np.diff(warn_indices) > 1)[0]\n segment_indices = np.sort(np.hstack(\n [0, segment_indices, segment_indices + 1, len(warn_indices)]))\n segment_indices = segment_indices.reshape(-1, 2)\n\n segments = \", \".join([\"{:.1f} to {:.1f}\".format(\n dispersion[s], dispersion[e], e-s) for s, e in segment_indices])\n\n warn(f\"Some pixels in have measured flux values (e.g., ivar > 0) but are not included \"\n f\"in any specified region ({segments}).\")\n\n # Get the flux and inverse variance for this object.\n object_metadata = []\n object_flux, object_ivar = (flux[i], ivar[i])\n\n # Normalize each region.\n for region_mask, region_matrix, continuum_mask, continuum_matrix in \\\n zip(region_masks, region_matrices, continuum_masks, continuum_matrices):\n if continuum_mask.size == 0:\n # Skipping..\n object_metadata.append([order, L, fill_value, scalar, [], None])\n continue\n\n # We will fit to continuum pixels only. 
\n continuum_disp = dispersion[continuum_mask] \n continuum_flux, continuum_ivar \\\n = (object_flux[continuum_mask], object_ivar[continuum_mask])\n\n # Solve for the amplitudes.\n M = continuum_matrix\n MTM = np.dot(M, continuum_ivar[:, None] * M.T)\n MTy = np.dot(M, (continuum_ivar * continuum_flux).T)\n\n eigenvalues = np.linalg.eigvalsh(MTM)\n MTM[np.diag_indices(len(MTM))] += scalar * np.max(eigenvalues)\n eigenvalues = np.linalg.eigvalsh(MTM)\n condition_number = max(eigenvalues)/min(eigenvalues)\n\n amplitudes = np.linalg.solve(MTM, MTy)\n continuum[i, region_mask] = np.dot(region_matrix.T, amplitudes)\n object_metadata.append(\n (order, L, fill_value, scalar, amplitudes, condition_number))\n\n metadata.append(object_metadata)\n\n return (continuum, metadata) \n \n\ndef normalize(dispersion, flux, ivar, continuum_regions=None, L=1400, order=3, \n regions=([3000, 10000], [15090, 15822], [15823, 16451], [16452, 16971]), \n fill_value=1.0, **kwargs):\n \"\"\"\n Pseudo-continuum-normalize the flux using a defined set of continuum pixels\n and a sum of sine and cosine functions.\n\n :param dispersion:\n The dispersion values.\n\n :param flux:\n The flux values for all pixels, as they correspond to the `dispersion`\n array.\n\n :param ivar:\n The inverse variances for all pixels, as they correspond to the\n `dispersion` array.\n\n :param continuum_regions: [optional]\n A list of two-length tuples that describe the start and end of regions\n that should be treated as continuum.\n\n :param L: [optional]\n The length scale for the sines and cosines.\n\n :param order: [optional]\n The number of sine/cosine functions to use in the fit.\n\n :param regions: [optional]\n Specify sections of the spectra that should be fitted separately in each\n star. This may be due to gaps between CCDs, or some other physically-\n motivated reason. These values should be specified in the same units as\n the `dispersion`, and should be given as a list of `[(start, end), ...]`\n values. For example, APOGEE spectra have gaps near the following\n wavelengths which could be used as `regions`:\n\n >> regions = ([15090, 15822], [15823, 16451], [16452, 16971])\n\n :param fill_value: [optional]\n The continuum value to use for when no continuum was calculated for that\n particular pixel (e.g., the pixel is outside of the `regions`).\n\n :param full_output: [optional]\n If set as True, then a metadata dictionary will also be returned.\n\n :returns:\n The continuum values for all pixels, and a dictionary that contains \n metadata about the fit.\n \"\"\"\n\n if continuum_regions is None:\n default_path = os.path.join(os.path.dirname(__file__), \"etc/continuum-regions.list\")\n continuum_regions = np.loadtxt(default_path)\n\n # Work out the continuum pixels.\n mask = np.zeros(dispersion.size, dtype=bool)\n for start, end in dispersion.searchsorted(continuum_regions):\n mask[start:end] = True\n\n continuum_pixels = np.arange(dispersion.size)[mask]\n\n continuum, metadata = sines_and_cosines(dispersion, flux, ivar, \n continuum_pixels, L=L, order=order, regions=regions,\n fill_value=fill_value, **kwargs)\n\n normalized_flux = flux/continuum\n normalized_ivar = continuum * ivar * continuum\n normalized_flux[normalized_ivar == 0] = 1.0\n \n non_finite_pixels = ~np.isfinite(normalized_flux)\n normalized_flux[non_finite_pixels] = 1.0\n normalized_ivar[non_finite_pixels] = 0.0\n\n return (normalized_flux, normalized_ivar, continuum, metadata)\n\n\n" ]
[ [ "numpy.dot", "numpy.linalg.solve", "numpy.ones_like", "numpy.isfinite", "numpy.arange", "numpy.cos", "numpy.sin", "numpy.atleast_2d", "numpy.max", "numpy.zeros_like", "numpy.diff", "numpy.searchsorted", "numpy.linalg.eigvalsh", "numpy.array", "numpy.zeros", "numpy.where", "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kerkelae/dkmri
[ "af07880faa09b007d7ea56018ab9dbd9ae1ca223" ]
[ "dkmri/tests/test_dkmri.py" ]
[ "import numpy as np\nimport numpy.testing as npt\n\nimport dkmri\n\n\nSEED = 123\n\nparams = np.array(\n [\n 7.90764792,\n 0.88660664,\n 0.82186469,\n 0.81741033,\n 0.25016042,\n 0.12341918,\n 0.28344717,\n 0.97744794,\n 0.64809536,\n 0.54047796,\n 0.09333558,\n -0.06614247,\n 0.07547532,\n 0.16822022,\n 0.12438352,\n 0.14840455,\n 0.16173709,\n 0.17534938,\n 0.42078548,\n -0.05851049,\n 0.07203667,\n 0.12034342,\n ]\n)\n\n\ndef test_design_matrix():\n bvals = np.arange(5)\n bvecs = np.array(\n [\n [1.0, 0.0, 0.0],\n [1.0, 0.0, 0.0],\n [1.0, 0.0, 0.0],\n [0.0, 1.0, 0.0],\n [0.0, 0.0, 1.0],\n ]\n )\n desired_X = np.array(\n [\n [1.0, 1.0, 1.0, 1.0, 1.0],\n [0.0, -1.0, -2.0, -0.0, -0.0],\n [0.0, -0.0, -0.0, -3.0, -0.0],\n [0.0, -0.0, -0.0, -0.0, -4.0],\n [0.0, -0.0, -0.0, -0.0, -0.0],\n [0.0, -0.0, -0.0, -0.0, -0.0],\n [0.0, -0.0, -0.0, -0.0, -0.0],\n [0.0, 1 / 6, 2 / 3, 0.0, 0.0],\n [0.0, 0.0, 0.0, 1.5, 0.0],\n [0.0, 0.0, 0.0, 0.0, 8 / 3],\n [0.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0],\n ]\n ).T\n X = dkmri.design_matrix(bvals, bvecs)\n npt.assert_almost_equal(X, desired_X)\n\n\ndef test_params_to_D():\n desired_D = np.array(\n [\n [0.88660664, 0.25016042, 0.12341918],\n [0.25016042, 0.82186469, 0.28344717],\n [0.12341918, 0.28344717, 0.81741033],\n ]\n )\n D = dkmri.params_to_D(params)\n npt.assert_almost_equal(D, desired_D)\n\n\ndef test_params_to_W():\n desired_W = np.array(\n [\n [\n [\n [1.37882815, 0.131663, -0.09330328],\n [0.131663, 0.22815298, -0.0825373],\n [-0.09330328, -0.0825373, 0.24735503],\n ],\n [\n [0.131663, 0.22815298, -0.0825373],\n [0.22815298, 0.10646858, 0.10161789],\n [-0.0825373, 0.10161789, 0.16976136],\n ],\n [\n [-0.09330328, -0.0825373, 0.24735503],\n [-0.0825373, 0.10161789, 0.16976136],\n [0.24735503, 0.16976136, 0.17546049],\n ],\n ],\n [\n [\n [0.131663, 0.22815298, -0.0825373],\n [0.22815298, 0.10646858, 0.10161789],\n [-0.0825373, 0.10161789, 0.16976136],\n ],\n [\n [0.22815298, 0.10646858, 0.10161789],\n [0.10646858, 0.9142299, 0.23729835],\n [0.10161789, 0.23729835, 0.59357726],\n ],\n [\n [-0.0825373, 0.10161789, 0.16976136],\n [0.10161789, 0.23729835, 0.59357726],\n [0.16976136, 0.59357726, 0.20934554],\n ],\n ],\n [\n [\n [-0.09330328, -0.0825373, 0.24735503],\n [-0.0825373, 0.10161789, 0.16976136],\n [0.24735503, 0.16976136, 0.17546049],\n ],\n [\n [-0.0825373, 0.10161789, 0.16976136],\n [0.10161789, 0.23729835, 0.59357726],\n [0.16976136, 0.59357726, 0.20934554],\n ],\n [\n [0.24735503, 0.16976136, 0.17546049],\n [0.16976136, 0.59357726, 0.20934554],\n [0.17546049, 0.20934554, 0.76242038],\n ],\n ],\n ]\n )\n W = dkmri.params_to_W(params)\n npt.assert_almost_equal(W, desired_W)\n\n\ndef test_tensors_to_params():\n S0 = np.exp(params[..., 0])\n D = dkmri.params_to_D(params)\n W = dkmri.params_to_W(params)\n npt.assert_almost_equal(dkmri.tensors_to_params(S0, D, W), params)\n return\n\n\ndef test__adc():\n np.random.seed(SEED)\n D = dkmri.params_to_D(params)\n for _ in range(100):\n v = np.random.random((1, 3)) - 0.5\n v /= np.linalg.norm(v)\n desired_adc = (v @ D @ v.T)[0]\n adc = np.asarray(dkmri._adc(params, v))\n npt.assert_almost_equal(adc, desired_adc)\n vs = np.vstack((v, v))\n adcs = np.asarray(dkmri._adc(params, vs))\n 
npt.assert_almost_equal(adcs[0], adc)\n npt.assert_almost_equal(adcs[1], adc)\n\n\ndef test_params_to_md():\n desired_md = 0.8419605533333335\n md = dkmri.params_to_md(params)\n npt.assert_almost_equal(md, desired_md)\n\n\ndef test_params_to_ad():\n desired_ad = 1.2839527280964818\n ad = dkmri.params_to_ad(params)\n npt.assert_almost_equal(ad, desired_ad)\n\n\ndef test_params_to_rd():\n desired_rd = 0.6209644659517595\n rd = dkmri.params_to_rd(params)\n npt.assert_almost_equal(rd, desired_rd)\n\n\ndef test_params_to_fa():\n desired_fa = 0.4425100287524919\n fa = dkmri.params_to_fa(params)\n npt.assert_almost_equal(fa, desired_fa)\n\n\ndef test__akc():\n np.random.seed(SEED)\n D = dkmri.params_to_D(params)\n W = dkmri.params_to_W(params)\n for _ in range(100):\n v = np.random.random((1, 3)) - 0.5\n v /= np.linalg.norm(v)\n md = dkmri.params_to_md(params)\n adc = dkmri._adc(params, v)\n desired_akc = (md / adc) ** 2 * v[0] @ (v[0] @ W @ v[0]) @ v[0]\n akc = np.asarray(dkmri._akc(params, v))\n npt.assert_almost_equal(akc, desired_akc)\n vs = np.vstack((v, v))\n akcs = np.asarray(dkmri._akc(params, vs))\n npt.assert_almost_equal(akcs[0], akc)\n npt.assert_almost_equal(akcs[1], akc)\n\n\ndef test_params_to_mk():\n desired_mk = 1.1124342668323295\n mk = dkmri.params_to_mk(params)\n npt.assert_almost_equal(mk, desired_mk)\n\n\ndef test_params_to_ak():\n desired_ak = 0.7109767625600302\n ak = dkmri.params_to_ak(params)\n npt.assert_almost_equal(ak, desired_ak)\n\n\ndef test_params_to_rk():\n desired_rk = 1.5180490434619633\n rk = dkmri.params_to_rk(params)\n npt.assert_almost_equal(rk, desired_rk)\n\n\ndef test__mtk():\n desired_mtk = 1.0387297963232285\n mtk = dkmri._mtk(params)\n npt.assert_almost_equal(mtk, desired_mtk)\n" ]
[ [ "numpy.random.random", "numpy.random.seed", "numpy.arange", "numpy.linalg.norm", "numpy.testing.assert_almost_equal", "numpy.array", "numpy.exp", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
martinfleis/transbigdata
[ "520cb59dd857ac1e30d904aabda1b76addf9354d" ]
[ "src/transbigdata/grids.py" ]
[ "import geopandas as gpd \nimport pandas as pd\nfrom shapely.geometry import Polygon,Point\nimport math \nimport numpy as np\ndef rect_grids(bounds,accuracy = 500):\n '''\n 生成研究范围内的方形栅格\n\n 输入\n -------\n bounds : List\n 生成范围的边界,[lon1,lat1,lon2,lat2] (WGS84坐标系) 其中,lon1,lat1是左下角坐标,lon2,lat2是右上角坐标 \n accuracy : number\n 栅格大小(米)\n \n\n 输出\n -------\n grid : GeoDataFrame\n 栅格的GeoDataFrame,其中LONCOL与LATCOL为栅格的编号,HBLON与HBLAT为栅格的中心点坐标 \n params : List\n 栅格参数(lonStart,latStart,deltaLon,deltaLat),分别为栅格左下角坐标与单个栅格的经纬度长宽\n '''\n #导入math包 \n #划定栅格划分范围\n lon1 = bounds[0]\n lat1 = bounds[1]\n lon2 = bounds[2]\n lat2 = bounds[3]\n #取得左下角的经纬度 \n latStart = min(lat1, lat2); \n lonStart = min(lon1, lon2); \n #计算栅格的经纬度增加量大小▲Lon和▲Lat,地球半径取6371004米 \n deltaLon = accuracy * 360 / (2 * math.pi * 6371004 * math.cos((lat1 + lat2) * math.pi / 360)); \n deltaLat = accuracy * 360 / (2 * math.pi * 6371004); \n #定义空的GeoDataFrame表,再往里加栅格 \n data = gpd.GeoDataFrame() \n #定义空的list,后面循环一次就往里面加东西 \n LONCOL_list = [] \n LATCOL_list = [] \n geometry_list = [] \n HBLON_list = [] \n HBLAT_list = [] \n #计算行列要生成的栅格数量 \n #lon方向是lonsnum个栅格 \n lonsnum = int((lon2-lon1)/deltaLon)+1 \n #lat方向是latsnum个栅格 \n latsnum = int((lat2-lat1)/deltaLat)+1 \n for i in range(lonsnum): \n for j in range(latsnum): \n #第i列,第j行的栅格中心点坐标 \n HBLON = i*deltaLon + lonStart \n HBLAT = j*deltaLat + latStart \n #用周围的栅格推算三个顶点的位置\n HBLON_1 = (i+1)*deltaLon + lonStart \n HBLAT_1 = (j+1)*deltaLat + latStart \n #生成栅格的Polygon形状 \n grid_ij = Polygon([ \n (HBLON-deltaLon/2,HBLAT-deltaLat/2), \n (HBLON_1-deltaLon/2,HBLAT-deltaLat/2), \n (HBLON_1-deltaLon/2,HBLAT_1-deltaLat/2), \n (HBLON-deltaLon/2,HBLAT_1-deltaLat/2)]) \n #把生成的数据都加入到前面定义的空list里面 \n LONCOL_list.append(i) \n LATCOL_list.append(j) \n HBLON_list.append(HBLON) \n HBLAT_list.append(HBLAT) \n geometry_list.append(grid_ij) \n #为geopandas文件的每一列赋值为刚刚的list \n data['LONCOL'] = LONCOL_list \n data['LATCOL'] = LATCOL_list \n data['HBLON'] = HBLON_list \n data['HBLAT'] = HBLAT_list \n data['geometry'] = geometry_list \n params = (lonStart,latStart,deltaLon,deltaLat)\n return data,params \n\ndef grid_params(bounds,accuracy = 500):\n '''\n 栅格参数获取\n\n 输入\n -------\n bounds : List\n 生成范围的边界,[lon1,lat1,lon2,lat2] (WGS84坐标系) 其中,lon1,lat1是左下角坐标,lon2,lat2是右上角坐标 \n accuracy : number\n 栅格大小(米)\n \n\n 输出\n -------\n params : List\n 栅格参数(lonStart,latStart,deltaLon,deltaLat),分别为栅格左下角坐标与单个栅格的经纬度长宽\n '''\n #划定栅格划分范围\n lon1 = bounds[0]\n lat1 = bounds[1]\n lon2 = bounds[2]\n lat2 = bounds[3]\n #取得左下角的经纬度 \n latStart = min(lat1, lat2); \n lonStart = min(lon1, lon2); \n #计算栅格的经纬度增加量大小▲Lon和▲Lat,地球半径取6371004米 \n deltaLon = accuracy * 360 / (2 * math.pi * 6371004 * math.cos((lat1 + lat2) * math.pi / 360)); \n deltaLat = accuracy * 360 / (2 * math.pi * 6371004); \n return (lonStart,latStart,deltaLon,deltaLat)\n\ndef GPS_to_grids(lon,lat,params):\n '''\n GPS数据对应栅格编号。输入数据的经纬度列与栅格参数,输出对应的栅格编号\n\n 输入\n -------\n lon : Series\n 经度列\n lat : Series\n 纬度列\n params : List\n 栅格参数(lonStart,latStart,deltaLon,deltaLat),分别为栅格左下角坐标与单个栅格的经纬度长宽\n \n 输出\n -------\n LONCOL : Series\n 经度栅格编号列\n LATCOL : Series\n 纬度栅格编号列\n '''\n (lonStart,latStart,deltaLon,deltaLat) = params\n loncol = ((lon - (lonStart - deltaLon / 2))/deltaLon).astype('int') \n latcol = ((lat - (latStart - deltaLat / 2))/deltaLat).astype('int') \n return loncol,latcol\ndef grids_centre(loncol,latcol,params):\n '''\n 栅格编号对应栅格中心点经纬度。输入数据的栅格编号与栅格参数,输出对应的栅格中心点\n\n 输入\n -------\n LONCOL : Series\n 经度栅格编号列\n LATCOL : Series\n 纬度栅格编号列\n params : List\n 
栅格参数(lonStart,latStart,deltaLon,deltaLat),分别为栅格左下角坐标与单个栅格的经纬度长宽\n \n 输出\n -------\n HBLON : Series\n 栅格中心点经度列\n HBLAT : Series\n 栅格中心点纬度列\n '''\n (lonStart,latStart,deltaLon,deltaLat) = params\n hblon = loncol*deltaLon + lonStart #格子编号*格子宽+起始横坐标=格子中心横坐标 \n hblat = latcol*deltaLat + latStart\n return hblon,hblat\n\ndef gridid_to_polygon(loncol,latcol,params):\n '''\n 栅格编号生成栅格的地理信息列。输入数据的栅格编号与栅格参数,输出对应的地理信息列\n\n 输入\n -------\n LONCOL : Series\n 经度栅格编号列\n LATCOL : Series\n 纬度栅格编号列\n params : List\n 栅格参数(lonStart,latStart,deltaLon,deltaLat),分别为栅格左下角坐标与单个栅格的经纬度长宽\n \n 输出\n -------\n geometry : Series\n 栅格的矢量图形列\n '''\n (lonStart,latStart,deltaLon,deltaLat) = params\n HBLON = loncol*deltaLon + lonStart \n HBLAT = latcol*deltaLat + latStart \n #用周围的栅格推算三个顶点的位置\n HBLON_1 = (loncol+1)*deltaLon + lonStart \n HBLAT_1 = (latcol+1)*deltaLat + latStart \n df = pd.DataFrame()\n df['HBLON'] = HBLON\n df['HBLAT'] = HBLAT\n df['HBLON_1'] = HBLON_1\n df['HBLAT_1'] = HBLAT_1\n return df.apply(lambda r:Polygon([ \n (r['HBLON']-deltaLon/2,r['HBLAT']-deltaLat/2), \n (r['HBLON_1']-deltaLon/2,r['HBLAT']-deltaLat/2), \n (r['HBLON_1']-deltaLon/2,r['HBLAT_1']-deltaLat/2), \n (r['HBLON']-deltaLon/2,r['HBLAT_1']-deltaLat/2)]),axis = 1)\n\ndef hexagon_grids(bounds,accuracy = 500):\n '''\n 生成研究范围内的六边形渔网。\n\n 输入\n -------\n bounds : List\n 生成范围的边界,[lon1,lat1,lon2,lat2] (WGS84坐标系) 其中,lon1,lat1是左下角坐标,lon2,lat2是右上角坐标 \n accuracy : number\n 六边形的边长(米)\n \n 输出\n -------\n hexagon : GeoDataFrame\n 六边形渔网的矢量图形\n ''' \n #划定栅格划分范围\n (lon1,lat1,lon2,lat2) = bounds\n #取得左下角的经纬度 \n latStart = min(lat1, lat2); \n lonStart = min(lon1, lon2); \n latEnd = max(lat1, lat2); \n lonEnd = max(lon1, lon2); \n origin = gpd.GeoDataFrame([Point(lonStart,latStart),Point(lonEnd,latEnd)],columns = ['geometry'])\n origin.crs = {'init':'epsg:4326'}\n origin = origin.to_crs(epsg = 3857)\n x_o = origin['geometry'].iloc[0].x\n y_o = origin['geometry'].iloc[0].y\n x_d = origin['geometry'].iloc[1].x\n y_d = origin['geometry'].iloc[1].y\n\n lonsnum = (x_d-x_o)/accuracy\n latsnum = (y_d-y_o)/accuracy\n #1\n xs = np.arange(0,lonsnum,3)\n ys = np.arange(0,latsnum,2*(3/4)**0.5)\n xs = pd.DataFrame(xs,columns = ['x'])\n xs['tmp'] = 1\n ys = pd.DataFrame(ys,columns = ['y'])\n ys['tmp'] = 1\n df1 = pd.merge(xs,ys)\n #2\n xs = np.arange(1.5,lonsnum,3)\n ys = np.arange((3/4)**0.5,latsnum,2*(3/4)**0.5)\n xs = pd.DataFrame(xs,columns = ['x'])\n xs['tmp'] = 1\n ys = pd.DataFrame(ys,columns = ['y'])\n ys['tmp'] = 1\n df2 = pd.merge(xs,ys)\n df = pd.concat([df1,df2])\n df['x'],df['y'] = x_o+df['x']*accuracy,y_o+df['y']*accuracy\n def get_hexagon(x,y,accuracy):\n return Polygon([(x-accuracy,y),\n (x-accuracy/2,y+accuracy*(3/4)**0.5),\n (x+accuracy/2,y+accuracy*(3/4)**0.5),\n (x+accuracy,y),\n (x+accuracy/2,y-accuracy*(3/4)**0.5),\n (x-accuracy/2,y-accuracy*(3/4)**0.5),\n (x-accuracy,y)\n ]) \n df['geometry'] = df.apply(lambda r:get_hexagon(r['x'],r['y'],accuracy),axis = 1)\n df = gpd.GeoDataFrame(df)\n df.crs = {'init':'epsg:3857'}\n df = df.to_crs(epsg = 4326)\n df = df[['geometry']]\n df['ID'] = range(len(df))\n return df\n\n\ndef gridid_sjoin_shape(data,shape,params,col = ['LONCOL','LATCOL']):\n '''\n 输入数据(带有栅格经纬度编号两列),矢量图形与栅格化参数,输出数据栅格并对应矢量图形。\n \n 输入\n -------\n data : DataFrame\n 数据,(带有栅格经纬度编号两列)\n shape : GeoDataFrame\n 矢量图形\n params : List\n 栅格化参数\n col : List\n 列名,[经度栅格编号,纬度栅格编号]\n\n 输出\n -------\n data1 : DataFrame\n 数据栅格并对应矢量图形\n '''\n LONCOL,LATCOL = col\n data1 = data.copy()\n data1 = gpd.GeoDataFrame(data1)\n data1['geometry'] = 
gridid_to_polygon(data1[LONCOL],data1[LATCOL],params)\n data1 = gpd.sjoin(data1,shape)\n return data1\n\n" ]
[ [ "numpy.arange", "pandas.merge", "pandas.concat", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
NathanKlineInstitute/SMARTAgent
[ "751c880c43d73eca395b5533f6f7fe56bf5816d4" ]
[ "connUtils.py" ]
[ "# neuronal network connection functions\nimport numpy as np\n\n#\ndef gid2pos (numc, startgid, gid):\n nrow = ncol = int(np.sqrt(numc))\n y = int((gid - startgid) / nrow)\n x = (gid - startgid) % ncol\n return (x,y)\n\ndef prob2conv (prob, npre):\n # probability to convergence; prob is connection probability, npre is number of presynaptic neurons\n return int(0.5 + prob * npre)\n\ndef conv2prob (conv, npre):\n # convergence to connection probability\n return conv / npre\n\ndef getconv (cmat, prety, poty, npre):\n # get convergence value from cmat dictionary (uses convergence if specified directly, otherwise uses p to calculate)\n if prety not in cmat: return 0\n if poty not in cmat[prety]: return 0\n if 'conv' in cmat[prety][poty]:\n return cmat[prety][poty]['conv']\n elif 'p' in cmat[prety][poty]:\n return prob2conv(cmat[prety][poty]['p'], npre)\n return 0\n\ndef connectOnePreNtoOneMNeuron (NBNeurons,offset_pre,offset_post):\n #this method is used to generate list of connections between preSynNeurons and motor neurons.\n blist = []\n if NBNeurons < 1: return blist\n for i in range(NBNeurons):\n preN = i+offset_pre\n postN = i+offset_post\n blist.append([preN,postN])\n return blist\n \ndef connectLayerswithOverlap (NBpreN, NBpostN, overlap_xdir,padded_preneurons_xdir,padded_postneurons_xdir):\n blist = []\n connCoords = []\n if NBpreN < 1 or NBpostN < 1: return blist, connCoords\n NBpreN_x = int(np.sqrt(NBpreN))\n NBpreN_y = int(np.sqrt(NBpreN))\n #NBpostN = 6400\t#number of postsynaptic neurons\n NBpostN_x = int(np.sqrt(NBpostN))\n NBpostN_y = int(np.sqrt(NBpostN))\n convergence_factor = NBpreN/NBpostN\n convergence_factor_x = int(np.sqrt(convergence_factor))\n convergence_factor_y = int(np.sqrt(convergence_factor))\n #overlap_xdir = 5\t#number of rows in a window for overlapping connectivity\n #overlap_ydir = 5\t#number of columns in a window for overlapping connectivity\n overlap_ydir = overlap_xdir\n preNIndices = np.zeros((NBpreN_x,NBpreN_y))\n postNIndices = np.zeros((NBpostN_x,NBpostN_y))\t\t#list created for indices from linear (1-6400) to square indexing (1-80,81-160,....) \n for i in range(NBpreN_x):\n for j in range(NBpreN_y):\n preNIndices[i,j]=j+(NBpreN_y*i)\n for i in range(NBpostN_x):\n for j in range(NBpostN_y):\n postNIndices[i,j]=j+(NBpostN_y*i)\n targetOffset_x = int(padded_postneurons_xdir/2)\n targetOffset_y = int(padded_postneurons_xdir/2) # assuming we have pops in square format. so anything x dimension would be same as in y dimension\n sourceOffset_x = int(padded_preneurons_xdir/2)\n sourceOffset_y = int(padded_preneurons_xdir/2) # assuming we have pops in square format. 
so anything x dimension would be same as in y dimension\n target_postNIndices = list(range(targetOffset_x,NBpostN_x-targetOffset_x,1))\n source_preNIndices = list(range(sourceOffset_x,NBpreN_x-sourceOffset_x,int(convergence_factor_x)))\n for i in range(len(target_postNIndices)):\t\t\t\t#parse the non\n for j in range(len(target_postNIndices)):\n postN = int(postNIndices[target_postNIndices[i],target_postNIndices[j]])\n preN = int(preNIndices[source_preNIndices[i],source_preNIndices[j]]) \n preNIndices[int(i*convergence_factor_y),int(j*convergence_factor_x)]\n preN_ind = np.where(preNIndices==preN)\n #print(preN_ind)\n x0 = preN_ind[0][0] - int(overlap_xdir/2)\n if x0<0:\n x0 = 0\n y0 = preN_ind[1][0] - int(overlap_ydir/2)\n if y0<0:\n y0 = 0\n xlast = preN_ind[0][0] + int(overlap_xdir/2)\n if xlast>NBpreN_x-1:\n xlast = NBpreN_x-1\n ylast = preN_ind[1][0] + int(overlap_ydir/2)\n if ylast>NBpreN_y-1:\n ylast = NBpreN_y-1\n xinds = [x0]\n for _ in range(xlast-x0):\n xinds.append(xinds[-1]+1)\n yinds = [y0]\n for _ in range(ylast-y0):\n yinds.append(yinds[-1]+1)\n for xi in range(len(xinds)):\n for yi in range(len(yinds)):\n preN = int(preNIndices[xinds[xi],yinds[yi]])\n blist.append([preN,postN]) \t\t\t#list of [presynaptic_neuron, postsynaptic_neuron]\n connCoords.append([xinds[xi],yinds[yi],i,j]) # list of coordinates of preN and postN\n return blist, connCoords\n\ndef connectLayerswithOverlapDiv(NBpreN, NBpostN, overlap_xdir,padded_preneurons_xdir,padded_postneurons_xdir):\n blist = []\n connCoords = []\n if NBpreN < 1 or NBpostN < 1: return blist, connCoords\n NBpreN_x = int(np.sqrt(NBpreN))\n NBpreN_y = int(np.sqrt(NBpreN))\n NBpostN_x = int(np.sqrt(NBpostN))\n NBpostN_y = int(np.sqrt(NBpostN))\n divergence_factor = NBpostN/NBpreN\n divergence_factor_x = int(np.sqrt(divergence_factor))\n divergence_factor_y = int(np.sqrt(divergence_factor))\n overlap_ydir = overlap_xdir\n preNIndices = np.zeros((NBpreN_x,NBpreN_y))\n postNIndices = np.zeros((NBpostN_x,NBpostN_y)) #list created for indices from linear (1-6400) to square indexing (1-80,81-160,..) \n targetOffset_x = int(padded_postneurons_xdir/2)\n targetOffset_y = int(padded_postneurons_xdir/2) # assuming we have pops in square format. so anything x dimension would be same as in y dimension\n sourceOffset_x = int(padded_preneurons_xdir/2)\n sourceOffset_y = int(padded_preneurons_xdir/2) # assuming we have pops in square format. 
so anything x dimension would be same as in y dimension\n target_postNIndices = list(range(targetOffset_x,NBpostN_x-targetOffset_x,int(divergence_factor_x)))\n source_preNIndices = list(range(sourceOffset_x,NBpreN_x-sourceOffset_x,1))\n for i in range(NBpreN_x):\n for j in range(NBpreN_y):\n preNIndices[i,j]=j+(NBpreN_y*i)\n for i in range(NBpostN_x):\n for j in range(NBpostN_y):\n postNIndices[i,j]=j+(NBpostN_y*i)\n for i in range(len(source_preNIndices)):\t\t\t\t#boundary conditions are implemented here\n for j in range(len(source_preNIndices)):\n preN = int(preNIndices[source_preNIndices[i],source_preNIndices[j]])\n postN = int(postNIndices[target_postNIndices[i],target_postNIndices[j]])\n postN_ind = np.where(postNIndices==postN)\n x0 = postN_ind[0][0] - int(overlap_xdir/2)\n if x0<0:\n x0 = 0\n y0 = postN_ind[1][0] - int(overlap_ydir/2)\n if y0<0:\n y0 = 0\n xlast = postN_ind[0][0] + int(overlap_xdir/2)\n if xlast>NBpostN_x-1:\n xlast = NBpostN_x-1\n ylast = postN_ind[1][0] + int(overlap_ydir/2)\n if ylast>NBpostN_y-1:\n ylast = NBpostN_y-1\n xinds = [x0]\n for _ in range(xlast-x0):\n xinds.append(xinds[-1]+1)\n yinds = [y0]\n for _ in range(ylast-y0):\n yinds.append(yinds[-1]+1)\n for xi in range(len(xinds)):\n for yi in range(len(yinds)):\n postN = int(postNIndices[xinds[xi],yinds[yi]])\n blist.append([preN,postN]) #list of [presynaptic_neuron, postsynaptic_neuron]\n connCoords.append([i,j,xinds[xi],yinds[yi]]) # list of coordinates of preN and postN\t\t\t \n return blist, connCoords\n\n# cLV1toEA, cLV1DEtoEA, cLV1DNEtoEA, cLV1DNtoEA, cLV1DNWtoEA, cLV1DWtoEA, cLV1DSWtoEA, cLV1DStoEA, cLV1DSEtoEA = createConnListV1toEA(60,3)\ndef createConnListV1toEA(NBpreN,NBobjs): # This function is hard coded.... Not sure how to make it more generalized.\n RO_neurons = []\n Ball_neurons = []\n RM_neurons = []\n count = 0\n for _ in range(int(NBpreN/NBobjs)): # this is assuming 3 objects represented by RO (Opponent Racket), Ball and RM (Model Racket)\n RO_neurons.append(count)\n Ball_neurons.append(count+1)\n RM_neurons.append(count+2)\n count = count+NBobjs\n # Dir neurons: E, NE, N, NW, W, SW, S, SE\n Dir_neurons = [0,1,2,3,4,5,6,7]\n combs = []\n for ro in RO_neurons:\n for b in Ball_neurons:\n for rm in RM_neurons:\n for dirs in Dir_neurons:\n combs.append([ro,b,rm,dirs])\n connsListV1toEA = []\n connsListV1DEtoEA = []\n connsListV1DNEtoEA = []\n connsListV1DNtoEA = []\n connsListV1DNWtoEA = []\n connsListV1DWtoEA = []\n connsListV1DSWtoEA = []\n connsListV1DStoEA = []\n connsListV1DSEtoEA = []\n postid = 0\n for comb in combs:\n combV1 = comb[0:3] #comb[0:NBobjs] # assuming NBobjs = 3\n combDirV1 = comb[3]\n for ob in combV1: \n connsListV1toEA.append([ob,postid])\n if combDirV1==0: #E\n connsListV1DEtoEA.append([0,postid])\n elif combDirV1==1: #NE\n connsListV1DNEtoEA.append([0,postid])\n elif combDirV1==2: #N\n connsListV1DNtoEA.append([0,postid])\n elif combDirV1==3: #NW\n connsListV1DNWtoEA.append([0,postid])\n elif combDirV1==4: #W\n connsListV1DWtoEA.append([0,postid])\n elif combDirV1==5: #SW\n connsListV1DSWtoEA.append([0,postid])\n elif combDirV1==6: #S\n connsListV1DStoEA.append([0,postid])\n elif combDirV1==7: #SE\n connsListV1DSEtoEA.append([0,postid])\n postid = postid+1 \n return connsListV1toEA, connsListV1DEtoEA, connsListV1DNEtoEA, connsListV1DNtoEA, connsListV1DNWtoEA, connsListV1DWtoEA, connsListV1DSWtoEA, connsListV1DStoEA, connsListV1DSEtoEA\n\n\ndef createConnListV1toEA2(NBpreN,NBobjs): # This function is hard coded.... 
Not sure how to make it more generalized.\n Ball_neurons = []\n RM_neurons = []\n count = 0\n for _ in range(int(NBpreN/NBobjs)): # this is assuming 3 objects represented by RO (Opponent Racket), Ball and RM (Model Racket)\n Ball_neurons.append(count)\n RM_neurons.append(count+1)\n count = count+NBobjs\n # Dir neurons: E, NE, N, NW, W, SW, S, SE\n Dir_neurons = [0,1,2,3,4,5,6,7]\n combs = []\n for b in Ball_neurons:\n for rm in RM_neurons:\n for dirs in Dir_neurons:\n combs.append([b,rm,dirs])\n connsListV1toEA = []\n connsListV1DEtoEA = []\n connsListV1DNEtoEA = []\n connsListV1DNtoEA = []\n connsListV1DNWtoEA = []\n connsListV1DWtoEA = []\n connsListV1DSWtoEA = []\n connsListV1DStoEA = []\n connsListV1DSEtoEA = []\n postid = 0\n for comb in combs:\n combV1 = comb[0:2] #comb[0:NBobjs] # assuming NBobjs = 2\n combDirV1 = comb[2]\n for ob in combV1: \n connsListV1toEA.append([ob,postid])\n if combDirV1==0: #E\n connsListV1DEtoEA.append([0,postid])\n elif combDirV1==1: #NE\n connsListV1DNEtoEA.append([0,postid])\n elif combDirV1==2: #N\n connsListV1DNtoEA.append([0,postid])\n elif combDirV1==3: #NW\n connsListV1DNWtoEA.append([0,postid])\n elif combDirV1==4: #W\n connsListV1DWtoEA.append([0,postid])\n elif combDirV1==5: #SW\n connsListV1DSWtoEA.append([0,postid])\n elif combDirV1==6: #S\n connsListV1DStoEA.append([0,postid])\n elif combDirV1==7: #SE\n connsListV1DSEtoEA.append([0,postid])\n postid = postid+1 \n return connsListV1toEA, connsListV1DEtoEA, connsListV1DNEtoEA, connsListV1DNtoEA, connsListV1DNWtoEA, connsListV1DWtoEA, connsListV1DSWtoEA, connsListV1DStoEA, connsListV1DSEtoEA\n" ]
[ [ "numpy.where", "numpy.zeros", "numpy.sqrt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tongyao-zhu/virtex
[ "43b33289ffc963b41b6b98affc5e94dfe25e29c8" ]
[ "scripts/clf_linear.py" ]
[ "import argparse\nfrom collections import Counter\nimport os\n\nfrom loguru import logger\nimport torch\nfrom torch import nn\nfrom torch.utils.data import DataLoader, DistributedSampler\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom virtex.config import Config\nfrom virtex.factories import (\n DownstreamDatasetFactory,\n PretrainingModelFactory,\n OptimizerFactory,\n LRSchedulerFactory,\n)\nfrom virtex.utils.checkpointing import CheckpointManager\nfrom virtex.utils.common import common_parser, common_setup, cycle\nimport virtex.utils.distributed as dist\nfrom virtex.utils.metrics import TopkAccuracy\nfrom virtex.utils.timer import Timer\n\n\n# fmt: off\nparser = common_parser(\n description=\"\"\"Do image classification with linear models and frozen\n feature extractor, or fine-tune the feature extractor end-to-end.\"\"\"\n)\ngroup = parser.add_argument_group(\"Downstream config arguments.\")\ngroup.add_argument(\n \"--down-config\", metavar=\"FILE\", help=\"Path to a downstream config file.\"\n)\ngroup.add_argument(\n \"--down-config-override\", nargs=\"*\", default=[],\n help=\"A list of key-value pairs to modify downstream config params.\",\n)\n\nparser.add_argument_group(\"Checkpointing and Logging\")\nparser.add_argument(\n \"--weight-init\", choices=[\"random\", \"imagenet\", \"torchvision\", \"virtex\"],\n default=\"virtex\", help=\"\"\"How to initialize weights:\n 1. 'random' initializes all weights randomly\n 2. 'imagenet' initializes backbone weights from torchvision model zoo\n 3. {'torchvision', 'virtex'} load state dict from --checkpoint-path\n - with 'torchvision', state dict would be from PyTorch's training\n script.\n - with 'virtex' it should be for our full pretrained model.\"\"\"\n)\nparser.add_argument(\n \"--log-every\", type=int, default=50,\n help=\"\"\"Log training curves to tensorboard after every these many iterations\n only master process logs averaged loss values across processes.\"\"\",\n)\nparser.add_argument(\n \"--checkpoint-path\",\n help=\"\"\"Path to load checkpoint and run downstream task evaluation. 
The\n name of checkpoint file is required to be `model_*.pth`, where * is\n iteration number from which the checkpoint was serialized.\"\"\"\n)\nparser.add_argument(\n \"--checkpoint-every\", type=int, default=5000,\n help=\"\"\"Serialize model to a checkpoint after every these many iterations.\n For ImageNet, (5005 iterations = 1 epoch); for iNaturalist (1710 iterations\n = 1 epoch).\"\"\",\n)\n# fmt: on\n\n\ndef main(_A: argparse.Namespace):\n\n if _A.num_gpus_per_machine == 0:\n # Set device as CPU if num_gpus_per_machine = 0.\n device = torch.device(\"cpu\")\n else:\n # Get the current device as set for current distributed process.\n # Check `launch` function in `virtex.utils.distributed` module.\n device = torch.cuda.current_device()\n\n # Create a downstream config object (this will be immutable) and perform\n # common setup such as logging and setting up serialization directory.\n _DOWNC = Config(_A.down_config, _A.down_config_override)\n common_setup(_DOWNC, _A, job_type=\"downstream\")\n\n # Create a (pretraining) config object and backup in serializaion directory.\n _C = Config(_A.config, _A.config_override)\n _C.dump(os.path.join(_A.serialization_dir, \"pretrain_config.yaml\"))\n\n # Get dataset name for tensorboard logging.\n DATASET = _DOWNC.DATA.ROOT.split(\"/\")[-1]\n\n # Set number of output classes according to dataset:\n NUM_CLASSES_MAPPING = {\"imagenet\": 1000, \"inaturalist\": 8142}\n NUM_CLASSES = NUM_CLASSES_MAPPING[DATASET]\n\n # -------------------------------------------------------------------------\n # INSTANTIATE DATALOADER, MODEL, OPTIMIZER, SCHEDULER\n # -------------------------------------------------------------------------\n train_dataset = DownstreamDatasetFactory.from_config(_DOWNC, split=\"train\")\n train_dataloader = DataLoader(\n train_dataset,\n batch_size=_DOWNC.OPTIM.BATCH_SIZE // dist.get_world_size(),\n num_workers=_A.cpu_workers,\n sampler=DistributedSampler(\n train_dataset,\n num_replicas=dist.get_world_size(),\n rank=dist.get_rank(),\n shuffle=True,\n ),\n drop_last=False,\n pin_memory=True,\n collate_fn=train_dataset.collate_fn,\n )\n val_dataset = DownstreamDatasetFactory.from_config(_DOWNC, split=\"val\")\n val_dataloader = DataLoader(\n val_dataset,\n batch_size=_DOWNC.OPTIM.BATCH_SIZE // dist.get_world_size(),\n num_workers=_A.cpu_workers,\n sampler=DistributedSampler(\n val_dataset,\n num_replicas=dist.get_world_size(),\n rank=dist.get_rank(),\n shuffle=False,\n ),\n pin_memory=True,\n drop_last=False,\n collate_fn=val_dataset.collate_fn,\n )\n # Initialize model using pretraining config.\n pretrained_model = PretrainingModelFactory.from_config(_C)\n\n # Load weights according to the init method, do nothing for `random`, and\n # `imagenet` is already taken care of.\n if _A.weight_init == \"virtex\":\n CheckpointManager(model=pretrained_model).load(_A.checkpoint_path)\n elif _A.weight_init == \"torchvision\":\n # Keep strict=False because this state dict may have weights for\n # last fc layer.\n pretrained_model.visual.cnn.load_state_dict(\n torch.load(_A.checkpoint_path, map_location=\"cpu\")[\"state_dict\"],\n strict=False,\n )\n\n # Pull out the CNN (torchvision-like) from our pretrained model and add\n # back the FC layer - this is exists in torchvision models, and is set to\n # `nn.Identity()` during pretraining.\n model = pretrained_model.visual.cnn # type: ignore\n model.fc = nn.Linear(_DOWNC.MODEL.VISUAL.FEATURE_SIZE, NUM_CLASSES).to(device)\n model = model.to(device)\n\n # Re-initialize the FC layer.\n 
torch.nn.init.normal_(model.fc.weight.data, mean=0.0, std=0.01)\n torch.nn.init.constant_(model.fc.bias.data, 0.0)\n\n # Freeze all layers except FC as per config param.\n if _DOWNC.MODEL.VISUAL.FROZEN:\n for name, param in model.named_parameters():\n if \"fc\" not in name:\n param.requires_grad = False\n\n # Cross entropy loss and accuracy meter.\n criterion = nn.CrossEntropyLoss()\n top1 = TopkAccuracy(top_k=1)\n\n optimizer = OptimizerFactory.from_config(_DOWNC, model.named_parameters())\n scheduler = LRSchedulerFactory.from_config(_DOWNC, optimizer)\n del pretrained_model\n\n # -------------------------------------------------------------------------\n # BEFORE TRAINING STARTS\n # -------------------------------------------------------------------------\n\n # Create an iterator from dataloader to sample batches perpetually.\n train_dataloader_iter = cycle(train_dataloader, device)\n\n # Wrap model and optimizer using NVIDIA Apex for mixed precision training.\n # NOTE: Always do this before wrapping model with DistributedDataParallel.\n if _DOWNC.FP16_OPT > 0:\n from apex import amp\n\n model, optimizer = amp.initialize(\n model, optimizer, opt_level=f\"O{_DOWNC.FP16_OPT}\"\n )\n\n if dist.get_world_size() > 1:\n dist.synchronize()\n model = nn.parallel.DistributedDataParallel(\n model, device_ids=[device], find_unused_parameters=True\n )\n\n if dist.is_master_process():\n checkpoint_manager = CheckpointManager(\n _A.serialization_dir,\n model=model,\n optimizer=optimizer,\n scheduler=scheduler,\n )\n tensorboard_writer = SummaryWriter(log_dir=_A.serialization_dir)\n\n # Keep track of time per iteration and ETA.\n timer = Timer(start_from=1, total_iterations=_DOWNC.OPTIM.NUM_ITERATIONS)\n\n # -------------------------------------------------------------------------\n # TRAINING LOOP\n # -------------------------------------------------------------------------\n for iteration in range(1, _DOWNC.OPTIM.NUM_ITERATIONS + 1):\n timer.tic()\n optimizer.zero_grad()\n batch = next(train_dataloader_iter)\n\n logits = model(batch[\"image\"])\n loss = criterion(logits, batch[\"label\"])\n\n # Perform dynamic scaling of loss to adjust for mixed precision.\n if _DOWNC.FP16_OPT > 0:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n optimizer.step()\n scheduler.step(iteration)\n timer.toc()\n\n if iteration % _A.log_every == 0 and dist.is_master_process():\n logger.info(\n f\"{timer.stats} | Loss: {loss:.3f} | GPU: {dist.gpu_mem_usage()} MB\"\n )\n tensorboard_writer.add_scalar(f\"{DATASET}/train_loss\", loss, iteration)\n tensorboard_writer.add_scalar(\n f\"{DATASET}/learning_rate\",\n optimizer.param_groups[0][\"lr\"],\n iteration,\n )\n\n # ---------------------------------------------------------------------\n # VALIDATION\n # ---------------------------------------------------------------------\n if iteration % _A.checkpoint_every == 0:\n torch.set_grad_enabled(False)\n model.eval()\n\n total_val_loss = torch.tensor(0.0).to(device)\n\n for val_iteration, batch in enumerate(val_dataloader, start=1):\n for key in batch:\n batch[key] = batch[key].to(device)\n\n logits = model(batch[\"image\"])\n loss = criterion(logits, batch[\"label\"])\n top1(logits, batch[\"label\"])\n total_val_loss += loss\n\n # Divide each loss component by number of val batches per GPU.\n total_val_loss = total_val_loss / val_iteration\n dist.average_across_processes(total_val_loss)\n\n # Get accumulated Top-1 accuracy for logging across GPUs.\n acc = 
top1.get_metric(reset=True)\n dist.average_across_processes(acc)\n\n torch.set_grad_enabled(True)\n model.train()\n\n # Save recent checkpoint and best checkpoint based on accuracy.\n if dist.is_master_process():\n checkpoint_manager.step(iteration)\n\n if iteration % _A.checkpoint_every == 0 and dist.is_master_process():\n logger.info(f\"Iter: {iteration} | Top-1 accuracy: {acc})\")\n tensorboard_writer.add_scalar(\n f\"{DATASET}/val_loss\", total_val_loss, iteration\n )\n # This name scoping will result in Tensorboard displaying all metrics\n # (VOC07, caption, etc.) together.\n tensorboard_writer.add_scalars(\n f\"metrics/{DATASET}\", {\"top1\": acc}, iteration\n )\n\n # All processes will wait till master process is done logging.\n dist.synchronize()\n\n\nif __name__ == \"__main__\":\n _A = parser.parse_args()\n\n # Add an arg in config override if `--weight-init` is imagenet.\n if _A.weight_init == \"imagenet\":\n _A.config_override.extend([\"MODEL.VISUAL.PRETRAINED\", True])\n\n if _A.num_gpus_per_machine == 0:\n main(_A)\n else:\n # This will launch `main` and set appropriate CUDA device (GPU ID) as\n # per process (accessed in the beginning of `main`).\n dist.launch(\n main,\n num_machines=_A.num_machines,\n num_gpus_per_machine=_A.num_gpus_per_machine,\n machine_rank=_A.machine_rank,\n dist_url=_A.dist_url,\n args=(_A,),\n )\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.cuda.current_device", "torch.nn.init.constant_", "torch.load", "torch.tensor", "torch.nn.Linear", "torch.set_grad_enabled", "torch.nn.init.normal_", "torch.utils.tensorboard.SummaryWriter", "torch.device", "torch.nn.parallel.DistributedDataParallel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
baohq1595/graph2graph
[ "3d1f33cd85c3c5ed0b7c67b5f74a0abe31a94271" ]
[ "fast_jtnn/diff_vae.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom fast_jtnn.mol_tree import Vocab, MolTree\nfrom fast_jtnn.nnutils import create_var, flatten_tensor, avg_pool\nfrom fast_jtnn.jtnn_enc import JTNNEncoder\nfrom fast_jtnn.jtnn_dec import JTNNDecoder\nfrom fast_jtnn.mpn import MPN\nfrom fast_jtnn.jtmpn import JTMPN\n\nfrom fast_jtnn.chemutils import enum_assemble, set_atommap, copy_edit_mol, attach_mols\nimport rdkit\nimport rdkit.Chem as Chem\nimport copy, math\n\nclass DiffVAE(nn.Module):\n\n def __init__(self, vocab, args):\n super(DiffVAE, self).__init__()\n self.vocab = vocab\n self.hidden_size = hidden_size = args.hidden_size\n self.rand_size = rand_size = args.rand_size\n\n self.jtmpn = JTMPN(hidden_size, args.depthG)\n self.mpn = MPN(hidden_size, args.depthG)\n\n if args.share_embedding:\n self.embedding = nn.Embedding(vocab.size(), hidden_size)\n self.jtnn = JTNNEncoder(hidden_size, args.depthT, self.embedding)\n self.decoder = JTNNDecoder(vocab, hidden_size, self.embedding, args.use_molatt)\n else:\n self.jtnn = JTNNEncoder(hidden_size, args.depthT, nn.Embedding(vocab.size(), hidden_size))\n self.decoder = JTNNDecoder(vocab, hidden_size, nn.Embedding(vocab.size(), hidden_size), args.use_molatt)\n\n self.A_assm = nn.Linear(hidden_size, hidden_size, bias=False)\n self.assm_loss = nn.CrossEntropyLoss(size_average=False)\n\n self.T_mean = nn.Linear(hidden_size, rand_size // 2)\n self.T_var = nn.Linear(hidden_size, rand_size // 2)\n self.G_mean = nn.Linear(hidden_size, rand_size // 2)\n self.G_var = nn.Linear(hidden_size, rand_size // 2)\n self.B_t = nn.Sequential(nn.Linear(hidden_size + rand_size // 2, hidden_size), nn.ReLU())\n self.B_g = nn.Sequential(nn.Linear(hidden_size + rand_size // 2, hidden_size), nn.ReLU())\n \n def encode(self, jtenc_holder, mpn_holder):\n tree_vecs, tree_mess = self.jtnn(*jtenc_holder)\n mol_vecs = self.mpn(*mpn_holder)\n return tree_vecs, tree_mess, mol_vecs\n\n def fuse_noise(self, tree_vecs, mol_vecs):\n tree_eps = create_var( torch.randn(tree_vecs.size(0), 1, self.rand_size // 2) )\n tree_eps = tree_eps.expand(-1, tree_vecs.size(1), -1)\n mol_eps = create_var( torch.randn(mol_vecs.size(0), 1, self.rand_size // 2) )\n mol_eps = mol_eps.expand(-1, mol_vecs.size(1), -1)\n\n tree_vecs = torch.cat([tree_vecs,tree_eps], dim=-1) \n mol_vecs = torch.cat([mol_vecs,mol_eps], dim=-1) \n return self.B_t(tree_vecs), self.B_g(mol_vecs)\n\n def fuse_pair(self, x_tree_vecs, x_mol_vecs, y_tree_vecs, y_mol_vecs, jtenc_scope, mpn_scope):\n diff_tree_vecs = y_tree_vecs.sum(dim=1) - x_tree_vecs.sum(dim=1)\n size = create_var(torch.Tensor([le for _,le in jtenc_scope]))\n diff_tree_vecs = diff_tree_vecs / size.unsqueeze(-1)\n\n diff_mol_vecs = y_mol_vecs.sum(dim=1) - x_mol_vecs.sum(dim=1)\n size = create_var(torch.Tensor([le for _,le in mpn_scope]))\n diff_mol_vecs = diff_mol_vecs / size.unsqueeze(-1)\n\n diff_tree_vecs, tree_kl = self.rsample(diff_tree_vecs, self.T_mean, self.T_var)\n diff_mol_vecs, mol_kl = self.rsample(diff_mol_vecs, self.G_mean, self.G_var)\n\n diff_tree_vecs = diff_tree_vecs.unsqueeze(1).expand(-1, x_tree_vecs.size(1), -1)\n diff_mol_vecs = diff_mol_vecs.unsqueeze(1).expand(-1, x_mol_vecs.size(1), -1)\n x_tree_vecs = torch.cat([x_tree_vecs,diff_tree_vecs], dim=-1)\n x_mol_vecs = torch.cat([x_mol_vecs,diff_mol_vecs], dim=-1)\n\n return self.B_t(x_tree_vecs), self.B_g(x_mol_vecs), tree_kl + mol_kl\n\n def rsample(self, z_vecs, W_mean, W_var):\n z_mean = W_mean(z_vecs)\n z_log_var = -torch.abs(W_var(z_vecs)) #Following Mueller et 
al.\n kl_loss = -0.5 * torch.mean(1.0 + z_log_var - z_mean * z_mean - torch.exp(z_log_var))\n epsilon = create_var(torch.randn_like(z_mean))\n z_vecs = z_mean + torch.exp(z_log_var / 2) * epsilon\n return z_vecs, kl_loss\n\n def forward(self, x_batch, y_batch, beta):\n x_batch, x_jtenc_holder, x_mpn_holder = x_batch\n y_batch, y_jtenc_holder, y_mpn_holder, y_jtmpn_holder = y_batch\n\n x_tree_vecs, _, x_mol_vecs = self.encode(x_jtenc_holder, x_mpn_holder)\n y_tree_vecs, y_tree_mess, y_mol_vecs = self.encode(y_jtenc_holder, y_mpn_holder)\n\n x_tree_vecs, x_mol_vecs, kl_div = self.fuse_pair(x_tree_vecs, x_mol_vecs, y_tree_vecs, y_mol_vecs, y_jtenc_holder[-1], y_mpn_holder[-1])\n\n word_loss, topo_loss, word_acc, topo_acc = self.decoder(y_batch, x_tree_vecs, x_mol_vecs)\n assm_loss, assm_acc = self.assm(y_batch, y_jtmpn_holder, x_mol_vecs, y_tree_mess)\n\n return word_loss + topo_loss + assm_loss + beta * kl_div, kl_div.item(), word_acc, topo_acc, assm_acc\n\n def assm(self, mol_batch, jtmpn_holder, x_mol_vecs, y_tree_mess):\n jtmpn_holder,batch_idx = jtmpn_holder\n fatoms,fbonds,agraph,bgraph,scope = jtmpn_holder\n batch_idx = create_var(batch_idx)\n\n cand_vecs = self.jtmpn(fatoms, fbonds, agraph, bgraph, scope, y_tree_mess)\n\n x_mol_vecs = x_mol_vecs.sum(dim=1) #average pooling?\n x_mol_vecs = x_mol_vecs.index_select(0, batch_idx)\n x_mol_vecs = self.A_assm(x_mol_vecs) #bilinear\n scores = torch.bmm(\n x_mol_vecs.unsqueeze(1),\n cand_vecs.unsqueeze(-1)\n ).squeeze()\n \n cnt,tot,acc = 0,0,0\n all_loss = []\n for i,mol_tree in enumerate(mol_batch):\n comp_nodes = [node for node in mol_tree.nodes if len(node.cands) > 1 and not node.is_leaf]\n cnt += len(comp_nodes)\n for node in comp_nodes:\n label = node.cands.index(node.label)\n ncand = len(node.cands)\n cur_score = scores.narrow(0, tot, ncand)\n tot += ncand\n\n if cur_score.data[label] >= cur_score.max().item():\n acc += 1\n\n label = create_var(torch.LongTensor([label]))\n all_loss.append( self.assm_loss(cur_score.view(1,-1), label) )\n \n all_loss = sum(all_loss) / len(mol_batch)\n return all_loss, acc * 1.0 / cnt\n\n def decode(self, x_tree_vecs, x_mol_vecs):\n #currently do not support batch decoding\n assert x_tree_vecs.size(0) == 1 and x_mol_vecs.size(0) == 1\n\n pred_root,pred_nodes = self.decoder.decode(x_tree_vecs, x_mol_vecs)\n if len(pred_nodes) == 0: return None\n elif len(pred_nodes) == 1: return pred_root.smiles\n\n #Mark nid & is_leaf & atommap\n for i,node in enumerate(pred_nodes):\n node.nid = i + 1\n node.is_leaf = (len(node.neighbors) == 1)\n if len(node.neighbors) > 1:\n set_atommap(node.mol, node.nid)\n\n scope = [(0, len(pred_nodes))]\n jtenc_holder,mess_dict = JTNNEncoder.tensorize_nodes(pred_nodes, scope)\n _,tree_mess = self.jtnn(*jtenc_holder)\n tree_mess = (tree_mess, mess_dict) #Important: tree_mess is a matrix, mess_dict is a python dict\n\n x_mol_vec_pooled = x_mol_vecs.sum(dim=1) #average pooling?\n x_mol_vec_pooled = self.A_assm(x_mol_vec_pooled).squeeze() #bilinear\n\n cur_mol = copy_edit_mol(pred_root.mol)\n global_amap = [{}] + [{} for node in pred_nodes]\n global_amap[1] = {atom.GetIdx():atom.GetIdx() for atom in cur_mol.GetAtoms()}\n\n cur_mol = self.dfs_assemble(tree_mess, x_mol_vec_pooled, pred_nodes, cur_mol, global_amap, [], pred_root, None)\n if cur_mol is None: \n return None\n\n cur_mol = cur_mol.GetMol()\n set_atommap(cur_mol)\n cur_mol = Chem.MolFromSmiles(Chem.MolToSmiles(cur_mol))\n return Chem.MolToSmiles(cur_mol) if cur_mol is not None else None\n \n def dfs_assemble(self, y_tree_mess, 
x_mol_vec_pooled, all_nodes, cur_mol, global_amap, fa_amap, cur_node, fa_node):\n fa_nid = fa_node.nid if fa_node is not None else -1\n prev_nodes = [fa_node] if fa_node is not None else []\n\n children = [nei for nei in cur_node.neighbors if nei.nid != fa_nid]\n neighbors = [nei for nei in children if nei.mol.GetNumAtoms() > 1]\n neighbors = sorted(neighbors, key=lambda x:x.mol.GetNumAtoms(), reverse=True)\n singletons = [nei for nei in children if nei.mol.GetNumAtoms() == 1]\n neighbors = singletons + neighbors\n\n cur_amap = [(fa_nid,a2,a1) for nid,a1,a2 in fa_amap if nid == cur_node.nid]\n cands = enum_assemble(cur_node, neighbors, prev_nodes, cur_amap)\n if len(cands) == 0:\n return None\n\n cand_smiles,cand_amap = zip(*cands)\n cands = [(smiles, all_nodes, cur_node) for smiles in cand_smiles]\n\n jtmpn_holder = JTMPN.tensorize(cands, y_tree_mess[1])\n fatoms,fbonds,agraph,bgraph,scope = jtmpn_holder\n cand_vecs = self.jtmpn(fatoms, fbonds, agraph, bgraph, scope, y_tree_mess[0])\n\n scores = torch.mv(cand_vecs, x_mol_vec_pooled)\n _,cand_idx = torch.sort(scores, descending=True)\n\n backup_mol = Chem.RWMol(cur_mol)\n #for i in range(cand_idx.numel()):\n for i in range( min(cand_idx.numel(), 5) ):\n cur_mol = Chem.RWMol(backup_mol)\n pred_amap = cand_amap[cand_idx[i].item()]\n new_global_amap = copy.deepcopy(global_amap)\n\n for nei_id,ctr_atom,nei_atom in pred_amap:\n if nei_id == fa_nid:\n continue\n new_global_amap[nei_id][nei_atom] = new_global_amap[cur_node.nid][ctr_atom]\n\n cur_mol = attach_mols(cur_mol, children, [], new_global_amap) #father is already attached\n new_mol = cur_mol.GetMol()\n new_mol = Chem.MolFromSmiles(Chem.MolToSmiles(new_mol))\n\n if new_mol is None: continue\n \n result = True\n for nei_node in children:\n if nei_node.is_leaf: continue\n cur_mol = self.dfs_assemble(y_tree_mess, x_mol_vec_pooled, all_nodes, cur_mol, new_global_amap, pred_amap, nei_node, cur_node)\n if cur_mol is None: \n result = False\n break\n if result: return cur_mol\n\n return None\n" ]
[ [ "torch.randn_like", "torch.nn.CrossEntropyLoss", "torch.mv", "torch.LongTensor", "torch.Tensor", "torch.cat", "torch.exp", "torch.nn.Linear", "torch.sort", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
katana17/pensieve-dev
[ "ccad1f64d2c50a0346ccce91c8c3b10eac08c30a" ]
[ "sim/rl_test.py" ]
[ "import os\nimport sys\nimport numpy as np\nimport tensorflow as tf\nimport load_trace\nimport a3c\nimport fixed_env as env\n\nos.environ['CUDA_VISIBLE_DEVICES'] = ''\n\nS_INFO = 6 # bit_rate, buffer_size, next_chunk_size, bandwidth_measurement(throughput and time), chunk_til_video_end\nS_LEN = 8 # take how many frames in the past\nA_DIM = 6\nACTOR_LR_RATE = 0.0001\nCRITIC_LR_RATE = 0.001\nVIDEO_BIT_RATE = [300, 750, 1200, 1850, 2850, 4300] # Kbps\nBUFFER_NORM_FACTOR = 10.0\nCHUNK_TIL_VIDEO_END_CAP = 48.0\nM_IN_K = 1000.0\nREBUF_PENALTY = 4.3 # 1 sec rebuffering -> 3 Mbps\nSMOOTH_PENALTY = 1\nDEFAULT_QUALITY = 1 # default video quality without agent\nRANDOM_SEED = 42\nRAND_RANGE = 1000\nLOG_FILE = './test_results/log_sim_rl'\nTEST_TRACES = './cooked_test_traces/'\n# log in format of time_stamp bit_rate buffer_size rebuffer_time chunk_size download_time reward\nNN_MODEL = sys.argv[1]\n\n\ndef main():\n\n np.random.seed(RANDOM_SEED)\n\n assert len(VIDEO_BIT_RATE) == A_DIM\n\n all_cooked_time, all_cooked_bw, all_file_names = load_trace.load_trace(TEST_TRACES)\n\n net_env = env.Environment(all_cooked_time=all_cooked_time,\n all_cooked_bw=all_cooked_bw)\n\n log_path = LOG_FILE + '_' + all_file_names[net_env.trace_idx]\n log_file = open(log_path, 'w')\n\n with tf.Session() as sess:\n\n actor = a3c.ActorNetwork(sess,\n state_dim=[S_INFO, S_LEN], action_dim=A_DIM,\n learning_rate=ACTOR_LR_RATE)\n\n critic = a3c.CriticNetwork(sess,\n state_dim=[S_INFO, S_LEN],\n learning_rate=CRITIC_LR_RATE)\n\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver() # save neural net parameters\n\n # restore neural net parameters\n if NN_MODEL is not None: # NN_MODEL is the path to file\n saver.restore(sess, NN_MODEL)\n print(\"Testing model restored.\")\n\n time_stamp = 0\n\n last_bit_rate = DEFAULT_QUALITY\n bit_rate = DEFAULT_QUALITY\n\n action_vec = np.zeros(A_DIM)\n action_vec[bit_rate] = 1\n\n s_batch = [np.zeros((S_INFO, S_LEN))]\n a_batch = [action_vec]\n r_batch = []\n entropy_record = []\n\n video_count = 0\n\n while True: # serve video forever\n # the action is from the last decision\n # this is to make the framework similar to the real\n delay, sleep_time, buffer_size, rebuf, \\\n video_chunk_size, next_video_chunk_sizes, \\\n end_of_video, video_chunk_remain = \\\n net_env.get_video_chunk(bit_rate)\n\n time_stamp += delay # in ms\n time_stamp += sleep_time # in ms\n\n # reward is video quality - rebuffer penalty - smoothness\n reward = \\\n VIDEO_BIT_RATE[bit_rate] / M_IN_K \\\n - REBUF_PENALTY * rebuf \\\n - SMOOTH_PENALTY * np.abs(VIDEO_BIT_RATE[bit_rate] -\n VIDEO_BIT_RATE[last_bit_rate]) / M_IN_K\n\n r_batch.append(reward)\n\n last_bit_rate = bit_rate\n\n # log time_stamp, bit_rate, buffer_size, reward\n log_file.write(str(time_stamp / M_IN_K) + '\\t' +\n str(VIDEO_BIT_RATE[bit_rate]) + '\\t' +\n str(buffer_size) + '\\t' +\n str(rebuf) + '\\t' +\n str(video_chunk_size) + '\\t' +\n str(delay) + '\\t' +\n str(reward) + '\\n')\n log_file.flush()\n\n # retrieve previous state\n if len(s_batch) == 0:\n state = [np.zeros((S_INFO, S_LEN))]\n else:\n state = np.array(s_batch[-1], copy=True)\n\n # dequeue history record\n state = np.roll(state, -1, axis=1)\n\n # this should be S_INFO number of terms\n state[0, -1] = VIDEO_BIT_RATE[bit_rate] / float(np.max(VIDEO_BIT_RATE)) # last quality\n state[1, -1] = buffer_size / BUFFER_NORM_FACTOR # 10 sec\n state[2, -1] = float(video_chunk_size) / float(delay) / M_IN_K # kilo byte / ms\n state[3, -1] = float(delay) / M_IN_K / BUFFER_NORM_FACTOR # 10 
sec\n state[4, :A_DIM] = np.array(next_video_chunk_sizes) / M_IN_K / M_IN_K # mega byte\n state[5, -1] = np.minimum(video_chunk_remain, CHUNK_TIL_VIDEO_END_CAP) / float(CHUNK_TIL_VIDEO_END_CAP)\n\n action_prob = actor.predict(np.reshape(state, (1, S_INFO, S_LEN)))\n action_cumsum = np.cumsum(action_prob)\n bit_rate = (action_cumsum > np.random.randint(1, RAND_RANGE) / float(RAND_RANGE)).argmax()\n # Note: we need to discretize the probability into 1/RAND_RANGE steps,\n # because there is an intrinsic discrepancy in passing single state and batch states\n\n s_batch.append(state)\n\n entropy_record.append(a3c.compute_entropy(action_prob[0]))\n\n if end_of_video:\n log_file.write('\\n')\n log_file.close()\n\n last_bit_rate = DEFAULT_QUALITY\n bit_rate = DEFAULT_QUALITY # use the default action here\n\n del s_batch[:]\n del a_batch[:]\n del r_batch[:]\n\n action_vec = np.zeros(A_DIM)\n action_vec[bit_rate] = 1\n\n s_batch.append(np.zeros((S_INFO, S_LEN)))\n a_batch.append(action_vec)\n entropy_record = []\n\n video_count += 1\n\n if video_count >= len(all_file_names):\n break\n\n log_path = LOG_FILE + '_' + all_file_names[net_env.trace_idx]\n log_file = open(log_path, 'w')\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.minimum", "numpy.abs", "numpy.random.seed", "numpy.reshape", "numpy.cumsum", "tensorflow.global_variables_initializer", "numpy.max", "tensorflow.Session", "tensorflow.train.Saver", "numpy.array", "numpy.zeros", "numpy.roll", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
Lwenqi/RL
[ "2cd0b410638a7b08159b7f8c388a6fd785e14e97" ]
[ "gym-duckietown/learning/imitation/tensorflow/model.py" ]
[ "import numpy as np\nimport tensorflow as tf\n\nfrom imitation.tensorflow._layers import one_residual\n\n\nclass TensorflowModel:\n def __init__(self, observation_shape, action_shape, graph_location, seed=1234):\n # model definition\n self._observation = None\n self._action = None\n self._computation_graph = None\n self._optimization_op = None\n\n self.tf_session = tf.compat.v1.InteractiveSession()\n\n # restoring\n self.tf_checkpoint = None\n self.tf_saver = None\n\n self.seed = seed\n\n self._initialize(observation_shape, action_shape, graph_location)\n\n def predict(self, state):\n action = self.tf_session.run(self._computation_graph, feed_dict={\n self._observation: [state],\n })\n return np.squeeze(action)\n\n def train(self, observations, actions):\n _, loss = self.tf_session.run([self._optimization_op, self._loss], feed_dict={\n self._observation: observations,\n self._action: actions\n })\n return loss\n\n def commit(self):\n self.tf_saver.save(self.tf_session, self.tf_checkpoint)\n\n def computation_graph(self):\n model = one_residual(self._preprocessed_state, seed=self.seed)\n model = tf.layers.dense(model, units=64, activation=tf.nn.relu,\n kernel_initializer=tf.contrib.layers.xavier_initializer(uniform=False, seed=self.seed),\n bias_initializer=tf.contrib.layers.xavier_initializer(uniform=False, seed=self.seed))\n model = tf.layers.dense(model, units=32, activation=tf.nn.relu,\n kernel_initializer=tf.contrib.layers.xavier_initializer(uniform=False, seed=self.seed),\n bias_initializer=tf.contrib.layers.xavier_initializer(uniform=False, seed=self.seed))\n\n model = tf.layers.dense(model, self._action.shape[1])\n\n return model\n\n def _optimizer(self):\n return tf.train.AdamOptimizer()\n\n def _loss_function(self):\n return tf.losses.mean_squared_error(self._action, self._computation_graph)\n\n def _initialize(self, input_shape, action_shape, storage_location):\n if not self._computation_graph:\n self._create(input_shape, action_shape)\n self._storing(storage_location)\n self.tf_session.run(tf.global_variables_initializer())\n\n def _pre_process(self):\n resize = tf.map_fn(lambda frame: tf.image.resize_images(frame, (60, 80)), self._observation)\n and_standardize = tf.map_fn(lambda frame: tf.image.per_image_standardization(frame), resize)\n self._preprocessed_state = and_standardize\n\n def _create(self, input_shape, output_shape):\n self._observation = tf.placeholder(dtype=tf.float32, shape=input_shape, name='state')\n self._action = tf.placeholder(dtype=tf.float32, shape=output_shape, name='action')\n self._pre_process()\n\n self._computation_graph = self.computation_graph()\n self._loss = self._loss_function()\n self._optimization_op = self._optimizer().minimize(self._loss)\n\n def _storing(self, location):\n self.tf_saver = tf.train.Saver()\n\n self.tf_checkpoint = tf.train.latest_checkpoint(location)\n if self.tf_checkpoint:\n self.tf_saver.restore(self.tf_session, self.tf_checkpoint)\n else:\n self.tf_checkpoint = location\n\n def close(self):\n self.tf_session.close()\n" ]
[ [ "tensorflow.losses.mean_squared_error", "tensorflow.train.latest_checkpoint", "tensorflow.image.resize_images", "numpy.squeeze", "tensorflow.layers.dense", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.image.per_image_standardization", "tensorflow.contrib.layers.xavier_initializer", "tensorflow.train.AdamOptimizer", "tensorflow.compat.v1.InteractiveSession", "tensorflow.train.Saver" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
jkjkiiiii/PaddleHub
[ "061102402c5519ca7e1bfa2bb00a2cc40ec070a7" ]
[ "demo/text_classification/finetuned_model_to_module/module.py" ]
[ "# -*- coding:utf-8 -*-\n# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Finetuning on classification task \"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport numpy as np\nfrom paddlehub.common.logger import logger\nfrom paddlehub.module.module import moduleinfo, serving\nimport paddlehub as hub\n\n\n@moduleinfo(\n name=\"ernie_tiny_finetuned\",\n version=\"1.0.0\",\n summary=\"ERNIE tiny which was fine-tuned on the chnsenticorp dataset.\",\n author=\"anonymous\",\n author_email=\"\",\n type=\"nlp/semantic_model\")\nclass ERNIETinyFinetuned(hub.Module):\n def _initialize(self,\n ckpt_dir=\"ckpt_chnsenticorp\",\n num_class=2,\n max_seq_len=128,\n use_gpu=False,\n batch_size=1):\n self.ckpt_dir = os.path.join(self.directory, ckpt_dir)\n self.num_class = num_class\n self.MAX_SEQ_LEN = max_seq_len\n\n # Load Paddlehub ERNIE Tiny pretrained model\n self.module = hub.Module(name=\"ernie_tiny\")\n inputs, outputs, program = self.module.context(\n trainable=True, max_seq_len=max_seq_len)\n\n self.vocab_path = self.module.get_vocab_path()\n\n # Download dataset and use accuracy as metrics\n # Choose dataset: GLUE/XNLI/ChinesesGLUE/NLPCC-DBQA/LCQMC\n # metric should be acc, f1 or matthews\n metrics_choices = [\"acc\"]\n\n # For ernie_tiny, it use sub-word to tokenize chinese sentence\n # If not ernie tiny, sp_model_path and word_dict_path should be set None\n reader = hub.reader.ClassifyReader(\n vocab_path=self.module.get_vocab_path(),\n max_seq_len=max_seq_len,\n sp_model_path=self.module.get_spm_path(),\n word_dict_path=self.module.get_word_dict_path())\n\n # Construct transfer learning network\n # Use \"pooled_output\" for classification tasks on an entire sentence.\n # Use \"sequence_output\" for token-level output.\n pooled_output = outputs[\"pooled_output\"]\n\n # Setup feed list for data feeder\n # Must feed all the tensor of module need\n feed_list = [\n inputs[\"input_ids\"].name,\n inputs[\"position_ids\"].name,\n inputs[\"segment_ids\"].name,\n inputs[\"input_mask\"].name,\n ]\n\n # Setup runing config for PaddleHub Finetune API\n config = hub.RunConfig(\n use_data_parallel=False,\n use_cuda=use_gpu,\n batch_size=batch_size,\n checkpoint_dir=self.ckpt_dir,\n strategy=hub.AdamWeightDecayStrategy())\n\n # Define a classfication finetune task by PaddleHub's API\n self.cls_task = hub.TextClassifierTask(\n data_reader=reader,\n feature=pooled_output,\n feed_list=feed_list,\n num_classes=self.num_class,\n config=config,\n metrics_choices=metrics_choices)\n\n def predict(self, data, return_result=False, accelerate_mode=True):\n \"\"\"\n Get prediction results\n \"\"\"\n run_states = self.cls_task.predict(\n data=data,\n return_result=return_result,\n accelerate_mode=accelerate_mode)\n return run_states\n\n\nif __name__ == \"__main__\":\n ernie_tiny = ERNIETinyFinetuned(\n ckpt_dir=\"../ckpt_chnsenticorp\", num_class=2)\n\n # Data to 
be predicted\n    data = [[\"这个宾馆比较陈旧了,特价的房间也很一般。总体来说一般\"], [\"交通方便;环境很好;服务态度很好 房间较小\"],\n            [\"19天硬盘就罢工了~~~算上运来的一周都没用上15天~~~可就是不能换了~~~唉~~~~你说这算什么事呀~~~\"]]\n\n    index = 0\n    run_states = ernie_tiny.predict(data=data)\n    results = [run_state.run_results for run_state in run_states]\n    for batch_result in results:\n        # get predict index\n        batch_result = np.argmax(batch_result, axis=2)[0]\n        for result in batch_result:\n            print(\"%s\\tpredict=%s\" % (data[index][0], result))\n            index += 1\n" ]
[ [ "numpy.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
icewing1996/bert_dep
[ "692637bb9585363480f6a3b09ea355e5454d04b8" ]
[ "dep_parser.py" ]
[ "import modeling\nimport numpy as np\nimport tensorflow as tf\nimport linalg\n\nfrom tensorflow.contrib import rnn\nfrom tensorflow.contrib import crf\n\n\n\n\nclass Parser(object):\n\n\tdef __init__(self, is_training, num_head_labels, num_rel_labels, mlp_droput_rate, token_start_mask, arc_mlp_size, label_mlp_size, batch_size):\n\t\tself.is_training = is_training\n\t\tself.mlp_droput_rate = mlp_droput_rate\n\t\tself.arc_mlp_size = arc_mlp_size\n\t\tself.label_mlp_size = label_mlp_size\n\t\tself.token_start_mask = token_start_mask\n\t\tself.num_head_labels = num_head_labels\n\t\tself.num_rel_labels = num_rel_labels\t\t\n\t\tself.batch_size = batch_size\n\n\n\tdef __call__(self, inputs, gold_heads, gold_labels, head_label_ids_for_indexing, rel_label_ids_for_indexing):\n\t\t\n\t\tinputs = tf.layers.dropout(inputs, self.mlp_droput_rate, training=self.is_training)\t\n\t\twith tf.variable_scope('arc_h', reuse=tf.AUTO_REUSE):\n\t\t\tarc_h = self.MLP(inputs, self.arc_mlp_size)\n\n\t\twith tf.variable_scope('arc_d', reuse=tf.AUTO_REUSE):\n\t\t\tarc_d = self.MLP(inputs, self.arc_mlp_size)\n\n\t\twith tf.variable_scope('lab_h', reuse=tf.AUTO_REUSE):\n\t\t\tlab_h = self.MLP(inputs, self.label_mlp_size)\n\n\t\twith tf.variable_scope('lab_d', reuse=tf.AUTO_REUSE):\n\t\t\tlab_d = self.MLP(inputs, self.label_mlp_size)\n\n\t\twith tf.variable_scope('s_arc', reuse=tf.AUTO_REUSE):\n\t\t\ts_arc = self.biaffine(arc_d, arc_h, \n\t\t\t\t\t\t\tn_in=self.arc_mlp_size,\n\t\t\t\t\t\t\tbias_x=True,\n\t\t\t\t\t\t\tbias_y=False)\n\n\t\twith tf.variable_scope('s_lab', reuse=tf.AUTO_REUSE):\n\t\t\tlab_attn = self.biaffine(lab_d, lab_h, \n\t\t\t\t\t\t\t\tn_in=self.label_mlp_size,\n\t\t\t\t\t\t\t\tn_out=self.num_rel_labels,\n\t\t\t\t\t\t\t\tbias_x=True,\n\t\t\t\t\t\t\t\tbias_y=True)\n\n\t\t\ts_lab = tf.transpose(lab_attn, perm=[0, 2, 3, 1])\n\n\t\toutput = {}\n\t\t\n\t\tloss = self.get_loss(s_arc, s_lab, gold_heads, gold_labels, head_label_ids_for_indexing)\n\t\toutput['loss'] = loss\n\t\t\n\t\tif not self.is_training:\n\t\t\tpred_heads, pred_labels = self.decode(s_arc, s_lab)\n\t\t\t#pred_heads = self.decode(s_arc, s_lab)\n\t\t\t#arc_accuracy = self.get_accuracy(pred_heads, None, gold_heads, gold_labels)\n\t\t\tarc_accuracy, rel_accuracy = self.get_accuracy(pred_heads, pred_labels, gold_heads, gold_labels)\n\t\t\toutput['arc_accuracy'] = arc_accuracy\n\t\t\toutput['rel_accuracy'] = rel_accuracy\n\t\t\toutput['arc_predictions'] = pred_heads\n\t\t\toutput['rel_predictions'] = pred_labels\n\t\treturn output\n\t\t\n\n\tdef get_loss(self, s_arc, s_lab, gold_heads, gold_labels, head_label_ids_for_indexing):\n\t\ts_lab = self.select_indices(s_lab, head_label_ids_for_indexing)\t\t\n\t\tgold_heads = tf.one_hot(gold_heads, self.num_head_labels)\n\t\tgold_labels = tf.one_hot(gold_labels, self.num_rel_labels)\n\t\t# arc_loss = tf.losses.softmax_cross_entropy(gold_heads, s_arc, weights=self.token_start_mask, label_smoothing=0.9) \n\t\tlab_loss = tf.losses.softmax_cross_entropy(gold_labels, s_lab, weights=self.token_start_mask, label_smoothing=0.9)\n\t\t#loss = arc_loss\n\t\t# loss = arc_loss + lab_loss\n\t\tloss = lab_loss\n\t\treturn loss\n\n\tdef decode(self, s_arc, s_lab):\n\t\tpred_heads = tf.argmax(s_arc, -1)\n\t\ts_lab = self.select_indices(s_lab, pred_heads)\n\t\tpred_labels = tf.argmax(s_lab, -1)\n\t\t#return pred_heads\n\t\treturn pred_heads, pred_labels\n\n\tdef get_accuracy(self, pred_heads, pred_labels, gold_heads, gold_labels):\n\t\tarc_accuracy = tf.metrics.accuracy(gold_heads, pred_heads, 
self.token_start_mask)\n\t\trel_accuracy = tf.metrics.accuracy(gold_labels, pred_labels, self.token_start_mask)\n\t\treturn arc_accuracy, rel_accuracy\n\n\n\tdef MLP(self, inputs, mlp_size):\n\t\tmlp = tf.layers.dense(\n\t\t\t\t\tinputs,\n\t\t\t\t\tmlp_size,\n\t\t\t\t\tmodeling.gelu,\n\t\t\t\t\tkernel_initializer=tf.orthogonal_initializer())\n\t\tmlp = tf.layers.dropout(mlp, self.mlp_droput_rate, training=self.is_training)\t\t\t\n\t\treturn mlp\n\n\tdef biaffine(self, x, y, n_in, n_out=1, bias_x=True, bias_y=True):\n\t\tself.n_in = n_in\n\t\tself.n_out = n_out\n\t\tself.bias_x = bias_x\n\t\tself.bias_y = bias_y\n\t\tbatch_size, max_seq_length, embedding_size = modeling.get_shape_list(x, expected_rank=3)\n\t\tself.weight = tf.get_variable(\"biaffine_weight\", \n\t\t\t\t\t\t\t\t\tshape=[self.batch_size, n_out, n_in + bias_x, n_in + bias_y],\n\t\t\t\t\t\t\t\t\tdtype=tf.float32)\n\n\t\tif self.bias_x:\n\t\t\tx = tf.concat([x, tf.ones(tf.stack([batch_size, max_seq_length, 1]))], 2)\n\t\tif self.bias_y:\n\t\t\ty = tf.concat([y, tf.ones(tf.stack([batch_size, max_seq_length, 1]))], 2)\n\t\t# [batch_size, 1, seq_len, d]\n\t\tx = tf.expand_dims(x, 1)\n\t\tx = tf.broadcast_to(x, [batch_size, n_out, max_seq_length, n_in + bias_x])\n\t\t# [batch_size, 1, seq_len, d]\n\t\ty = tf.expand_dims(y, 1)\n\t\ty = tf.broadcast_to(y, [batch_size, n_out, max_seq_length, n_in + bias_y])\n\t\t# [batch_size, n_out, seq_len, d_1] @ [batch_size, n_out, d_1, d_2] @ [batch_size, n_out, d_2, seq_len]\n\t\t# => [batch_size, n_out, seq_len, d_2] @ [batch_size, 1, d_2, seq_len]\n\t\t# => [batch_size, n_out, seq_len, seq_len]\n\t\ts = x @ self.weight @ tf.transpose(y, perm=[0, 1, 3, 2])\n\t\t# remove dim 1 if n_out == 1\n\t\tif n_out == 1:\n\t\t\ts = tf.squeeze(s, 1)\n\n\t\treturn s\n\n\tdef select_indices(self, inputs, indices):\n\t\t# inputs = [batch_size, seq_len, seq_len, n_out]\n\t\t# indices = [batch_size, seq_len]\n\t\t# Construct nd_indices\n\t\tindices = tf.cast(indices, dtype=tf.int32)\n\t\tbatch_size, seq_len = modeling.get_shape_list(indices, expected_rank=2)\n\n\t\tbatches = tf.broadcast_to(tf.reshape(tf.range(batch_size),[batch_size,1]),[batch_size, seq_len])\n\t\tseqs = tf.broadcast_to(tf.range(seq_len), [batch_size, seq_len])\n\n\t\tnd_indices = tf.stack([batches, seqs, indices], axis=2)\n\t\tresult = tf.gather_nd(inputs, nd_indices)\n\t\treturn result\n\n\n\tdef crf_layer(self, logits):\n\t\t\"\"\"\n\t\tcalculate crf loss\n\t\t:param project_logits: [1, num_steps, num_tags]\n\t\t:return: scalar loss\n\t\t\"\"\"\n\t\twith tf.variable_scope(\"crf_loss\"):\n\t\t\ttrans = tf.get_variable(\n\t\t\t\t\"transitions\",\n\t\t\t\tshape=[self.num_labels, self.num_labels],\n\t\t\t\tinitializer=self.initializers.xavier_initializer())\n\t\t\tlog_likelihood, trans = tf.contrib.crf.crf_log_likelihood(\n\t\t\t\tinputs=logits,\n\t\t\t\ttag_indices=self.labels,\n\t\t\t\ttransition_params=trans,\n\t\t\t\tsequence_lengths=self.lengths)\n\t\t\treturn tf.reduce_mean(-log_likelihood), trans\n\n\n\n\n\nclass BLSTM_CRF(object):\n\tdef __init__(self, embedded_chars, hidden_unit, cell_type, num_layers, dropout_rate,\n\t\t\t\t initializers, num_labels, seq_length, labels, lengths, is_training):\n\t\t\"\"\"\n\t\tBLSTM-CRF 网络\n\t\t:param embedded_chars: Fine-tuning embedding input\n\t\t:param hidden_unit: LSTM的隐含单元个数\n\t\t:param cell_type: RNN类型(LSTM OR GRU DICNN will be add in feature)\n\t\t:param num_layers: RNN的层数\n\t\t:param droupout_rate: droupout rate\n\t\t:param initializers: variable init class\n\t\t:param num_labels: 
number of labels\n\t\t:param seq_length: maximum sequence length\n\t\t:param labels: ground-truth labels\n\t\t:param lengths: [batch_size] true length of each sequence in the batch\n\t\t:param is_training: whether this is the training phase\n\t\t\"\"\"\n\t\tself.hidden_unit = hidden_unit\n\t\tself.dropout_rate = dropout_rate\n\t\tself.cell_type = cell_type\n\t\tself.num_layers = num_layers\n\t\tself.embedded_chars = embedded_chars\n\t\tself.initializers = initializers\n\t\tself.seq_length = seq_length\n\t\tself.num_labels = num_labels\n\t\tself.labels = labels\n\t\tself.lengths = lengths\n\t\tself.embedding_dims = embedded_chars.shape[-1].value\n\t\tself.is_training = is_training\n\n\tdef add_blstm_crf_layer(self, crf_only):\n\t\t\"\"\"\n\t\tBLSTM-CRF network\n\t\t:return: \n\t\t\"\"\"\n\t\tif self.is_training:\n\t\t\t# LSTM input dropout rate; setting 0.9 gave the best score\n\t\t\tself.embedded_chars = tf.nn.dropout(self.embedded_chars, self.dropout_rate)\n\n\t\tif crf_only:\n\t\t\tlogits = self.project_crf_layer(self.embedded_chars)\n\t\telse:\n\t\t\t#blstm\n\t\t\tlstm_output = self.blstm_layer(self.embedded_chars)\n\t\t\t#project\n\t\t\tlogits = self.project_bilstm_layer(lstm_output)\n\t\t#crf\n\t\tloss, trans = self.crf_layer(logits)\n\t\t# CRF decode, pred_ids is the maximum-probability tag path\n\t\tpred_ids, _ = crf.crf_decode(potentials=logits, transition_params=trans, sequence_length=self.lengths)\n\t\treturn ((loss, logits, pred_ids))\n\n\tdef add_blstm_crf_layer_not_really_working(self, crf_only):\n\t\tif self.is_training:\n\t\t\t# LSTM input dropout rate; setting 0.9 gave the best score\n\t\t\tself.embedded_chars = tf.nn.dropout(self.embedded_chars, self.dropout_rate)\n\n\t\t#blstm\n\t\tlstm_output = self.blstm_layer(self.embedded_chars)\n\t\t#project\n\t\tlogits = self.project_bilstm_layer(lstm_output)\n\t\tloss = tf.losses.softmax_cross_entropy(self.labels, logits, self.lengths, label_smoothing=0.9)\n\t\tpred_ids = tf.math.argmax(logits, -1)\n\n\t\treturn ((loss, logits, pred_ids))\n\n\tdef _which_cell(self):\n\t\t\"\"\"\n\t\tRNN cell type\n\t\t:return: \n\t\t\"\"\"\n\t\tcell_tmp = None\n\t\tif self.cell_type == 'lstm':\n\t\t\tcell_tmp = rnn.LayerNormBasicLSTMCell(self.hidden_unit, dropout_keep_prob=self.dropout_rate)\n\t\t\t#cell_tmp = rnn.BasicLSTMCell(self.hidden_unit)\n\t\telif self.cell_type == 'gru':\n\t\t\tcell_tmp = rnn.GRUCell(self.hidden_unit)\n\t\t# whether to apply dropout\n\t\tif self.dropout_rate is not None:\n\t\t\tcell_tmp = rnn.DropoutWrapper(cell_tmp, output_keep_prob=self.dropout_rate)\n\t\treturn cell_tmp\n\n\tdef _bi_dir_rnn(self):\n\t\t\"\"\"\n\t\tbidirectional RNN\n\t\t:return:\n\t\t\"\"\"\n\t\tcell_fw = self._which_cell()\n\t\tcell_bw = self._which_cell()\n\t\tif self.dropout_rate is not None:\n\t\t\tcell_bw = rnn.DropoutWrapper(cell_bw, output_keep_prob=self.dropout_rate)\n\t\t\tcell_fw = rnn.DropoutWrapper(cell_fw, output_keep_prob=self.dropout_rate)\n\t\treturn cell_fw, cell_bw\n\n\tdef blstm_layer(self, embedding_chars):\n\t\t\"\"\"\n\t\t\t\t\n\t\t:return: \n\t\t\"\"\"\n\t\twith tf.variable_scope('rnn_layer'):\n\t\t\tcell_fw, cell_bw = self._bi_dir_rnn()\n\t\t\tif self.num_layers > 1:\n\t\t\t\tcell_fw = rnn.MultiRNNCell([cell_fw] * self.num_layers, state_is_tuple=True)\n\t\t\t\tcell_bw = rnn.MultiRNNCell([cell_bw] * self.num_layers, state_is_tuple=True)\n\n\t\t\toutputs, _ = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, embedding_chars,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t dtype=tf.float32)\n\t\t\toutputs = tf.concat(outputs, axis=2)\n\t\treturn outputs\n\n\tdef project_bilstm_layer(self, lstm_outputs, name=None):\n\t\t\"\"\"\n\t\thidden layer between lstm layer and logits\n\t\t:param lstm_outputs: [batch_size, 
num_steps, emb_size] \n\t\t:return: [batch_size, num_steps, num_tags]\n\t\t\"\"\"\n\t\twith tf.variable_scope(\"project\" if not name else name):\n\t\t\twith tf.variable_scope(\"hidden\"):\n\t\t\t\tW = tf.get_variable(\"W\", shape=[self.hidden_unit * 2, self.hidden_unit],\n\t\t\t\t\t\t\t\t\tdtype=tf.float32, initializer=self.initializers.xavier_initializer())\n\n\t\t\t\tb = tf.get_variable(\"b\", shape=[self.hidden_unit], dtype=tf.float32,\n\t\t\t\t\t\t\t\t\tinitializer=tf.zeros_initializer())\n\t\t\t\toutput = tf.reshape(lstm_outputs, shape=[-1, self.hidden_unit * 2])\n\t\t\t\thidden = tf.tanh(tf.nn.xw_plus_b(output, W, b))\n\n\t\t\t# project to score of tags\n\t\t\twith tf.variable_scope(\"logits\"):\n\t\t\t\tW = tf.get_variable(\"W\", shape=[self.hidden_unit, self.num_labels],\n\t\t\t\t\t\t\t\t\tdtype=tf.float32, initializer=self.initializers.xavier_initializer())\n\n\t\t\t\tb = tf.get_variable(\"b\", shape=[self.num_labels], dtype=tf.float32,\n\t\t\t\t\t\t\t\t\tinitializer=tf.zeros_initializer())\n\n\t\t\t\tpred = tf.nn.xw_plus_b(hidden, W, b)\n\t\t\treturn tf.reshape(pred, [-1, self.seq_length, self.num_labels])\n\n\tdef project_crf_layer(self, embedding_chars, name=None):\n\t\t\"\"\"\n\t\thidden layer between input layer and logits\n\t\t:param lstm_outputs: [batch_size, num_steps, emb_size] \n\t\t:return: [batch_size, num_steps, num_tags]\n\t\t\"\"\"\n\t\twith tf.variable_scope(\"project\" if not name else name):\n\t\t\twith tf.variable_scope(\"logits\"):\n\t\t\t\tW = tf.get_variable(\"W\", shape=[self.embedding_dims, self.num_labels],\n\t\t\t\t\t\t\t\t\tdtype=tf.float32, initializer=self.initializers.xavier_initializer())\n\n\t\t\t\tb = tf.get_variable(\"b\", shape=[self.num_labels], dtype=tf.float32,\n\t\t\t\t\t\t\t\t\tinitializer=tf.zeros_initializer())\n\t\t\t\toutput = tf.reshape(self.embedded_chars, shape=[-1, self.embedding_dims]) #[batch_size, embedding_dims]\n\t\t\t\tpred = tf.tanh(tf.nn.xw_plus_b(output, W, b))\n\t\t\treturn tf.reshape(pred, [-1, self.seq_length, self.num_labels])\n\n\tdef crf_layer(self, logits):\n\t\t\"\"\"\n\t\tcalculate crf loss\n\t\t:param project_logits: [1, num_steps, num_tags]\n\t\t:return: scalar loss\n\t\t\"\"\"\n\t\twith tf.variable_scope(\"crf_loss\"):\n\t\t\ttrans = tf.get_variable(\n\t\t\t\t\"transitions\",\n\t\t\t\tshape=[self.num_labels, self.num_labels],\n\t\t\t\tinitializer=self.initializers.xavier_initializer())\n\t\t\tlog_likelihood, trans = tf.contrib.crf.crf_log_likelihood(\n\t\t\t\tinputs=logits,\n\t\t\t\ttag_indices=self.labels,\n\t\t\t\ttransition_params=trans,\n\t\t\t\tsequence_lengths=self.lengths)\n\t\t\treturn tf.reduce_mean(-log_likelihood), trans" ]
[ [ "tensorflow.get_variable", "tensorflow.metrics.accuracy", "tensorflow.concat", "tensorflow.contrib.rnn.GRUCell", "tensorflow.layers.dropout", "tensorflow.stack", "tensorflow.cast", "tensorflow.nn.bidirectional_dynamic_rnn", "tensorflow.orthogonal_initializer", "tensorflow.contrib.crf.crf_decode", "tensorflow.contrib.crf.crf_log_likelihood", "tensorflow.math.argmax", "tensorflow.contrib.rnn.LayerNormBasicLSTMCell", "tensorflow.squeeze", "tensorflow.contrib.rnn.MultiRNNCell", "tensorflow.losses.softmax_cross_entropy", "tensorflow.argmax", "tensorflow.nn.dropout", "tensorflow.nn.xw_plus_b", "tensorflow.gather_nd", "tensorflow.zeros_initializer", "tensorflow.one_hot", "tensorflow.transpose", "tensorflow.range", "tensorflow.reduce_mean", "tensorflow.contrib.rnn.DropoutWrapper", "tensorflow.broadcast_to", "tensorflow.reshape", "tensorflow.expand_dims", "tensorflow.variable_scope" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12" ] } ]
SamKaiYang/timda_dual_arm
[ "8582945cb7bc9d955d224bffb5af2c207bbb311a" ]
[ "hand_eye/src/hand_eye/CharucoPosture.py" ]
[ "#!/usr/bin/env python\n\n# The following code is used to watch a video stream, detect Aruco markers, and use\n# a set of markers to determine the posture of the camera in relation to the plane\n# of markers.\n#\n# Assumes that all markers are on the same plane, for example on the same piece of paper\n#\n# Requires camera calibration (see the rest of the project for example calibration)\n\nimport rospy\nimport std_msgs, std_srvs\nimport numpy as np\nimport cv2\nimport cv2.aruco as aruco\nimport os\nimport pickle\nfrom aruco_hand_eye.srv import aruco_info, aruco_infoResponse\nimport time\nimport pyrealsense2 as rs\nNUMBER = 5\n# # Constant parameters used in Aruco methods\n# ARUCO_PARAMETERS = aruco.DetectorParameters_create()\n# ARUCO_DICT = aruco.Dictionary_get(aruco.DICT_4X4_100)\n\n# Constant parameters used in Aruco methods\nARUCO_PARAMETERS = aruco.DetectorParameters_create()\nARUCO_DICT = aruco.Dictionary_get(aruco.DICT_4X4_50)\nCHARUCOBOARD_ROWCOUNT=7\nCHARUCOBOARD_COLCOUNT=5\n\n# Create grid board object we're using in our stream\nCHARUCO_BOARD = aruco.CharucoBoard_create(\n squaresX=CHARUCOBOARD_COLCOUNT,\n squaresY=CHARUCOBOARD_ROWCOUNT,\n squareLength=0.0359,\n markerLength=0.0244,\n # squareLength=0.04,\n # markerLength=0.02,\n dictionary=ARUCO_DICT)\n\n\n\nclass CharucoBoardPosture():\n def __init__(self, name, size):\n self.name = name\n self.markersize = size\n self.cnd = 0\n self.frameId = 0\n self.pipeline = rs.pipeline()\n config = rs.config()\n config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)\n config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)\n\n self.pipeline.start(config)\n # Check for camera calibration data\n c_x = 643.47548083\n c_y = 363.67742746\n f_x = 906.60886808\n f_y = 909.34831447\n k_1 = 0.16962942\n k_2 = -0.5560001\n p_1 = 0.00116353\n p_2 = -0.00122694\n k_3 = 0.52491878\n\n c_x = 649.007507324219\n c_y = 356.122222900391\n f_x = 922.76806640625\n f_y = 923.262023925781\n \n self.cameraMatrix = np.array([[f_x, 0, c_x],\n [0, f_y, c_y],\n [0, 0, 1]])\n # self.distCoeffs = np.array([k_1, k_2, p_1, p_2, k_3])\n self.distCoeffs = np.array([0.0, 0, 0, 0, 0])\n\n \n # self.cameraMatrix = np.array([[1.38726465e+03, 0.00000000e+00, 9.67009977e+02], #pinto\n # [0.00000000e+00, 1.39067726e+03, 5.44111718e+02],\n # [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])\n # self.distCoeffs = np.array([ 0.1772611, -0.57056992, -0.0008356, 0.00099024, 0.52153116])\n\n # dist_coef = np.array([0, 0, 0, 0, 0])\n # self.cameraMatrix = np.array([[906.10541873, 0.0, 643.12531806],\n # [0.0, 904.68643316, 359.79710938],\n # [0.0, 0.0, 1.0 ]])\n # self.distCoeffs = np.array([1.53041876e-01, -4.08438606e-01, 1.53722452e-03, -3.95946669e-04, 2.56666605e-01])\n # if not os.path.exists('/home/iclab/wrs_ws/src/aruco_hand_eye/cfg/calibration.pckl'):\n # print(\"You need to calibrate the camera you'll be using. See calibration project directory for details.\")\n # self.cameraMatrix = [[603.00869939, 0.0, 318.46049727],\n # [0.0, 601.50770586, 251.87010006],\n # [0.0, 0.0, 1.0 ]]\n # self.distCoeffs = [[7.59282092e-02, 2.21483627e-01, 1.41152268e-03, -4.71388619e-04, -1.18482976e+00]]\n\n # #exit()\n # else:\n # f = open('/home/iclab/wrs_ws/src/aruco_hand_eye/cfg/calibration.pckl', 'rb')\n # (self.cameraMatrix, self.distCoeffs, _, _) = pickle.load(f)\n # f.close()\n # if self.cameraMatrix is None or self.distCoeffs is None:\n # print(\"Calibration issue. 
Remove ./calibration.pckl and recalibrate your camera with CalibrateCamera.py.\")\n # exit()\n # print(self.cameraMatrix)\n # print(' ')\n # print(self.distCoeffs)\n\n # Create grid board object we're using in our stream\n # board = aruco.GridBoard_create(\n # markersX=2,\n # markersY=2,\n # markerLength=0.09,\n # markerSeparation=0.01,\n # dictionary=ARUCO_DICT)\n\n # Create vectors we'll be using for rotations and translations for postures\n self.rvecs = None \n self.tvecs = None\n self.rvecs_arr = np.zeros((3, NUMBER))\n self.tvecs_arr = np.zeros((3, NUMBER))\n # cam = cv2.VideoCapture('gridboardiphonetest.mp4')\n # self.cam_left = cv2.VideoCapture(5)\n # self.cam_right = cv2.VideoCapture(10)\n self.cam = None\n self.QueryImg = None\n self.init_server()\n frames = self.pipeline.wait_for_frames()\n color_frame = frames.get_color_frame()\n self.QueryImg = np.asanyarray(color_frame.get_data())\n\n def init_server(self):\n self.server = rospy.Service('get_ar_marker', aruco_info, self.findCharucoBoard)\n\n def findCharucoBoard(self, req):\n self.rvecs_arr = np.zeros((3, NUMBER))\n self.tvecs_arr = np.zeros((3, NUMBER))\n res = aruco_infoResponse()\n\n for order in range (NUMBER):\n # Capturing each frame of our video stream\n # ret, self.QueryImg = self.cam.read()\n # frames = self.pipeline.wait_for_frames()\n # color_frame = frames.get_color_frame()\n # self.QueryImg = np.asanyarray(color_frame.get_data())\n # grayscale image\n gray = cv2.cvtColor(self.QueryImg, cv2.COLOR_BGR2GRAY)\n\n # Detect Aruco markers\n corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, ARUCO_DICT, parameters=ARUCO_PARAMETERS)\n\n # Refine detected markers\n # Eliminates markers not part of our board, adds missing markers to the board\n corners, ids, rejectedImgPoints, recoveredIds = aruco.refineDetectedMarkers(\n image = gray,\n board = CHARUCO_BOARD,\n detectedCorners = corners,\n detectedIds = ids,\n rejectedCorners = rejectedImgPoints,\n cameraMatrix = self.cameraMatrix,\n distCoeffs = self.distCoeffs) \n\n # Require 15 markers before drawing axis\n if ids is not None and len(ids) > 10:\n response, charuco_corners, charuco_ids = aruco.interpolateCornersCharuco(\n markerCorners=corners,\n markerIds=ids,\n image=gray,\n board=CHARUCO_BOARD)\n\n # Require more than 20 squares\n if response is not None and response > 20:\n # Estimate the posture of the charuco board, which is a construction of 3D space based on the 2D video \n pose, rvec, tvec = aruco.estimatePoseCharucoBoard(\n charucoCorners=charuco_corners, \n charucoIds=charuco_ids, \n board=CHARUCO_BOARD, \n cameraMatrix=self.cameraMatrix, \n distCoeffs=self.distCoeffs)\n \n # self.rvecs, self.tvecs = aruco.estimatePoseSingleMarkers(corners, self.markersize, self.cameraMatrix, self.distCoeffs)\n # for _id, rvec, tvec in zip(ids, self.rvecs, self.tvecs):\n if pose:\n if order == 0:\n print(\"=============================================\")\n print(rvec)\n print(tvec)\n for i in range(3):\n self.rvecs_arr[i][order] = rvec[i][0]\n self.tvecs_arr[i][order] = tvec[i][0]\n \n # self.QueryImg = aruco.drawAxis(self.QueryImg, self.cameraMatrix, self.distCoeffs, rvec, tvec, 0.02)\n cv2.waitKey(10)\n # Display our image\n # print('self.rvecs_arr = ', self.rvecs_arr)\n # print('self.tvecs_arr = ', self.tvecs_arr)\n cv2.destroyAllWindows()\n r_avg = np.zeros(3) \n t_avg = np.zeros(3)\n\n ra = self.rvecs_arr[0].nonzero()\n rb = self.rvecs_arr[1].nonzero()\n rc = self.rvecs_arr[2].nonzero()\n tx = self.tvecs_arr[0].nonzero()\n ty = self.tvecs_arr[1].nonzero()\n tz = 
self.tvecs_arr[2].nonzero()\n ra = self.rvecs_arr[0][ra]\n rb = self.rvecs_arr[1][rb]\n rc = self.rvecs_arr[2][rc]\n tx = self.tvecs_arr[0][tx]\n ty = self.tvecs_arr[1][ty]\n tz = self.tvecs_arr[2][tz]\n ra = np.sort(ra, kind = 'quicksort')\n rb = np.sort(rb, kind = 'quicksort')\n rc = np.sort(rc, kind = 'quicksort')\n tx = np.sort(tx, kind = 'quicksort')\n ty = np.sort(ty, kind = 'quicksort')\n tz = np.sort(tz, kind = 'quicksort')\n r = np.array((ra, rb, rc))\n t = np.array((tx, ty, tz))\n for i in range(3):\n rv, tv = r[i], t[i]\n \n while np.std(rv) > 0.01 and len(rv) >= NUMBER*0.2:\n if abs(rv[0] - np.average(rv)) > abs(rv[-1] - np.average(rv)):\n rv = np.delete(rv, 0)\n else:\n rv = np.delete(rv, -1)\n while np.std(tv) > 0.01 and len(tv) >= NUMBER*0.2:\n if abs(tv[0] - np.average(tv)) > abs(tv[-1] - np.average(tv)):\n tv = np.delete(tv, 0)\n else:\n tv = np.delete(tv, -1)\n \n r_avg[i] = np.average(rv)\n t_avg[i] = np.average(tv)\n \n # print('[_id, r,t] = ', [_id, r,t])\n # res.ids.append(_id)\n # res.rvecs = np.append(res.rvecs, r_avg)\n # res.tvecs = np.append(res.tvecs, t_avg)\n res.rvecs = r_avg\n res.tvecs = t_avg\n print('res.rvecs is ', res.rvecs)\n print('res.tvecs is ', res.tvecs)\n result = np.array(())\n result = np.append(result, [np.copy(r_avg), np.copy(t_avg)])\n \n result = result.reshape(2,3)\n\n if self.name == 'test':\n # Outline all of the markers detected in our image\n self.QueryImg = aruco.drawDetectedMarkers(self.QueryImg, corners, borderColor=(0, 0, 255))\n self.QueryImg = aruco.drawAxis(self.QueryImg, self.cameraMatrix, self.distCoeffs, result[0], result[1], 0.02)\n curr_path = os.path.dirname(os.path.abspath(__file__))\n filename = curr_path + \"/pic/camera-pic-of-charucoboard-\" + str(int(self.frameId)) + \".jpg\"\n cv2.imwrite(filename, self.QueryImg)\n self.frameId += 1\n # cv2.imwrite('./123%d.jpg'%self.cnd, self.QueryImg)\n # self.cnd += 1\n # cv2.namedWindow('Amanda', cv2.WINDOW_AUTOSIZE)\n # self.QueryImg = cv2.imread('./123%d.jpg'%self.cnd)\n # cv2.imshow('Amanda', self.QueryImg)\n # cv2.waitKey(1000)\n # cv2.destroyAllWindows()\n\n # time.sleep(2)\n print('------')\n # while not cv2.waitKey(1) & 0xFF == ord('q'):\n # pass\n # cv2.destroyAllWindows()\n return res\n \nif __name__ == '__main__':\n rospy.init_node('aruco_tracker')\n is_show = True\n name = 'test'\n size = rospy.get_param('~marker_size')\n if rospy.has_param('is_show'):\n is_show = rospy.get_param('is_show')\n if is_show == False:\n name = 'fuck_run'\n\n mp = CharucoBoardPosture(name, size)\n if mp.name == 'test':\n while mp.QueryImg is None:\n time.sleep(0.1)\n while not rospy.is_shutdown():\n frames = mp.pipeline.wait_for_frames()\n color_frame = frames.get_color_frame()\n mp.QueryImg = np.asanyarray(color_frame.get_data())\n cv2.imshow('Amanda', mp.QueryImg)\n cv2.waitKey(10)\n rospy.spin()\n cv2.destroyAllWindows()\n del mp\n # while True:\n # result = mp.findCharucoBoard()\n # print(result)\n # print(cv2.Rodrigues(result[0][1])[0])\n # # print('==========================')\n # if cv2.waitKey(0) & 0xFF == ord('q'):\n # break\n \n" ]
[ [ "numpy.sort", "numpy.copy", "numpy.std", "numpy.delete", "numpy.average", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fxbriol/probnum
[ "7e0e94cf9146aaa2b730b02c6d75a022cd629b5c", "7e0e94cf9146aaa2b730b02c6d75a022cd629b5c", "7e0e94cf9146aaa2b730b02c6d75a022cd629b5c", "7e0e94cf9146aaa2b730b02c6d75a022cd629b5c", "7e0e94cf9146aaa2b730b02c6d75a022cd629b5c" ]
[ "benchmarks/linearsolvers.py", "src/probnum/filtsmooth/optim/_iterated_component.py", "src/probnum/filtsmooth/gaussian/_kalmanposterior.py", "tests/test_filtsmooth/test_gaussian/test_approx/_linearization_test_interface.py", "src/probnum/randvars/_utils.py" ]
[ "\"\"\"Benchmarks for linear solvers.\"\"\"\nimport numpy as np\n\nfrom probnum import linops, problems, randvars\nfrom probnum.linalg import problinsolve\nfrom probnum.problems.zoo.linalg import random_sparse_spd_matrix, random_spd_matrix\n\nLINEAR_SYSTEMS = [\"dense\", \"sparse\", \"linop\"]\nLINSYS_DIMS = [100, 1000, 10000, 100000]\nQUANTITIES_OF_INTEREST = [\"x\", \"A\", \"Ainv\"]\n\n\ndef get_linear_system(name: str, dim: int):\n rng = np.random.default_rng(0)\n\n if name == \"dense\":\n if dim > 1000:\n raise NotImplementedError()\n A = random_spd_matrix(rng=rng, dim=dim)\n elif name == \"sparse\":\n A = random_sparse_spd_matrix(\n rng=rng, dim=dim, density=np.minimum(1.0, 1000 / dim**2)\n )\n elif name == \"linop\":\n if dim > 100:\n raise NotImplementedError()\n # TODO: Larger benchmarks currently fail. Remove once PLS refactor\n # (https://github.com/probabilistic-numerics/probnum/issues/51) is resolved\n A = linops.Scaling(factors=rng.normal(size=(dim,)))\n else:\n raise NotImplementedError()\n\n solution = rng.normal(size=(dim,))\n b = A @ solution\n return problems.LinearSystem(A=A, b=b, solution=solution)\n\n\ndef get_quantity_of_interest(\n qoi: str,\n x: randvars.RandomVariable,\n A: randvars.RandomVariable,\n Ainv: randvars.RandomVariable,\n):\n if qoi == \"x\":\n return x\n elif qoi == \"A\":\n return A\n elif qoi == \"Ainv\":\n return Ainv\n else:\n raise NotImplementedError()\n\n\nclass LinSolve:\n \"\"\"Benchmark solving a linear system.\"\"\"\n\n param_names = [\"linsys\", \"dim\"]\n params = [\n LINEAR_SYSTEMS,\n LINSYS_DIMS,\n ]\n\n def setup(self, linsys, dim):\n self.linsys = get_linear_system(name=linsys, dim=dim)\n xhat, _, _, _ = problinsolve(A=self.linsys.A, b=self.linsys.b)\n self.xhat = xhat\n\n def time_solve(self, linsys, dim):\n problinsolve(A=self.linsys.A, b=self.linsys.b)\n\n def peakmem_solve(self, linsys, dim):\n problinsolve(A=self.linsys.A, b=self.linsys.b)\n\n def track_residual_norm(self, linsys, dim):\n return np.linalg.norm(self.linsys.b - self.linsys.A @ self.xhat.mean)\n\n def track_error_2norm(self, linsys, dim):\n return np.linalg.norm(self.linsys.solution - self.xhat.mean)\n\n def track_error_Anorm(self, linsys, dim):\n diff = self.linsys.solution - self.xhat.mean\n return np.sqrt(np.inner(diff, self.linsys.A @ diff))\n\n\nclass PosteriorBelief:\n \"\"\"Benchmark computing derived quantities from the posterior belief.\"\"\"\n\n param_names = [\"linsys\", \"dim\", \"qoi\"]\n params = [LINEAR_SYSTEMS, LINSYS_DIMS, QUANTITIES_OF_INTEREST]\n\n def setup(self, linsys, dim, qoi):\n\n if dim > 1000:\n # Operations on posterior for large matrices can be very memory-intensive\n raise NotImplementedError()\n\n self.linsys = get_linear_system(name=linsys, dim=dim)\n x, A, Ainv, _ = problinsolve(A=self.linsys.A, b=self.linsys.b)\n self.qoi = get_quantity_of_interest(qoi, x, A, Ainv)\n\n def time_trace_cov(self, linsys, dim, qoi):\n self.qoi.cov.trace()\n\n def peakmem_trace_cov(self, linsys, dim, qoi):\n self.qoi.cov.trace()\n", "\"\"\"Iterated components for iterated filtering and smoothing.\"\"\"\n\nimport numpy as np\n\nfrom probnum import randprocs\nfrom probnum.filtsmooth.optim import _stopping_criterion\n\n\nclass IteratedDiscreteComponent(randprocs.markov.Transition):\n \"\"\"Iterated updates.\n\n Examples\n --------\n >>> from probnum.filtsmooth.optim import FiltSmoothStoppingCriterion\n >>> from probnum.filtsmooth.gaussian.approx import DiscreteEKFComponent\n >>> from probnum.problems.zoo.diffeq import logistic\n >>> from 
probnum.randprocs.markov.integrator import IntegratedWienerProcess\n >>> from probnum.randprocs.markov.discrete import NonlinearGaussian\n >>> from probnum.randvars import Constant\n >>> import numpy as np\n\n Set up an iterated component.\n\n >>> iwp = IntegratedWienerProcess(\n ... initarg=0., num_derivatives=2, wiener_process_dimension=1\n ... )\n >>> H0, H1 = iwp.transition.proj2coord(coord=0), iwp.transition.proj2coord(coord=1)\n >>> call = lambda t, x: H1 @ x - H0 @ x * (1 - H0 @ x)\n >>> jacob = lambda t, x: H1 - (1 - 2*(H0 @ x)) @ H0\n >>> nonlinear_model = NonlinearGaussian.from_callable(3, 1, call, jacob)\n >>> ekf = DiscreteEKFComponent(nonlinear_model)\n >>> comp = IteratedDiscreteComponent(ekf, FiltSmoothStoppingCriterion())\n\n Generate some random variables and pseudo observations.\n\n >>> some_array = np.array([0.1, 1., 2.])\n >>> some_rv = Constant(some_array)\n >>> rv, _ = iwp.transition.forward_realization(some_array , t=0., dt=0.1)\n >>> rv_observed, _ = comp.forward_rv(rv, t=0.2)\n >>> rv_observed *= 0.01 # mitigate zero data\n\n Its attributes are inherited from the component that is passed through.\n\n >>> print(comp.input_dim)\n 3\n >>> out, info = comp.forward_realization(some_array,some_rv,)\n >>> print(out.mean)\n [0.91]\n\n But its backward values are different, because of the iteration.\n\n >>> out_ekf, _ = ekf.backward_rv(rv_observed, rv)\n >>> print(out_ekf.mean)\n [ 0.17081493 0.15351366 -13.73607367]\n >>> out_iterated, _ = comp.backward_rv(rv_observed, rv)\n >>> print(out_iterated.mean)\n [ 0.17076427 0.15194483 -13.76505168]\n \"\"\"\n\n def __init__(\n self,\n component,\n stopcrit=None,\n ):\n self._component = component\n if stopcrit is None:\n self.stopcrit = _stopping_criterion.FiltSmoothStoppingCriterion()\n else:\n self.stopcrit = stopcrit\n\n super().__init__(input_dim=component.input_dim, output_dim=component.output_dim)\n\n # Iterated filtering implementation\n\n def backward_rv(\n self,\n rv_obtained,\n rv,\n rv_forwarded=None,\n gain=None,\n t=None,\n dt=None,\n _diffusion=1.0,\n _linearise_at=None,\n ):\n current_rv, info = self._component.backward_rv(\n rv_obtained=rv_obtained,\n rv=rv,\n t=t,\n dt=dt,\n _diffusion=_diffusion,\n _linearise_at=_linearise_at,\n )\n\n new_mean = current_rv.mean.copy()\n old_mean = np.inf * np.ones(current_rv.mean.shape)\n while not self.stopcrit(error=new_mean - old_mean, reference=new_mean):\n old_mean = new_mean.copy()\n current_rv, info = self._component.backward_rv(\n rv_obtained=rv_obtained,\n rv=rv,\n t=t,\n dt=dt,\n _diffusion=_diffusion,\n _linearise_at=current_rv,\n )\n new_mean = current_rv.mean.copy()\n return current_rv, info\n\n def backward_realization(\n self,\n realization_obtained,\n rv,\n rv_forwarded=None,\n gain=None,\n t=None,\n dt=None,\n _diffusion=1.0,\n _linearise_at=None,\n ):\n return self._backward_realization_via_backward_rv(\n realization_obtained,\n rv=rv,\n rv_forwarded=rv_forwarded,\n gain=gain,\n t=t,\n dt=dt,\n _diffusion=_diffusion,\n _linearise_at=_linearise_at,\n )\n\n # These need to be re-implemented here, because otherwise this class\n # cannot be instantiated (abc things)\n\n def forward_rv(\n self, rv, t, dt=None, compute_gain=False, _diffusion=1.0, _linearise_at=None\n ):\n return self._component.forward_rv(\n rv,\n t,\n dt=dt,\n compute_gain=compute_gain,\n _diffusion=_diffusion,\n _linearise_at=_linearise_at,\n )\n\n def forward_realization(\n self,\n realization,\n t,\n dt=None,\n compute_gain=False,\n _diffusion=1.0,\n _linearise_at=None,\n ):\n return 
self._component.forward_realization(\n realization,\n t,\n dt=dt,\n compute_gain=compute_gain,\n _diffusion=_diffusion,\n _linearise_at=_linearise_at,\n )\n\n # Pass on all the rest to the EKF/UKF component\n\n def __getattr__(self, attr):\n\n if attr in [\n \"backward_rv\",\n \"backward_realization\",\n ]:\n return self.attr\n return getattr(self._component, attr)\n", "\"\"\"Posterior over states after applying (Extended/Unscented) Kalman filtering/smoothing.\n\nContains the discrete time and function outputs. Provides dense output by being\ncallable. Can function values can also be accessed by indexing.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport abc\nfrom typing import Iterable, Optional, Union\n\nimport numpy as np\nfrom scipy import stats\n\nfrom probnum import randprocs, randvars, utils\nfrom probnum.filtsmooth import _timeseriesposterior\nfrom probnum.filtsmooth.gaussian import approx\nfrom probnum.typing import ArrayLike, FloatLike, IntLike, ShapeLike\n\nGaussMarkovPriorTransitionArgType = Union[\n randprocs.markov.discrete.LinearGaussian,\n approx.DiscreteEKFComponent,\n approx.DiscreteUKFComponent,\n randprocs.markov.continuous.LinearSDE,\n approx.ContinuousEKFComponent,\n]\n\"\"\"Any linear(ized) transition can define an (approximate) Gauss-Markov prior.\"\"\"\n\n\nclass KalmanPosterior(_timeseriesposterior.TimeSeriesPosterior, abc.ABC):\n \"\"\"Posterior distribution after approximate Gaussian filtering and smoothing.\n\n Parameters\n ----------\n locations :\n Locations / Times of the discrete-time estimates.\n states :\n Estimated states (in the state-space model view) of the discrete-time estimates.\n transition :\n Dynamics model used as a prior for the filter.\n \"\"\"\n\n def __init__(\n self,\n transition: GaussMarkovPriorTransitionArgType,\n locations: Optional[Iterable[FloatLike]] = None,\n states: Optional[Iterable[randvars.RandomVariable]] = None,\n diffusion_model=None,\n ) -> None:\n\n super().__init__(locations=locations, states=states)\n self.transition = transition\n\n self.diffusion_model = diffusion_model\n self.diffusion_model_has_been_provided = diffusion_model is not None\n\n @abc.abstractmethod\n def interpolate(\n self,\n t: FloatLike,\n previous_index: Optional[IntLike] = None,\n next_index: Optional[IntLike] = None,\n ) -> randvars.RandomVariable:\n raise NotImplementedError\n\n def sample(\n self,\n rng: np.random.Generator,\n t: Optional[ArrayLike] = None,\n size: Optional[ShapeLike] = (),\n ) -> np.ndarray:\n\n size = utils.as_shape(size)\n single_rv_shape = self.states[0].shape\n single_rv_ndim = self.states[0].ndim\n\n # Early exit if no dense output is required\n if t is None:\n base_measure_realizations = stats.norm.rvs(\n size=(size + self.locations.shape + single_rv_shape),\n random_state=rng,\n )\n return self.transform_base_measure_realizations(\n base_measure_realizations=base_measure_realizations, t=self.locations\n )\n\n # Compute the union (as sets) of t and self.locations\n # This allows that samples \"always pass\" the grid points.\n all_locations = np.union1d(t, self.locations)\n slice_these_out = np.where(np.isin(all_locations, t))[0]\n base_measure_realizations = stats.norm.rvs(\n size=(size + all_locations.shape + single_rv_shape),\n random_state=rng,\n )\n\n samples = self.transform_base_measure_realizations(\n base_measure_realizations=base_measure_realizations, t=all_locations\n )\n new_samples = np.take(\n samples, indices=slice_these_out, axis=-(single_rv_ndim + 1)\n )\n return new_samples\n\n @abc.abstractmethod\n 
def transform_base_measure_realizations(\n self,\n base_measure_realizations: np.ndarray,\n t: ArrayLike,\n ) -> np.ndarray:\n \"\"\"Transform samples from a base measure to samples from the KalmanPosterior.\n\n Here, the base measure is a multivariate standard Normal distribution.\n\n Parameters\n ----------\n base_measure_realizations :\n **Shape (*size, N, d).**\n Samples from a multivariate standard Normal distribution.\n `N` is either the `len(self.locations)` (if `t == None`),\n or `len(t) + 1` (if `t != None`). The reason for the `+1` in the latter\n is that samples at arbitrary locations need to be conditioned on\n a sample at the final time point.\n t :\n **Shape (N,).**\n Time points. Must include `self.locations`.Shape\n\n Returns\n -------\n np.ndarray\n **Shape (*size, N, d)**\n Transformed base measure realizations. If the inputs are samples\n from a multivariate standard Normal distribution, the results are\n `size` samples from the Kalman posterior at prescribed locations.\n \"\"\"\n raise NotImplementedError\n\n\nclass SmoothingPosterior(KalmanPosterior):\n \"\"\"Smoothing posterior.\n\n Parameters\n ----------\n locations : `array_like`\n Locations / Times of the discrete-time estimates.\n states : :obj:`list` of :obj:`RandomVariable`\n Estimated states (in the state-space model view) of the discrete-time estimates.\n transition : :obj:`Transition`\n Dynamics model used as a prior for the filter.\n filtering_posterior :\n Filtering posterior.\n \"\"\"\n\n def __init__(\n self,\n filtering_posterior: _timeseriesposterior.TimeSeriesPosterior,\n transition: GaussMarkovPriorTransitionArgType,\n locations: Iterable[FloatLike],\n states: Iterable[randvars.RandomVariable],\n diffusion_model=None,\n ):\n self.filtering_posterior = filtering_posterior\n super().__init__(\n transition=transition,\n locations=locations,\n states=states,\n diffusion_model=diffusion_model,\n )\n\n def interpolate(\n self,\n t: FloatLike,\n previous_index: Optional[IntLike] = None,\n next_index: Optional[IntLike] = None,\n ) -> randvars.RandomVariable:\n\n # Assert either previous_location or next_location is not None\n # Otherwise, there is no reference point that can be used for interpolation.\n if previous_index is None and next_index is None:\n raise ValueError\n\n previous_location = (\n self.locations[previous_index] if previous_index is not None else None\n )\n next_location = self.locations[next_index] if next_index is not None else None\n previous_state = (\n self.states[previous_index] if previous_index is not None else None\n )\n next_state = self.states[next_index] if next_index is not None else None\n\n # Corner case 1: point is on grid. 
In this case, don't compute anything.\n if t == previous_location:\n return previous_state\n if t == next_location:\n return next_state\n\n # This block avoids calling self.diffusion_model, because we do not want\n # to search the full index set -- we already know the index!\n # This is the reason that `Diffusion` objects implement a __getitem__.\n # The usual diffusion-index is the next index\n # ('Diffusion's include the right-hand side gridpoint!),\n # but if we are right of the domain, the previous_index matters.\n diffusion_index = next_index if next_index is not None else previous_index\n if diffusion_index >= len(self.locations) - 1:\n diffusion_index = -1\n if self.diffusion_model_has_been_provided:\n squared_diffusion = self.diffusion_model[diffusion_index]\n else:\n squared_diffusion = 1.0\n\n # Corner case 2: are extrapolating to the left\n if previous_location is None:\n raise NotImplementedError(\"Extrapolation to the left is not implemented.\")\n # The code below would more or less work,\n # but since forward and backward transitions\n # cannot handle negative time increments reliably,\n # we do not support it.\n #\n ############################################################\n #\n # dt = t - next_location\n # assert dt < 0.0\n # extrapolated_rv_left, _ = self.transition.forward_rv(\n # next_state, t=next_location, dt=dt, _diffusion=squared_diffusion\n # )\n # return extrapolated_rv_left\n #\n ############################################################\n\n # Corner case 3: we are extrapolating to the right\n if next_location is None:\n dt = t - previous_location\n assert dt > 0.0\n extrapolated_rv_right, _ = self.transition.forward_rv(\n previous_state, t=previous_location, dt=dt, _diffusion=squared_diffusion\n )\n return extrapolated_rv_right\n\n # Final case: we are interpolating. 
Both locations are not None.\n # In this case, filter from the the left to the middle point;\n # And compute a smoothing update from the middle to the RHS point.\n if np.abs(previous_index - next_index) > 1.1:\n raise ValueError\n dt_left = t - previous_location\n dt_right = next_location - t\n assert dt_left > 0.0\n assert dt_right > 0.0\n filtered_rv, _ = self.transition.forward_rv(\n rv=previous_state,\n t=previous_location,\n dt=dt_left,\n _diffusion=squared_diffusion,\n )\n smoothed_rv, _ = self.transition.backward_rv(\n rv_obtained=next_state,\n rv=filtered_rv,\n t=t,\n dt=dt_right,\n _diffusion=squared_diffusion,\n )\n return smoothed_rv\n\n def transform_base_measure_realizations(\n self,\n base_measure_realizations: np.ndarray,\n t,\n ) -> np.ndarray:\n\n # Early exit: recursively compute multiple samples\n # if the desired sample size is not equal to '()', which is the case if\n # the shape of base_measure_realization is not (len(locations), shape(RV))\n # t_shape = self.locations.shape if t is None else (len(t) + 1,)\n size_zero_shape = () + t.shape + self.states[0].shape\n if base_measure_realizations.shape != size_zero_shape:\n return np.array(\n [\n self.transform_base_measure_realizations(\n base_measure_realizations=base_real,\n t=t,\n )\n for base_real in base_measure_realizations\n ]\n )\n\n # Now we are in the setting of jointly sampling\n # a single realization from the posterior.\n # On time points inside the domain,\n # this is essentially a sequence of smoothing steps.\n\n t = np.asarray(t) if t is not None else None\n if not np.all(np.isin(self.locations, t)):\n raise ValueError(\n \"Base measure realizations cannot be transformed \"\n \"if the locations don't include self.locations.\"\n )\n\n if not np.all(np.diff(t) >= 0.0):\n raise ValueError(\"Time-points have to be sorted.\")\n\n # Find locations of the diffusions, which amounts to finding the locations\n # of the grid points in t (think: `all_locations`),\n # which is done via np.searchsorted:\n diffusion_indices = np.searchsorted(self.locations[:-2], t[1:])\n if self.diffusion_model_has_been_provided:\n squared_diffusion_list = self.diffusion_model[diffusion_indices]\n else:\n squared_diffusion_list = np.ones_like(t)\n\n # Split into interpolation and extrapolation samples.\n # For extrapolation, samples are propagated forwards.\n # Due to this distinction, we need to treat both cases differently.\n # Note: t=tmax is in two arrays!\n # This is on purpose, because sample realisations need to be\n # \"communicated\" between interpolation and extrapolation.\n t0, tmax = np.amin(self.locations), np.amax(self.locations)\n t_extra_left = t[t < t0]\n t_extra_right = t[tmax <= t]\n t_inter = t[(t0 <= t) & (t <= tmax)]\n\n if len(t_extra_left) > 0:\n raise NotImplementedError(\n \"Sampling on the left of the time-domain is not implemented.\"\n )\n\n # Split base measure realisations (which have, say, length N + M - 1):\n # the first N realizations belong to the interpolation samples,\n # and the final M realizations belong to the extrapolation samples.\n # Note again: the sample corresponding to tmax belongs to both groups.\n base_measure_reals_inter = base_measure_realizations[: len(t_inter)]\n base_measure_reals_extra_right = base_measure_realizations[\n -len(t_extra_right) :\n ]\n\n squared_diffusion_list_inter = squared_diffusion_list[: len(t_inter)]\n squared_diffusion_list_extra_right = squared_diffusion_list[\n -len(t_extra_right) :\n ]\n\n states = self.filtering_posterior(t)\n states_inter = states[: 
len(t_inter)]\n states_extra_right = states[-len(t_extra_right) :]\n\n samples_inter = np.array(\n self.transition.jointly_transform_base_measure_realization_list_backward(\n base_measure_realizations=base_measure_reals_inter,\n t=t_inter,\n rv_list=states_inter,\n _diffusion_list=squared_diffusion_list_inter,\n )\n )\n samples_extra = np.array(\n self.transition.jointly_transform_base_measure_realization_list_forward(\n base_measure_realizations=base_measure_reals_extra_right,\n t=t_extra_right,\n initrv=states_extra_right[0],\n _diffusion_list=squared_diffusion_list_extra_right,\n )\n )\n samples = np.concatenate((samples_inter[:-1], samples_extra), axis=0)\n return samples\n\n @property\n def _states_left_of_location(self):\n return self.filtering_posterior._states_left_of_location\n\n\nclass FilteringPosterior(KalmanPosterior):\n \"\"\"Filtering posterior.\"\"\"\n\n def interpolate(\n self,\n t: FloatLike,\n previous_index: Optional[IntLike] = None,\n next_index: Optional[IntLike] = None,\n ) -> randvars.RandomVariable:\n\n # Assert either previous_location or next_location is not None\n # Otherwise, there is no reference point that can be used for interpolation.\n if previous_index is None and next_index is None:\n raise ValueError\n\n previous_location = (\n self.locations[previous_index] if previous_index is not None else None\n )\n next_location = self.locations[next_index] if next_index is not None else None\n previous_state = (\n self.states[previous_index] if previous_index is not None else None\n )\n next_state = self.states[next_index] if next_index is not None else None\n\n # Corner case 1: point is on grid\n if t == previous_location:\n return previous_state\n if t == next_location:\n return next_state\n\n # Corner case 2: are extrapolating to the left\n if previous_location is None:\n raise NotImplementedError(\"Extrapolation to the left is not implemented.\")\n # The code below would work, but since forward and backward transitions\n # cannot handle negative time increments reliably, we do not support it.\n #\n ############################################################\n #\n # dt = t - next_location\n # assert dt < 0.0\n # extrapolated_rv_left, _ = self.transition.forward_rv(\n # next_state, t=next_location, dt=dt\n # )\n # return extrapolated_rv_left\n #\n ############################################################\n\n # Final case: we are extrapolating to the right.\n # This is also how the filter-posterior interpolates\n # (by extrapolating from the leftmost point)\n # previous_index is not None\n if self.diffusion_model_has_been_provided:\n diffusion_index = previous_index\n if diffusion_index >= len(self.locations) - 1:\n diffusion_index = -1\n diffusion = self.diffusion_model[diffusion_index]\n else:\n diffusion = 1.0\n dt_left = t - previous_location\n assert dt_left > 0.0\n filtered_rv, _ = self.transition.forward_rv(\n rv=previous_state, t=previous_location, dt=dt_left, _diffusion=diffusion\n )\n return filtered_rv\n\n def sample(\n self,\n rng: np.random.Generator,\n t: Optional[ArrayLike] = None,\n size: Optional[ShapeLike] = (),\n ) -> np.ndarray:\n # If this error would not be thrown here,\n # trying to sample from a FilteringPosterior\n # would call FilteringPosterior.transform_base_measure_realizations\n # which is not implemented.\n # Since an error thrown by that function instead of one thrown\n # by FilteringPosterior.sample\n # would likely by hard to parse by a user, we explicitly raise a\n # NotImplementedError here.\n raise NotImplementedError(\n 
\"Sampling from the FilteringPosterior is not implemented.\"\n )\n\n def transform_base_measure_realizations(\n self,\n base_measure_realizations: np.ndarray,\n t: Optional[ArrayLike] = None,\n ) -> np.ndarray:\n raise NotImplementedError(\n \"Transforming base measure realizations is not implemented.\"\n )\n", "\"\"\"Test interface for EKF and UKF.\"\"\"\n\nimport numpy as np\nimport pytest\n\nfrom probnum import filtsmooth, problems, randprocs, randvars\nimport probnum.problems.zoo.filtsmooth as filtsmooth_zoo\n\n\nclass InterfaceDiscreteLinearizationTest:\n \"\"\"Test approximate Gaussian filtering and smoothing.\n\n 1. forward_rv is unlocked by linearization\n 2. Applied to a linear model, the outcome is exactly the same\n as the original transition.\n 3. Smoothing RMSE < Filtering RMSE < Data RMSE on the pendulum example.\n \"\"\"\n\n # Replacement for an __init__ in the pytest language. See:\n # https://stackoverflow.com/questions/21430900/py-test-skips-test-class-if-constructor-is-defined # pylint: disable=\"line-too-long\"\n @pytest.fixture(autouse=True)\n def _setup(self):\n self.linearizing_component = None\n\n def test_transition_rv(self, rng):\n \"\"\"forward_rv() is only possible for the linearised model.\"\"\"\n # pylint: disable=not-callable\n _, info = filtsmooth_zoo.pendulum(rng=rng)\n non_linear_model = info[\"prior_process\"].transition\n initrv = info[\"prior_process\"].initrv\n linearised_model = self.linearizing_component(non_linear_model)\n\n # Baseline: non-linear model should not work\n with pytest.raises(NotImplementedError):\n non_linear_model.forward_rv(initrv, 0.0)\n\n # Linearized model works\n rv, _ = linearised_model.forward_rv(initrv, 0.0)\n assert isinstance(rv, randvars.RandomVariable)\n\n def test_exactness_linear_model(self, rng):\n \"\"\"Applied to a linear model, the results should be unchanged.\"\"\"\n # pylint: disable=not-callable\n regression_problem, info = filtsmooth_zoo.car_tracking(rng=rng)\n linear_model = info[\"prior_process\"].transition\n initrv = info[\"prior_process\"].initrv\n linearised_model = self.linearizing_component(linear_model)\n\n # Assert that the objects are different\n assert not isinstance(linear_model, type(linearised_model))\n\n # Assert that the give the same outputs.\n received, info1 = linear_model.forward_rv(initrv, 0.0)\n expected, info2 = linearised_model.forward_rv(initrv, 0.0)\n\n crosscov1 = info1[\"crosscov\"]\n crosscov2 = info2[\"crosscov\"]\n rtol, atol = 1e-9, 1e-9\n np.testing.assert_allclose(received.mean, expected.mean, rtol=rtol, atol=atol)\n np.testing.assert_allclose(received.cov, expected.cov, rtol=rtol, atol=atol)\n np.testing.assert_allclose(crosscov1, crosscov2, rtol=rtol, atol=atol)\n\n def test_filtsmooth_pendulum(self, rng):\n # pylint: disable=not-callable\n # Set up test problem\n\n # If this measurement variance is not really small, the sampled\n # test data can contain an outlier every now and then which\n # breaks the test, even though it has not been touched.\n regression_problem, info = filtsmooth_zoo.pendulum(\n rng=rng, measurement_variance=0.0001\n )\n prior_process = info[\"prior_process\"]\n measmods = regression_problem.measurement_models\n\n ekf_dyna = self.linearizing_component(prior_process.transition)\n ekf_meas = [self.linearizing_component(mm) for mm in measmods]\n\n regression_problem = problems.TimeSeriesRegressionProblem(\n locations=regression_problem.locations,\n observations=regression_problem.observations,\n measurement_models=ekf_meas,\n 
solution=regression_problem.solution,\n )\n\n initrv = prior_process.initrv\n prior_process = randprocs.markov.MarkovProcess(\n transition=ekf_dyna, initrv=initrv, initarg=regression_problem.locations[0]\n )\n method = filtsmooth.gaussian.Kalman(prior_process)\n\n # Compute filter/smoother solution\n posterior, _ = method.filtsmooth(regression_problem)\n filtms = posterior.filtering_posterior.states.mean\n smooms = posterior.states.mean\n\n # Compute RMSEs and assert they are well-behaved.\n comp = regression_problem.solution[:, 0]\n normaliser = np.sqrt(comp.size)\n filtrmse = np.linalg.norm(filtms[:, 0] - comp) / normaliser\n smoormse = np.linalg.norm(smooms[:, 0] - comp) / normaliser\n obs_rmse = (\n np.linalg.norm(regression_problem.observations[:, 0] - comp) / normaliser\n )\n\n assert smoormse < filtrmse < obs_rmse, (smoormse, filtrmse, obs_rmse)\n\n\nclass InterfaceContinuousLinearizationTest:\n \"\"\"Interface for tests of approximate, nonlinear Gaussian filtering and\n smoothing.\"\"\"\n\n # Replacement for an __init__ in the pytest language. See:\n # https://stackoverflow.com/questions/21430900/py-test-skips-test-class-if-constructor-is-defined\n @pytest.fixture(autouse=True)\n def _setup(self):\n self.linearizing_component = None\n\n def test_transition_rv(self, rng):\n \"\"\"forward_rv() not possible for original model but for the linearised model.\"\"\"\n # pylint: disable=not-callable\n _, info = filtsmooth_zoo.benes_daum(rng=rng)\n prior_process = info[\"prior_process\"]\n non_linear_model = prior_process.transition\n initrv = prior_process.initrv\n linearized_model = self.linearizing_component(non_linear_model)\n\n # Baseline: non-linear model should not work\n with pytest.raises(NotImplementedError):\n non_linear_model.forward_rv(initrv, t=0.0, dt=0.1)\n\n # Linearized model works\n rv, _ = linearized_model.forward_rv(initrv, t=0.0, dt=0.1)\n assert isinstance(rv, randvars.RandomVariable)\n\n def test_filtsmooth_benes_daum(self, rng):\n # pylint: disable=not-callable\n # Set up test problem\n\n # If this measurement variance is not really small, the sampled\n # test data can contain an outlier every now and then which\n # breaks the test, even though it has not been touched.\n time_grid = np.arange(0.0, 5.0, step=0.1)\n\n regression_problem, info = filtsmooth_zoo.benes_daum(\n rng=rng, measurement_variance=1e-1, time_grid=time_grid\n )\n prior_process = info[\"prior_process\"]\n ekf_dyna = self.linearizing_component(prior_process.transition)\n initrv = prior_process.initrv\n prior_process = randprocs.markov.MarkovProcess(\n transition=ekf_dyna, initrv=initrv, initarg=regression_problem.locations[0]\n )\n method = filtsmooth.gaussian.Kalman(prior_process)\n\n # Compute filter/smoother solution\n posterior, _ = method.filter(regression_problem)\n posterior = method.smooth(posterior)\n filtms = posterior.filtering_posterior.states.mean\n smooms = posterior.states.mean\n\n # Compute RMSEs and assert they are well-behaved.\n comp = regression_problem.solution[:, 0]\n normaliser = np.sqrt(comp.size)\n filtrmse = np.linalg.norm(filtms[:, 0] - comp) / normaliser\n smoormse = np.linalg.norm(smooms[:, 0] - comp) / normaliser\n obs_rmse = (\n np.linalg.norm(regression_problem.observations[:, 0] - comp) / normaliser\n )\n assert smoormse < filtrmse < obs_rmse\n", "\"\"\"Utility functions for random variables.\"\"\"\nfrom typing import Any\n\nimport numpy as np\nimport scipy.sparse\n\nimport probnum.linops\n\nfrom . 
import _constant, _random_variable, _scipy_stats\n\n\ndef asrandvar(obj: Any) -> _random_variable.RandomVariable:\n \"\"\"Convert ``obj`` to a :class:`RandomVariable`.\n\n Converts an object such as scalars, (sparse) arrays, or distribution-type objects to\n a ProbNum :class:`RandomVariable`.\n\n Parameters\n ----------\n obj :\n Object to be represented as a :class:`RandomVariable`.\n\n See Also\n --------\n RandomVariable : Class representing random variables.\n\n Examples\n --------\n >>> from scipy.stats import bernoulli\n >>> import probnum as pn\n >>> import numpy as np\n >>> bern = bernoulli(p=0.5)\n >>> bern_pn = pn.asrandvar(bern)\n >>> rng = np.random.default_rng(42)\n >>> bern_pn.sample(rng=rng, size=5)\n array([1, 0, 1, 1, 0])\n \"\"\"\n\n # pylint: disable=protected-access\n # RandomVariable\n if isinstance(obj, _random_variable.RandomVariable):\n return obj\n\n # Scalar\n if np.isscalar(obj):\n return _constant.Constant(support=obj)\n\n # Numpy array or sparse matrix\n if isinstance(obj, (np.ndarray, scipy.sparse.spmatrix)):\n return _constant.Constant(support=obj)\n\n # Linear Operators\n if isinstance(\n obj, (probnum.linops.LinearOperator, scipy.sparse.linalg.LinearOperator)\n ):\n return _constant.Constant(support=probnum.linops.aslinop(obj))\n\n # Scipy random variable\n if isinstance(\n obj,\n (\n scipy.stats._distn_infrastructure.rv_frozen,\n scipy.stats._multivariate.multi_rv_frozen,\n ),\n ):\n return _scipy_stats.wrap_scipy_rv(obj)\n\n raise ValueError(\n f\"Argument of type {type(obj)} cannot be converted to a random variable.\"\n )\n" ]
[ [ "numpy.inner", "numpy.minimum", "numpy.linalg.norm", "numpy.random.default_rng" ], [ "numpy.ones" ], [ "numpy.amax", "numpy.ones_like", "numpy.take", "numpy.abs", "numpy.asarray", "numpy.amin", "numpy.union1d", "numpy.concatenate", "scipy.stats.norm.rvs", "numpy.diff", "numpy.searchsorted", "numpy.isin" ], [ "numpy.arange", "numpy.linalg.norm", "numpy.sqrt", "numpy.testing.assert_allclose" ], [ "numpy.isscalar" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
IrakozeFD/pyleecan
[ "5a93bd98755d880176c1ce8ac90f36ca1b907055", "5a93bd98755d880176c1ce8ac90f36ca1b907055", "5a93bd98755d880176c1ce8ac90f36ca1b907055", "5a93bd98755d880176c1ce8ac90f36ca1b907055", "5a93bd98755d880176c1ce8ac90f36ca1b907055", "5a93bd98755d880176c1ce8ac90f36ca1b907055", "5a93bd98755d880176c1ce8ac90f36ca1b907055", "5a93bd98755d880176c1ce8ac90f36ca1b907055", "5a93bd98755d880176c1ce8ac90f36ca1b907055", "5a93bd98755d880176c1ce8ac90f36ca1b907055", "5a93bd98755d880176c1ce8ac90f36ca1b907055", "5a93bd98755d880176c1ce8ac90f36ca1b907055", "5a93bd98755d880176c1ce8ac90f36ca1b907055" ]
[ "pyleecan/Methods/Slot/SlotW11/plot_schematics.py", "Tests/Methods/Slot/test_SlotW22_meth.py", "pyleecan/Methods/Slot/SlotW10/comp_surface.py", "pyleecan/Functions/Plot/plot_2D.py", "pyleecan/Methods/Slot/SlotM16/plot_schematics.py", "Tests/Methods/Mesh/test_get_solution.py", "pyleecan/Methods/Simulation/EEC_PMSM/gen_drive.py", "pyleecan/Methods/Machine/LamSlotWind/comp_mmf_unit.py", "pyleecan/Methods/Geometry/Arc2/get_middle.py", "pyleecan/Methods/Slot/SlotW23/comp_surface_active.py", "pyleecan/Methods/Slot/HoleM57/_comp_point_coordinate.py", "pyleecan/Methods/Slot/SlotW13/comp_angle_opening.py", "pyleecan/Methods/Mesh/ScalarProductL2/scalar_product.py" ]
[ "import matplotlib.pyplot as plt\nfrom numpy import pi, exp\n\nfrom ....Classes.Arc1 import Arc1\nfrom ....Classes.LamSlot import LamSlot\nfrom ....Classes.Segment import Segment\nfrom ....definitions import config_dict\nfrom ....Functions.Plot import (\n ARROW_COLOR,\n ARROW_WIDTH,\n MAIN_LINE_COLOR,\n MAIN_LINE_STYLE,\n MAIN_LINE_WIDTH,\n P_FONT_SIZE,\n SC_FONT_SIZE,\n SC_LINE_COLOR,\n SC_LINE_STYLE,\n SC_LINE_WIDTH,\n TEXT_BOX,\n plot_quote,\n)\nfrom ....Methods import ParentMissingError\n\nMAGNET_COLOR = config_dict[\"PLOT\"][\"COLOR_DICT\"][\"MAGNET_COLOR\"]\n\n\ndef plot_schematics(\n self,\n is_default=False,\n is_add_point_label=False,\n is_add_schematics=True,\n is_add_main_line=True,\n type_add_active=True,\n save_path=None,\n is_show_fig=True,\n):\n \"\"\"Plot the schematics of the slot\n\n Parameters\n ----------\n self : SlotW11\n A SlotW11 object\n is_default : bool\n True: plot default schematics, else use current slot values\n is_add_point_label : bool\n True to display the name of the points (Z1, Z2....)\n is_add_schematics : bool\n True to display the schematics information (W0, H0...)\n is_add_main_line : bool\n True to display \"main lines\" (slot opening and 0x axis)\n type_add_active : int\n 0: No active surface, 1: active surface as winding, 2: active surface as magnet\n save_path : str\n full path including folder, name and extension of the file to save if save_path is not None\n is_show_fig : bool\n To call show at the end of the method\n \"\"\"\n\n # Use some default parameter\n if is_default:\n slot = type(self)(\n Zs=8, H0=10e-3, W0=20e-3, H1=10e-3, H2=40e-3, W1=40e-3, W2=50e-3, R1=5e-3\n )\n lam = LamSlot(\n Rint=0.135, Rext=0.3, is_internal=False, is_stator=True, slot=slot\n )\n slot.plot_schematics(\n is_default=False,\n is_add_point_label=is_add_point_label,\n is_add_schematics=is_add_schematics,\n is_add_main_line=is_add_main_line,\n type_add_active=type_add_active,\n save_path=save_path,\n is_show_fig=is_show_fig,\n )\n else:\n # Getting the main plot\n if self.parent is None:\n raise ParentMissingError(\"Error: The slot is not inside a Lamination\")\n lam = self.parent\n lam.plot(alpha=pi / self.Zs, is_show_fig=False) # center slot on Ox axis\n fig = plt.gcf()\n ax = plt.gca()\n point_dict = self._comp_point_coordinate()\n if self.is_outwards():\n sign = 1\n else:\n sign = -1\n # Adding point label\n if is_add_point_label:\n for name, Z in point_dict.items():\n ax.text(\n Z.real,\n Z.imag,\n name,\n fontsize=P_FONT_SIZE,\n bbox=TEXT_BOX,\n )\n\n # Adding schematics\n if is_add_schematics:\n # W0\n line = Segment(point_dict[\"Z1\"], point_dict[\"Z10\"])\n line.plot(\n fig=fig,\n ax=ax,\n color=ARROW_COLOR,\n linewidth=ARROW_WIDTH,\n label=\"W0\",\n offset_label=self.H0 * 0.15 + 1j * self.W0 * 0.1,\n is_arrow=True,\n fontsize=SC_FONT_SIZE,\n )\n # W1\n line = Segment(point_dict[\"Z3\"], point_dict[\"Z8\"])\n line.plot(\n fig=fig,\n ax=ax,\n color=ARROW_COLOR,\n linewidth=ARROW_WIDTH,\n label=\"W1\",\n offset_label=self.H0 * 0.15 + 1j * self.W1 * 0.3,\n is_arrow=True,\n fontsize=SC_FONT_SIZE,\n )\n # W2\n Zlim1 = point_dict[\"Z5\"].real + sign * self.R1 + 1j * point_dict[\"Z4\"].imag\n Zlim2 = point_dict[\"Z6\"].real + sign * self.R1 + 1j * point_dict[\"Z7\"].imag\n plot_quote(\n point_dict[\"Z4\"],\n Zlim1,\n Zlim2,\n point_dict[\"Z7\"],\n offset_label=sign * 0.05 * self.H2,\n fig=fig,\n ax=ax,\n label=\"W2\",\n )\n # H0\n line = Segment(point_dict[\"Z10\"], point_dict[\"Z9\"])\n line.plot(\n fig=fig,\n ax=ax,\n label=\"H0\",\n color=ARROW_COLOR,\n 
linewidth=ARROW_WIDTH,\n offset_label=1j * self.W0 * 0.15,\n is_arrow=True,\n fontsize=SC_FONT_SIZE,\n )\n # H1\n line = Segment(point_dict[\"Z2\"].real, point_dict[\"Z3\"].real)\n line.plot(\n fig=fig,\n ax=ax,\n label=\"H1\",\n color=ARROW_COLOR,\n linewidth=ARROW_WIDTH,\n offset_label=1j * self.W0 * 0.15,\n is_arrow=True,\n fontsize=SC_FONT_SIZE,\n )\n # H2\n line = Segment(point_dict[\"Z3\"].real, point_dict[\"Z5\"].real)\n line.plot(\n fig=fig,\n ax=ax,\n label=\"H2\",\n color=ARROW_COLOR,\n linewidth=ARROW_WIDTH,\n offset_label=1j * self.W0 * 0.15,\n is_arrow=True,\n fontsize=SC_FONT_SIZE,\n )\n # R1\n line = Segment(point_dict[\"Zc1\"], point_dict[\"Z5\"])\n line.plot(\n fig=fig,\n ax=ax,\n label=\"R1\",\n color=ARROW_COLOR,\n linewidth=ARROW_WIDTH,\n offset_label=1j * self.W0 * 0.15,\n is_arrow=True,\n fontsize=SC_FONT_SIZE,\n )\n\n if is_add_main_line:\n # Ox axis\n line = Segment(0, lam.Rext * 1.5)\n line.plot(\n fig=fig,\n ax=ax,\n color=MAIN_LINE_COLOR,\n linestyle=MAIN_LINE_STYLE,\n linewidth=MAIN_LINE_WIDTH,\n )\n # Top arc\n line = Arc1(\n begin=point_dict[\"Z1\"],\n end=point_dict[\"Z10\"],\n radius=self.get_Rbo(),\n is_trigo_direction=True,\n )\n line.plot(\n fig=fig,\n ax=ax,\n color=MAIN_LINE_COLOR,\n linestyle=MAIN_LINE_STYLE,\n linewidth=MAIN_LINE_WIDTH,\n )\n # H1 line\n line = Segment(point_dict[\"Z2\"], point_dict[\"Z9\"])\n line.plot(\n fig=fig,\n ax=ax,\n color=MAIN_LINE_COLOR,\n linestyle=MAIN_LINE_STYLE,\n linewidth=MAIN_LINE_WIDTH,\n )\n # R1 circle\n line = Segment(point_dict[\"Zc1\"], point_dict[\"Z5\"])\n line.plot(\n fig=fig,\n ax=ax,\n color=MAIN_LINE_COLOR,\n linestyle=MAIN_LINE_STYLE,\n linewidth=MAIN_LINE_WIDTH,\n )\n line = Segment(point_dict[\"Zc1\"], point_dict[\"Z4\"])\n line.plot(\n fig=fig,\n ax=ax,\n color=MAIN_LINE_COLOR,\n linestyle=MAIN_LINE_STYLE,\n linewidth=MAIN_LINE_WIDTH,\n )\n line = Segment(point_dict[\"Zc2\"], point_dict[\"Z6\"])\n line.plot(\n fig=fig,\n ax=ax,\n color=MAIN_LINE_COLOR,\n linestyle=MAIN_LINE_STYLE,\n linewidth=MAIN_LINE_WIDTH,\n )\n line = Segment(point_dict[\"Zc2\"], point_dict[\"Z7\"])\n line.plot(\n fig=fig,\n ax=ax,\n color=MAIN_LINE_COLOR,\n linestyle=MAIN_LINE_STYLE,\n linewidth=MAIN_LINE_WIDTH,\n )\n\n if type_add_active == 1:\n self.plot_active(fig=fig, is_show_fig=False)\n elif type_add_active == 2:\n self.plot_active(\n fig=fig, is_show_fig=False, enforced_default_color=MAGNET_COLOR\n )\n\n # Zooming and cleaning\n W = max([self.W1, self.W0, self.W2]) * 0.6\n Rint = min(point_dict[\"Z6\"].real, point_dict[\"Z1\"].real)\n Rext = max(point_dict[\"Z6\"].real, point_dict[\"Z1\"].real)\n\n plt.axis(\"equal\")\n ax.set_xlim(Rint, Rext)\n ax.set_ylim(-W, W)\n fig.canvas.set_window_title(type(self).__name__ + \" Schematics\")\n ax.set_title(\"\")\n ax.get_legend().remove()\n ax.set_axis_off()\n\n # Save / Show\n if save_path is not None:\n fig.savefig(save_path)\n plt.close()\n\n if is_show_fig:\n fig.show()\n", "# -*- coding: utf-8 -*-\nimport pytest\n\nfrom pyleecan.Classes.Arc2 import Arc2\nfrom pyleecan.Classes.Segment import Segment\n\nfrom pyleecan.Classes.SlotW22 import SlotW22\nfrom numpy import pi, ndarray, cos, sin, arcsin, exp, angle\nfrom pyleecan.Classes.SurfLine import SurfLine\nfrom pyleecan.Classes.LamSlot import LamSlot\nfrom pyleecan.Classes.Slot import Slot\n\n# For AlmostEqual\nDELTA = 1e-4\n\nslotW22_test = list()\n\n# Internal Slot\nlam = LamSlot(is_internal=True, Rext=1)\nlam.slot = SlotW22(Zs=36, W0=pi / 72, W2=pi / 36, H0=6e-3, H2=40e-3)\nslotW22_test.append(\n {\"test_obj\": lam, \"S_exp\": 
3.660915e-03, \"SW_exp\": 3.3999e-03, \"H_exp\": 0.046}\n)\n\n# External Slot\nlam = LamSlot(is_internal=False, Rint=1)\nlam.slot = SlotW22(Zs=36, W0=pi / 72, W2=pi / 36, H0=6e-3, H2=40e-3)\nslotW22_test.append(\n {\"test_obj\": lam, \"S_exp\": 3.844e-03, \"SW_exp\": 3.5814e-03, \"H_exp\": 0.046}\n)\n\n\[email protected]\nclass Test_SlotW22_meth(object):\n \"\"\"pytest for SlotW22 methods\"\"\"\n\n @pytest.mark.parametrize(\"test_dict\", slotW22_test)\n def test_schematics(self, test_dict):\n \"\"\"Check that the schematics is correct\"\"\"\n test_obj = test_dict[\"test_obj\"]\n point_dict = test_obj.slot._comp_point_coordinate()\n\n # Check width\n assert angle(point_dict[\"Z1\"]) == pytest.approx(-test_obj.slot.W0 / 2)\n assert angle(point_dict[\"Z2\"]) == pytest.approx(-test_obj.slot.W0 / 2)\n assert angle(point_dict[\"Z3\"]) == pytest.approx(-test_obj.slot.W2 / 2)\n assert angle(point_dict[\"Z4\"]) == pytest.approx(-test_obj.slot.W2 / 2)\n assert angle(point_dict[\"Z7\"]) == pytest.approx(test_obj.slot.W0 / 2)\n assert angle(point_dict[\"Z8\"]) == pytest.approx(test_obj.slot.W0 / 2)\n assert angle(point_dict[\"Z5\"]) == pytest.approx(test_obj.slot.W2 / 2)\n assert angle(point_dict[\"Z6\"]) == pytest.approx(test_obj.slot.W2 / 2)\n # Check height\n assert abs(point_dict[\"Z1\"] - point_dict[\"Z2\"]) == pytest.approx(\n test_obj.slot.H0\n )\n assert abs(point_dict[\"Z3\"] - point_dict[\"Z4\"]) == pytest.approx(\n test_obj.slot.H2\n )\n assert abs(point_dict[\"Z8\"] - point_dict[\"Z7\"]) == pytest.approx(\n test_obj.slot.H0\n )\n assert abs(point_dict[\"Z5\"] - point_dict[\"Z6\"]) == pytest.approx(\n test_obj.slot.H2\n )\n\n @pytest.mark.parametrize(\"test_dict\", slotW22_test)\n def test_build_geometry_active(self, test_dict):\n \"\"\"Check that the active geometry is correctly split\"\"\"\n test_obj = test_dict[\"test_obj\"]\n surf_list = test_obj.slot.build_geometry_active(Nrad=3, Ntan=2)\n\n # Check label\n assert surf_list[0].label == \"Wind_Stator_R0_T0_S0\"\n assert surf_list[1].label == \"Wind_Stator_R1_T0_S0\"\n assert surf_list[2].label == \"Wind_Stator_R2_T0_S0\"\n assert surf_list[3].label == \"Wind_Stator_R0_T1_S0\"\n assert surf_list[4].label == \"Wind_Stator_R1_T1_S0\"\n assert surf_list[5].label == \"Wind_Stator_R2_T1_S0\"\n # Check tangential position\n assert surf_list[0].point_ref.imag < 0\n assert surf_list[1].point_ref.imag < 0\n assert surf_list[2].point_ref.imag < 0\n assert surf_list[3].point_ref.imag > 0\n assert surf_list[4].point_ref.imag > 0\n assert surf_list[5].point_ref.imag > 0\n # Check radial position\n if test_obj.is_internal:\n # Tan=0\n assert surf_list[0].point_ref.real > surf_list[1].point_ref.real\n assert surf_list[1].point_ref.real > surf_list[2].point_ref.real\n # Tan=1\n assert surf_list[3].point_ref.real > surf_list[4].point_ref.real\n assert surf_list[4].point_ref.real > surf_list[5].point_ref.real\n else:\n # Tan=0\n assert surf_list[0].point_ref.real < surf_list[1].point_ref.real\n assert surf_list[1].point_ref.real < surf_list[2].point_ref.real\n # Tan=1\n assert surf_list[3].point_ref.real < surf_list[4].point_ref.real\n assert surf_list[4].point_ref.real < surf_list[5].point_ref.real\n\n @pytest.mark.parametrize(\"test_dict\", slotW22_test)\n def test_comp_surface(self, test_dict):\n \"\"\"Check that the computation of the surface is correct\"\"\"\n test_obj = test_dict[\"test_obj\"]\n result = test_obj.slot.comp_surface()\n\n a = result\n b = test_dict[\"S_exp\"]\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n assert abs((a - b) 
/ a - 0) < DELTA, msg\n\n # Check that the analytical method returns the same result as the numerical one\n b = Slot.comp_surface(test_obj.slot)\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n assert abs((a - b) / a - 0) < DELTA, msg\n\n @pytest.mark.parametrize(\"test_dict\", slotW22_test)\n def test_comp_surface_active(self, test_dict):\n \"\"\"Check that the computation of the winding surface is correct\"\"\"\n test_obj = test_dict[\"test_obj\"]\n result = test_obj.slot.comp_surface_active()\n\n a = result\n b = test_dict[\"SW_exp\"]\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n assert abs((a - b) / a - 0) < DELTA, msg\n\n # Check that the analytical method returns the same result as the numerical one\n b = Slot.comp_surface_active(test_obj.slot)\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n assert abs((a - b) / a - 0) < DELTA, msg\n\n @pytest.mark.parametrize(\"test_dict\", slotW22_test)\n def test_comp_height(self, test_dict):\n \"\"\"Check that the computation of the height is correct\"\"\"\n test_obj = test_dict[\"test_obj\"]\n result = test_obj.slot.comp_height()\n\n a = result\n b = test_dict[\"H_exp\"]\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n assert abs((a - b) / a - 0) < DELTA, msg\n\n # Check that the analytical method returns the same result as the numerical one\n b = Slot.comp_height(test_obj.slot)\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n assert abs((a - b) / a - 0) < DELTA, msg\n\n @pytest.mark.parametrize(\"test_dict\", slotW22_test)\n def test_comp_angle_opening(self, test_dict):\n \"\"\"Check that the computation of the average opening angle iscorrect\"\"\"\n test_obj = test_dict[\"test_obj\"]\n a = test_obj.slot.comp_angle_opening()\n assert a == test_obj.slot.W0\n # Check that the analytical method returns the same result as the numerical one\n b = Slot.comp_angle_opening(test_obj.slot)\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n assert abs((a - b) / a - 0) < DELTA, msg\n\n @pytest.mark.parametrize(\"test_dict\", slotW22_test)\n def test_comp_angle_active_eq(self, test_dict):\n \"\"\"Check that the computation of the average angle is correct\"\"\"\n test_obj = test_dict[\"test_obj\"]\n result = test_obj.slot.comp_angle_active_eq()\n\n a = result\n b = test_obj.slot.W2\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n assert abs((a - b) / a - 0) < DELTA, msg\n\n def test_get_surface_active(self):\n \"\"\"Check that the get_surface_active works when stator = false\"\"\"\n lam = LamSlot(is_internal=True, Rext=0.1325, is_stator=False)\n lam.slot = SlotW22(Zs=36, W0=pi / 72, W2=pi / 36, H0=6e-3, H2=40e-3)\n result = lam.slot.get_surface_active()\n assert result.label == \"Wind_Rotor_R0_T0_S0\"\n assert len(result.get_lines()) == 6\n", "# -*- coding: utf-8 -*-\n\nfrom numpy import sin\n\n\ndef comp_surface(self):\n \"\"\"Compute the Slot total surface (by analytical computation).\n Caution, the bottom of the Slot is an Arc\n\n Parameters\n ----------\n self : SlotW10\n A SlotW10 object\n\n Returns\n -------\n S: float\n Slot total surface [m**2]\n\n \"\"\"\n\n Rbo = self.get_Rbo()\n\n S1 = self.H0 * self.W0\n H1 = self.get_H1()\n S2 = 0.5 * (self.W0 + self.W1) * H1\n Swind = self.comp_surface_active()\n\n # The bottom is an arc\n alpha = self.comp_angle_opening()\n Sarc = (Rbo ** 2.0) / 2.0 * (alpha - sin(alpha))\n\n # Because Slamination = S - Zs * Sslot\n if self.is_outwards():\n return S1 + S2 + Swind - Sarc\n else:\n return S1 + S2 + Swind + Sarc\n", "# -*- coding: utf-8 -*-\n\nfrom 
itertools import repeat\n\nimport matplotlib.pyplot as plt\nfrom numpy import argmin, abs, squeeze, split, ndarray\n\nfrom ...Functions.init_fig import init_fig\nfrom ...definitions import config_dict\n\n# Import values from config dict\nFONT_NAME = config_dict[\"PLOT\"][\"FONT_NAME\"]\nFONT_SIZE_TITLE = config_dict[\"PLOT\"][\"FONT_SIZE_TITLE\"]\nFONT_SIZE_LABEL = config_dict[\"PLOT\"][\"FONT_SIZE_LABEL\"]\nFONT_SIZE_LEGEND = config_dict[\"PLOT\"][\"FONT_SIZE_LEGEND\"]\n\n\ndef plot_2D(\n Xdatas,\n Ydatas,\n legend_list=[\"\"],\n color_list=[(0, 0, 1, 0.5)],\n linestyle_list=[\"-\"],\n linewidth_list=[2],\n title=\"\",\n xlabel=\"\",\n ylabel=\"\",\n fig=None,\n ax=None,\n is_logscale_x=False,\n is_logscale_y=False,\n is_disp_title=True,\n is_grid=True,\n type_plot=\"curve\",\n fund_harm=None,\n x_min=None,\n x_max=None,\n y_min=None,\n y_max=None,\n xticks=None,\n save_path=None,\n barwidth=100,\n is_show_fig=None,\n win_title=None,\n):\n \"\"\"Plots a 2D graph (curve, bargraph or barchart) comparing fields in Ydatas\n\n Parameters\n ----------\n Xdatas : ndarray\n array of x-axis values\n Ydatas : list\n list of y-axes values\n legend_list : list\n list of legends\n color_list : list\n list of colors to use for each curve\n linewidth_list : list\n list of line width to use for each curve\n title : str\n title of the graph\n xlabel : str\n label for the x-axis\n ylabel : str\n label for the y-axis\n fig : Matplotlib.figure.Figure\n existing figure to use if None create a new one\n ax : Matplotlib.axes.Axes object\n ax on which to plot the data\n is_logscale_x : bool\n boolean indicating if the x-axis must be set in logarithmic scale\n is_logscale_y : bool\n boolean indicating if the y-axis must be set in logarithmic scale\n is_disp_title : bool\n boolean indicating if the title must be displayed\n is_grid : bool\n boolean indicating if the grid must be displayed\n type_plot : str\n type of 2D graph : \"curve\", \"bargraph\", \"barchart\" or \"quiver\"\n fund_harm : float\n frequency/order/wavenumber of the fundamental harmonic that must be displayed in red in the fft\n x_min : float\n minimum value for the x-axis\n x_max : float\n maximum value for the x-axis\n y_min : float\n minimum value for the y-axis\n y_max : float\n maximum value for the y-axis\n xticks : list\n list of ticks to use for the x-axis\n save_path : str\n full path including folder, name and extension of the file to save if save_path is not None\n barwidth : float\n barwidth scaling factor, only if type_plot = \"bargraph\"\n is_show_fig : bool\n True to show figure after plot\n win_title : str\n Title of the plot window\n \"\"\"\n\n # Set is_show_fig if is None\n if is_show_fig is None:\n is_show_fig = True if fig is None else False\n\n # Set figure if needed\n if fig is None and ax is None:\n (fig, ax, _, _) = init_fig(fig=None, shape=\"rectangle\")\n\n # Number of curves on a axe\n ndatas = len(Ydatas)\n\n # Retrocompatibility\n if isinstance(Xdatas, ndarray):\n Xdatas = [Xdatas]\n\n if len(Xdatas) == 1:\n i_Xdatas = [0 for i in range(ndatas)]\n else:\n i_Xdatas = range(ndatas)\n\n # Expend default argument\n if 1 == len(color_list) < ndatas:\n # Set the same color for all curves\n color_list = list(repeat(color_list[0], ndatas))\n if 1 == len(linewidth_list) < ndatas:\n # Set the same color for all curves\n linewidth_list = list(repeat(linewidth_list[0], ndatas))\n if 1 == len(linestyle_list) < ndatas:\n # Set the same linestyles for all curves\n linestyle_list = list(repeat(linestyle_list[0], ndatas))\n if 1 == 
len(legend_list) < ndatas:\n # Set no legend for all curves\n legend_list = list(repeat(\"\", ndatas))\n no_legend = True\n else:\n no_legend = False\n\n # Plot\n if type_plot == \"curve\":\n for i in range(ndatas):\n ax.plot(\n Xdatas[i_Xdatas[i]],\n Ydatas[i],\n color=color_list[i],\n label=legend_list[i],\n linewidth=linewidth_list[i],\n ls=linestyle_list[i],\n )\n if xticks is not None:\n ax.xaxis.set_ticks(xticks)\n elif type_plot == \"bargraph\":\n positions = range(-ndatas + 1, ndatas, 2)\n for i in range(ndatas):\n # width = (Xdatas[i_Xdatas[i]][1] - Xdatas[i_Xdatas[i]][0]) / ndatas\n width = Xdatas[i_Xdatas[i]][-1] / barwidth\n barlist = ax.bar(\n Xdatas[i_Xdatas[i]] + positions[i] * width / (2 * ndatas),\n Ydatas[i],\n color=color_list[i],\n width=width,\n label=legend_list[i],\n )\n if fund_harm is not None: # Find fundamental\n imax = argmin(abs(Xdatas[i] - fund_harm))\n barlist[imax].set_edgecolor(\"k\")\n barlist[imax].set_facecolor(\"k\")\n\n if xticks is not None:\n ax.xaxis.set_ticks(xticks)\n elif type_plot == \"barchart\":\n for i in range(ndatas):\n if i == 0:\n ax.bar(\n range(len(Xdatas[i_Xdatas[i]])),\n Ydatas[i],\n color=color_list[i],\n width=0.5,\n label=legend_list[i],\n )\n else:\n ax.bar(\n range(len(Xdatas[i_Xdatas[i]])),\n Ydatas[i],\n edgecolor=color_list[i],\n width=0.5,\n fc=\"None\",\n lw=1,\n label=legend_list[i],\n )\n plt.xticks(\n range(len(Xdatas[i_Xdatas[i]])),\n [str(f) for f in Xdatas[i_Xdatas[i]]],\n rotation=90,\n )\n elif type_plot == \"quiver\":\n for i in range(ndatas):\n x = [e[0] for e in Xdatas[i_Xdatas[i]]]\n y = [e[1] for e in Xdatas[i_Xdatas[i]]]\n vect_list = split(Ydatas[i], 2)\n ax.quiver(x, y, squeeze(vect_list[0]), squeeze(vect_list[1]))\n ax.axis(\"equal\")\n\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n ax.set_xlim([x_min, x_max])\n ax.set_ylim([y_min, y_max])\n\n if is_logscale_x:\n ax.set_xscale(\"log\")\n\n if is_logscale_y:\n ax.set_yscale(\"log\")\n\n if is_disp_title:\n ax.set_title(title)\n\n if is_grid:\n ax.grid()\n\n if ndatas > 1 and not no_legend:\n ax.legend(prop={\"family\": FONT_NAME, \"size\": FONT_SIZE_LEGEND})\n\n plt.tight_layout()\n for item in (\n [ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels()\n ):\n item.set_fontname(FONT_NAME)\n item.set_fontsize(FONT_SIZE_LABEL)\n ax.title.set_fontname(FONT_NAME)\n ax.title.set_fontsize(FONT_SIZE_TITLE)\n\n if save_path is not None:\n fig.savefig(save_path)\n plt.close()\n\n if is_show_fig:\n fig.show()\n\n if win_title:\n fig.canvas.set_window_title(win_title)\n", "import matplotlib.pyplot as plt\nfrom numpy import pi, exp\n\nfrom ....Classes.Arc1 import Arc1\nfrom ....Classes.LamSlot import LamSlot\nfrom ....Classes.Segment import Segment\nfrom ....definitions import config_dict\nfrom ....Functions.Plot import (\n ARROW_COLOR,\n ARROW_WIDTH,\n MAIN_LINE_COLOR,\n MAIN_LINE_STYLE,\n MAIN_LINE_WIDTH,\n P_FONT_SIZE,\n SC_FONT_SIZE,\n SC_LINE_COLOR,\n SC_LINE_STYLE,\n SC_LINE_WIDTH,\n TEXT_BOX,\n plot_quote,\n)\nfrom ....Methods import ParentMissingError\n\nMAGNET_COLOR = config_dict[\"PLOT\"][\"COLOR_DICT\"][\"MAGNET_COLOR\"]\n\n\ndef plot_schematics(\n self,\n is_default=False,\n is_add_point_label=False,\n is_add_schematics=True,\n is_add_main_line=True,\n type_add_active=True,\n save_path=None,\n is_show_fig=True,\n):\n \"\"\"Plot the schematics of the slot\n\n Parameters\n ----------\n self : SlotM16\n A SlotM16 object\n is_default : bool\n True: plot default schematics, else use current slot values\n is_add_point_label : bool\n True 
to display the name of the points (Z1, Z2....)\n is_add_schematics : bool\n True to display the schematics information (W0, H0...)\n is_add_main_line : bool\n True to display \"main lines\" (slot opening and 0x axis)\n type_add_active : int\n 0: No active surface, 1: active surface as winding, 2: active surface as magnet\n save_path : str\n full path including folder, name and extension of the file to save if save_path is not None\n is_show_fig : bool\n To call show at the end of the method\n \"\"\"\n\n # Use some default parameter\n if is_default:\n slot = type(self)(Zs=4, W0=0.02, H0=0.01, H1=0.06, W1=0.04)\n lam = LamSlot(\n Rint=80e-3, Rext=240e-3, is_internal=True, is_stator=False, slot=slot\n )\n slot.plot_schematics(\n is_default=False,\n is_add_point_label=is_add_point_label,\n is_add_schematics=is_add_schematics,\n is_add_main_line=is_add_main_line,\n type_add_active=type_add_active,\n save_path=save_path,\n is_show_fig=is_show_fig,\n )\n else:\n # Getting the main plot\n if self.parent is None:\n raise ParentMissingError(\"Error: The slot is not inside a Lamination\")\n lam = self.parent\n lam.plot(alpha=pi / self.Zs, is_show_fig=False) # center slot on Ox axis\n fig = plt.gcf()\n ax = plt.gca()\n point_dict = self._comp_point_coordinate()\n if self.is_outwards():\n sign = +1\n else:\n sign = -1\n\n # Adding point label\n if is_add_point_label:\n for name, Z in point_dict.items():\n ax.text(\n Z.real,\n Z.imag,\n name,\n fontsize=P_FONT_SIZE,\n bbox=TEXT_BOX,\n )\n\n # Adding schematics\n if is_add_schematics:\n # W0\n line = Segment(point_dict[\"Z7\"], point_dict[\"Z2\"])\n line.plot(\n fig=fig,\n ax=ax,\n color=ARROW_COLOR,\n linewidth=ARROW_WIDTH,\n label=\"W0\",\n offset_label=self.H0 * 0.2,\n is_arrow=True,\n fontsize=SC_FONT_SIZE,\n )\n # W1\n line = Segment(point_dict[\"Z5\"], point_dict[\"Z4\"])\n line.plot(\n fig=fig,\n ax=ax,\n color=ARROW_COLOR,\n linewidth=ARROW_WIDTH,\n label=\"W1\",\n offset_label=self.H0 * 0.2,\n is_arrow=True,\n fontsize=SC_FONT_SIZE,\n )\n # H0\n plot_quote(\n Z1=point_dict[\"Z1\"],\n Zlim1=point_dict[\"Z1\"].real + 1j * point_dict[\"Z3\"].imag,\n Zlim2=point_dict[\"Z3\"],\n Z2=point_dict[\"Z2\"],\n offset_label=1j * 0.1 * self.W0,\n fig=fig,\n ax=ax,\n label=\"H0\",\n )\n # H1\n line = Segment(point_dict[\"Z5\"], point_dict[\"Z6\"])\n line.plot(\n fig=fig,\n ax=ax,\n color=ARROW_COLOR,\n linewidth=ARROW_WIDTH,\n label=\"H1\",\n offset_label=1j * self.W0 * 0.1,\n is_arrow=True,\n fontsize=SC_FONT_SIZE,\n )\n\n if is_add_main_line:\n # Ox axis\n line = Segment(0, lam.Rext * 1.5)\n line.plot(\n fig=fig,\n ax=ax,\n color=MAIN_LINE_COLOR,\n linestyle=MAIN_LINE_STYLE,\n linewidth=MAIN_LINE_WIDTH,\n )\n # Top arc\n line = Arc1(\n begin=point_dict[\"Z1\"],\n end=point_dict[\"Z8\"],\n radius=self.get_Rbo(),\n is_trigo_direction=True,\n )\n line.plot(\n fig=fig,\n ax=ax,\n color=MAIN_LINE_COLOR,\n linestyle=MAIN_LINE_STYLE,\n linewidth=MAIN_LINE_WIDTH,\n )\n\n if type_add_active == 1:\n self.plot_active(fig=fig, is_show_fig=False)\n elif type_add_active == 2:\n self.plot_active(\n fig=fig, is_show_fig=False, enforced_default_color=MAGNET_COLOR\n )\n\n # Zooming and cleaning\n W = self.W1 / 2 * 1.3\n Rint, Rext = self.comp_radius()\n\n plt.axis(\"equal\")\n ax.set_xlim(Rint, Rext)\n ax.set_ylim(-W, W)\n fig.canvas.set_window_title(type(self).__name__ + \" Schematics\")\n ax.set_title(\"\")\n ax.get_legend().remove()\n ax.set_axis_off()\n\n # Save / Show\n if save_path is not None:\n fig.savefig(save_path)\n plt.close()\n\n if is_show_fig:\n fig.show()\n", 
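# Hedged usage sketch (not part of the dataset): plot_2D, defined in the file
# above, accepts either one shared x-axis or one x-axis per curve, which is why
# Xdatas is normalized to a list internally. The data below is synthetic and the
# import path is an assumption inferred from the relative imports above;
# running this requires pyleecan and its plot config to be installed.
import numpy as np
from pyleecan.Functions.Plot.plot_2D import plot_2D  # assumed module location

x = np.linspace(0, 2 * np.pi, 200)
plot_2D(
    [x],                                    # one Xdata reused for both curves
    [np.sin(x), np.cos(x)],
    legend_list=["sin", "cos"],
    color_list=[(0, 0, 1, 0.5), (1, 0, 0, 0.5)],
    xlabel="angle [rad]",
    ylabel="amplitude [-]",
    title="plot_2D usage sketch",
    is_show_fig=False,
    save_path="plot_2D_demo.png",           # plot_2D saves and closes the figure
)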
"# -*- coding: utf-8 -*-\n\nimport pytest\nfrom numpy import array, zeros, abs as np_abs\nfrom random import randint\nfrom unittest import TestCase\n\nfrom SciDataTool import DataTime, Data1D, DataLinspace, VectorField\n\nfrom pyleecan.Classes.SolutionData import SolutionData\nfrom pyleecan.Classes.SolutionMat import SolutionMat\nfrom pyleecan.Classes.SolutionVector import SolutionVector\n\n\[email protected]\[email protected]\n# @pytest.mark.DEV\nclass Test_get_soltution(TestCase):\n \"\"\" Tests for get_solution method from Solution classes\"\"\"\n\n def test_SolutionMat(self):\n DELTA = 1e-10\n\n solution = SolutionMat()\n solution.field = array([[1, 2, 3], [2, 3, 4]])\n solution.axis_name = [\"time\", \"indice\"]\n solution.axis_size = [2, 3]\n\n # result without explicit solution indices, i.e. solution.indice = None\n sol1 = solution.get_solution(indice=[0, 1])\n sol2 = solution.get_solution(indice=[0, 1, 2])\n sol3 = solution.get_solution(indice=[0, 1, 2, 3])\n\n expected = array([[1, 2], [2, 3]])\n result = np_abs(expected - sol1.field).sum()\n msg = \"Wrong result: returned \" + str(sol1) + \", expected: \" + str(expected)\n self.assertAlmostEqual(result, 0, msg=msg, delta=DELTA)\n\n expected = array([[1, 2, 3], [2, 3, 4]])\n result = np_abs(expected - sol2.field).sum()\n msg = \"Wrong result: returned \" + str(sol2) + \", expected: \" + str(expected)\n self.assertAlmostEqual(result, 0, msg=msg, delta=DELTA)\n\n result = np_abs(expected - sol3.field).sum()\n msg = \"Wrong result: returned \" + str(sol3) + \", expected: \" + str(expected)\n self.assertAlmostEqual(result, 0, msg=msg, delta=DELTA)\n\n # set explicit solution indices\n solution.indice = [999, 2000, 11857]\n\n # request indices that are part of the solution\n sol4 = solution.get_solution(indice=[999, 2000])\n sol5 = solution.get_solution(indice=[999, 2000, 11857])\n\n # request an indice that is not part of the solution\n sol6 = solution.get_solution(indice=[999, 2000, 11857, 1])\n\n expected = array([[1, 2], [2, 3]])\n result = np_abs(expected - sol4.field).sum()\n msg = \"Wrong result: returned \" + str(sol4) + \", expected: \" + str(expected)\n self.assertAlmostEqual(result, 0, msg=msg, delta=DELTA)\n\n expected = array([[1, 2, 3], [2, 3, 4]])\n result = np_abs(expected - sol5.field).sum()\n msg = \"Wrong result: returned \" + str(sol5) + \", expected: \" + str(expected)\n self.assertAlmostEqual(result, 0, msg=msg, delta=DELTA)\n\n result = np_abs(expected - sol6.field).sum()\n msg = \"Wrong result: returned \" + str(sol6) + \", expected: \" + str(expected)\n self.assertAlmostEqual(result, 0, msg=msg, delta=DELTA)\n\n def test_SolutionVector(self):\n DELTA = 1e-10\n\n Indices_Cell = Data1D(name=\"indice\", values=[0, 1, 2, 4], is_components=True)\n Time = DataLinspace(name=\"time\", unit=\"s\", initial=0, final=1, number=2)\n\n H = zeros((2, 4, 2))\n H[:, :, 0] = array([[1, 2, 3, 4], [2, 3, 4, 5]])\n H[:, :, 1] = array([[4, 5, 6, 7], [5, 6, 7, 8]])\n\n # Store the results for H\n componentsH = {}\n axes = [Time, Indices_Cell]\n\n Hx_data = DataTime(\n name=\"Hx\", unit=\"A/m\", symbol=\"Hx\", axes=axes, values=H[:, :, 0]\n )\n Hy_data = DataTime(\n name=\"Hy\", unit=\"A/m\", symbol=\"Hy\", axes=axes, values=H[:, :, 1]\n )\n\n componentsH[\"comp_x\"] = Hx_data\n componentsH[\"comp_y\"] = Hy_data\n\n vecH = VectorField(name=\"Magnetic Field\", symbol=\"H\", components=componentsH)\n solution = SolutionVector(field=vecH, type_cell=\"triangle\", label=\"H\")\n\n sol = solution.get_solution(indice=[1, 2, 4])\n field = 
sol.get_field()\n\n expected = H[:, 1:, :]\n result = np_abs(expected - field).sum()\n msg = \"Wrong result: returned \" + str(field) + \", expected: \" + str(expected)\n self.assertAlmostEqual(result, 0, msg=msg, delta=DELTA)\n\n def test_SolutionData(self):\n DELTA = 1e-10\n\n Indices_Cell = Data1D(name=\"indice\", values=[0, 1, 2, 4], is_components=True)\n Time = DataLinspace(name=\"time\", unit=\"s\", initial=0, final=1, number=2)\n axes = [Time, Indices_Cell]\n data = array([[1, 2, 3, 4], [2, 3, 4, 5]])\n\n # Store the results for H\n H = DataTime(name=\"Hx\", unit=\"A/m\", symbol=\"Hx\", axes=axes, values=data)\n\n solution = SolutionData(field=H, type_cell=\"triangle\", label=\"H\")\n\n field = solution.get_solution(indice=[1, 2, 4]).get_field()\n\n expected = data[:, 1:]\n result = np_abs(expected - field).sum()\n msg = \"Wrong result: returned \" + str(field) + \", expected: \" + str(expected)\n self.assertAlmostEqual(result, 0, msg=msg, delta=DELTA)\n\n field = solution.get_solution(indice=[1, 2, 4, 5]).get_field()\n result = np_abs(expected - field).sum()\n msg = \"Wrong result: returned \" + str(field) + \", expected: \" + str(expected)\n self.assertAlmostEqual(result, 0, msg=msg, delta=DELTA)\n\n\nif __name__ == \"__main__\":\n test = Test_get_soltution()\n test.test_SolutionData()\n test.test_SolutionMat()\n test.test_SolutionVector()\n", "# -*- coding: utf-8 -*-\n\nfrom ....Functions.Electrical.coordinate_transformation import n2dq\nfrom numpy import split, transpose, mean, pi\n\nimport matplotlib.pyplot as plt\n\n\ndef gen_drive(self, output):\n \"\"\"Generate the drive for the equivalent electrical circuit\n\n Parameters\n ----------\n self : EEC_PMSM\n an EEC_PMSM object\n output : Output\n an Output object\n \"\"\"\n\n qs = output.simu.machine.stator.winding.qs\n felec = output.elec.felec\n time = output.elec.Time.get_values()\n\n # Compute voltage\n Voltage = self.drive.get_wave()\n\n # d,q transform\n voltage = Voltage.values\n voltage_dq = split(\n n2dq(transpose(voltage), -2 * pi * felec * time, n=qs), 2, axis=1\n )\n\n fig = plt.figure()\n plt.plot(time[:50], voltage[0, :50], color=\"tab:blue\", label=\"A\")\n plt.plot(time[:50], voltage[1, :50], color=\"tab:red\", label=\"B\")\n plt.plot(time[:50], voltage[2, :50], color=\"tab:olive\", label=\"C\")\n plt.plot(time[:50], voltage_dq[0][:50], color=\"k\", label=\"D\")\n plt.plot(time[:50], voltage_dq[1][:50], color=\"g\", label=\"Q\")\n plt.legend()\n fig.savefig(\"test_tension.png\")\n\n # Store into EEC parameters\n self.parameters[\"Ud\"] = mean(voltage_dq[0])\n self.parameters[\"Uq\"] = mean(voltage_dq[1])\n", "# -*- coding: utf-8 -*-\nfrom numpy import pi, linspace, zeros, ones, dot, squeeze\nfrom SciDataTool import Data1D, DataTime\nfrom ....Functions.Electrical.coordinate_transformation import dq2n\nfrom ....Functions.Winding.gen_phase_list import gen_name\n\n\ndef comp_mmf_unit(self, Na=None, Nt=None, freq=1):\n \"\"\"Compute the winding Unit magnetomotive force\n\n Parameters\n ----------\n self : LamSlotWind\n an LamSlotWind object\n Na : int\n Space discretization for offline computation (otherwise use out.elec.angle)\n Nt : int\n Time discretization for offline computation (otherwise use out.elec.time)\n freq : float\n Stator current frequency to consider\n\n Returns\n -------\n MMF_U : SciDataTool.Classes.DataND.DataND\n Unit magnetomotive force (Na,Nt)\n WF : SciDataTool.Classes.DataND.DataND\n Winding functions (qs,Na)\n\n \"\"\"\n\n # Get stator winding number of phases\n qs = self.winding.qs\n\n # Get 
spatial symmetry\n per_a, _, _, _ = self.comp_periodicity()\n\n # Define the space dicretization\n angle = linspace(0, 2 * pi / per_a, Na, endpoint=False)\n\n # Define the time dicretization\n time = linspace(0, 1 / freq, Nt, endpoint=False)\n\n # Compute the winding function and mmf\n wf = self.comp_wind_function(angle=angle, per_a=per_a)\n\n # Compute unit current function of time applying constant Id=1 Arms, Iq=0\n Idq = zeros((Nt, 2))\n Idq[:, 0] = ones(Nt)\n I = dq2n(Idq, 2 * pi * freq * time, n=qs, is_n_rms=False)\n\n # Compute unit mmf\n mmf_u = squeeze(dot(I, wf))\n\n # Create a Data object\n Time = Data1D(name=\"time\", unit=\"s\", values=time)\n Angle = Data1D(\n name=\"angle\",\n unit=\"rad\",\n symmetries={\"period\": per_a},\n values=angle,\n normalizations={\"space_order\": self.get_pole_pair_number()},\n )\n Phase = Data1D(\n name=\"phase\",\n unit=\"\",\n values=gen_name(qs),\n is_components=True,\n )\n MMF_U = DataTime(\n name=\"Unit MMF\",\n unit=\"p.u.\",\n symbol=\"Magnitude\",\n axes=[Time, Angle],\n values=mmf_u,\n )\n\n WF = DataTime(\n name=\"Winding Functions\",\n unit=\"p.u.\",\n symbol=\"Magnitude\",\n axes=[Phase, Angle],\n values=wf,\n )\n\n return MMF_U, WF\n", "# -*- coding: utf-8 -*-\nfrom numpy import abs as np_abs, angle as np_angle, exp\n\n\ndef get_middle(self):\n \"\"\"Return the point at the middle of the arc\n\n Parameters\n ----------\n self : Arc2\n An Arc2 object\n\n Returns\n -------\n Zmid: complex\n Complex coordinates of the middle of the Arc2\n \"\"\"\n\n self.check()\n\n # We use the complex representation of the point\n z1 = self.begin\n zc = self.center\n\n # Geometric transformation : center is the origine, angle(begin) = 0\n Zstart = (z1 - zc) * exp(-1j * np_angle(z1 - zc))\n\n # Generation of the point by rotation\n Zmid = Zstart * exp(1j * self.angle / 2.0)\n\n # Geometric transformation : return to the main axis\n Zmid = Zmid * exp(1j * np_angle(z1 - zc)) + zc\n\n # Return (0,0) if the point is too close from 0\n if np_abs(Zmid) < 1e-6:\n Zmid = 0\n\n return Zmid\n", "# -*- coding: utf-8 -*-\n\nfrom numpy import arcsin, sin, sqrt\n\n\ndef comp_surface_active(self):\n \"\"\"Compute the Slot inner surface for winding (by analytical computation)\n\n Parameters\n ----------\n self : SlotW21\n A SlotW21 object\n\n Returns\n -------\n Swind: float\n Slot inner surface for winding [m**2]\n\n \"\"\"\n\n Rbo = self.get_Rbo()\n\n # By Pythagore\n # self.H2 projection\n H2 = sqrt(self.H2 ** 2 - ((self.W2 - self.W1) / 2.0) ** 2)\n S2 = 0.5 * (self.W1 + self.W2) * H2\n\n if self.is_outwards():\n Rslot = Rbo + self.comp_height() # External radius of the slot\n alpha = float(2 * arcsin(self.W2 / (2 * Rslot))) # W2 in rad\n S3 = (Rslot ** 2.0) / 2.0 * (alpha - sin(alpha))\n return S2 + S3\n else:\n Rslot = Rbo - self.comp_height() # External radius of the slot\n alpha = float(2 * arcsin(self.W2 / (2 * Rslot))) # W2 in rad\n S3 = (Rslot ** 2.0) / 2.0 * (alpha - sin(alpha))\n return S2 - S3\n", "from numpy import arcsin, cos, exp, angle, pi, sin, tan, array\nfrom pyleecan.Functions.Geometry.inter_line_line import inter_line_line\n\n\ndef _comp_point_coordinate(self):\n \"\"\"Compute the point coordinates needed to plot the Slot.\n\n Parameters\n ----------\n self : HoleM53\n A HoleM53 object\n\n Returns\n -------\n point_dict: dict\n A dict of the slot coordinates\n \"\"\"\n Rext = self.get_Rext()\n\n # \"Tooth\" angle (P1',0,P1)\n alpha_T = 2 * arcsin(self.W3 / (2 * (Rext - self.H1)))\n # magnet pole pitch angle (Z1,0,Z1')\n alpha_S = (2 * pi / 
self.Zh) - alpha_T\n # Angle (P1,P1',P4') and (P5',P4', )\n alpha = (pi - self.W0) / 2\n # Half slot pitch\n hssp = pi / self.Zh\n\n Z1 = (Rext - self.H1) * exp(-1j * alpha_S / 2)\n x11 = 2 * sin(alpha_S / 2) * (Rext - self.H1) # Distance from P1 to P1'\n # In rect triangle P4, P1, perp (P1,P1') with P4\n H = tan(alpha) * (x11 / 2 - self.W1 / 2)\n Z4 = Z1.real - H - 1j * self.W1 / 2\n\n x45 = self.H2 / cos(alpha) # distance from P4 to P5\n Z5 = Z4 - x45\n\n # Get coordinates of \"random\" points on (P5,P8) and (P1,P8)\n # In ref P4 center and P1 on X+ axis\n Z58 = (self.W4 - 1j * self.H2) * exp(1j * angle(Z1 - Z4)) + Z4\n # In the tooth ref\n Z18 = (Rext - self.H1 - self.H2 + 1j * self.W3 / 2) * exp(-1j * hssp)\n Z8 = inter_line_line(Z5, Z58, Z1, Z18)[0]\n\n # In ref \"b\" P4 center and P1 on X+ axis\n Z8b = (Z8 - Z4) * exp(-1j * angle(Z1 - Z4))\n Z9 = (Z8b + 1j * self.H2) * exp(1j * angle(Z1 - Z4)) + Z4\n Z2 = (Z8b + 1j * self.H2 - self.W2) * exp(1j * angle(Z1 - Z4)) + Z4\n Z3 = (Z8b + 1j * self.H2 - self.W2 - self.W4) * exp(1j * angle(Z1 - Z4)) + Z4\n Z7 = (Z8b - self.W2) * exp(1j * angle(Z1 - Z4)) + Z4\n Z6 = (Z8b - self.W2 - self.W4) * exp(1j * angle(Z1 - Z4)) + Z4\n\n point_dict = dict()\n point_dict[\"Z1\"] = Z1\n point_dict[\"Z2\"] = Z2\n point_dict[\"Z3\"] = Z3\n point_dict[\"Z4\"] = Z4\n point_dict[\"Z5\"] = Z5\n point_dict[\"Z6\"] = Z6\n point_dict[\"Z7\"] = Z7\n point_dict[\"Z8\"] = Z8\n point_dict[\"Z9\"] = Z9\n\n # Symmetry\n point_dict[\"Z1s\"] = Z1.conjugate()\n point_dict[\"Z2s\"] = Z2.conjugate()\n point_dict[\"Z3s\"] = Z3.conjugate()\n point_dict[\"Z4s\"] = Z4.conjugate()\n point_dict[\"Z5s\"] = Z5.conjugate()\n point_dict[\"Z6s\"] = Z6.conjugate()\n point_dict[\"Z7s\"] = Z7.conjugate()\n point_dict[\"Z8s\"] = Z8.conjugate()\n point_dict[\"Z9s\"] = Z9.conjugate()\n point_dict[\"Zc0\"] = inter_line_line(Z3, Z2, point_dict[\"Z3s\"], point_dict[\"Z2s\"])[0]\n return point_dict\n", "# -*- coding: utf-8 -*-\n\nfrom numpy import arcsin\n\n\ndef comp_angle_opening(self):\n \"\"\"Compute the average opening angle of the Slot\n\n Parameters\n ----------\n self : SlotW13\n A SlotW13 object\n\n Returns\n -------\n alpha: float\n Average opening angle of the slot [rad]\n\n \"\"\"\n\n Rbo = self.get_Rbo()\n\n return float(2 * arcsin(self.W0 / (2 * Rbo)))\n", "# -*- coding: utf-8 -*-\n\nimport numpy as np\n\n\ndef scalar_product(self, funca, funcb, detJ, weights, nb_gauss_points):\n \"\"\"Scalar product of shape functions with L2 gauss integration\n\n Parameters\n ----------\n self : ScalarProductL2\n a ScalarProductL2 object\n funca : ndarray\n vertice of the cell\n nba : ndarray\n coordinates of a point\n funcb : ndarray\n vertice of the cell\n nbb : ndarray\n coordinates of a point\n detJ : ndarray\n jacobian determinant evaluated for each gauss point\n weights : ndarray\n gauss weights\n nb_gauss_points : int\n number of gauss points\n\n Returns\n -------\n l2_scal : ndarray\n a L2 scalar product\n \"\"\"\n\n func_a_w_dJ = np.zeros(funca.shape)\n for i in range(nb_gauss_points):\n func_a_w_dJ[i, :] = funca[i, :] * weights[i] * detJ[i]\n\n l2_scal_mat = np.squeeze(np.tensordot(func_a_w_dJ, funcb, axes=([0], [0])))\n\n return l2_scal_mat\n" ]
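# Hedged worked check (not part of the dataset): several comp_surface* methods
# above rely on the circular-segment area S = R**2 / 2 * (alpha - sin(alpha)).
# Standalone midpoint-rule cross-check of that closed form; R and alpha are
# made-up values chosen only for the demonstration.
import numpy as np

R, alpha = 0.1, np.pi / 6
S_formula = R ** 2 / 2.0 * (alpha - np.sin(alpha))

# Area between the chord x = R*cos(alpha/2) and the circle x**2 + y**2 = R**2
d = R * np.cos(alpha / 2.0)
x = np.linspace(d, R, 400001)
xm = 0.5 * (x[:-1] + x[1:])                       # midpoint of each cell
S_numeric = np.sum(2.0 * np.sqrt(np.maximum(R ** 2 - xm ** 2, 0.0))) * (x[1] - x[0])

assert abs(S_formula - S_numeric) < 1e-7          # analytic and numeric areas agree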
[ [ "matplotlib.pyplot.gca", "matplotlib.pyplot.axis", "matplotlib.pyplot.close", "matplotlib.pyplot.gcf" ], [ "numpy.angle" ], [ "numpy.sin" ], [ "numpy.split", "matplotlib.pyplot.tight_layout", "numpy.abs", "numpy.squeeze", "matplotlib.pyplot.close" ], [ "matplotlib.pyplot.gca", "matplotlib.pyplot.axis", "matplotlib.pyplot.close", "matplotlib.pyplot.gcf" ], [ "numpy.array", "numpy.zeros", "numpy.abs" ], [ "matplotlib.pyplot.legend", "matplotlib.pyplot.plot", "numpy.mean", "numpy.transpose", "matplotlib.pyplot.figure" ], [ "numpy.dot", "numpy.zeros", "numpy.linspace", "numpy.ones" ], [ "numpy.angle", "numpy.exp", "numpy.abs" ], [ "numpy.sin", "numpy.sqrt", "numpy.arcsin" ], [ "numpy.arcsin", "numpy.cos", "numpy.sin", "numpy.tan", "numpy.angle", "numpy.exp" ], [ "numpy.arcsin" ], [ "numpy.zeros", "numpy.tensordot" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Wakinguup/DRG
[ "c6134e3e4e13c55efe3290e722a60006723519a5" ]
[ "tools/test_net_VCOCO_sp_object_centric.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n# Set up custom environment before nearly anything else is imported\n# NOTE: this should be the first import (no not reorder)\nfrom maskrcnn_benchmark.utils.env import setup_environment # noqa F401 isort:skip\n\nimport argparse\nimport os\nimport pickle\nimport json\nimport logging\n\nimport torch\nfrom PIL import Image\nimport numpy as np\nfrom tqdm import tqdm\nfrom maskrcnn_benchmark.config import cfg\nfrom maskrcnn_benchmark.structures.image_list import to_image_list\nfrom maskrcnn_benchmark.structures.bounding_box import BoxList\nfrom maskrcnn_benchmark.data.transforms import build_transforms\nfrom maskrcnn_benchmark.modeling.detector import build_detection_model\nfrom maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer\nfrom maskrcnn_benchmark.utils.collect_env import collect_env_info\nfrom maskrcnn_benchmark.utils.comm import synchronize, get_rank\nfrom maskrcnn_benchmark.utils.timer import Timer, get_time_str\nfrom maskrcnn_benchmark.utils.logger import setup_logger\nfrom maskrcnn_benchmark.utils.miscellaneous import mkdir\nfrom maskrcnn_benchmark.data.datasets.evaluation.vcoco.vsrl_eval import VCOCOeval\nfrom maskrcnn_benchmark.config.paths_catalog import DatasetCatalog\nfrom maskrcnn_benchmark.utils.apply_prior import apply_prior_Graph\n\n# Check if we can enable mixed-precision via apex.amp\ntry:\n from apex import amp\nexcept ImportError:\n raise ImportError('Use APEX for mixed precision via apex.amp')\n\n# apply_prior prior_mask\n# 0 - -\n# 1 Y -\n# 2 - Y\n# 3 Y Y\n\ndef bbox_iou(boxA, boxB):\n\n ixmin = np.maximum(boxA[0], boxB[0])\n iymin = np.maximum(boxA[1], boxB[1])\n ixmax = np.minimum(boxA[2], boxB[2])\n iymax = np.minimum(boxA[3], boxB[3])\n iw = np.maximum(ixmax - ixmin + 1., 0.)\n ih = np.maximum(iymax - iymin + 1., 0.)\n inters = iw * ih\n\n # union\n uni = ((boxB[2] - boxB[0] + 1.) * (boxB[3] - boxB[1] + 1.) +\n (boxA[2] - boxA[0] + 1.) *\n (boxA[3] - boxA[1] + 1.) 
- inters)\n\n overlaps = inters / uni\n return overlaps\n\n\ndef bbox_trans(human_box_ori, object_box_ori, size=64):\n human_box = human_box_ori.copy()\n object_box = object_box_ori.copy()\n\n InteractionPattern = [min(human_box[0], object_box[0]), min(human_box[1], object_box[1]),\n max(human_box[2], object_box[2]), max(human_box[3], object_box[3])]\n\n height = InteractionPattern[3] - InteractionPattern[1] + 1\n width = InteractionPattern[2] - InteractionPattern[0] + 1\n\n if height > width:\n ratio = 'height'\n else:\n ratio = 'width'\n\n # shift the top-left corner to (0,0)\n\n human_box[0] -= InteractionPattern[0]\n human_box[2] -= InteractionPattern[0]\n human_box[1] -= InteractionPattern[1]\n human_box[3] -= InteractionPattern[1]\n object_box[0] -= InteractionPattern[0]\n object_box[2] -= InteractionPattern[0]\n object_box[1] -= InteractionPattern[1]\n object_box[3] -= InteractionPattern[1]\n\n if ratio == 'height': # height is larger than width\n\n human_box[0] = 0 + size * human_box[0] / height\n human_box[1] = 0 + size * human_box[1] / height\n human_box[2] = (size * width / height - 1) - size * (width - 1 - human_box[2]) / height\n human_box[3] = (size - 1) - size * (height - 1 - human_box[3]) / height\n\n object_box[0] = 0 + size * object_box[0] / height\n object_box[1] = 0 + size * object_box[1] / height\n object_box[2] = (size * width / height - 1) - size * (width - 1 - object_box[2]) / height\n object_box[3] = (size - 1) - size * (height - 1 - object_box[3]) / height\n\n # Need to shift horizontally\n InteractionPattern = [min(human_box[0], object_box[0]), min(human_box[1], object_box[1]),\n max(human_box[2], object_box[2]), max(human_box[3], object_box[3])]\n # assert (InteractionPattern[0] == 0) & (InteractionPattern[1] == 0) & (InteractionPattern[3] == 63) & (InteractionPattern[2] <= 63)\n if human_box[3] > object_box[3]:\n human_box[3] = size - 1\n else:\n object_box[3] = size - 1\n\n shift = size / 2 - (InteractionPattern[2] + 1) / 2\n\n human_box += [shift, 0, shift, 0]\n object_box += [shift, 0, shift, 0]\n\n else: # width is larger than height\n\n human_box[0] = 0 + size * human_box[0] / width\n human_box[1] = 0 + size * human_box[1] / width\n human_box[2] = (size - 1) - size * (width - 1 - human_box[2]) / width\n human_box[3] = (size * height / width - 1) - size * (height - 1 - human_box[3]) / width\n\n object_box[0] = 0 + size * object_box[0] / width\n object_box[1] = 0 + size * object_box[1] / width\n object_box[2] = (size - 1) - size * (width - 1 - object_box[2]) / width\n object_box[3] = (size * height / width - 1) - size * (height - 1 - object_box[3]) / width\n\n # Need to shift vertically\n InteractionPattern = [min(human_box[0], object_box[0]), min(human_box[1], object_box[1]),\n max(human_box[2], object_box[2]), max(human_box[3], object_box[3])]\n\n # assert (InteractionPattern[0] == 0) & (InteractionPattern[1] == 0) & (InteractionPattern[2] == 63) & (InteractionPattern[3] <= 63)\n\n if human_box[2] > object_box[2]:\n human_box[2] = size - 1\n else:\n object_box[2] = size - 1\n\n shift = size / 2 - (InteractionPattern[3] + 1) / 2\n\n human_box = human_box + [0, shift, 0, shift]\n object_box = object_box + [0, shift, 0, shift]\n\n return np.round(human_box), np.round(object_box)\n\n\ndef generate_spatial(human_box, object_box):\n H, O = bbox_trans(human_box, object_box)\n Pattern = np.zeros((2, 64, 64))\n Pattern[0, int(H[1]):int(H[3]) + 1, int(H[0]):int(H[2]) + 1] = 1\n Pattern[1, int(O[1]):int(O[3]) + 1, int(O[0]):int(O[2]) + 1] = 1\n\n return 
Pattern\n\n\ndef im_detect(model, im_dir, image_id, Test_RCNN, fastText, prior_mask, Action_dic_inv, object_thres, human_thres, prior_flag, detection, detect_object_centric_dict, device, cfg):\n ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))\n DATA_DIR = os.path.abspath(os.path.join(ROOT_DIR, 'Data'))\n if \"train\" in im_dir:\n im_file = os.path.join(DATA_DIR, im_dir, 'COCO_train2014_' + (str(image_id)).zfill(12) + '.jpg')\n else:\n im_file = os.path.join(DATA_DIR, im_dir, 'COCO_val2014_' + (str(image_id)).zfill(12) + '.jpg')\n img_original = Image.open(im_file)\n img_original = img_original.convert('RGB')\n im_shape = (img_original.height, img_original.width) # (480, 640)\n transforms = build_transforms(cfg, is_train=False)\n worddim = fastText[1].shape[1]\n\n for object_out in Test_RCNN[image_id]:\n if (np.max(object_out[5]) > object_thres): # and (object_out[1] == 'Object'): # This is a valid object # it is possible to have human-human interaction\n\n h_box = np.empty((0, 4), dtype=np.float32)\n object_word_embedding = np.empty((0, worddim), dtype=np.float32)\n human_score = np.empty((0, 1), dtype=np.float32)\n object_class = np.empty((0, 1), dtype=np.int32)\n Weight_mask = np.empty((0, 29), dtype=np.float32)\n\n for human in Test_RCNN[image_id]:\n if (human[1] == 'Human') and (np.max(human[5]) > human_thres) and not (np.all(human[2] == object_out[2])): # This is a valid human\n h_box_ = np.array([human[2][0], human[2][1], human[2][2], human[2][3]]).reshape(1,4)\n h_box = np.concatenate((h_box, h_box_), axis=0)\n\n object_word_embedding_ = fastText[object_out[4]]\n object_word_embedding = np.concatenate((object_word_embedding, object_word_embedding_), axis=0)\n\n # Pattern_ = generate_spatial(human[2], object_out[2]).reshape(1, 2, 64, 64)\n # Pattern = np.concatenate((Pattern, Pattern_), axis=0)\n\n human_score = np.concatenate((human_score, np.max(human[5]).reshape(1,1)), axis=0)\n object_class = np.concatenate((object_class, np.array(object_out[4]).reshape(1,1)), axis=0)\n\n Weight_mask_ = prior_mask[:,object_out[4]].reshape(1,29)\n Weight_mask = np.concatenate((Weight_mask, Weight_mask_), axis=0)\n\n o_box = np.array([object_out[2][0], object_out[2][1], object_out[2][2], object_out[2][3]]).reshape(1,4)\n\n if len(h_box) == 0:\n continue\n\n blobs = {}\n pos_num = len(h_box)\n blobs['pos_num'] = pos_num\n human_boxes_cpu = h_box.reshape(pos_num, 4)\n human_boxes = torch.FloatTensor(human_boxes_cpu)\n object_boxes_cpu = np.tile(o_box, [len(h_box), 1]).reshape(pos_num, 4)\n object_boxes = torch.FloatTensor(object_boxes_cpu)\n\n blobs['object_word_embeddings_object_centric'] = torch.FloatTensor(object_word_embedding).reshape(pos_num, worddim)\n\n human_boxlist = BoxList(human_boxes, img_original.size, mode=\"xyxy\") # image_size=(width, height)\n object_boxlist = BoxList(object_boxes, img_original.size, mode=\"xyxy\") # image_size=(width, height)\n\n img, human_boxlist, object_boxlist = transforms(img_original, human_boxlist, object_boxlist)\n\n spatials = []\n for human_box, object_box in zip(human_boxlist.bbox, object_boxlist.bbox):\n ho_spatial = generate_spatial(human_box.numpy(), object_box.numpy()).reshape(1, 2, 64, 64)\n spatials.append(ho_spatial)\n blobs['spatials_object_centric'] = torch.FloatTensor(spatials).reshape(-1, 2, 64, 64)\n blobs['human_boxes'], blobs['object_boxes'] = (human_boxlist,), (object_boxlist,)\n\n for key in blobs.keys():\n if not isinstance(blobs[key], int) and not isinstance(blobs[key], tuple):\n blobs[key] = 
blobs[key].to(device)\n elif isinstance(blobs[key], tuple):\n blobs[key] = [boxlist.to(device) for boxlist in blobs[key]]\n\n image_list = to_image_list(img, cfg.DATALOADER.SIZE_DIVISIBILITY)\n image_list = image_list.to(device)\n\n # compute predictions\n model.eval()\n with torch.no_grad():\n prediction_HO, prediction_H, prediction_O, prediction_sp = model(image_list, blobs)\n\n #convert to np.array\n prediction_HO = prediction_HO.data.cpu().numpy()\n prediction_H = prediction_H.data.cpu().numpy()\n # prediction_O = prediction_O.data.cpu().numpy()\n prediction_sp = prediction_sp.data.cpu().numpy()\n\n # test sp branch only\n prediction_HO = prediction_sp\n\n if prior_flag == 1:\n prediction_HO = apply_prior_Graph(object_class, prediction_HO)\n if prior_flag == 2:\n prediction_HO = prediction_HO * Weight_mask\n if prior_flag == 3:\n prediction_HO = apply_prior_Graph(object_class, prediction_HO)\n prediction_HO = prediction_HO * Weight_mask\n\n # save image information\n for idx in range(pos_num):\n human_out = human_boxes_cpu[idx, :]\n dic = {}\n dic['image_id'] = image_id\n dic['person_box'] = human_out\n dic['person_score'] = human_score[idx][0]\n dic['prediction_H'] = prediction_H[idx]\n dic['prediction_sp'] = prediction_sp[idx] # before prior\n dic['object_box'] = object_out[2]\n dic['O_score'] = np.max(object_out[5])\n dic['O_class'] = object_out[4]\n Score_obj = prediction_HO[idx] * np.max(object_out[5])\n Score_obj = np.concatenate((object_out[2], Score_obj), axis=0)\n dic['Score_obj'] = Score_obj\n\n detect_object_centric_dict[image_id].append(dic)\n\n\ndef run_test(\n model,\n dataset_name=None,\n im_dir=None,\n test_detection=None,\n word_embeddings=None,\n test_image_id_list=None,\n prior_mask=None,\n action_dic_inv=None,\n output_file=None,\n output_dict_file=None,\n object_thres=0.4,\n human_thres=0.6,\n prior_flag=1,\n device=torch.device(\"cuda\"),\n cfg=None\n):\n\n logger = logging.getLogger(\"DRG.inference\")\n logger.info(\"Start evaluation on {} dataset({} images).\".format(dataset_name, len(test_image_id_list)))\n total_timer = Timer()\n inference_timer = Timer()\n total_timer.tic()\n\n np.random.seed(cfg.TEST.RNG_SEED)\n detection = []\n detect_object_centric_dict = {}\n\n for count, image_id in enumerate(tqdm(test_image_id_list)):\n detect_object_centric_dict[image_id] = []\n im_detect(model, im_dir, image_id, test_detection, word_embeddings, prior_mask, action_dic_inv, object_thres, human_thres,\n prior_flag, detection, detect_object_centric_dict, device, cfg)\n\n pickle.dump(detect_object_centric_dict, open(output_dict_file, \"wb\"))\n\n for image_id, detected_lists in detect_object_centric_dict.items():\n visited_human_list = []\n for detected_human in detected_lists:\n exist_human = 0\n for visites_human in visited_human_list:\n if bbox_iou(visites_human, detected_human['person_box']) > 0.98:\n exist_human = 1\n break\n if exist_human == 1:\n continue\n visited_human_list.append(detected_human['person_box'])\n dic = {}\n dic['image_id'] = image_id\n dic['person_box'] = detected_human['person_box']\n prediction_H = detected_human['prediction_H']\n person_score = detected_human['person_score']\n Score_obj_list = []\n for detected_object in detected_lists:\n if bbox_iou(detected_object['person_box'], detected_human['person_box']) > 0.98:\n Score_obj_list.append(detected_object['Score_obj'])\n # prediction_H.append(detected_object['prediction_H'])\n\n Score_obj = np.asarray(Score_obj_list)\n max_idx = np.argmax(Score_obj, 0)[4:]\n\n # agent mAP\n for i in 
range(29):\n #'''\n # walk, smile, run, stand\n if (i == 3) or (i == 17) or (i == 22) or (i == 27):\n agent_name = action_dic_inv[i] + '_agent'\n dic[agent_name] = person_score * prediction_H[i]\n continue\n\n # cut\n if i == 2:\n agent_name = 'cut_agent'\n dic[agent_name] = person_score * max(Score_obj[max_idx[2]][4 + 2], Score_obj[max_idx[4]][4 + 4])\n continue\n if i == 4:\n continue\n\n # eat\n if i == 9:\n agent_name = 'eat_agent'\n dic[agent_name] = person_score * max(Score_obj[max_idx[9]][4 + 9], Score_obj[max_idx[16]][4 + 16])\n continue\n if i == 16:\n continue\n\n # hit\n if i == 19:\n agent_name = 'hit_agent'\n dic[agent_name] = person_score * max(Score_obj[max_idx[19]][4 + 19], Score_obj[max_idx[20]][4 + 20])\n continue\n if i == 20:\n continue\n\n # These 2 classes need to save manually because there is '_' in action name\n if i == 6:\n agent_name = 'talk_on_phone_agent'\n dic[agent_name] = person_score * Score_obj[max_idx[i]][4 + i]\n continue\n\n if i == 8:\n agent_name = 'work_on_computer_agent'\n dic[agent_name] = person_score * Score_obj[max_idx[i]][4 + i]\n continue\n\n # all the rest\n agent_name = action_dic_inv[i].split(\"_\")[0] + '_agent'\n dic[agent_name] = person_score * Score_obj[max_idx[i]][4 + i]\n\n # role mAP\n for i in range(29):\n # walk, smile, run, stand. Won't contribute to role mAP\n if (i == 3) or (i == 17) or (i == 22) or (i == 27):\n dic[action_dic_inv[i]] = np.append(np.full(4, np.nan).reshape(1,4), person_score * prediction_H[i])\n continue\n\n # Impossible to perform this action\n if person_score * Score_obj[max_idx[i]][4 + i] == 0:\n dic[action_dic_inv[i]] = np.append(np.full(4, np.nan).reshape(1,4), person_score * Score_obj[max_idx[i]][4 + i])\n\n # Action with >0 score\n else:\n dic[action_dic_inv[i]] = np.append(Score_obj[max_idx[i]][:4], person_score * Score_obj[max_idx[i]][4 + i])\n\n detection.append(dic)\n\n # wait for all processes to complete before measuring the time\n synchronize()\n total_time = total_timer.toc()\n total_time_str = get_time_str(total_time)\n\n num_devices = 1\n logger.info(\n \"Total run time: {} ({} s / img per device, on {} devices)\".format(\n total_time_str, total_time * num_devices / len(test_image_id_list), num_devices\n )\n )\n total_infer_time = get_time_str(inference_timer.total_time)\n logger.info(\n \"Model inference time: {} ({} s / img per device, on {} devices)\".format(\n total_infer_time,\n inference_timer.total_time * num_devices / len(test_image_id_list),\n num_devices,\n )\n )\n\n pickle.dump(detection, open(output_file, \"wb\" ) )\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"PyTorch Object Detection Inference\")\n parser.add_argument(\n \"--config-file\",\n default=\"configs/e2e_faster_rcnn_R_50_FPN_1x_sp_only_object_centric.yaml\",\n metavar=\"FILE\",\n help=\"path to config file\",\n )\n parser.add_argument(\"--local_rank\", type=int, default=0)\n parser.add_argument(\n \"--ckpt\",\n help=\"The path to the checkpoint for test, default is the latest checkpoint.\",\n default=None,\n )\n parser.add_argument(\n \"--dataset_name\",\n help=\"vcoco_val_test_object_centric or vcoco_test_object_centric\",\n default=None,\n )\n parser.add_argument('--num_iteration', dest='num_iteration',\n help='Specify which weight to load',\n default=-1, type=int)\n parser.add_argument('--object_thres', dest='object_thres',\n help='Object threshold',\n default=0.1, type=float) # used to be 0.4 or 0.05\n parser.add_argument('--human_thres', dest='human_thres',\n help='Human threshold',\n default=0.8, 
type=float)\n parser.add_argument('--prior_flag', dest='prior_flag',\n help='whether use prior_flag',\n default=1, type=int)\n parser.add_argument(\n \"opts\",\n help=\"Modify config options using the command-line\",\n default=None,\n nargs=argparse.REMAINDER,\n )\n\n args = parser.parse_args()\n\n num_gpus = int(os.environ[\"WORLD_SIZE\"]) if \"WORLD_SIZE\" in os.environ else 1\n distributed = num_gpus > 1 and torch.cuda.is_available()\n\n if distributed:\n torch.cuda.set_device(args.local_rank)\n torch.distributed.init_process_group(\n backend=\"nccl\", init_method=\"env://\"\n )\n synchronize()\n\n ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))\n # DATA_DIR = os.path.abspath(os.path.join(ROOT_DIR, 'Data'))\n args.config_file = os.path.join(ROOT_DIR, args.config_file)\n\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n cfg.freeze()\n\n save_dir = \"\"\n logger = setup_logger(\"DRG\", save_dir, get_rank())\n logger.info(\"Using {} GPUs\".format(num_gpus))\n logger.info(cfg)\n\n logger.info(\"Collecting env info (might take some time)\")\n logger.info(\"\\n\" + collect_env_info())\n\n model = build_detection_model(cfg)\n # model.to(cfg.MODEL.DEVICE)\n device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n model.to(device)\n\n # Initialize mixed-precision if necessary\n use_mixed_precision = cfg.DTYPE == 'float16'\n amp_handle = amp.init(enabled=use_mixed_precision, verbose=cfg.AMP_VERBOSE)\n\n output_dir = cfg.OUTPUT_DIR\n checkpointer = DetectronCheckpointer(cfg, model, save_dir=output_dir)\n\n if args.num_iteration != -1:\n args.ckpt = os.path.join(cfg.OUTPUT_DIR, 'model_%07d.pth' % args.num_iteration)\n ckpt = cfg.MODEL.WEIGHT if args.ckpt is None else args.ckpt\n logger.info(\"Testing checkpoint {}\".format(ckpt))\n _ = checkpointer.load(ckpt, use_latest=args.ckpt is None)\n\n # iou_types = (\"bbox\",)\n output_folders = [None] * len(cfg.DATASETS.TEST)\n # dataset_names = cfg.DATASETS.TEST\n dataset_names = (args.dataset_name,)\n if cfg.OUTPUT_DIR:\n for idx, dataset_name in enumerate(dataset_names):\n if args.num_iteration != -1:\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference_sp\", dataset_name, \"model_%07d\" % args.num_iteration)\n else:\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference_sp\", dataset_name)\n mkdir(output_folder)\n output_folders[idx] = output_folder\n\n opt = {}\n # opt['word_dim'] = 300\n for output_folder, dataset_name in zip(output_folders, dataset_names):\n data = DatasetCatalog.get(dataset_name)\n data_args = data[\"args\"]\n im_dir = data_args['im_dir']\n test_detection = pickle.load(open(data_args['test_detection_file'], \"rb\"), encoding='latin1')\n prior_mask = pickle.load(open(data_args['prior_mask'], \"rb\"), encoding='latin1')\n action_dic = json.load(open(data_args['action_index']))\n action_dic_inv = {y: x for x, y in action_dic.items()}\n vcoco_test_ids = open(data_args['vcoco_test_ids_file'], 'r')\n test_image_id_list = [int(line.rstrip()) for line in vcoco_test_ids]\n vcocoeval = VCOCOeval(data_args['vcoco_test_file'], data_args['ann_file'], data_args['vcoco_test_ids_file'])\n word_embeddings = pickle.load(open(data_args['word_embedding_file'], \"rb\"), encoding='latin1')\n output_file = os.path.join(output_folder, 'detection_object_centric.pkl')\n output_dict_file = os.path.join(output_folder, 'detection_object_centric_all_pairs_{}_new.pkl'.format(dataset_name))\n\n logger.info(\"Output will be saved in {}\".format(output_file))\n 
logger.info(\"Start evaluation on {} dataset({} images).\".format(dataset_name, len(test_image_id_list)))\n\n run_test(\n model,\n dataset_name=dataset_name,\n im_dir=im_dir,\n test_detection=test_detection,\n word_embeddings=word_embeddings,\n test_image_id_list=test_image_id_list,\n prior_mask=prior_mask,\n action_dic_inv=action_dic_inv,\n output_file=output_file,\n output_dict_file=output_dict_file,\n object_thres=args.object_thres,\n human_thres=args.human_thres,\n prior_flag=args.prior_flag,\n device=device,\n cfg=cfg\n )\n\n synchronize()\n\n vcocoeval._do_eval(output_file, ovr_thresh=0.5)\n\n # data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=distributed)\n # for output_folder, dataset_name, data_loader_val in zip(output_folders, dataset_names, data_loaders_val):\n # inference(\n # model,\n # data_loader_val,\n # dataset_name=dataset_name,\n # iou_types=iou_types,\n # box_only=False if cfg.MODEL.RETINANET_ON else cfg.MODEL.RPN_ONLY,\n # bbox_aug=cfg.TEST.BBOX_AUG.ENABLED,\n # device=cfg.MODEL.DEVICE,\n # expected_results=cfg.TEST.EXPECTED_RESULTS,\n # expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,\n # output_folder=output_folder,\n # )\n # synchronize()\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.minimum", "numpy.asarray", "numpy.round", "numpy.max", "numpy.concatenate", "numpy.all", "torch.FloatTensor", "torch.no_grad", "torch.cuda.is_available", "torch.device", "torch.distributed.init_process_group", "numpy.full", "numpy.argmax", "numpy.zeros", "numpy.append", "numpy.array", "numpy.maximum", "numpy.random.seed", "torch.cuda.set_device", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AlbertoEsc/cuicuilco
[ "6817316024e21c7e5dded3836bd685eb9cb06365" ]
[ "lattice.py" ]
[ "#####################################################################################################################\n# lattice: This module implements functions that are useful to build receptive fields localized over a lattice #\n# generalizing rectangular swichtboards. It is part of the Cuicuilco framework. #\n# Also a specialized localized/sparse receptive field is supported. #\n# #\n# By Alberto Escalante. [email protected] #\n# Ruhr-University-Bochum, Institute for Neural Computation, Group of Prof. Dr. Wiskott #\n#####################################################################################################################\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\nimport numpy\n\n\ndef compute_lattice_matrix(v1, v2, mask, x_in_channels, y_in_channels, in_channel_dim=1, n0_1=0, n0_2=0,\n wrap_x=False, wrap_y=False, input_dim=None, dtype=None, ignore_cover=True,\n allow_nonrectangular_lattice=False):\n \"\"\"Computes the coordinates of the lattice points that lie within the image\"\"\"\n if v1[1] != 0 | v1[0] <= 0 | v2[0] < 0 | v2[1] <= 0:\n err = \"v1 must be horizontal: v1[0] > 0, v1[1] = 0, v2[0] >= 0, v2[1] > 0\"\n raise Exception(err) \n\n if in_channel_dim != 1:\n err = \"only single channel inputs supported now\"\n raise Exception(err) \n\n # assume no wrapping\n image = numpy.array(range(0, x_in_channels * y_in_channels))\n image.reshape((y_in_channels, x_in_channels))\n sub_image = numpy.array(range(0, mask.shape[0] * mask.shape[1]))\n sub_image.reshape((mask.shape[0], mask.shape[1]))\n mask_i = mask.astype(\"int\")\n mask_height, mask_width = mask.shape\n out_channel_dim = mask_i.sum()\n # print \"Mask shape is \", mask.shape\n \n mat_height = (y_in_channels - mask.shape[0]) // v2[1] + 1\n mat_width = (x_in_channels-mask.shape[1]) // v1[0] + 1\n \n mat = numpy.ones((mat_height, mat_width, 2)) * -1\n # Create Index Matrix, -1 entries equal empty cell\n # print \"Mat shape is \", mat.shape\n ind_y = 0\n for iy in range(0, mat_height):\n # x,y are real subimage positions\n # ix, iy are the coefficients of x,y in base v1 and v2\n # ind_y, ind_x are the indices in the matrix mat that contains the centers (upper-left corners) of each subimage\n y = iy * v2[1]\n min_ix = -1 * numpy.int(iy * v2[0] / v1[0])\n max_ix = numpy.floor((x_in_channels - mask.shape[1] - iy * v2[0]) * 1.0 / v1[0])\n max_ix = numpy.int(max_ix)\n ind_x = 0\n for ix in range(min_ix, max_ix + 1):\n x = iy * v2[0] + ix * v1[0]\n # print \"value of ind_x, ind_y = \", (ind_x, ind_y)\n # print \"Adding Point (\", x, \", \", y, \")\"\n mat[ind_y, ind_x] = (x, y)\n ind_x = ind_x + 1\n ind_y = ind_y + 1\n\n if not allow_nonrectangular_lattice:\n if mat_width > 1:\n if (-1, -1) in mat[:, mat_width-1]:\n mat = mat[:, :mat_width-2]\n else:\n print(\"Warning, mat_width <= 1 !!!\")\n return mat\n\n\ndef compute_lattice_matrix_connections_with_input_dim(v1, v2, preserve_mask, x_in_channels, y_in_channels,\n in_channel_dim=1, allow_nonrectangular_lattice=False):\n print(\"shape of preserve_mask is: \", preserve_mask.shape)\n print(\"x_in_channels = \", x_in_channels)\n print(\"y_in_channels = \", y_in_channels)\n\n if in_channel_dim > 1:\n if in_channel_dim is not preserve_mask.shape[2]:\n err = \"preserve_mask.shape[2] and in_channel_dim do not agree!!! 
\"\n raise Exception(err)\n\n preserve_mask = preserve_mask.flatten().reshape(preserve_mask.shape[0],\n in_channel_dim * preserve_mask.shape[1])\n v1 = list(v1)\n v2 = list(v2)\n # remember, vectors have coordinates x, y\n v1[0] = v1[0] * in_channel_dim\n v2[0] = v2[0] * in_channel_dim\n\n x_in_channels = x_in_channels * in_channel_dim\n y_in_channels = y_in_channels\n\n # in_channel_dim = 1\n # lat_mat = compute_lattice_matrix(v1, v2, preserve_mask, x_in_channels, y_in_channels, in_channel_dim,\n # allow_nonrectangular_lattice=allow_nonrectangular_lattice)\n return compute_lattice_matrix_connections(v1, v2, preserve_mask, x_in_channels, y_in_channels,\n allow_nonrectangular_lattice=allow_nonrectangular_lattice)\n\n\ndef compute_lattice_matrix_connections(v1, v2, preserve_mask, x_in_channels, y_in_channels, in_channel_dim=1,\n allow_nonrectangular_lattice=False, verbose=0):\n if in_channel_dim > 1:\n err = \"Error, feature not supported in_channel_dim > 1\"\n raise Exception(err)\n\n lat_mat = compute_lattice_matrix(v1, v2, preserve_mask, x_in_channels, y_in_channels,\n allow_nonrectangular_lattice=allow_nonrectangular_lattice)\n\n if verbose:\n print(\"lat_mat =\", lat_mat)\n \n image_positions = numpy.array(range(0, x_in_channels * y_in_channels))\n image_positions = image_positions.reshape(y_in_channels, x_in_channels)\n\n mask_indices = image_positions[0:preserve_mask.shape[0], 0:preserve_mask.shape[1]][preserve_mask].flatten()\n\n connections = None\n for ind_y in range(lat_mat.shape[0]):\n for ind_x in range(lat_mat.shape[1]):\n if lat_mat[ind_y, ind_x][0] != -1:\n if connections is None:\n connections = numpy.array(mask_indices +\n (lat_mat[ind_y, ind_x][0] + lat_mat[ind_y, ind_x][1]*x_in_channels))\n else:\n connections = numpy.concatenate((connections, mask_indices +\n (lat_mat[ind_y, ind_x][0] +\n lat_mat[ind_y, ind_x][1]*x_in_channels)))\n else:\n print(\"Void entry in lattice_matrix skipped\")\n\n if verbose:\n print(\"Connections are: \", connections.astype('int'))\n return connections.astype('int'), lat_mat\n\n\n# base_size either 2 or 3, but other base sizes might also work\ndef compute_lsrf_n_values(xy_in_channels, base_size, increment):\n n_values = []\n current_size = base_size\n\n n_values.append(base_size)\n prev_n = base_size\n while 1:\n next_n = prev_n * 2 + increment\n if next_n >= xy_in_channels:\n break\n n_values.append(next_n)\n\n prev_n = next_n\n current_size = current_size * 2\n return n_values[::-1]\n\n\n# Improvement> let nx_value and ny_value become vectors y_\ndef compute_lsrf_preserve_masks(x_field_channels, y_field_channels, nx_value, ny_value, in_channel_dim): \n if in_channel_dim > 1:\n preserve_mask_local = numpy.ones((y_field_channels, x_field_channels, in_channel_dim)) > 0.5\n else:\n preserve_mask_local = numpy.ones((y_field_channels, x_field_channels)) > 0.5\n \n if nx_value is not None and nx_value > 0:\n h_vector_sparse = numpy.ones((1, nx_value+1)) > 0.5\n # h_vector_sparse[0][x_field_channels:nx_value] = False\n h_vector_sparse[0][x_field_channels:nx_value] = False\n else:\n h_vector_sparse = numpy.ones((1, x_field_channels)) > 0.5\n\n if ny_value is not None and ny_value > 0:\n v_vector_sparse = numpy.ones((ny_value+1, 1)) > 0.5\n v_vector_sparse[y_field_channels:ny_value, 0] = False\n else:\n v_vector_sparse = numpy.ones((y_field_channels, 1)) > 0.5\n \n if in_channel_dim > 1:\n vector_in_channel_dim = numpy.ones((1, 1, in_channel_dim)) > 0.5\n preserve_mask_sparse = (v_vector_sparse * h_vector_sparse)[:, :, numpy.newaxis] * 
vector_in_channel_dim\n else:\n preserve_mask_sparse = (v_vector_sparse * h_vector_sparse)\n # print v_vector_sparse, h_vector_sparse, vector_in_channel_dim\n return preserve_mask_local, preserve_mask_sparse\n\n\n# Wrapper to support in_channel > 1\ndef compute_lsrf_matrix_connections_with_input_dim(v1, v2, preserve_mask_local, preserve_mask_sparse, x_in_channels,\n y_in_channels, in_channel_dim=1,\n allow_nonrectangular_lattice=False, verbose=False):\n if verbose:\n print(\"shape of preserve_mask_local is: \", preserve_mask_local.shape)\n if preserve_mask_sparse is not None:\n print(\"shape of preserve_mask_sparse is: \", preserve_mask_sparse.shape)\n print(\"x_in_channels = \", x_in_channels)\n print(\"y_in_channels = \", y_in_channels)\n\n if in_channel_dim > 1:\n if in_channel_dim != preserve_mask_sparse.shape[2]:\n err = \"preserve_mask_sparse.shape[2] and in_channel_dim do not agree!!! \"\n raise Exception(err)\n elif in_channel_dim != preserve_mask_local.shape[2]:\n err = \"preserve_mask_local.shape[2] and in_channel_dim do not agree!!! \"\n raise Exception(err)\n\n preserve_mask_local = preserve_mask_local.flatten().reshape(preserve_mask_local.shape[0],\n in_channel_dim * preserve_mask_local.shape[1])\n if preserve_mask_sparse is not None:\n preserve_mask_sparse = \\\n preserve_mask_sparse.flatten().reshape(preserve_mask_sparse.shape[0],\n in_channel_dim * preserve_mask_sparse.shape[1])\n v1 = list(v1)\n v2 = list(v2)\n # remember, vectors have coordinates x, y\n v1[0] = v1[0] * in_channel_dim\n v2[0] = v2[0] * in_channel_dim\n\n x_in_channels = x_in_channels * in_channel_dim\n y_in_channels = y_in_channels\n in_channel_dim = 1\n \n # lat_mat = compute_lattice_matrix(v1, v2, preserve_mask, x_in_channels, y_in_channels, in_channel_dim,\n # allow_nonrectangular_lattice=allow_nonrectangular_lattice)\n if preserve_mask_sparse is not None:\n return compute_lsrf_matrix_connections(v1, v2, preserve_mask_local, preserve_mask_sparse, x_in_channels,\n y_in_channels, in_channel_dim=1,\n allow_nonrectangular_lattice=allow_nonrectangular_lattice)\n else:\n return compute_lattice_matrix_connections(v1, v2, preserve_mask_local, x_in_channels, y_in_channels,\n in_channel_dim=1,\n allow_nonrectangular_lattice=allow_nonrectangular_lattice)\n \n\n# This code should be backwards compatible!!!, only the preserve masks should have changed!!!\n# For the lsrf, typically: v1=(2,0), v2=(0,1), preserve_mask = [1M 1M 0M 0M 1M] for suitable square matrices 1M and 0M.\n# Add checking for too small matrix compared to masks\ndef compute_lsrf_matrix_connections(v1, v2, preserve_mask_local, preserve_mask_sparse, x_in_channels, y_in_channels,\n in_channel_dim=1, allow_nonrectangular_lattice=False, verbose=False):\n \"\"\" Implementation of (LSRF) Localized/Sparse receptive field. 
\"\"\"\n if preserve_mask_sparse is None:\n print(\"Defaulting to compute_lattice_matrix_connections\")\n return compute_lattice_matrix_connections(v1, v2, preserve_mask_local, x_in_channels, y_in_channels,\n in_channel_dim=1, allow_nonrectangular_lattice=False, verbose=False)\n\n if in_channel_dim > 1:\n err = \"Error, feature not supported in_channel_dim > 1\"\n raise Exception(err)\n\n lat_mat = compute_lattice_matrix(v1, v2, preserve_mask_local, x_in_channels, y_in_channels,\n allow_nonrectangular_lattice=allow_nonrectangular_lattice)\n\n if verbose:\n print(\"lat_mat =\", lat_mat)\n\n image_positions = numpy.array(range(0, x_in_channels * y_in_channels))\n image_positions = image_positions.reshape(y_in_channels, x_in_channels)\n\n if verbose:\n print(image_positions)\n print(preserve_mask_sparse)\n\n mask_indices = image_positions[0:preserve_mask_sparse.shape[0],\n 0:preserve_mask_sparse.shape[1]][preserve_mask_sparse].flatten()\n\n mask_x_coordinates = mask_indices % x_in_channels\n mask_y_coordinates = mask_indices // x_in_channels\n\n connections = None\n for ind_y in range(lat_mat.shape[0]):\n for ind_x in range(lat_mat.shape[1]):\n if lat_mat[ind_y, ind_x][0] != -1:\n # print \"ind_y, ind_x, mask_x_coordinates, mask_x_coordinates = \", ind_y, ind_x,\n # mask_x_coordinates, mask_x_coordinates\n # print \"lat_mat[ind_y, ind_x] = \", lat_mat[ind_y, ind_x]\n new_x_coordinates = (mask_x_coordinates + lat_mat[ind_y, ind_x][0]) % x_in_channels\n new_y_coordinates = (mask_y_coordinates + lat_mat[ind_y, ind_x][1]) % y_in_channels\n\n # new_connections = numpy.array(mask_indices + (lat_mat[ind_y, ind_x][0] +\n # lat_mat[ind_y, ind_x][1]*x_in_channels))\n new_connections = new_y_coordinates * x_in_channels + new_x_coordinates\n\n if connections is None:\n connections = new_connections\n else:\n connections = numpy.concatenate((connections, new_connections))\n else:\n print(\"Void entry in lattice_matrix skipped\")\n\n if verbose:\n print(\"Connections are: \", connections.astype('int'))\n return connections.astype('int'), lat_mat\n\n\n# TODO: The following is test code, should me moved into a test module or submodule\n# x_in_channels = 32\n# y_in_channels = 1\n# x_field_channels = 2\n# y_field_channels = 1\n# nx_value = 30\n# ny_value = None\n# base = 2\n# increment = 2\n# in_channel_dim=2\n# print compute_lsrf_n_values(x_in_channels, base, increment)\n# mask_local, mask_sparse = compute_lsrf_preserve_masks(x_field_channels, y_field_channels, 30, None, in_channel_dim)\n# # print mask_sparse, mask_sparse.shape\n# # print mask_local, mask_local.shape\n# connections, lat_mat = compute_lsrf_matrix_connections_with_input_dim(v1=(2,0), v2=(0,1),\n# preserve_mask_local=mask_local, preserve_mask_sparse = mask_sparse, x_in_channels=x_in_channels,\n# y_in_channels=y_in_channels, in_channel_dim=in_channel_dim, allow_nonrectangular_lattice=False, verbose=0)\n# # print \"Connections=\", connections\n\n# This code also appears to be useful for testing\n# print \"lat_mat =\", lat_mat\n# image_positions = numpy.array(range(0, y_in_channels * x_in_channels))\n# image_positions = image_positions.reshape(y_in_channels, x_in_channels)\n##\n##\n# mask_indices = image_positions[0:preserve_mask.shape[0], 0:preserve_mask.shape[1]][preserve_mask].flatten()\n##\n# connections = None\n# for ind_y in range(lat_mat.shape[0]):\n# for ind_x in range(lat_mat.shape[1]):\n# if(lat_mat[ind_y, ind_x][0] != -1):\n# if connections is None:\n# connections = numpy.array(mask_indices + (lat_mat[ind_y, ind_x][0] +\n# lat_mat[ind_y, 
ind_x][1]*x_in_channels))\n# else:\n# connections = numpy.concatenate((connections, mask_indices + (lat_mat[ind_y, ind_x][0] +\n# lat_mat[ind_y, ind_x][1]*x_in_channels) ))\n# else:\n# print \"Void entry in lattice_matrix skipped (to avoid asymmetry)\"\n##\n##\n# print \"Connections are: \", connections.astype('int')\n# return (connections.astype('int'), lat_mat)" ]
[ [ "numpy.ones", "numpy.concatenate", "numpy.int", "numpy.floor", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
simhag/Compositional-Pre-Training-for-Semantic-Parsing-with-BERT
[ "352baf443f0fcfde0f275521b5927b17a5c0c2df", "352baf443f0fcfde0f275521b5927b17a5c0c2df" ]
[ "src/sanity_check.py", "src/semantic_parser.py" ]
[ "import torch\nfrom argparse import ArgumentParser\nimport os\nfrom utils import read_GeoQuery, data_iterator\nfrom pytorch_pretrained_bert.modeling import BertModel\nfrom tokens_vocab import Vocab\nimport domains\nfrom semantic_parser import TSP, BSP\nfrom utils import get_dataset_finish_by, save_model, get_dataset, load_model\nfrom tensorboardX import SummaryWriter\nimport time\nimport math\nimport numpy as np\nfrom tqdm import tqdm\nimport warnings\n\nparser = ArgumentParser()\nparser.add_argument(\"--data_folder\", type=str, default=\"geoQueryData\")\nparser.add_argument(\"--out_folder\", type=str, default=\"outputs\")\nparser.add_argument(\"--BERT\", default=\"bert-base-uncased\", type=str, help=\"bert-base-uncased, bert-large-uncased\")\nparser.add_argument(\"--batch_size\", default=16, type=int)\nparser.add_argument(\"--clip_grad\", default=5.0, type=float)\nparser.add_argument(\"--d_model\", default=128, type=int)\nparser.add_argument(\"--d_int\", default=512, type=int)\nparser.add_argument(\"--dropout\", default=0.1, type=float)\nparser.add_argument(\"--lr\", default=0.001, type=float)\nparser.add_argument(\"--models_path\", default='models', type=str)\nparser.add_argument(\"--epoch_to_load\", default=40, type=int)\nparser.add_argument(\"--seed\", default=1515, type=int)\nparser.add_argument(\"--shuffle\", default=True, type=bool)\nparser.add_argument(\"--log_dir\", default='logs', type=str)\nparser.add_argument(\"--log\", default=True, type=bool)\nparser.add_argument(\"--epochs\", default=50, type=int)\nparser.add_argument(\"--save_every\", default=5, type=int)\nparser.add_argument(\"--n_layers\", default=2, type=int)\nparser.add_argument(\"--decoding\", default='greedy', type=str)\nparser.add_argument(\"--evaluation\", default='strict', type=str)\nparser.add_argument(\"--beam_size\", default=5, type=int)\nparser.add_argument(\"--max_decode_len\", default=105, type=int)\nparser.add_argument(\"--domain\", default='geoquery', type=str)\n\ndef sanity_check(arg_parser):\n '''\n Check whether the decoding produces [UNK]\n '''\n test_dataset = get_dataset_finish_by(arg_parser.data_folder, 'test','280.tsv')\n vocab = Vocab(arg_parser.BERT)\n file_path = os.path.join(arg_parser.models_path, f\"TSP_epoch_{arg_parser.epoch_to_load}.pt\")\n model = TSP(input_vocab=vocab, target_vocab=vocab, d_model=arg_parser.d_model, d_int=arg_parser.d_model,\n n_layers=arg_parser.n_layers, dropout_rate=arg_parser.dropout)\n load_model(file_path=file_path, model=model)\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n model = model.to(device)\n\n top_parsing_outputs, gold_queries = decoding(model, test_dataset, arg_parser)\n\n for sentence in top_parsing_outputs:\n if '[UNK]' in sentence[0]:\n warnings.warn('[UNK] in the decoding')\n\ndef decoding(loaded_model, test_dataset, arg_parser):\n beam_size = arg_parser.beam_size\n max_len = arg_parser.max_decode_len\n decoding_method = loaded_model.beam_search if arg_parser.decoding == 'beam_search' else loaded_model.decode_greedy\n loaded_model.eval()\n hypotheses = []\n gold_queries = []\n scores = 0\n count = 0\n with torch.no_grad():\n for src_sent_batch, gold_target in tqdm(data_iterator(test_dataset, batch_size=1, shuffle=False), total=280):\n example_hyps = decoding_method(sources=src_sent_batch, max_len=max_len, beam_size=beam_size)\n hypotheses.append(example_hyps)\n gold_queries.append(gold_target[0])\n return hypotheses, gold_queries\n\nif __name__ == '__main__':\n args = parser.parse_args()\n sanity_check(args)\n", 
"from collections import namedtuple\nfrom typing import List\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.nn.utils\nfrom BERT_encoder import BERT\nfrom tokens_vocab import Vocab # for debugging\nfrom tokens_embeddings import DecoderEmbeddings, PositionalEncoding\nfrom torch.autograd import Variable\nfrom transformer import Transformer, DecoderLayer, TransformerEncoder, EncoderLayer\n\nHypothesis = namedtuple('Hypothesis', ['value', 'score'])\n\n\ndef initialize_weights(model):\n for p in model.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform(p)\n return\n\n\nclass TSP(nn.Module):\n \"\"\" Transformer Semantic Parser:\n - Transformer Encoder\n - Transformer Decoder\n \"\"\"\n\n def __init__(self, input_vocab, target_vocab, d_model=512, d_int=2048, d_k=64, h=8, n_layers=6, dropout_rate=0.1,\n max_len_pe=200, bert_name=None):\n \"\"\"\n :param input_vocab: Vocab based on BERT tokenizer\n :param target_vocab: Vocab based on BERT tokenizer, requires embedding. Fields tokenizer, tokenizer.ids_to_tokens = ordered_dict\n pad=0, start=1, end=2\n :param size: Size of the BERT model: base or large\n :param d_model: dimension of transformer embeddings #TODO add linear layer to map BERT output to dim 512?\n :param dropout_rate:dropout, default 0.1\n \"\"\"\n super(TSP, self).__init__()\n self.dropout_rate = dropout_rate\n self.input_vocab = input_vocab\n self.target_vocab = target_vocab\n self.model_embeddings_source = nn.Sequential(DecoderEmbeddings(vocab=self.input_vocab, embed_size=d_model),\n PositionalEncoding(d_model=d_model, dropout=dropout_rate,\n max_len=max_len_pe))\n self.model_embeddings_target = nn.Sequential(DecoderEmbeddings(vocab=self.target_vocab, embed_size=d_model),\n PositionalEncoding(d_model=d_model, dropout=dropout_rate,\n max_len=max_len_pe))\n self.encoder = TransformerEncoder(\n layer=EncoderLayer(d_model=d_model, d_int=d_int, d_k=d_k, d_v=d_k, h=h,\n p_drop=dropout_rate), n_layer=n_layers)\n self.decoder = Transformer(\n layer=DecoderLayer(d_model=d_model, d_int=d_int, d_k=d_k, d_v=d_k, h=h, p_drop=dropout_rate),\n n_layer=n_layers)\n self.linear_projection = nn.Linear(d_model, len(self.target_vocab.tokenizer.ids_to_tokens), bias=False)\n self.dropout = nn.Dropout(self.dropout_rate)\n\n self.device = self.linear_projection.weight.device\n\n initialize_weights(self.encoder)\n initialize_weights(self.decoder)\n initialize_weights(self.linear_projection)\n initialize_weights(self.model_embeddings_source)\n initialize_weights(self.model_embeddings_target)\n\n def forward(self, sources: List[str], targets: List[str]) -> torch.Tensor:\n \"\"\"\n :param source: source strings of size bsize\n :param target: target strings of sizes bsize\n :return: scores, sum of log prob of outputs\n \"\"\"\n # Take source sentences bsize strings\n # Convert to tokens\n # Keep in minde the nb of tokens per batch example\n # Pad and convert to input tensor for BERT\n source_tokens = self.input_vocab.to_input_tokens(sources)\n source_lengths = [len(s) for s in source_tokens]\n source_tensor = self.model_embeddings_source(self.input_vocab.to_input_tensor(sources, device=self.device))\n # feed to Transformer encoder\n input_padding_mask = self.generate_sent_masks(source_tensor, source_lengths)\n encoder_output = self.encode(source_tensor,\n padding_mask=input_padding_mask) # size batch, maxlen, d_model #no mask right? 
output c'est un tuple?\n # use lengths kept in mind to get mask over the encoder output (padding mask)\n # Take target and get tokens\n target_tokens = self.target_vocab.to_input_tokens(targets)\n # Add END at the end to get the target we will compare to for log probs\n target_tokens_y = [tokens + ['[END]'] for tokens in target_tokens]\n # Add START at the beginning to get the target we use along with the decoder to generate log probs\n target_tokens = [['[START]'] + tokens for tokens in target_tokens]\n # To be fed to the decoder\n target_tokens_padded = self.target_vocab.tokens_to_tensor(target_tokens,\n device=self.device) # size bsize, max_len\n # To be used for log_probs\n target_y_padded = self.target_vocab.tokens_to_tensor(target_tokens_y, device=self.device) # size bsize, max_len\n\n # Mask for the decoder: for padding AND autoregressive constraints\n target_tokens_mask = TSP.generate_target_mask(target_tokens_padded, pad_idx=0) # size bsize, maxlen, maxlen\n # Ready for the decoder with source, its mask, target, its mask\n decoder_output = self.decode(input_dec=self.model_embeddings_target(target_tokens_padded),\n output_enc=encoder_output, multihead1_mask=target_tokens_mask,\n multihead2_mask=input_padding_mask)\n\n # Projection of the decoder output in linear layer without bias and logsoftmax\n P = F.log_softmax(self.linear_projection(decoder_output), dim=-1) # size bsize, max_len, len_vocab pour oim\n\n # Zero out, probabilities for which we have nothing in the target text\n target_masks_y = (target_y_padded != 0).float()\n\n # Compute log probability of generating true target words -> dark magic I need to check\n target_gold_words_log_prob = torch.gather(P, index=target_y_padded.unsqueeze(-1), dim=-1).squeeze(\n -1) * target_masks_y\n scores = target_gold_words_log_prob.sum()\n return scores\n\n def encode(self, source_tensor, padding_mask):\n # simply apply BERT, may need the forward though\n return self.encoder(input_enc=source_tensor, multihead_mask=padding_mask)\n\n @staticmethod\n def subsequent_mask(size):\n \"Mask out subsequent positions.\"\n attn_shape = (1, size, size)\n subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')\n return torch.from_numpy(subsequent_mask) == 0\n\n def generate_sent_masks(self, enc_output, source_lengths):\n \"\"\"\n source_lengths list of ints, len=bsize\n enc_output of size bsize, len, d_model\n :rtype: enc_masks: long tensor of size bsize, len\n \"\"\"\n enc_masks = torch.ones(enc_output.size(0), enc_output.size(1), dtype=torch.long)\n for e_id, src_len in enumerate(source_lengths):\n enc_masks[e_id, src_len:] = 0\n return enc_masks.to(self.device).unsqueeze(-2)\n\n @staticmethod\n def generate_target_mask(target_padded, pad_idx):\n \"\"\"\n target padded = long tensor of size bsize, max_len_tokens\n :rtype: mask of dimension\n \"\"\"\n tgt_mask = (target_padded != pad_idx).unsqueeze(-2) # bsize, 1, len\n tgt_mask = tgt_mask & Variable(TSP.subsequent_mask(target_padded.size(-1)).type_as(tgt_mask.data))\n return tgt_mask # size b, max_len, max_len\n\n def decode(self, input_dec, output_enc, multihead1_mask, multihead2_mask):\n \"\"\"\n :param encoder_output: size (b, len, dim_bert)\n :param enc_masks: size (b, len)\n :param target_padded: size (b, len')\n :return:\n \"\"\"\n return self.decoder(input_dec=input_dec, output_enc=output_enc, multihead1_mask=multihead1_mask,\n multihead2_mask=multihead2_mask)\n\n def decode_greedy(self, src_sent, max_len, *args, **kwargs):\n \"\"\"\n :param src_sent: [ str ] str is the input test 
example to encode-decode\n :param max_len: max len -in tokens of the input\n :param args:\n :param kwargs:\n :return:list[str] list of the list of tokens for the decoded query\n \"\"\"\n source_tokens = self.input_vocab.to_input_tokens(src_sent)\n source_lengths = [len(s) for s in source_tokens]\n source_tensor = self.model_embeddings_source(self.input_vocab.to_input_tensor(src_sent, device=self.device))\n # feed to Transformer encoder\n input_padding_mask = self.generate_sent_masks(source_tensor, source_lengths)\n encoder_output = self.encode(source_tensor,\n padding_mask=input_padding_mask) # size batch, maxlen, d_model #no mask right? output c'est un tuple?\n # use lengths kept in mind to get mask over the encoder output (padding mask)\n\n target_tokens = [['[START]'] for _ in range(source_tensor.size(0))]\n target_tokens_padded = self.target_vocab.tokens_to_tensor(target_tokens,\n device=self.device) # size bsize, max_len\n target_tokens_mask = TSP.generate_target_mask(target_tokens_padded, pad_idx=0) # size bsize, maxlen, maxlen\n # Ready for the decoder with source, its mask, target, its mask\n\n for i in range(max_len - 1):\n decoder_output = self.decode(input_dec=self.model_embeddings_target(target_tokens_padded),\n output_enc=encoder_output, \\\n multihead1_mask=target_tokens_mask, multihead2_mask=input_padding_mask)\n\n P = F.log_softmax(self.linear_projection(decoder_output), dim=-1)\n _, next_word = torch.max(P[:, -1], dim=-1)\n\n new_token = self.target_vocab.tokenizer.ids_to_tokens[next_word.item()]\n if new_token == '[END]':\n break\n target_tokens = [tokens + [new_token] for tokens in target_tokens]\n target_tokens_padded = self.target_vocab.tokens_to_tensor(target_tokens, device=self.device)\n target_tokens_mask = TSP.generate_target_mask(target_tokens_padded, pad_idx=0)\n return [target_token[1:] for target_token in target_tokens]\n\n def beam_search(self, src_sent, beam_size, max_len):\n len_vocab = len(self.input_vocab.tokenizer.ids_to_tokens)\n\n source_tokens = self.input_vocab.to_input_tokens(src_sent)\n source_lengths = [len(s) for s in source_tokens]\n source_tensor = self.model_embeddings_source(self.input_vocab.to_input_tensor(src_sent, device=self.device))\n # feed to Transformer encoder\n input_padding_mask = self.generate_sent_masks(source_tensor, source_lengths)\n encoder_output = self.encode(source_tensor, padding_mask=input_padding_mask) # size 1, maxlen, d_model\n\n hypotheses = [['[START]']]\n hyp_scores = torch.zeros(len(hypotheses), dtype=torch.float, device=self.device)\n len_hyps = [1]\n hypotheses_padded = self.target_vocab.tokens_to_tensor(hypotheses, device=self.device)\n hyp_tokens_mask = TSP.generate_target_mask(hypotheses_padded, pad_idx=0)\n completed_hypotheses = []\n\n t = 0\n while len(completed_hypotheses) < 2 * beam_size and t < max_len:\n t += 1\n hyp_num = len(hypotheses)\n\n exp_encoder_output = encoder_output.expand(hyp_num, encoder_output.size(1), encoder_output.size(2))\n\n decoder_output = self.decode(input_dec=self.model_embeddings_target(hypotheses_padded),\n output_enc=exp_encoder_output, multihead1_mask=hyp_tokens_mask,\n multihead2_mask=input_padding_mask)\n\n P = F.log_softmax(self.linear_projection(decoder_output), dim=-1) # size hyp_num, max_len, len_vocab\n updates = [P[i, len_i - 1, :] for i, len_i in enumerate(len_hyps)] # n_hyp tensors of size len_vocab\n score_updates = torch.stack(updates)\n continuating_scores = (hyp_scores.unsqueeze(1).expand_as(score_updates) + score_updates).view(\n -1) # size n_hyp x vocab\n 
top_scores, top_positions = torch.topk(continuating_scores, k=beam_size)\n prev_hyp_ids = top_positions // len_vocab\n hyp_word_ids = top_positions % len_vocab\n\n new_hypotheses = []\n new_len_hyps = []\n new_hyp_scores = []\n\n for prev_hyp_id, hyp_word_id, cand_new_hyp_score in zip(prev_hyp_ids, hyp_word_ids, top_scores):\n prev_hyp_id = prev_hyp_id.item()\n hyp_word_id = hyp_word_id.item()\n cand_new_hyp_score = cand_new_hyp_score.item()\n\n hyp_token = self.input_vocab.tokenizer.ids_to_tokens[int(hyp_word_id)]\n\n new_hyp_sent = hypotheses[prev_hyp_id] + [hyp_token]\n if hyp_token == '[END]':\n completed_hypotheses.append(Hypothesis(value=new_hyp_sent[1:-1], # on jerte le start et le end\n score=cand_new_hyp_score))\n else:\n new_hypotheses.append(new_hyp_sent)\n new_len_hyps.append(len_hyps[prev_hyp_id] + 1)\n new_hyp_scores.append(cand_new_hyp_score)\n\n if len(completed_hypotheses) >= 2 * beam_size:\n break\n\n if len(new_hypotheses) == 0:\n hypotheses = [['[START]']]\n hyp_scores = torch.zeros(len(hypotheses), dtype=torch.float, device=self.device)\n len_hyps = [1]\n else:\n hypotheses = new_hypotheses\n len_hyps = new_len_hyps\n hyp_scores = torch.tensor(new_hyp_scores, dtype=torch.float, device=self.device)\n hypotheses_padded = self.target_vocab.tokens_to_tensor(hypotheses, device=self.device)\n hyp_tokens_mask = TSP.generate_target_mask(hypotheses_padded, pad_idx=0)\n\n completed_hypotheses.sort(key=lambda hyp: hyp.score, reverse=True)\n return [hyp.value for hyp in completed_hypotheses[:beam_size]]\n\n\nclass BSP(nn.Module):\n \"\"\" BERT Semantic Parser:\n - BERT Encoder\n - Transformer Decoder\n \"\"\"\n\n def __init__(self, input_vocab, target_vocab, d_model=512, d_int=2048, d_k=64, h=8, n_layers=6,\n dropout_rate=0.1, max_len_pe=200, bert_name=None):\n \"\"\"\n :param input_vocab: Vocab based on BERT tokenizer\n :param target_vocab: Vocab based on BERT tokenizer, requires embedding. 
Fields tokenizer, tokenizer.ids_to_tokens = ordered_dict\n pad=0, start=1, end=2\n :param size: Size of the BERT model: base or large\n :param d_model: dimension of transformer embeddings\n :param dropout_rate:dropout, default 0.1\n \"\"\"\n super(BSP, self).__init__()\n\n self.dropout_rate = dropout_rate\n self.input_vocab = input_vocab\n self.target_vocab = target_vocab\n self.hidden_size = 768 if bert_name == 'base' else 1024\n self.encoder = BERT(bert_name=bert_name, d_model=d_model)\n self.model_embeddings_target = nn.Sequential(DecoderEmbeddings(vocab=self.target_vocab, embed_size=d_model),\n PositionalEncoding(d_model=d_model, dropout=dropout_rate,\n max_len=max_len_pe))\n self.decoder = Transformer(\n layer=DecoderLayer(d_model=d_model, d_int=d_int, d_k=d_k, d_v=d_k, h=h, p_drop=dropout_rate),\n n_layer=n_layers)\n self.linear_projection = nn.Linear(d_model, len(self.target_vocab.tokenizer.ids_to_tokens), bias=False)\n self.dropout = nn.Dropout(self.dropout_rate)\n\n self.device = self.linear_projection.weight.device\n\n initialize_weights(self.decoder)\n initialize_weights(self.linear_projection)\n initialize_weights(self.model_embeddings_target)\n\n def forward(self, sources: List[str], targets: List[str]) -> torch.Tensor:\n \"\"\"\n :param source: source strings of size bsize\n :param target: target strings of sizes bsize\n :return: scores, sum of log prob of outputs\n \"\"\"\n \"\"\"\n :param source: source strings of size bsize\n :param target: target strings of sizes bsize\n :return: scores, sum of log prob of outputs\n \"\"\"\n # Take source sentences bsize strings\n # Convert to tokens\n # Keep in minde the nb of tokens per batch example\n # Pad and convert to input tensor for BERT\n source_tokens = self.input_vocab.to_input_tokens(sources)\n source_lengths = [len(s) for s in source_tokens]\n source_tensor = self.input_vocab.to_input_tensor(sources, device=self.device)\n # feed to Transformer encoder\n input_padding_mask = self.generate_sent_masks(source_tensor, source_lengths)\n encoder_output = self.encode(input_ids=source_tensor, attention_mask=input_padding_mask.squeeze(\n 1)) # size batch, maxlen, d_model #no mask right? 
output c'est un tuple?\n # use lengths kept in mind to get mask over the encoder output (padding mask)\n # Take target and get tokens\n target_tokens = self.target_vocab.to_input_tokens(targets)\n # Add END at the end to get the target we will compare to for log probs\n target_tokens_y = [tokens + ['[END]'] for tokens in target_tokens]\n # Add START at the beginning to get the target we use along with the decoder to generate log probs\n target_tokens = [['[START]'] + tokens for tokens in target_tokens]\n # To be fed to the decoder\n target_tokens_padded = self.target_vocab.tokens_to_tensor(target_tokens,\n device=self.device) # size bsize, max_len\n # To be used for log_probs\n target_y_padded = self.target_vocab.tokens_to_tensor(target_tokens_y, device=self.device) # size bsize, max_len\n\n # Mask for the decoder: for padding AND autoregressive constraints\n target_tokens_mask = BSP.generate_target_mask(target_tokens_padded, pad_idx=0) # size bsize, maxlen, maxlen\n # Ready for the decoder with source, its mask, target, its mask\n decoder_output = self.decode(input_dec=self.model_embeddings_target(target_tokens_padded),\n output_enc=encoder_output, multihead1_mask=target_tokens_mask,\n multihead2_mask=input_padding_mask)\n\n # Projection of the decoder output in linear layer without bias and logsoftmax\n P = F.log_softmax(self.linear_projection(decoder_output), dim=-1) # size bsize, max_len, len_vocab pour oim\n\n # Zero out, probabilities for which we have nothing in the target text\n target_masks_y = (target_y_padded != 0).float()\n\n # Compute log probability of generating true target words -> dark magic I need to check\n target_gold_words_log_prob = torch.gather(P, index=target_y_padded.unsqueeze(-1), dim=-1).squeeze(\n -1) * target_masks_y\n scores = target_gold_words_log_prob.sum()\n return scores\n\n def encode(self, input_ids, attention_mask):\n # simply apply BERT, may need the forward though\n return self.encoder(input=input_ids, mask=attention_mask)\n\n @staticmethod\n def subsequent_mask(size):\n \"Mask out subsequent positions.\"\n attn_shape = (1, size, size)\n subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')\n return torch.from_numpy(subsequent_mask) == 0\n\n def generate_sent_masks(self, enc_output, source_lengths):\n \"\"\"\n source_lengths list of ints, len=bsize\n enc_output of size bsize, len, d_model\n :rtype: enc_masks: long tensor of size bsize, len\n \"\"\"\n enc_masks = torch.ones(enc_output.size(0), enc_output.size(1), dtype=torch.long)\n for e_id, src_len in enumerate(source_lengths):\n enc_masks[e_id, src_len:] = 0\n return enc_masks.to(self.device).unsqueeze(-2)\n\n @staticmethod\n def generate_target_mask(target_padded, pad_idx):\n \"\"\"\n target padded = long tensor of size bsize, max_len_tokens\n :rtype: mask of dimension\n \"\"\"\n tgt_mask = (target_padded != pad_idx).unsqueeze(-2) # bsize, 1, len\n tgt_mask = tgt_mask & Variable(TSP.subsequent_mask(target_padded.size(-1)).type_as(tgt_mask.data))\n return tgt_mask # size b, max_len, max_len\n\n def decode(self, input_dec, output_enc, multihead1_mask, multihead2_mask):\n \"\"\"\n :param encoder_output: size (b, len, dim_bert)\n :param enc_masks: size (b, len)\n :param target_padded: size (b, len')\n :return:\n \"\"\"\n return self.decoder(input_dec=input_dec, output_enc=output_enc, multihead1_mask=multihead1_mask,\n multihead2_mask=multihead2_mask)\n\n def decode_greedy(self, src_sent, max_len, *args, **kwargs):\n \"\"\"\n :param src_sent: [ str ] str is the input test example to 
encode-decode\n :param max_len: max len -in tokens of the input\n :param args:\n :param kwargs:\n :return:list[str] list of the list of tokens for the decoded query\n \"\"\"\n source_tokens = self.input_vocab.to_input_tokens(src_sent)\n source_lengths = [len(s) for s in source_tokens]\n source_tensor = self.input_vocab.to_input_tensor(src_sent, device=self.device)\n # feed to Transformer encoder\n input_padding_mask = self.generate_sent_masks(source_tensor, source_lengths)\n encoder_output = self.encode(source_tensor, input_padding_mask.squeeze(\n 1)) # size batch, maxlen, d_model #no mask right? output c'est un tuple?\n # use lengths kept in mind to get mask over the encoder output (padding mask)\n\n target_tokens = [['[START]'] for _ in range(source_tensor.size(0))]\n target_tokens_padded = self.target_vocab.tokens_to_tensor(target_tokens,\n device=self.device) # size bsize, max_len\n target_tokens_mask = BSP.generate_target_mask(target_tokens_padded, pad_idx=0) # size bsize, maxlen, maxlen\n # Ready for the decoder with source, its mask, target, its mask\n\n for i in range(max_len - 1):\n decoder_output = self.decode(input_dec=self.model_embeddings_target(target_tokens_padded),\n output_enc=encoder_output,\n multihead1_mask=target_tokens_mask, multihead2_mask=input_padding_mask)\n\n P = F.log_softmax(self.linear_projection(decoder_output), dim=-1)\n _, next_word = torch.max(P[:, -1], dim=-1)\n\n new_token = self.target_vocab.tokenizer.ids_to_tokens[next_word.item()]\n if new_token == '[END]':\n break\n target_tokens = [tokens + [new_token] for tokens in target_tokens]\n target_tokens_padded = self.target_vocab.tokens_to_tensor(target_tokens, device=self.device)\n target_tokens_mask = BSP.generate_target_mask(target_tokens_padded, pad_idx=0)\n return [target_token[1:] for target_token in target_tokens]\n\n def beam_search(self, src_sent, beam_size, max_len):\n len_vocab = len(self.input_vocab.tokenizer.ids_to_tokens)\n\n source_tokens = self.input_vocab.to_input_tokens(src_sent)\n source_lengths = [len(s) for s in source_tokens]\n source_tensor = self.input_vocab.to_input_tensor(src_sent, device=self.device)\n # feed to Transformer encoder\n input_padding_mask = self.generate_sent_masks(source_tensor, source_lengths)\n encoder_output = self.encode(source_tensor, input_padding_mask.squeeze(1)) # size 1, maxlen, d_model\n\n hypotheses = [['[START]']]\n hyp_scores = torch.zeros(len(hypotheses), dtype=torch.float, device=self.device)\n len_hyps = [1]\n hypotheses_padded = self.target_vocab.tokens_to_tensor(hypotheses, device=self.device)\n hyp_tokens_mask = BSP.generate_target_mask(hypotheses_padded, pad_idx=0)\n completed_hypotheses = []\n\n t = 0\n while len(completed_hypotheses) < 2 * beam_size and t < max_len:\n t += 1\n hyp_num = len(hypotheses)\n\n exp_encoder_output = encoder_output.expand(hyp_num, encoder_output.size(1), encoder_output.size(2))\n\n decoder_output = self.decode(input_dec=self.model_embeddings_target(hypotheses_padded),\n output_enc=exp_encoder_output, multihead1_mask=hyp_tokens_mask,\n multihead2_mask=input_padding_mask)\n\n P = F.log_softmax(self.linear_projection(decoder_output), dim=-1) # size hyp_num, max_len, len_vocab\n updates = [P[i, len_i - 1, :] for i, len_i in enumerate(len_hyps)] # n_hyp tensors of size len_vocab\n score_updates = torch.stack(updates)\n continuating_scores = (hyp_scores.unsqueeze(1).expand_as(score_updates) + score_updates).view(\n -1) # size n_hyp x vocab\n top_scores, top_positions = torch.topk(continuating_scores, k=beam_size)\n 
prev_hyp_ids = top_positions // len_vocab\n hyp_word_ids = top_positions % len_vocab\n\n new_hypotheses = []\n new_len_hyps = []\n new_hyp_scores = []\n\n for prev_hyp_id, hyp_word_id, cand_new_hyp_score in zip(prev_hyp_ids, hyp_word_ids, top_scores):\n prev_hyp_id = prev_hyp_id.item()\n hyp_word_id = hyp_word_id.item()\n cand_new_hyp_score = cand_new_hyp_score.item()\n\n hyp_token = self.input_vocab.tokenizer.ids_to_tokens[int(hyp_word_id)]\n\n new_hyp_sent = hypotheses[prev_hyp_id] + [hyp_token]\n if hyp_token == '[END]':\n completed_hypotheses.append(Hypothesis(value=new_hyp_sent[1:-1], # on jerte le start et le end\n score=cand_new_hyp_score))\n else:\n new_hypotheses.append(new_hyp_sent)\n new_len_hyps.append(len_hyps[prev_hyp_id] + 1)\n new_hyp_scores.append(cand_new_hyp_score)\n\n if len(completed_hypotheses) >= 2 * beam_size:\n break\n\n if len(new_hypotheses) == 0:\n hypotheses = [['[START]']]\n hyp_scores = torch.zeros(len(hypotheses), dtype=torch.float, device=self.device)\n len_hyps = [1]\n else:\n hypotheses = new_hypotheses\n len_hyps = new_len_hyps\n hyp_scores = torch.tensor(new_hyp_scores, dtype=torch.float, device=self.device)\n hypotheses_padded = self.target_vocab.tokens_to_tensor(hypotheses, device=self.device)\n hyp_tokens_mask = BSP.generate_target_mask(hypotheses_padded, pad_idx=0)\n\n completed_hypotheses.sort(key=lambda hyp: hyp.score, reverse=True)\n return [hyp.value for hyp in completed_hypotheses[:beam_size]]\n\n\nif __name__ == '__main__':\n vocab = Vocab('bert-base-uncased')\n tsp = TSP(input_vocab=vocab, target_vocab=vocab)\n src = 'what is the highest point in florida ?'\n print(vocab.tokenizer.tokenize(src))\n" ]
[ [ "torch.no_grad", "torch.cuda.is_available" ], [ "torch.nn.Dropout", "torch.max", "torch.topk", "torch.from_numpy", "torch.tensor", "numpy.ones", "torch.stack", "torch.nn.init.xavier_uniform" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
oscarvik/Language-Modelling-CSE291-AS2
[ "18af16de61cbe8d820b1445207107b4ea4771680" ]
[ "model.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.utils.rnn as rnn_utils\nfrom utils import to_var\n\n\nclass SentenceVAE(nn.Module):\n\n def __init__(self, vocab_size, embedding_size, rnn_type, hidden_size, word_dropout, embedding_dropout, latent_size,\n sos_idx, eos_idx, pad_idx, unk_idx, max_sequence_length, num_layers=1, bidirectional=False):\n\n super().__init__()\n self.tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.Tensor\n\n self.max_sequence_length = max_sequence_length\n self.sos_idx = sos_idx\n self.eos_idx = eos_idx\n self.pad_idx = pad_idx\n self.unk_idx = unk_idx\n\n self.latent_size = latent_size\n\n self.rnn_type = rnn_type\n self.bidirectional = bidirectional\n self.num_layers = num_layers\n self.hidden_size = hidden_size\n\n self.embedding = nn.Embedding(vocab_size, embedding_size)\n self.word_dropout_rate = word_dropout\n self.embedding_dropout = nn.Dropout(p=embedding_dropout)\n\n if rnn_type == 'rnn':\n rnn = nn.RNN\n elif rnn_type == 'gru':\n rnn = nn.GRU\n elif rnn_type == 'lstm':\n rnn = nn.LSTM\n else:\n raise ValueError()\n\n self.encoder_rnn = rnn(embedding_size, hidden_size, num_layers=num_layers, bidirectional=self.bidirectional,\n batch_first=True)\n self.decoder_rnn = rnn(embedding_size, hidden_size, num_layers=num_layers, bidirectional=self.bidirectional,\n batch_first=True)\n\n self.hidden_factor = (2 if bidirectional else 1) * num_layers\n\n self.hidden2mean = nn.Linear(hidden_size * self.hidden_factor, latent_size)\n self.hidden2logv = nn.Linear(hidden_size * self.hidden_factor, latent_size)\n self.latent2hidden = nn.Linear(latent_size, hidden_size * self.hidden_factor)\n self.outputs2vocab = nn.Linear(hidden_size * (2 if bidirectional else 1), vocab_size)\n\n def forward(self, input_sequence, length):\n\n batch_size = input_sequence.size(0)\n sorted_lengths, sorted_idx = torch.sort(length, descending=True)\n input_sequence = input_sequence[sorted_idx]\n\n # ENCODER\n input_embedding = self.embedding(input_sequence)\n\n packed_input = rnn_utils.pack_padded_sequence(input_embedding, sorted_lengths.data.tolist(), batch_first=True)\n\n _, hidden = self.encoder_rnn(packed_input)\n\n if self.bidirectional or self.num_layers > 1:\n # flatten hidden state\n hidden = hidden.view(batch_size, self.hidden_size * self.hidden_factor)\n else:\n hidden = hidden.squeeze()\n\n # REPARAMETERIZATION\n mean = self.hidden2mean(hidden)\n logv = self.hidden2logv(hidden)\n std = torch.exp(0.5 * logv)\n\n z = to_var(torch.randn([batch_size, self.latent_size]))\n z = z * std + mean\n\n # DECODER\n hidden = self.latent2hidden(z)\n\n if self.bidirectional or self.num_layers > 1:\n # unflatten hidden state\n hidden = hidden.view(self.hidden_factor, batch_size, self.hidden_size)\n else:\n hidden = hidden.unsqueeze(0)\n\n # decoder input\n if self.word_dropout_rate > 0:\n # randomly replace decoder input with <unk>\n prob = torch.rand(input_sequence.size())\n if torch.cuda.is_available():\n prob = prob.cuda()\n prob[(input_sequence.data - self.sos_idx) * (input_sequence.data - self.pad_idx) == 0] = 1\n decoder_input_sequence = input_sequence.clone()\n decoder_input_sequence[prob < self.word_dropout_rate] = self.unk_idx\n input_embedding = self.embedding(decoder_input_sequence)\n input_embedding = self.embedding_dropout(input_embedding)\n packed_input = rnn_utils.pack_padded_sequence(input_embedding, sorted_lengths.data.tolist(), batch_first=True)\n\n # decoder forward pass\n outputs, _ = self.decoder_rnn(packed_input, hidden)\n\n # process outputs\n 
padded_outputs = rnn_utils.pad_packed_sequence(outputs, batch_first=True)[0]\n        padded_outputs = padded_outputs.contiguous()\n        _, reversed_idx = torch.sort(sorted_idx)\n        padded_outputs = padded_outputs[reversed_idx]\n        b, s, _ = padded_outputs.size()\n\n        # project outputs to vocab\n        logp = nn.functional.log_softmax(self.outputs2vocab(padded_outputs.view(-1, padded_outputs.size(2))), dim=-1)\n        logp = logp.view(b, s, self.embedding.num_embeddings)\n\n        return logp, mean, logv, z\n\n    def inference(self, n=4, z=None):\n\n        if z is None:\n            batch_size = n\n            z = to_var(torch.randn([batch_size, self.latent_size]))\n        else:\n            batch_size = z.size(0)\n\n        hidden = self.latent2hidden(z)\n\n        if self.bidirectional or self.num_layers > 1:\n            # unflatten hidden state\n            hidden = hidden.view(self.hidden_factor, batch_size, self.hidden_size)\n        else:\n            # mirror forward(): give the decoder the (num_layers, batch, hidden) shape it expects\n            hidden = hidden.unsqueeze(0)\n\n        # required for dynamic stopping of sentence generation\n        sequence_idx = torch.arange(0, batch_size, out=self.tensor()).long()  # all idx of batch\n        sequence_running = torch.arange(0, batch_size,\n                                        out=self.tensor()).long()  # all idx of batch which are still generating\n        sequence_mask = torch.ones(batch_size, out=self.tensor()).byte()\n\n        running_seqs = torch.arange(0, batch_size,\n                                    out=self.tensor()).long()  # idx of still generating sequences with respect to current loop\n\n        generations = self.tensor(batch_size, self.max_sequence_length).fill_(self.pad_idx).long()\n\n        t = 0\n        while (t < self.max_sequence_length and len(running_seqs) > 0):\n\n            if t == 0:\n                input_sequence = to_var(torch.Tensor(batch_size).fill_(self.sos_idx).long())\n\n            input_sequence = input_sequence.unsqueeze(1)\n\n            input_embedding = self.embedding(input_sequence)\n\n            output, hidden = self.decoder_rnn(input_embedding, hidden)\n\n            logits = self.outputs2vocab(output)\n\n            input_sequence = self._sample(logits)\n\n            # save next input\n            generations = self._save_sample(generations, input_sequence, sequence_running, t)\n\n            # update global running sequence\n            sequence_mask[sequence_running] = (input_sequence != self.eos_idx).data\n            sequence_running = sequence_idx.masked_select(sequence_mask)\n\n            # update local running sequences\n            running_mask = (input_sequence != self.eos_idx).data\n            running_seqs = running_seqs.masked_select(running_mask)\n\n            # prune input and hidden state according to local update\n            if batch_size == 1 or len(input_sequence.size()) == 0:\n                input_sequence = input_sequence.unsqueeze(0)\n            if len(running_seqs) > 0:\n                input_sequence = input_sequence[running_seqs]\n                hidden = hidden[:, running_seqs]\n\n                running_seqs = torch.arange(0, len(running_seqs), out=self.tensor()).long()\n\n            t += 1\n\n        return generations, z\n\n    def _sample(self, dist, mode='greedy'):\n\n        if mode == 'greedy':\n            _, sample = torch.topk(dist, 1, dim=-1)\n            sample = sample.squeeze()\n\n        return sample\n\n    def _save_sample(self, save_to, sample, running_seqs, t):\n        # select only still running\n        running_latest = save_to[running_seqs]\n        # update token at position t\n        running_latest[:, t] = sample.data\n        # save back\n        save_to[running_seqs] = running_latest\n\n        return save_to\n" ]
[ [ "torch.nn.Dropout", "torch.Tensor", "torch.randn", "torch.nn.Embedding", "torch.exp", "torch.nn.Linear", "torch.nn.utils.rnn.pad_packed_sequence", "torch.sort", "torch.cuda.is_available", "torch.topk" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jonasrothfuss/DeepEpisodicMemory
[ "1095315a5d75a4840ef4017af70432e2dd535e4c" ]
[ "models/model_zoo/model_conv4.py" ]
[ "import numpy as np\nimport tensorflow as tf\n\nimport tensorflow.contrib.slim as slim\nimport tensorflow.contrib.layers\nfrom tensorflow.contrib.layers.python import layers as tf_layers\nfrom models.conv_lstm import basic_conv_lstm_cell\n\n# Amount to use when lower bounding tensors\nRELU_SHIFT = 1e-12\nFC_LAYER_SIZE = 512\n\n# kernel size for DNA and CDNA.\nDNA_KERN_SIZE = 5\n\n\ndef encoder_model(frames, sequence_length, initializer, scope='encoder', fc_conv_layer=False):\n \"\"\"\n Args:\n frames: 5D array of batch with videos - shape(batch_size, num_frames, frame_width, frame_higth, num_channels)\n sequence_length: number of frames that shall be encoded\n scope: tensorflow variable scope name\n initializer: specifies the initialization type (default: contrib.slim.layers uses Xavier init with uniform data)\n fc_conv_layer: adds an fc layer at the end of the encoder\n Returns:\n hidden4: hidden state of highest ConvLSTM layer\n fc_conv_layer: indicated whether a Fully Convolutional (8x8x16 -> 1x1x1024) shall be added\n \"\"\"\n\n lstm_state1, lstm_state2, lstm_state3, lstm_state4 = None, None, None, None\n\n for i in range(sequence_length):\n\n\n frame = frames[:,i,:,:,:]\n\n reuse = (i > 0)\n\n with tf.variable_scope(scope, reuse=reuse):\n #LAYER 1: conv1\n conv1 = slim.layers.conv2d(frame, 16, [5, 5], stride=2, scope='conv1', normalizer_fn=tf_layers.layer_norm, weights_initializer=initializer,\n normalizer_params={'scope': 'layer_norm1'})\n\n #LAYER 2: convLSTM1\n hidden1, lstm_state1 = basic_conv_lstm_cell(conv1, lstm_state1, 16, initializer, filter_size=5, scope='convlstm1')\n hidden1 = tf_layers.layer_norm(hidden1, scope='layer_norm2')\n\n #LAYER 3: conv2\n conv2 = slim.layers.conv2d(hidden1, hidden1.get_shape()[3], [5, 5], stride=2, scope='conv2', normalizer_fn=tf_layers.layer_norm, weights_initializer=initializer,\n normalizer_params={'scope': 'layer_norm3'})\n\n #LAYER 4: convLSTM2\n hidden2, lstm_state2 = basic_conv_lstm_cell(conv2, lstm_state2, 16, initializer, filter_size=5, scope='convlstm2')\n hidden2 = tf_layers.layer_norm(hidden2, scope='layer_norm4')\n\n #LAYER 5: conv3\n conv3 = slim.layers.conv2d(hidden2, hidden2.get_shape()[3], [3, 3], stride=2, scope='conv3', normalizer_fn=tf_layers.layer_norm, weights_initializer=initializer,\n normalizer_params={'scope': 'layer_norm5'})\n\n #LAYER 6: convLSTM3\n hidden3, lstm_state3 = basic_conv_lstm_cell(conv3, lstm_state3, 16, initializer, filter_size=3, scope='convlstm3')\n hidden3 = tf_layers.layer_norm(hidden3, scope='layer_norm6')\n\n\n #LAYER 7: conv4\n conv4 = slim.layers.conv2d(hidden3, hidden3.get_shape()[3], [3, 3], stride=2, scope='conv4', normalizer_fn=tf_layers.layer_norm, weights_initializer=initializer,\n normalizer_params={'scope': 'layer_norm7'})\n\n #LAYER 8: convLSTM4 (8x8 featuremap size)\n hidden4, lstm_state4 = basic_conv_lstm_cell(conv4, lstm_state4, 16, initializer, filter_size=3, scope='convlstm4')\n hidden4 = tf_layers.layer_norm(hidden4, scope='layer_norm8')\n\n #LAYER 9: Fully Convolutional Layer (8x8x16 --> 1x1xFC_LAYER_SIZE)\n if fc_conv_layer:\n fc_conv = slim.layers.conv2d(hidden4, FC_LAYER_SIZE, [8,8], stride=1, scope='fc_conv', padding='VALID', weights_initializer=initializer)\n hidden_repr = fc_conv\n else:\n hidden_repr = hidden4\n\n return hidden_repr\n\n\ndef decoder_model(hidden_repr, sequence_length, initializer, num_channels=3, scope='decoder', fc_conv_layer=False):\n \"\"\"\n Args:\n hidden_repr: Tensor of latent space representation\n sequence_length: number of frames that shall be 
decoded from the hidden_repr\n num_channels: number of channels for generated frames\n initializer: specifies\n fc_conv_layer: adds an fc layer at the end of the decoder\n Returns:\n frame_gen: array of generated frames (Tensors)\n fc_conv_layer: indicates whether hidden_repr is 1x1xdepth tensor a and fully concolutional layer shall be added\n \"\"\"\n frame_gen = []\n\n lstm_state1, lstm_state2, lstm_state3, lstm_state4 = None, None, None, None\n\n assert (not fc_conv_layer) or (hidden_repr.get_shape()[1] == hidden_repr.get_shape()[2] == 1)\n\n for i in range(sequence_length):\n reuse = (i > 0) #reuse variables (recurrence) after first time step\n\n with tf.variable_scope(scope, reuse=reuse):\n\n #Fully Convolutional Layer (1x1xFC_LAYER_SIZE -> 8x8x16)\n if fc_conv_layer:\n fc_conv = slim.layers.conv2d_transpose(hidden_repr, 16, [8, 8], stride=1, scope='fc_conv', padding='VALID', weights_initializer=initializer)\n hidden1_input = fc_conv\n else:\n hidden1_input = hidden_repr\n\n #LAYER 1: convLSTM1\n hidden1, lstm_state1 = basic_conv_lstm_cell(hidden1_input, lstm_state1, 16, initializer, filter_size=3, scope='convlstm1')\n hidden1 = tf_layers.layer_norm(hidden1, scope='layer_norm1')\n\n #LAYER 2: upconv1 (8x8 -> 16x16)\n upconv1 = slim.layers.conv2d_transpose(hidden1, hidden1.get_shape()[3], 3, stride=2, scope='upconv1', weights_initializer=initializer,\n normalizer_fn=tf_layers.layer_norm,\n normalizer_params={'scope': 'layer_norm2'})\n\n #LAYER 3: convLSTM2\n hidden2, lstm_state2 = basic_conv_lstm_cell(upconv1, lstm_state2, 16, initializer, filter_size=3, scope='convlstm2')\n hidden2 = tf_layers.layer_norm(hidden2, scope='layer_norm3')\n\n #LAYER 4: upconv2 (16x16 -> 32x32)\n upconv2 = slim.layers.conv2d_transpose(hidden2, hidden2.get_shape()[3], 3, stride=2, scope='upconv2', weights_initializer=initializer,\n normalizer_fn=tf_layers.layer_norm,\n normalizer_params={'scope': 'layer_norm4'})\n\n #LAYER 5: convLSTM3\n hidden3, lstm_state3 = basic_conv_lstm_cell(upconv2, lstm_state3, 16, initializer, filter_size=5, scope='convlstm3')\n hidden3 = tf_layers.layer_norm(hidden3, scope='layer_norm5')\n\n # LAYER 6: upconv3 (32x32 -> 64x64)\n upconv3 = slim.layers.conv2d_transpose(hidden3, hidden3.get_shape()[3], 5, stride=2, scope='upconv3', weights_initializer=initializer,\n normalizer_fn=tf_layers.layer_norm,\n normalizer_params={'scope': 'layer_norm6'})\n\n #LAYER 7: convLSTM4\n hidden4, lstm_state4 = basic_conv_lstm_cell(upconv3, lstm_state4, 16, initializer, filter_size=5, scope='convlstm4')\n hidden4 = tf_layers.layer_norm(hidden4, scope='layer_norm7')\n\n #Layer 8: upconv4 (64x64 -> 128x128)\n upconv4 = slim.layers.conv2d_transpose(hidden4, num_channels, 5, stride=2, scope='upconv4', weights_initializer=initializer)\n\n frame_gen.append(upconv4)\n\n assert len(frame_gen)==sequence_length\n return frame_gen\n\n\ndef composite_model(frames, encoder_len=5, decoder_future_len=5, decoder_reconst_len=5, uniform_init=True, num_channels=3, fc_conv_layer=False):\n \"\"\"\n Args:\n frames: 5D array of batch with videos - shape(batch_size, num_frames, frame_width, frame_higth, num_channels)\n encoder_len: number of frames that shall be encoded\n decoder_future_sequence_length: number of frames that shall be decoded from the hidden_repr\n num_channels: number of channels for generated frames\n fc_conv_layer: indicates whether fully connected layer shall be added between encoder and decoder\n uniform_init: specifies if the weight initialization should be drawn from gaussian or uniform distribution 
(default:uniform)\n Returns:\n frame_gen: array of generated frames (Tensors)\n \"\"\"\n assert all([len > 0 for len in [encoder_len, decoder_future_len, decoder_reconst_len]])\n initializer = tf_layers.xavier_initializer(uniform=uniform_init)\n\n hidden_repr = encoder_model(frames, encoder_len, initializer, fc_conv_layer=fc_conv_layer)\n frames_pred = decoder_model(hidden_repr, decoder_future_len, initializer, num_channels=num_channels,\n scope='decoder_pred', fc_conv_layer=fc_conv_layer)\n frames_reconst = decoder_model(hidden_repr, decoder_reconst_len, initializer, num_channels=num_channels,\n scope='decoder_reconst', fc_conv_layer=fc_conv_layer)\n return frames_pred, frames_reconst, hidden_repr" ]
[ [ "tensorflow.contrib.slim.layers.conv2d", "tensorflow.contrib.slim.layers.conv2d_transpose", "tensorflow.contrib.layers.python.layers.layer_norm", "tensorflow.variable_scope", "tensorflow.contrib.layers.python.layers.xavier_initializer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
thomasjpfan/d3m_estimator_to_primitive
[ "f88b5ee9458634ba66456e9febcaf0042846b882" ]
[ "xgboost_wrap/tests/test_D3M_XGBClassifier.py" ]
[ "import unittest\nimport pickle\n\nfrom xgboost_wrap import D3M_XGBClassifier\nfrom pathlib import Path\nfrom d3m.metadata import base as metadata_base\nfrom d3m import container\nfrom d3m.primitive_interfaces.base import PrimitiveBase\nfrom d3m.exceptions import PrimitiveNotFittedError\nfrom pandas.testing import assert_frame_equal\n\nfrom sklearn_wrap.vendor.common_primitives import dataset_to_dataframe, column_parser\n\ndataset_doc_path = (\n Path(__file__).parent.parent\n / \"tests-data\"\n / \"datasets\"\n / \"iris_dataset_1\"\n / \"datasetDoc.json\"\n)\ndataset_doc_path = dataset_doc_path.resolve()\n\ndataset = container.Dataset.load(dataset_uri=\"file://{}\".format(dataset_doc_path))\nhyperparams_class = dataset_to_dataframe.DatasetToDataFramePrimitive.metadata.query()[\n \"primitive_code\"\n][\"class_type_arguments\"][\"Hyperparams\"]\nprimitive = dataset_to_dataframe.DatasetToDataFramePrimitive(\n hyperparams=hyperparams_class.defaults()\n)\ncall_metadata = primitive.produce(inputs=dataset)\n\ndataframe = call_metadata.value\ncolumn_parser_htperparams = column_parser.Hyperparams.defaults()\ncolumn_parser_primitive = column_parser.ColumnParserPrimitive(\n hyperparams=column_parser_htperparams\n)\nparsed_dataframe = column_parser_primitive.produce(inputs=dataframe).value\nparsed_dataframe.metadata = parsed_dataframe.metadata.add_semantic_type(\n (metadata_base.ALL_ELEMENTS, 5),\n \"https://metadata.datadrivendiscovery.org/types/Target\",\n)\nparsed_dataframe.metadata = parsed_dataframe.metadata.add_semantic_type(\n (metadata_base.ALL_ELEMENTS, 5),\n \"https://metadata.datadrivendiscovery.org/types/TrueTarget\",\n)\nparsed_dataframe.metadata = parsed_dataframe.metadata.remove_semantic_type(\n (metadata_base.ALL_ELEMENTS, 5),\n \"https://metadata.datadrivendiscovery.org/types/Attribute\",\n)\nparsed_dataframe.metadata = parsed_dataframe.metadata.add_semantic_type(\n (metadata_base.ALL_ELEMENTS, 5),\n \"https://metadata.datadrivendiscovery.org/types/CategoricalData\",\n)\n\ntrain_set = targets = parsed_dataframe\n\n\nsemantic_types_to_remove = set(\n [\n \"https://metadata.datadrivendiscovery.org/types/TrueTarget\",\n \"https://metadata.datadrivendiscovery.org/types/SuggestedTarget\",\n ]\n)\nsemantic_types_to_add = set(\n [\"https://metadata.datadrivendiscovery.org/types/PredictedTarget\"]\n)\n\n# We want to test the running of the code without errors and not the correctness of it\n# since that is assumed to be tested by sklearn\n\n\nclass TestD3M_XGBClassifier(unittest.TestCase):\n def create_learner(self, hyperparams):\n clf = D3M_XGBClassifier.D3M_XGBClassifier(hyperparams=hyperparams)\n return clf\n\n def set_training_data_on_learner(self, learner, **args):\n learner.set_training_data(**args)\n\n def fit_learner(self, learner: PrimitiveBase):\n learner.fit()\n\n def produce_learner(self, learner, **args):\n return learner.produce(**args)\n\n def basic_fit(self, hyperparams):\n learner = self.create_learner(hyperparams)\n training_data_args = self.set_data(hyperparams)\n self.set_training_data_on_learner(learner, **training_data_args)\n\n self.assertRaises(\n PrimitiveNotFittedError,\n learner.produce,\n inputs=training_data_args.get(\"inputs\"),\n )\n\n self.fit_learner(learner)\n\n assert len(learner._training_indices) > 0\n\n output = self.produce_learner(learner, inputs=training_data_args.get(\"inputs\"))\n return output, learner, training_data_args\n\n def pickle(self, hyperparams):\n output, learner, training_data_args = self.basic_fit(hyperparams)\n\n # Testing 
get_params() and set_params()\n params = learner.get_params()\n learner.set_params(params=params)\n\n model = pickle.dumps(learner)\n new_clf = pickle.loads(model)\n new_output = new_clf.produce(inputs=training_data_args.get(\"inputs\"))\n\n assert_frame_equal(new_output.value, output.value)\n\n def set_data(self, hyperparams):\n hyperparams = hyperparams.get(\"use_semantic_types\")\n if hyperparams:\n return {\"inputs\": train_set, \"outputs\": targets}\n else:\n return {\n \"inputs\": parsed_dataframe.select_columns([1, 2, 3, 4]),\n \"outputs\": parsed_dataframe.select_columns([5]),\n }\n\n def get_transformed_indices(self, learner):\n return learner._target_column_indices\n\n def new_return_checker(self, output, indices):\n input_target = train_set.select_columns(list(indices))\n for i in range(len(output.columns)):\n input_semantic_types = input_target.metadata.query(\n (metadata_base.ALL_ELEMENTS, i)\n ).get(\"semantic_types\")\n output_semantic_type = set(\n output.metadata.query((metadata_base.ALL_ELEMENTS, i)).get(\n \"semantic_types\"\n )\n )\n transformed_input_semantic_types = (\n set(input_semantic_types) - semantic_types_to_remove\n )\n transformed_input_semantic_types = transformed_input_semantic_types.union(\n semantic_types_to_add\n )\n assert output_semantic_type == transformed_input_semantic_types\n\n def append_return_checker(self, output, indices):\n for i in range(len(train_set.columns)):\n input_semantic_types = set(\n train_set.metadata.query((metadata_base.ALL_ELEMENTS, i)).get(\n \"semantic_types\"\n )\n )\n output_semantic_type = set(\n output.value.metadata.query((metadata_base.ALL_ELEMENTS, i)).get(\n \"semantic_types\"\n )\n )\n assert output_semantic_type == input_semantic_types\n\n self.new_return_checker(\n output.value.select_columns(\n list(range(len(train_set.columns), len(output.value.columns)))\n ),\n indices,\n )\n\n def replace_return_checker(self, output, indices):\n for i in range(len(train_set.columns)):\n if i in indices:\n continue\n input_semantic_types = set(\n train_set.metadata.query((metadata_base.ALL_ELEMENTS, i)).get(\n \"semantic_types\"\n )\n )\n output_semantic_type = set(\n output.value.metadata.query((metadata_base.ALL_ELEMENTS, i)).get(\n \"semantic_types\"\n )\n )\n assert output_semantic_type == input_semantic_types\n\n self.new_return_checker(output.value.select_columns(list(indices)), indices)\n\n def test_with_semantic_types(self):\n hyperparams = D3M_XGBClassifier.Hyperparams.defaults().replace(\n {\"use_semantic_types\": True}\n )\n self.pickle(hyperparams)\n\n def test_without_semantic_types(self):\n hyperparams = D3M_XGBClassifier.Hyperparams.defaults()\n self.pickle(hyperparams)\n\n def test_with_new_return_result(self):\n hyperparams = D3M_XGBClassifier.Hyperparams.defaults().replace(\n {\"return_result\": \"new\", \"use_semantic_types\": True}\n )\n output, clf, _ = self.basic_fit(hyperparams)\n indices = self.get_transformed_indices(clf)\n self.new_return_checker(output.value, indices)\n\n def test_with_append_return_result(self):\n hyperparams = D3M_XGBClassifier.Hyperparams.defaults().replace(\n {\"return_result\": \"append\", \"use_semantic_types\": True}\n )\n output, clf, _ = self.basic_fit(hyperparams)\n indices = self.get_transformed_indices(clf)\n self.append_return_checker(output, indices)\n\n def test_with_replace_return_result(self):\n hyperparams = D3M_XGBClassifier.Hyperparams.defaults().replace(\n {\"return_result\": \"replace\", \"use_semantic_types\": True}\n )\n output, clf, _ = 
self.basic_fit(hyperparams)\n indices = self.get_transformed_indices(clf)\n self.replace_return_checker(output, indices)\n\n def test_produce_methods(self):\n hyperparams = D3M_XGBClassifier.Hyperparams.defaults()\n output, clf, _ = self.basic_fit(hyperparams)\n list_of_methods = [\n \"produce_cluster_centers\",\n \"produce_feature_importances\",\n \"produce_support\",\n ]\n for method in list_of_methods:\n produce_method = getattr(clf, method, None)\n if produce_method:\n produce_method()\n\n def test_target_column_name(self):\n hyperparams = D3M_XGBClassifier.Hyperparams.defaults().replace(\n {\"return_result\": \"replace\", \"use_semantic_types\": True}\n )\n output, clf, _ = self.basic_fit(hyperparams)\n\n predicted_target_column_list = (\n output.value.metadata.get_columns_with_semantic_type(\n \"https://metadata.datadrivendiscovery.org/types/PredictedTarget\"\n )\n )\n input_true_target_column_list = (\n parsed_dataframe.metadata.get_columns_with_semantic_type(\n \"https://metadata.datadrivendiscovery.org/types/TrueTarget\"\n )\n )\n # Test if metadata was copied correctly\n predicted_target_column_metadata = output.value.metadata.select_columns(\n predicted_target_column_list\n )\n input_true_target_column_metadata = parsed_dataframe.metadata.select_columns(\n input_true_target_column_list\n )\n\n if len(predicted_target_column_list) == 1:\n # Checking that the predicted target name matches the input target\n predicted_name = predicted_target_column_metadata.query(\n (metadata_base.ALL_ELEMENTS,)\n ).get(\"name\")\n input_true_target_name = input_true_target_column_metadata.query(\n (metadata_base.ALL_ELEMENTS,)\n ).get(\"name\")\n assert predicted_name == input_true_target_name\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "pandas.testing.assert_frame_equal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]