Dataset columns:
repo_name: string (length 6–130)
hexsha: sequence
file_path: sequence
code: sequence
apis: sequence
possible_versions: list
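For orientation, here is a minimal sketch of how a dump with this schema could be loaded and inspected. It assumes the data is stored as a Parquet file; the path "code_apis.parquet" is a placeholder, not a name given anywhere in this document. Within each row, file_path, code, apis and possible_versions are parallel lists with one entry per source file.

    # Minimal sketch, assuming the dump is available as a Parquet file.
    # "code_apis.parquet" is a hypothetical path, not given by this dump.
    import pandas as pd

    df = pd.read_parquet("code_apis.parquet")

    row = df.iloc[0]                      # one repository per row
    print(row["repo_name"], row["hexsha"])
    # file_path, code, apis and possible_versions are aligned per file
    for path, apis, versions in zip(row["file_path"], row["apis"], row["possible_versions"]):
        print(path, len(apis), versions["numpy"])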
cta-sst-1m/ctapipe
[ "10b058f8dcc166177d1eb5b2af638ca37722a021", "10b058f8dcc166177d1eb5b2af638ca37722a021" ]
[ "ctapipe/calib/camera/pedestals.py", "examples/display_event.py" ]
[ "\"\"\"\nFactory for the estimation of the flat field coefficients\n\"\"\"\n\nfrom abc import abstractmethod\nimport numpy as np\nfrom astropy import units as u\nfrom ctapipe.core import Component\n\n\nfrom ctapipe.image.extractor import ImageExtractor\nfrom ctapipe.core.traits import Int, Unicode, List\n\n__all__ = [\n 'calc_pedestals_from_traces',\n 'PedestalCalculator',\n 'PedestalIntegrator'\n]\n\n\ndef calc_pedestals_from_traces(traces, start_sample, end_sample):\n \"\"\"A very simple algorithm to calculates pedestals and pedestal\n variances from camera traces by integrating the samples over a\n fixed window for all pixels. This assumes that the data are\n sample-mode (e.g. cameras that return time traces for each pixel).\n\n Parameters\n ----------\n\n traces: array of shape (n_pixels, n_samples)\n time-sampled camera data in a 2D array pixel x sample\n start_sample: int\n index of starting sample over which to integrate\n end_sample: int\n index of ending sample over which to integrate\n\n Returns\n -------\n\n two arrays of length n_pix (the first dimension of the input trace\n array). The first array contains the pedestal values, and the\n second is the pedestal variances over the sample window.\n\n \"\"\"\n traces = np.asanyarray(traces) # ensure this is an ndarray\n peds = traces[:, start_sample:end_sample].mean(axis=1)\n pedvars = traces[:, start_sample:end_sample].var(axis=1)\n return peds, pedvars\n\n\nclass PedestalCalculator(Component):\n \"\"\"\n Parent class for the pedestal calculators.\n Fills the MonitoringCameraContainer.PedestalContainer on the base of a given pedestal sample.\n The sample is defined by a maximal interval of time (sample_duration) or a\n minimal number of events (sample_duration).\n The calculator is supposed to act in an event loop, extract and collect the\n event charge and fill the PedestalContainer\n\n Parameters\n ----------\n tel_id : int\n id of the telescope (default 0)\n sample_duration : int\n interval of time (s) used to gather the pedestal statistics\n sample_size : int\n number of pedestal events requested for the statistics\n n_channels : int\n number of waveform channel to be considered\n charge_product : str\n Name of the charge extractor to be used\n config : traitlets.loader.Config\n Configuration specified by config file or cmdline arguments.\n Used to set traitlet values.\n Set to None if no configuration to pass.\n\n kwargs\n\n\"\"\"\n\n tel_id = Int(\n 0,\n help='id of the telescope to calculate the pedestal values'\n ).tag(config=True)\n sample_duration = Int(\n 60,\n help='sample duration in seconds'\n ).tag(config=True)\n sample_size = Int(\n 10000,\n help='sample size'\n ).tag(config=True)\n n_channels = Int(\n 2,\n help='number of channels to be treated'\n ).tag(config=True)\n charge_product = Unicode(\n 'FixedWindowSum',\n help='Name of the charge extractor to be used'\n ).tag(config=True)\n\n def __init__(self, subarray, **kwargs):\n \"\"\"\n Parent class for the pedestal calculators.\n Fills the MonitoringCameraContainer.PedestalContainer on the base of a given pedestal sample.\n The sample is defined by a maximal interval of time (sample_duration) or a\n minimal number of events (sample_duration).\n The calculator is supposed to act in an event loop, extract and collect the\n event charge and fill the PedestalContainer\n\n Parameters\n ----------\n subarray: ctapipe.instrument.SubarrayDescription\n Description of the subarray\n tel_id : int\n id of the telescope (default 0)\n sample_duration : int\n interval of time 
(s) used to gather the pedestal statistics\n sample_size : int\n number of pedestal events requested for the statistics\n n_channels : int\n number of waveform channel to be considered\n charge_product : str\n Name of the charge extractor to be used\n config : traitlets.loader.Config\n Configuration specified by config file or cmdline arguments.\n Used to set traitlet values.\n Set to None if no configuration to pass.\n\n kwargs\n\n \"\"\"\n\n super().__init__(**kwargs)\n\n # load the waveform charge extractor\n self.extractor = ImageExtractor.from_name(\n self.charge_product,\n config=self.config,\n subarray=subarray,\n )\n self.log.info(f\"extractor {self.extractor}\")\n\n @abstractmethod\n def calculate_pedestals(self, event):\n \"\"\"\n Calculate the pedestal statistics and fill the\n mon.tel[tel_id].pedestal container\n\n Parameters\n ----------\n event: general DataContainer\n\n Returns: True if the mon.tel[tel_id].pedestal is updated,\n False otherwise\n\n \"\"\"\n\n\nclass PedestalIntegrator(PedestalCalculator):\n \"\"\"Calculates pedestal parameters integrating the charge of pedestal events:\n the pedestal value corresponds to the charge estimated with the selected\n charge extractor\n The pixels are set as outliers on the base of a cut on the pixel charge median\n over the pedestal sample and the pixel charge standard deviation over\n the pedestal sample with respect to the camera median values\n\n\n Parameters:\n ----------\n charge_median_cut_outliers : List[2]\n Interval (number of std) of accepted charge values around camera median value\n charge_std_cut_outliers : List[2]\n Interval (number of std) of accepted charge standard deviation around camera median value\n\n \"\"\"\n charge_median_cut_outliers = List(\n [-3, 3],\n help='Interval (number of std) of accepted charge values around camera median value'\n ).tag(config=True)\n charge_std_cut_outliers = List(\n [-3, 3],\n help='Interval (number of std) of accepted charge standard deviation around camera median value'\n ).tag(config=True)\n\n def __init__(self, **kwargs):\n \"\"\"Calculates pedestal parameters integrating the charge of pedestal events:\n the pedestal value corresponds to the charge estimated with the selected\n charge extractor\n The pixels are set as outliers on the base of a cut on the pixel charge median\n over the pedestal sample and the pixel charge standard deviation over\n the pedestal sample with respect to the camera median values\n\n\n Parameters:\n ----------\n charge_median_cut_outliers : List[2]\n Interval (number of std) of accepted charge values around camera median value\n charge_std_cut_outliers : List[2]\n Interval (number of std) of accepted charge standard deviation around camera median value\n \"\"\"\n\n super().__init__(**kwargs)\n\n self.log.info(\"Used events statistics : %d\", self.sample_size)\n\n # members to keep state in calculate_relative_gain()\n self.num_events_seen = 0\n self.time_start = None # trigger time of first event in sample\n self.charge_medians = None # med. 
charge in camera per event in sample\n self.charges = None # charge per event in sample\n self.sample_masked_pixels = None # pixels tp be masked per event in sample\n\n def _extract_charge(self, event):\n \"\"\"\n Extract the charge and the time from a pedestal event\n\n Parameters\n ----------\n\n event : general event container\n\n \"\"\"\n\n waveforms = event.r1.tel[self.tel_id].waveform\n selected_gain_channel = event.r1.tel[self.tel_id].selected_gain_channel\n\n # Extract charge and time\n charge = 0\n peak_pos = 0\n if self.extractor:\n charge, peak_pos = self.extractor(\n waveforms, self.tel_id, selected_gain_channel\n )\n\n return charge, peak_pos\n\n def calculate_pedestals(self, event):\n \"\"\"\n calculate the pedestal statistical values from\n the charge extracted from pedestal events\n and fill the mon.tel[tel_id].pedestal container\n\n Parameters\n ----------\n event : general event container\n\n \"\"\"\n # initialize the np array at each cycle\n waveform = event.r1.tel[self.tel_id].waveform\n container = event.mon.tel[self.tel_id].pedestal\n\n # re-initialize counter\n if self.num_events_seen == self.sample_size:\n self.num_events_seen = 0\n\n # real data\n if event.meta['origin'] != 'hessio':\n\n trigger_time = event.r1.tel[self.tel_id].trigger_time\n pixel_mask = event.mon.tel[self.tel_id].pixel_status.hardware_failing_pixels\n\n else: # patches for MC data\n\n if event.trig.tels_with_trigger:\n trigger_time = event.trig.gps_time.unix\n else:\n trigger_time = 0\n\n pixel_mask = np.zeros(waveform.shape[1], dtype=bool)\n\n if self.num_events_seen == 0:\n self.time_start = trigger_time\n self.setup_sample_buffers(waveform, self.sample_size)\n\n # extract the charge of the event and\n # the peak position (assumed as time for the moment)\n charge = self._extract_charge(event)[0]\n\n self.collect_sample(charge, pixel_mask)\n\n sample_age = trigger_time - self.time_start\n\n # check if to create a calibration event\n if (\n sample_age > self.sample_duration\n or self.num_events_seen == self.sample_size\n ):\n pedestal_results = calculate_pedestal_results(\n self,\n self.charges,\n self.sample_masked_pixels,\n )\n time_results = calculate_time_results(\n self.time_start,\n trigger_time,\n )\n\n result = {\n 'n_events': self.num_events_seen,\n **pedestal_results,\n **time_results,\n }\n for key, value in result.items():\n setattr(container, key, value)\n\n return True\n\n else:\n\n return False\n\n def setup_sample_buffers(self, waveform, sample_size):\n \"\"\"Initialize sample buffers\"\"\"\n\n n_channels = waveform.shape[0]\n n_pix = waveform.shape[1]\n shape = (sample_size, n_channels, n_pix)\n\n self.charge_medians = np.zeros((sample_size, n_channels))\n self.charges = np.zeros(shape)\n self.sample_masked_pixels = np.zeros(shape)\n\n def collect_sample(self, charge, pixel_mask):\n \"\"\"Collect the sample data\"\"\"\n\n good_charge = np.ma.array(charge, mask=pixel_mask)\n charge_median = np.ma.median(good_charge, axis=1)\n\n self.charges[self.num_events_seen] = charge\n self.sample_masked_pixels[self.num_events_seen] = pixel_mask\n self.charge_medians[self.num_events_seen] = charge_median\n self.num_events_seen += 1\n\n\ndef calculate_time_results(\n time_start,\n trigger_time,\n):\n \"\"\"Calculate and return the sample time\"\"\"\n return {\n # FIXME Why divided by two here?\n 'sample_time': u.Quantity((trigger_time - time_start) / 2, u.s),\n 'sample_time_min': u.Quantity(time_start, u.s),\n 'sample_time_max': u.Quantity(trigger_time, u.s),\n }\n\n\ndef 
calculate_pedestal_results(\n self,\n trace_integral,\n masked_pixels_of_sample,\n):\n \"\"\"Calculate and return the sample statistics\"\"\"\n masked_trace_integral = np.ma.array(\n trace_integral,\n mask=masked_pixels_of_sample\n )\n # median over the sample per pixel\n pixel_median = np.ma.median(masked_trace_integral, axis=0)\n\n # mean over the sample per pixel\n pixel_mean = np.ma.mean(masked_trace_integral, axis=0)\n\n # std over the sample per pixel\n pixel_std = np.ma.std(masked_trace_integral, axis=0)\n\n # median over the camera\n median_of_pixel_median = np.ma.median(pixel_median, axis=1)\n\n # std of median over the camera\n std_of_pixel_median = np.ma.std(pixel_median, axis=1)\n\n # median of the std over the camera\n median_of_pixel_std = np.ma.median(pixel_std, axis=1)\n\n # std of the std over camera\n std_of_pixel_std = np.ma.std(pixel_std, axis=1)\n\n # outliers from standard deviation\n deviation = pixel_std - median_of_pixel_std[:, np.newaxis]\n charge_std_outliers = (\n np.logical_or(deviation < self.charge_std_cut_outliers[0] * std_of_pixel_std[:,np.newaxis],\n deviation > self.charge_std_cut_outliers[1] * std_of_pixel_std[:,np.newaxis]))\n\n # outliers from median\n deviation = pixel_median - median_of_pixel_median[:, np.newaxis]\n charge_median_outliers = (\n np.logical_or(deviation < self.charge_median_cut_outliers[0] * std_of_pixel_median[:,np.newaxis],\n deviation > self.charge_median_cut_outliers[1] * std_of_pixel_median[:,np.newaxis]))\n\n return {\n 'charge_median': np.ma.getdata(pixel_median),\n 'charge_mean': np.ma.getdata(pixel_mean),\n 'charge_std': np.ma.getdata(pixel_std),\n 'charge_std_outliers': np.ma.getdata(charge_std_outliers),\n 'charge_median_outliers': np.ma.getdata(charge_median_outliers)\n }\n\n\n", "#!/usr/bin/env python3\n\n# run this example with:\n#\n# python display_event.py <filename>\n#\n# if no filename is given, a default example file will be used\n# containing ~10 events\n\nimport logging\nimport random\nimport sys\n\nfrom matplotlib import pyplot as plt\nfrom numpy import ceil, sqrt\n\nfrom ctapipe.io import event_source\nfrom ctapipe.utils.datasets import get_dataset_path\nfrom ctapipe.visualization import CameraDisplay\n\nlogging.basicConfig(level=logging.DEBUG)\n\nfig = plt.figure(figsize=(12, 8))\ncmaps = [\n plt.cm.jet, plt.cm.winter, plt.cm.ocean, plt.cm.bone, plt.cm.gist_earth,\n plt.cm.hot, plt.cm.cool, plt.cm.coolwarm\n]\n\n\ndef display_event(event, subarray):\n \"\"\"an extremely inefficient display. It creates new instances of\n CameraDisplay for every event and every camera, and also new axes\n for each event. It's hacked, but it works\n \"\"\"\n print(\"Displaying... 
please wait (this is an inefficient implementation)\")\n global fig\n ntels = len(event.r0.tels_with_data)\n fig.clear()\n\n plt.suptitle(f\"EVENT {event.index.event_id}\")\n\n disps = []\n\n for ii, tel_id in enumerate(event.r0.tels_with_data):\n print(f\"\\t draw cam {tel_id}...\")\n nn = int(ceil(sqrt(ntels)))\n ax = plt.subplot(nn, nn, ii + 1)\n\n geom = subarray.tel[tel_id].camera.geometry\n disp = CameraDisplay(geom, ax=ax, title=f\"CT{tel_id}\")\n disp.pixels.set_antialiaseds(False)\n disp.autoupdate = False\n disp.cmap = random.choice(cmaps)\n chan = 0\n signals = event.r0.tel[tel_id].image[chan].astype(float)\n signals -= signals.mean()\n disp.image = signals\n disp.set_limits_percent(95)\n disp.add_colorbar()\n disps.append(disp)\n\n return disps\n\n\ndef get_input():\n print(\"============================================\")\n print(\"n or [enter] - go to Next event\")\n print(\"d - Display the event\")\n print(\"p - Print all event data\")\n print(\"i - event Info\")\n print(\"s - save event image\")\n print(\"q - Quit\")\n return input(\"Choice: \")\n\n\nif __name__ == '__main__':\n\n if len(sys.argv) > 1:\n filename = sys.argv.pop(1)\n else:\n filename = get_dataset_path(\"gamma_test_large.simtel.gz\")\n\n plt.style.use(\"ggplot\")\n plt.show(block=False)\n\n # loop over events and display menu at each event:\n source = event_source(filename)\n subarray = source.subarray\n\n for event in source:\n\n print(\n \"EVENT_ID: \", event.index.event_id, \"TELS: \", event.r0.tels_with_data,\n \"MC Energy:\", event.mc.energy\n )\n\n while True:\n response = get_input()\n if response.startswith(\"d\"):\n disps = display_event(event, source.subarray)\n plt.pause(0.1)\n elif response.startswith(\"p\"):\n print(\"--event-------------------\")\n print(event)\n print(\"--event.r0---------------\")\n print(event.r0)\n print(\"--event.mc----------------\")\n print(event.mc)\n print(\"--event.r0.tel-----------\")\n for teldata in event.r0.tel.values():\n print(teldata)\n elif response == \"\" or response.startswith(\"n\"):\n break\n elif response.startswith('i'):\n for tel_id in sorted(event.r0.tel):\n for chan in event.r0.tel[tel_id].waveform:\n npix = len(subarray.tel[tel_id].camera.geometry.pix_x)\n nsamp = event.r0.tel[tel_id].num_samples\n print(\n \"CT{:4d} ch{} pixels,samples:{}\"\n .format(tel_id, chan, npix, nsamp)\n )\n elif response.startswith('s'):\n filename = f\"event_{event.index.event_id:010d}.png\"\n print(\"Saving to\", filename)\n plt.savefig(filename)\n\n elif response.startswith('q'):\n break\n\n if response.startswith('q'):\n break\n" ]
[ [ "numpy.ma.getdata", "numpy.ma.std", "numpy.ma.median", "numpy.logical_or", "numpy.asanyarray", "numpy.ma.mean", "numpy.ma.array", "numpy.zeros" ], [ "numpy.sqrt", "matplotlib.pyplot.pause", "matplotlib.pyplot.savefig", "matplotlib.pyplot.subplot", "matplotlib.pyplot.suptitle", "matplotlib.pyplot.style.use", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
1maginasian/PaddleHub
[ "7e680db4b2750a3d0b63d64c10d457586f473aa9" ]
[ "paddlehub/datasets/base_nlp_dataset.py" ]
[ "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Dict, List, Optional, Union, Tuple\nimport csv\nimport io\nimport os\n\nimport numpy as np\nimport paddle\n\nfrom paddlehub.env import DATA_HOME\nfrom paddlehub.text.bert_tokenizer import BertTokenizer\nfrom paddlehub.text.tokenizer import CustomTokenizer\nfrom paddlehub.utils.log import logger\nfrom paddlehub.utils.utils import download\nfrom paddlehub.utils.xarfile import is_xarfile, unarchive\n\n\nclass InputExample(object):\n \"\"\"\n The input data structure of Transformer modules (BERT, ERNIE and so on).\n \"\"\"\n\n def __init__(self, guid: int, text_a: str, text_b: Optional[str] = None, label: Optional[str] = None):\n \"\"\"\n The input data structure.\n Args:\n guid (:obj:`int`):\n Unique id for the input data.\n text_a (:obj:`str`, `optional`, defaults to :obj:`None`):\n The first sequence. For single sequence tasks, only this sequence must be specified.\n text_b (:obj:`str`, `optional`, defaults to :obj:`None`):\n The second sequence if sentence-pair.\n label (:obj:`str`, `optional`, defaults to :obj:`None`):\n The label of the example.\n Examples:\n .. code-block:: python\n from paddlehub.datasets.base_nlp_dataset import InputExample\n example = InputExample(guid=0,\n text_a='15.4寸笔记本的键盘确实爽,基本跟台式机差不多了',\n text_b='蛮喜欢数字小键盘,输数字特方便,样子也很美观,做工也相当不错',\n label='1')\n \"\"\"\n self.guid = guid\n self.text_a = text_a\n self.text_b = text_b\n self.label = label\n\n def __str__(self):\n if self.text_b is None:\n return \"text={}\\tlabel={}\".format(self.text_a, self.label)\n else:\n return \"text_a={}\\ttext_b={},label={}\".format(self.text_a, self.text_b, self.label)\n\n\nclass BaseNLPDataset(object):\n \"\"\"\n The virtual base class for nlp datasets, such TextClassificationDataset, SeqLabelingDataset, and so on.\n The base class must be supered and re-implemented the method _read_file.\n \"\"\"\n\n def __init__(self,\n base_path: str,\n tokenizer: Union[BertTokenizer, CustomTokenizer],\n max_seq_len: Optional[int] = 128,\n mode: Optional[str] = \"train\",\n data_file: Optional[str] = None,\n label_file: Optional[str] = None,\n label_list: Optional[List[str]] = None):\n \"\"\"\n Ags:\n base_path (:obj:`str`): The directory to the whole dataset.\n tokenizer (:obj:`BertTokenizer` or :obj:`CustomTokenizer`):\n It tokenizes the text and encodes the data as model needed.\n max_seq_len (:obj:`int`, `optional`, defaults to :128):\n If set to a number, will limit the total sequence returned so that it has a maximum length.\n mode (:obj:`str`, `optional`, defaults to `train`):\n It identifies the dataset mode (train, test or dev).\n data_file(:obj:`str`, `optional`, defaults to :obj:`None`):\n The data file name, which is relative to the base_path.\n label_file(:obj:`str`, `optional`, defaults to :obj:`None`):\n The label file name, which is relative to the base_path.\n It is all labels of the dataset, one line one label.\n label_list(:obj:`List[str]`, `optional`, 
defaults to :obj:`None`):\n The list of all labels of the dataset\n \"\"\"\n self.data_file = os.path.join(base_path, data_file)\n self.label_list = label_list\n\n self.mode = mode\n self.tokenizer = tokenizer\n self.max_seq_len = max_seq_len\n\n if label_file:\n self.label_file = os.path.join(base_path, label_file)\n if not self.label_list:\n self.label_list = self._load_label_data()\n else:\n logger.warning(\"As label_list has been assigned, label_file is noneffective\")\n if self.label_list:\n self.label_map = {item: index for index, item in enumerate(self.label_list)}\n\n def _load_label_data(self):\n \"\"\"\n Loads labels from label file.\n \"\"\"\n if os.path.exists(self.label_file):\n with open(self.label_file, \"r\", encoding=\"utf8\") as f:\n return f.read().strip().split(\"\\n\")\n else:\n raise RuntimeError(\"The file {} is not found.\".format(self.label_file))\n\n def _download_and_uncompress_dataset(self, destination: str, url: str):\n \"\"\"\n Downloads dataset and uncompresses it.\n Args:\n destination (:obj:`str`): The dataset cached directory.\n url (:obj: str): The link to be downloaded a dataset.\n \"\"\"\n if not os.path.exists(destination):\n dataset_package = download(url=url, path=DATA_HOME)\n if is_xarfile(dataset_package):\n unarchive(dataset_package, DATA_HOME)\n else:\n logger.info(\"Dataset {} already cached.\".format(destination))\n\n def _read_file(self, input_file: str, is_file_with_header: bool = False):\n \"\"\"\n Reads the files.\n Args:\n input_file (:obj:str) : The file to be read.\n is_file_with_header(:obj:bool, `optional`, default to :obj: False) :\n Whether or not the file is with the header introduction.\n \"\"\"\n raise NotImplementedError\n\n def get_labels(self):\n \"\"\"\n Gets all labels.\n \"\"\"\n return self.label_list\n\n\nclass TextClassificationDataset(BaseNLPDataset, paddle.io.Dataset):\n \"\"\"\n The dataset class which is fit for all datatset of text classification.\n \"\"\"\n\n def __init__(self,\n base_path: str,\n tokenizer: Union[BertTokenizer, CustomTokenizer],\n max_seq_len: int = 128,\n mode: str = \"train\",\n data_file: str = None,\n label_file: str = None,\n label_list: list = None,\n is_file_with_header: bool = False):\n \"\"\"\n Ags:\n base_path (:obj:`str`): The directory to the whole dataset.\n tokenizer (:obj:`BertTokenizer` or :obj:`CustomTokenizer`):\n It tokenizes the text and encodes the data as model needed.\n max_seq_len (:obj:`int`, `optional`, defaults to :128):\n If set to a number, will limit the total sequence returned so that it has a maximum length.\n mode (:obj:`str`, `optional`, defaults to `train`):\n It identifies the dataset mode (train, test or dev).\n data_file(:obj:`str`, `optional`, defaults to :obj:`None`):\n The data file name, which is relative to the base_path.\n label_file(:obj:`str`, `optional`, defaults to :obj:`None`):\n The label file name, which is relative to the base_path.\n It is all labels of the dataset, one line one label.\n label_list(:obj:`List[str]`, `optional`, defaults to :obj:`None`):\n The list of all labels of the dataset\n is_file_with_header(:obj:bool, `optional`, default to :obj: False) :\n Whether or not the file is with the header introduction.\n \"\"\"\n super(TextClassificationDataset, self).__init__(\n base_path=base_path,\n tokenizer=tokenizer,\n max_seq_len=max_seq_len,\n mode=mode,\n data_file=data_file,\n label_file=label_file,\n label_list=label_list)\n self.examples = self._read_file(self.data_file, is_file_with_header)\n\n self.records = 
self._convert_examples_to_records(self.examples)\n\n def _read_file(self, input_file, is_file_with_header: bool = False) -> List[InputExample]:\n \"\"\"\n Reads a tab separated value file.\n Args:\n input_file (:obj:str) : The file to be read.\n is_file_with_header(:obj:bool, `optional`, default to :obj: False) :\n Whether or not the file is with the header introduction.\n Returns:\n examples (:obj:`List[InputExample]`): All the input data.\n \"\"\"\n if not os.path.exists(input_file):\n raise RuntimeError(\"The file {} is not found.\".format(input_file))\n else:\n with io.open(input_file, \"r\", encoding=\"UTF-8\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=None)\n examples = []\n seq_id = 0\n header = next(reader) if is_file_with_header else None\n for line in reader:\n example = InputExample(guid=seq_id, label=line[0], text_a=line[1])\n seq_id += 1\n examples.append(example)\n return examples\n\n def _convert_examples_to_records(self, examples: List[InputExample]) -> List[dict]:\n \"\"\"\n Converts all examples to records which the model needs.\n Args:\n examples(obj:`List[InputExample]`): All data examples returned by _read_file.\n Returns:\n records(:obj:`List[dict]`): All records which the model needs.\n \"\"\"\n records = []\n for example in examples:\n record = self.tokenizer.encode(text=example.text_a, text_pair=example.text_b, max_seq_len=self.max_seq_len)\n # CustomTokenizer will tokenize the text firstly and then lookup words in the vocab\n # When all words are not found in the vocab, the text will be dropped.\n if not record:\n logger.info(\n \"The text %s has been dropped as it has no words in the vocab after tokenization.\" % example.text_a)\n continue\n if example.label:\n record['label'] = self.label_map[example.label]\n records.append(record)\n return records\n\n def __getitem__(self, idx):\n record = self.records[idx]\n if 'label' in record.keys():\n return np.array(record['input_ids']), np.array(record['segment_ids']), np.array(record['label'], dtype=np.int64)\n else:\n return np.array(record['input_ids']), np.array(record['segment_ids'])\n\n def __len__(self):\n return len(self.records)\n\n\nclass SeqLabelingDataset(BaseNLPDataset, paddle.io.Dataset):\n def __init__(self,\n base_path: str,\n tokenizer: Union[BertTokenizer, CustomTokenizer],\n max_seq_len: int = 128,\n mode: str = \"train\",\n data_file: str = None,\n label_file: str = None,\n label_list: list = None,\n split_char: str =\"\\002\",\n no_entity_label: str = \"O\",\n ignore_label: int = -100,\n is_file_with_header: bool = False):\n super(SeqLabelingDataset, self).__init__(\n base_path=base_path,\n tokenizer=tokenizer,\n max_seq_len=max_seq_len,\n mode=mode,\n data_file=data_file,\n label_file=label_file,\n label_list=label_list)\n\n self.no_entity_label = no_entity_label\n self.split_char = split_char\n self.ignore_label = ignore_label\n\n self.examples = self._read_file(self.data_file, is_file_with_header)\n self.records = self._convert_examples_to_records(self.examples)\n\n def _read_file(self, input_file, is_file_with_header: bool = False) -> List[InputExample]:\n \"\"\"Reads a tab separated value file.\"\"\"\n if not os.path.exists(input_file):\n raise RuntimeError(\"The file {} is not found.\".format(input_file))\n else:\n with io.open(input_file, \"r\", encoding=\"UTF-8\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=None)\n examples = []\n seq_id = 0\n header = next(reader) if is_file_with_header else None\n for line in reader:\n example = InputExample(guid=seq_id, 
label=line[1], text_a=line[0])\n seq_id += 1\n examples.append(example)\n return examples\n\n def _convert_examples_to_records(self, examples: List[InputExample]) -> List[dict]:\n \"\"\"\n Returns a list[dict] including all the input information what the model need.\n Args:\n examples (list): the data examples, returned by _read_file.\n Returns:\n a list with all the examples record.\n \"\"\"\n records = []\n for example in examples:\n tokens, labels = self._reseg_token_label(\n tokens=example.text_a.split(self.split_char),\n labels=example.label.split(self.split_char))\n record = self.tokenizer.encode(\n text=tokens, max_seq_len=self.max_seq_len)\n # CustomTokenizer will tokenize the text firstly and then lookup words in the vocab\n # When all words are not found in the vocab, the text will be dropped.\n if not record:\n logger.info(\n \"The text %s has been dropped as it has no words in the vocab after tokenization.\"\n % example.text_a)\n continue\n if labels:\n record[\"label\"] = []\n tokens_with_specical_token = self.tokenizer.convert_ids_to_tokens(record['input_ids'])\n tokens_index = 0\n for token in tokens_with_specical_token:\n if tokens_index < len(\n tokens) and token == tokens[tokens_index]:\n record[\"label\"].append(\n self.label_list.index(labels[tokens_index]))\n tokens_index += 1\n elif token in [self.tokenizer.pad_token]:\n record[\"label\"].append(self.ignore_label) # label of special token\n else:\n record[\"label\"].append(\n self.label_list.index(self.no_entity_label))\n records.append(record)\n return records\n\n def _reseg_token_label(\n self, tokens: List[str], labels: List[str] = None) -> Tuple[List[str], List[str]] or List[str]:\n if labels:\n if len(tokens) != len(labels):\n raise ValueError(\n \"The length of tokens must be same with labels\")\n ret_tokens = []\n ret_labels = []\n for token, label in zip(tokens, labels):\n sub_token = self.tokenizer(token)\n if len(sub_token) == 0:\n continue\n ret_tokens.extend(sub_token)\n ret_labels.append(label)\n if len(sub_token) < 2:\n continue\n sub_label = label\n if label.startswith(\"B-\"):\n sub_label = \"I-\" + label[2:]\n ret_labels.extend([sub_label] * (len(sub_token) - 1))\n\n if len(ret_tokens) != len(ret_labels):\n raise ValueError(\n \"The length of ret_tokens can't match with labels\")\n return ret_tokens, ret_labels\n else:\n ret_tokens = []\n for token in tokens:\n sub_token = self.tokenizer(token)\n if len(sub_token) == 0:\n continue\n ret_tokens.extend(sub_token)\n if len(sub_token) < 2:\n continue\n return ret_tokens, None\n\n def __getitem__(self, idx):\n record = self.records[idx]\n if 'label' in record.keys():\n return np.array(record['input_ids']), np.array(record['segment_ids']), np.array(record['seq_len']), np.array(record['label'], dtype=np.int64)\n else:\n return np.array(record['input_ids']), np.array(record['segment_ids']), np.array(record['seq_len'])\n\n def __len__(self):\n return len(self.records)\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Ewen2015/GossipCat
[ "6792c2ddee16515d9724583c9b57f332cff4b206" ]
[ "gossipcat/graph/GraphFE.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nauthor: Ewen Wang\nemail: [email protected]\nlicense: Apache License 2.0\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport networkx as nx\nimport itertools \n\ndef link_pred_generator(function):\n def link_pred(graph, source, target):\n for u, v, p in function(graph, [(source, target)]):\n return p\n return link_pred\n\ndef hitting_time(nodelist, adj, source, target):\n hit_ind = (nodelist.index(source), nodelist.index(target))\n A = adj.copy()\n A[hit_ind[1],:] = 0\n A[hit_ind[1], hit_ind[1]] = 1\n A = (A.T/A.sum(axis=1)).T\n B = A.copy()\n prob = 0\n n = 0\n while prob < 0.99 and n < 100:\n prob = B[hit_ind]\n B = np.dot(B, A)\n n += 1\n return n\n \ndef edge(graph):\n edge_attr = []\n nodelist = list(graph)\n adj = nx.adj_matrix(graph)\n edge_attr = pd.DataFrame(list(itertools.combinations(list(graph.nodes.keys()), r=2)), columns=['source', 'target'])\n edge_attr['hitting_time'] = edge_attr.apply(lambda x: hitting_time(nodelist, adj, x[0], x[1]), axis=1)\n edge_attr['shortest_path_length'] = edge_attr.apply(lambda x: nx.shortest_path_length(graph, x[0], x[1]) if nx.has_path(graph, x[0], x[1]) else 0, axis=1)\n edge_attr['efficiency'] = edge_attr.apply(lambda x: nx.efficiency(graph, x[0], x[1]), axis=1)\n edge_attr['jaccard_coefficient'] = edge_attr.apply(lambda x: link_pred_generator(nx.jaccard_coefficient)(graph, x[0], x[1]), axis=1)\n edge_attr['resource_allocation_index'] = edge_attr.apply(lambda x: link_pred_generator(nx.resource_allocation_index)(graph, x[0], x[1]), axis=1)\n edge_attr['adamic_adar_index'] = edge_attr.apply(lambda x: link_pred_generator(nx.adamic_adar_index)(graph, x[0], x[1]), axis=1)\n edge_attr['preferential_attachment'] = edge_attr.apply(lambda x: link_pred_generator(nx.preferential_attachment)(graph, x[0], x[1]), axis=1)\n return edge_attr\n\n\nclass Attribute(object):\n \"\"\"Generate all node-based, edge-based, and graph-based attributes of all connected components in a whole graph.\n \"\"\"\n def __init__(self, graph):\n \"\"\"Initialize the class and generate graph attributes\"\"\"\n self.graphs = list(graph.subgraph(c) for c in nx.connected_components(graph))\n self.graph = self.graphs[0]\n if len(self.graphs)>1:\n self.all_attr = pd.DataFrame()\n print(\"Note: \"+str(len(self.graphs))+\" connected components are contained.\")\n\n self.graph_attr = pd.DataFrame()\n self.node_attr = pd.DataFrame()\n self.edge_attr = pd.DataFrame()\n self.pair_attr = pd.DataFrame()\n\n def _graph(self):\n \"\"\"Generate graph-based attributes.\"\"\"\n self.graph_attr['number_of_nodes'] = [nx.number_of_nodes(self.graph)]\n self.graph_attr['number_of_edges'] = [nx.number_of_edges(self.graph)]\n self.graph_attr['number_of_selfloops'] = [nx.number_of_selfloops(self.graph)]\n self.graph_attr['graph_number_of_cliques'] = [nx.graph_number_of_cliques(self.graph)]\n self.graph_attr['graph_clique_number'] = [nx.graph_clique_number(self.graph)]\n self.graph_attr['density'] = [nx.density(self.graph)]\n self.graph_attr['transitivity'] = [nx.transitivity(self.graph)]\n self.graph_attr['average_clustering'] = [nx.average_clustering(self.graph)]\n self.graph_attr['radius'] = [nx.radius(self.graph)]\n self.graph_attr['is_tree'] = [1 if nx.is_tree(self.graph) else 0]\n self.graph_attr['wiener_index'] = [nx.wiener_index(self.graph)]\n return self.graph_attr\n\n def _node(self):\n \"\"\"Generate node-based attributes.\"\"\"\n degree_cent = pd.DataFrame(list(nx.degree_centrality(self.graph).items()), columns=['node', 
'degree_centrality'])\n closenessCent = pd.DataFrame(list(nx.closeness_centrality(self.graph).items()), columns=['node', 'closeness_centrality'])\n betweennessCent = pd.DataFrame(list(nx.betweenness_centrality(self.graph).items()), columns=['node', 'betweenness_centrality'])\n pagerank = pd.DataFrame(list(nx.pagerank(self.graph).items()), columns=['node', 'pagerank'])\n\n self.node_attr = degree_cent\n self.node_attr['closeness_centrality'] = closenessCent['closeness_centrality']\n self.node_attr['betweenness_centrality'] = betweennessCent['betweenness_centrality']\n self.node_attr['pagerank'] = pagerank['pagerank']\n return self.node_attr\n\n def _edge(self):\n \"\"\"Generate edge-based attributes.\"\"\"\n self.edge_attr = edge(self.graph)\n return self.edge_attr\n\n def sigTabular(self):\n \"\"\"Combine all node-based, edge-based, and graph-based attributes of a single connected component.\"\"\"\n self.node_attr = self._node(self.graph)\n self.edge_attr = self._edge(self.graph)\n self.graph_attr = self._graph(self.graph)\n self.pair_attr = self.edge_attr.merge(self.node_attr, how='left', left_on='source', right_on='node').merge(self.node_attr, how='left', left_on='target', right_on='node') \n self.pair_attr = self.pair_attr.drop(['node_x', 'node_y'], axis=1)\n\n graph_attr_l = ['number_of_nodes', 'number_of_edges', 'number_of_selfloops', 'graph_number_of_cliques', \n 'graph_clique_number', 'density', 'transitivity', 'average_clustering', 'radius', 'is_tree', 'wiener_index']\n for i in graph_attr_l:\n self.pair_attr[i] = self.graph_attr[i][0]\n return self.pair_attr\n\n def mulTabular(self):\n \"\"\"Combine all node-based, edge-based, and graph-based attributes of all connected components in the whole graph.\"\"\"\n for ind, graph in enumerate(self.graphs):\n self.graph = graph\n self.pair_attr = self.sigTabular()\n if ind==0:\n self.all_attr = self.pair_attr\n else:\n self.all_attr = pd.concat([self.all_attr, self.pair_attr])\n return self.all_attr\n\n\nclass GFeature(object):\n \"\"\"Feature engineering to add all node-based and graph-based attributes of all connected components in a whole graph.\n \"\"\"\n def __init__(self, df, source, target):\n \"\"\"\n Args:\n df: dataframe with source and target nodes.\n source: source node name.\n target: target node name.\n\n Returns:\n A DataFrame with graph features.\n \"\"\"\n self.df = df \n self.source = source\n self.target = target\n\n self.g = nx.from_pandas_edgelist(df=self.df, source=self.source, target=self.target) \n self.graphs = list(self.g.subgraph(c) for c in nx.connected_components(self.g))\n print(\"Note: \"+str(len(self.graphs))+\" connected components are contained.\") \n \n self.graph_attr = pd.DataFrame()\n self.node_attr = pd.DataFrame()\n self.df_r = pd.DataFrame()\n\n def _graph(self, graph):\n \"\"\"Generate graph-based attributes.\"\"\"\n graph_attr = pd.DataFrame()\n graph_attr['number_of_nodes'] = [nx.number_of_nodes(graph)]\n graph_attr['number_of_edges'] = [nx.number_of_edges(graph)]\n graph_attr['number_of_selfloops'] = [nx.number_of_selfloops(graph)]\n graph_attr['graph_number_of_cliques'] = [nx.graph_number_of_cliques(graph)]\n graph_attr['graph_clique_number'] = [nx.graph_clique_number(graph)]\n graph_attr['density'] = [nx.density(graph)]\n graph_attr['transitivity'] = [nx.transitivity(graph)]\n graph_attr['average_clustering'] = [nx.average_clustering(graph)]\n graph_attr['radius'] = [nx.radius(graph)]\n graph_attr['is_tree'] = [1 if nx.is_tree(graph) else 0]\n graph_attr['wiener_index'] = 
[nx.wiener_index(graph)]\n return graph_attr\n\n def _node(self, graph):\n \"\"\"Generate node-based attributes.\"\"\"\n node_attr = pd.DataFrame()\n degree_cent = pd.DataFrame(list(nx.degree_centrality(graph).items()), columns=['node', 'degree_centrality'])\n closenessCent = pd.DataFrame(list(nx.closeness_centrality(graph).items()), columns=['node', 'closeness_centrality'])\n betweennessCent = pd.DataFrame(list(nx.betweenness_centrality(graph).items()), columns=['node', 'betweenness_centrality'])\n pagerank = pd.DataFrame(list(nx.pagerank(graph).items()), columns=['node', 'pagerank'])\n\n node_attr = degree_cent\n node_attr['closeness_centrality'] = closenessCent['closeness_centrality']\n node_attr['betweenness_centrality'] = betweennessCent['betweenness_centrality']\n node_attr['pagerank'] = pagerank['pagerank']\n return node_attr \n \n def signleGraphFeatures(self, graph, df):\n \"\"\"Combine all node-based, edge-based, and graph-based attributes of a single connected component.\"\"\"\n node_attr = self._node(graph)\n graph_attr = self._graph(graph)\n df = df.merge(node_attr, how='left', left_on='srcIp', right_on='node')\n df = df.drop(['node'], axis=1)\n graph_attr_l = ['number_of_nodes', 'number_of_edges', 'number_of_selfloops', 'graph_number_of_cliques', \n 'graph_clique_number', 'density', 'transitivity', 'average_clustering', 'radius', 'is_tree', 'wiener_index']\n for i in graph_attr_l:\n node_attr[i] = graph_attr[i][0]\n df = df.merge(node_attr, how='left', left_on='destIp', right_on='node') \n df = df.drop(['node'], axis=1)\n return df\n\n def graphFeaturesUpdate(self, graph, df, d_r):\n \"\"\"Combine all node-based, edge-based, and graph-based attributes of a single connected component.\"\"\"\n t = self.signleGraphFeatures(graph, df)\n d_r.update(t, overwrite=False)\n return d_r\n\n def generate(self):\n for ind, graph in enumerate(self.graphs):\n if ind == 0:\n self.d_r = self.signleGraphFeatures(graph, self.df)\n else:\n self.d_r = self.graphFeaturesUpdate(graph, self.df, self.d_r)\n return self.d_r\n\n# def _graph(graph):\n# \"\"\"Generate graph-based attributes.\"\"\"\n# graph_attr = pd.DataFrame()\n# graph_attr['number_of_nodes'] = [nx.number_of_nodes(graph)]\n# graph_attr['number_of_edges'] = [nx.number_of_edges(graph)]\n# graph_attr['number_of_selfloops'] = [nx.number_of_selfloops(graph)]\n# graph_attr['graph_number_of_cliques'] = [nx.graph_number_of_cliques(graph)]\n# graph_attr['graph_clique_number'] = [nx.graph_clique_number(graph)]\n# graph_attr['density'] = [nx.density(graph)]\n# graph_attr['transitivity'] = [nx.transitivity(graph)]\n# graph_attr['average_clustering'] = [nx.average_clustering(graph)]\n# graph_attr['radius'] = [nx.radius(graph)]\n# graph_attr['is_tree'] = [1 if nx.is_tree(graph) else 0]\n# graph_attr['wiener_index'] = [nx.wiener_index(graph)]\n# return graph_attr\n\n# def _node(graph):\n# \"\"\"Generate node-based attributes.\"\"\"\n# node_attr = pd.DataFrame()\n# degree_cent = pd.DataFrame(list(nx.degree_centrality(graph).items()), columns=['node', 'degree_centrality'])\n# closenessCent = pd.DataFrame(list(nx.closeness_centrality(graph).items()), columns=['node', 'closeness_centrality'])\n# betweennessCent = pd.DataFrame(list(nx.betweenness_centrality(graph).items()), columns=['node', 'betweenness_centrality'])\n# pagerank = pd.DataFrame(list(nx.pagerank(graph).items()), columns=['node', 'pagerank'])\n\n# node_attr = degree_cent\n# node_attr['closeness_centrality'] = closenessCent['closeness_centrality']\n# node_attr['betweenness_centrality'] = 
betweennessCent['betweenness_centrality']\n# node_attr['pagerank'] = pagerank['pagerank']\n# return node_attr\n\n# def signleGraphFeatures(graph, df):\n# \"\"\"Combine all node-based, edge-based, and graph-based attributes of a single connected component.\"\"\"\n# node_attr = _node(graph)\n# graph_attr = _graph(graph)\n# df = df.merge(node_attr, how='left', left_on='srcIp', right_on='node')\n# df = df.drop(['node'], axis=1)\n# graph_attr_l = ['number_of_nodes', 'number_of_edges', 'number_of_selfloops', 'graph_number_of_cliques', \n# 'graph_clique_number', 'density', 'transitivity', 'average_clustering', 'radius', 'is_tree', 'wiener_index']\n# for i in graph_attr_l:\n# node_attr[i] = graph_attr[i][0]\n# df = df.merge(node_attr, how='left', left_on='destIp', right_on='node') \n# df = df.drop(['node'], axis=1)\n# return df\n\n# def graphFeaturesUpdate(graph, df, d_r):\n# \"\"\"Combine all node-based, edge-based, and graph-based attributes of a single connected component.\"\"\"\n# t = signleGraphFeatures(graph, df)\n# d_r.update(t, overwrite=False)\n# return d_r\n\n# def generate(df, source, target):\n# \"\"\"Feature engineering to add all node-based and graph-based attributes of all connected components in a whole graph.\n \n# Args:\n# df: dataframe with source and target nodes.\n# source: source node name.\n# target: target node name.\n\n# Returns:\n# A DataFrame with graph features.\n# \"\"\"\n# g = nx.from_pandas_edgelist(df=df, source=source, target=target) \n# graphs = list(g.subgraph(c) for c in nx.connected_components(g))\n# print(\"Note: \"+str(len(graphs))+\" connected components are contained.\")\n\n# for ind, graph in enumerate(graphs):\n# if ind == 0:\n# d_r = signleGraphFeatures(graph, df)\n# else:\n# d_r = graphFeaturesUpdate(graph, df, d_r)\n# return d_r" ]
[ [ "numpy.dot", "pandas.concat", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
bcjonescbt/pyro
[ "1bc46ddd4485a18434bf1c5c128a8b020b46c559" ]
[ "pyro/infer/mcmc/util.py" ]
[ "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport functools\nimport warnings\nfrom collections import OrderedDict, defaultdict\nfrom functools import partial, reduce\nfrom itertools import product\nimport traceback as tb\n\nimport torch\nfrom torch.distributions import biject_to\nfrom opt_einsum import shared_intermediates\n\nimport pyro\nimport pyro.poutine as poutine\nimport pyro.distributions as dist\nfrom pyro.distributions.util import broadcast_shape, logsumexp\nfrom pyro.infer import config_enumerate\nfrom pyro.infer.util import is_validation_enabled\nfrom pyro.ops import stats\nfrom pyro.ops.contract import contract_to_tensor\nfrom pyro.ops.integrator import potential_grad\nfrom pyro.poutine.subsample_messenger import _Subsample\nfrom pyro.poutine.util import prune_subsample_sites\nfrom pyro.util import check_site_shape, ignore_jit_warnings\n\n\nclass TraceTreeEvaluator:\n \"\"\"\n Computes the log probability density of a trace (of a model with\n tree structure) that possibly contains discrete sample sites\n enumerated in parallel. This will be deprecated in favor of\n :class:`~pyro.infer.mcmc.util.EinsumTraceProbEvaluator`.\n\n :param model_trace: execution trace from a static model.\n :param bool has_enumerable_sites: whether the trace contains any\n discrete enumerable sites.\n :param int max_plate_nesting: Optional bound on max number of nested\n :func:`pyro.plate` contexts.\n \"\"\"\n def __init__(self,\n model_trace,\n has_enumerable_sites=False,\n max_plate_nesting=None):\n self.has_enumerable_sites = has_enumerable_sites\n self.max_plate_nesting = max_plate_nesting\n # To be populated using the model trace once.\n self._log_probs = defaultdict(list)\n self._log_prob_shapes = defaultdict(tuple)\n self._children = defaultdict(list)\n self._enum_dims = {}\n self._plate_dims = {}\n self._parse_model_structure(model_trace)\n\n def _parse_model_structure(self, model_trace):\n if not self.has_enumerable_sites:\n return\n if self.max_plate_nesting is None:\n raise ValueError(\"Finite value required for `max_plate_nesting` when model \"\n \"has discrete (enumerable) sites.\")\n self._compute_log_prob_terms(model_trace)\n # 1. Infer model structure - compute parent-child relationship.\n sorted_ordinals = sorted(self._log_probs.keys())\n for i, child_node in enumerate(sorted_ordinals):\n for j in range(i-1, -1, -1):\n cur_node = sorted_ordinals[j]\n if cur_node < child_node:\n self._children[cur_node].append(child_node)\n break # at most 1 parent.\n # 2. 
Populate `plate_dims` and `enum_dims` to be evaluated/\n # enumerated out at each ordinal.\n self._populate_cache(frozenset(), frozenset(), set())\n\n def _populate_cache(self, ordinal, parent_ordinal, parent_enum_dims):\n \"\"\"\n For each ordinal, populate the `plate` and `enum` dims to be\n evaluated or enumerated out.\n \"\"\"\n log_prob_shape = self._log_prob_shapes[ordinal]\n plate_dims = sorted([frame.dim for frame in ordinal - parent_ordinal])\n enum_dims = set((i for i in range(-len(log_prob_shape), -self.max_plate_nesting)\n if log_prob_shape[i] > 1))\n self._plate_dims[ordinal] = plate_dims\n self._enum_dims[ordinal] = set(enum_dims - parent_enum_dims)\n for c in self._children[ordinal]:\n self._populate_cache(c, ordinal, enum_dims)\n\n def _compute_log_prob_terms(self, model_trace):\n \"\"\"\n Computes the conditional probabilities for each of the sites\n in the model trace, and stores the result in `self._log_probs`.\n \"\"\"\n model_trace.compute_log_prob()\n self._log_probs = defaultdict(list)\n ordering = {name: frozenset(site[\"cond_indep_stack\"])\n for name, site in model_trace.nodes.items()\n if site[\"type\"] == \"sample\"}\n # Collect log prob terms per independence context.\n for name, site in model_trace.nodes.items():\n if site[\"type\"] == \"sample\":\n if is_validation_enabled():\n check_site_shape(site, self.max_plate_nesting)\n self._log_probs[ordering[name]].append(site[\"log_prob\"])\n if not self._log_prob_shapes:\n for ordinal, log_prob in self._log_probs.items():\n self._log_prob_shapes[ordinal] = broadcast_shape(*(t.shape for t in self._log_probs[ordinal]))\n\n def _reduce(self, ordinal, agg_log_prob=torch.tensor(0.)):\n \"\"\"\n Reduce the log prob terms for the given ordinal:\n - taking log_sum_exp of factors in enum dims (i.e.\n adding up the probability terms).\n - summing up the dims within `max_plate_nesting`.\n (i.e. multiplying probs within independent batches).\n\n :param ordinal: node (ordinal)\n :param torch.Tensor agg_log_prob: aggregated `log_prob`\n terms from the downstream nodes.\n :return: `log_prob` with marginalized `plate` and `enum`\n dims.\n \"\"\"\n log_prob = sum(self._log_probs[ordinal]) + agg_log_prob\n for enum_dim in self._enum_dims[ordinal]:\n log_prob = logsumexp(log_prob, dim=enum_dim, keepdim=True)\n for marginal_dim in self._plate_dims[ordinal]:\n log_prob = log_prob.sum(dim=marginal_dim, keepdim=True)\n return log_prob\n\n def _aggregate_log_probs(self, ordinal):\n \"\"\"\n Aggregate the `log_prob` terms using depth first search.\n \"\"\"\n if not self._children[ordinal]:\n return self._reduce(ordinal)\n agg_log_prob = sum(map(self._aggregate_log_probs, self._children[ordinal]))\n return self._reduce(ordinal, agg_log_prob)\n\n def log_prob(self, model_trace):\n \"\"\"\n Returns the log pdf of `model_trace` by appropriately handling\n enumerated log prob factors.\n\n :return: log pdf of the trace.\n \"\"\"\n with shared_intermediates():\n if not self.has_enumerable_sites:\n return model_trace.log_prob_sum()\n self._compute_log_prob_terms(model_trace)\n return self._aggregate_log_probs(ordinal=frozenset()).sum()\n\n\nclass TraceEinsumEvaluator:\n \"\"\"\n Computes the log probability density of a trace (of a model with\n tree structure) that possibly contains discrete sample sites\n enumerated in parallel. 
This uses optimized `einsum` operations\n to marginalize out the the enumerated dimensions in the trace\n via :class:`~pyro.ops.contract.contract_to_tensor`.\n\n :param model_trace: execution trace from a static model.\n :param bool has_enumerable_sites: whether the trace contains any\n discrete enumerable sites.\n :param int max_plate_nesting: Optional bound on max number of nested\n :func:`pyro.plate` contexts.\n \"\"\"\n def __init__(self,\n model_trace,\n has_enumerable_sites=False,\n max_plate_nesting=None):\n self.has_enumerable_sites = has_enumerable_sites\n self.max_plate_nesting = max_plate_nesting\n # To be populated using the model trace once.\n self._enum_dims = set()\n self.ordering = {}\n self._populate_cache(model_trace)\n\n def _populate_cache(self, model_trace):\n \"\"\"\n Populate the ordinals (set of ``CondIndepStack`` frames)\n and enum_dims for each sample site.\n \"\"\"\n if not self.has_enumerable_sites:\n return\n if self.max_plate_nesting is None:\n raise ValueError(\"Finite value required for `max_plate_nesting` when model \"\n \"has discrete (enumerable) sites.\")\n model_trace.compute_log_prob()\n model_trace.pack_tensors()\n for name, site in model_trace.nodes.items():\n if site[\"type\"] == \"sample\" and not isinstance(site[\"fn\"], _Subsample):\n if is_validation_enabled():\n check_site_shape(site, self.max_plate_nesting)\n self.ordering[name] = frozenset(model_trace.plate_to_symbol[f.name]\n for f in site[\"cond_indep_stack\"]\n if f.vectorized)\n self._enum_dims = set(model_trace.symbol_to_dim) - set(model_trace.plate_to_symbol.values())\n\n def _get_log_factors(self, model_trace):\n \"\"\"\n Aggregates the `log_prob` terms into a list for each\n ordinal.\n \"\"\"\n model_trace.compute_log_prob()\n model_trace.pack_tensors()\n log_probs = OrderedDict()\n # Collect log prob terms per independence context.\n for name, site in model_trace.nodes.items():\n if site[\"type\"] == \"sample\" and not isinstance(site[\"fn\"], _Subsample):\n if is_validation_enabled():\n check_site_shape(site, self.max_plate_nesting)\n log_probs.setdefault(self.ordering[name], []).append(site[\"packed\"][\"log_prob\"])\n return log_probs\n\n def log_prob(self, model_trace):\n \"\"\"\n Returns the log pdf of `model_trace` by appropriately handling\n enumerated log prob factors.\n\n :return: log pdf of the trace.\n \"\"\"\n if not self.has_enumerable_sites:\n return model_trace.log_prob_sum()\n log_probs = self._get_log_factors(model_trace)\n with shared_intermediates() as cache:\n return contract_to_tensor(log_probs, self._enum_dims, cache=cache)\n\n\ndef _guess_max_plate_nesting(model, args, kwargs):\n \"\"\"\n Guesses max_plate_nesting by running the model once\n without enumeration. 
This optimistically assumes static model\n structure.\n \"\"\"\n with poutine.block():\n model_trace = poutine.trace(model).get_trace(*args, **kwargs)\n sites = [site for site in model_trace.nodes.values()\n if site[\"type\"] == \"sample\"]\n\n dims = [frame.dim\n for site in sites\n for frame in site[\"cond_indep_stack\"]\n if frame.vectorized]\n max_plate_nesting = -min(dims) if dims else 0\n return max_plate_nesting\n\n\nclass _PEMaker:\n def __init__(self, model, model_args, model_kwargs, trace_prob_evaluator, transforms):\n self.model = model\n self.model_args = model_args\n self.model_kwargs = model_kwargs\n self.trace_prob_evaluator = trace_prob_evaluator\n self.transforms = transforms\n self._compiled_fn = None\n\n def _potential_fn(self, params):\n params_constrained = {k: self.transforms[k].inv(v) for k, v in params.items()}\n cond_model = poutine.condition(self.model, params_constrained)\n model_trace = poutine.trace(cond_model).get_trace(*self.model_args,\n **self.model_kwargs)\n log_joint = self.trace_prob_evaluator.log_prob(model_trace)\n for name, t in self.transforms.items():\n log_joint = log_joint - torch.sum(\n t.log_abs_det_jacobian(params_constrained[name], params[name]))\n return -log_joint\n\n def _potential_fn_jit(self, skip_jit_warnings, jit_options, params):\n if not params:\n return self._potential_fn(params)\n names, vals = zip(*sorted(params.items()))\n\n if self._compiled_fn:\n return self._compiled_fn(*vals)\n\n with pyro.validation_enabled(False):\n tmp = []\n for _, v in pyro.get_param_store().named_parameters():\n if v.requires_grad:\n v.requires_grad_(False)\n tmp.append(v)\n\n def _pe_jit(*zi):\n params = dict(zip(names, zi))\n return self._potential_fn(params)\n\n if skip_jit_warnings:\n _pe_jit = ignore_jit_warnings()(_pe_jit)\n self._compiled_fn = torch.jit.trace(_pe_jit, vals, **jit_options)\n\n for v in tmp:\n v.requires_grad_(True)\n return self._compiled_fn(*vals)\n\n def get_potential_fn(self, jit_compile=False, skip_jit_warnings=True, jit_options=None):\n if jit_compile:\n jit_options = {\"check_trace\": False} if jit_options is None else jit_options\n return partial(self._potential_fn_jit, skip_jit_warnings, jit_options)\n return self._potential_fn\n\n\n# TODO: expose init_strategy using separate functions.\ndef _get_init_params(model, model_args, model_kwargs, transforms, potential_fn, prototype_params,\n max_tries_initial_params=100, num_chains=1, strategy=\"uniform\"):\n params = prototype_params\n params_per_chain = defaultdict(list)\n n = 0\n\n # For empty models, exit early\n if not params:\n return params\n\n for i in range(max_tries_initial_params):\n while n < num_chains:\n if strategy == \"uniform\":\n params = {k: dist.Uniform(v.new_full(v.shape, -2), v.new_full(v.shape, 2)).sample()\n for k, v in params.items()}\n elif strategy == \"prior\":\n trace = poutine.trace(model).get_trace(*model_args, **model_kwargs)\n samples = {name: trace.nodes[name][\"value\"].detach() for name in params}\n params = {k: transforms[k](v) for k, v in samples.items()}\n pe_grad, pe = potential_grad(potential_fn, params)\n\n if torch.isfinite(pe) and all(map(torch.all, map(torch.isfinite, pe_grad.values()))):\n for k, v in params.items():\n params_per_chain[k].append(v)\n n += 1\n if num_chains == 1:\n return {k: v[0] for k, v in params_per_chain.items()}\n else:\n return {k: torch.stack(v) for k, v in params_per_chain.items()}\n raise ValueError(\"Model specification seems incorrect - cannot find valid initial params.\")\n\n\ndef initialize_model(model, 
model_args=(), model_kwargs={}, transforms=None, max_plate_nesting=None,\n jit_compile=False, jit_options=None, skip_jit_warnings=False, num_chains=1):\n \"\"\"\n Given a Python callable with Pyro primitives, generates the following model-specific\n properties needed for inference using HMC/NUTS kernels:\n\n - initial parameters to be sampled using a HMC kernel,\n - a potential function whose input is a dict of parameters in unconstrained space,\n - transforms to transform latent sites of `model` to unconstrained space,\n - a prototype trace to be used in MCMC to consume traces from sampled parameters.\n\n :param model: a Pyro model which contains Pyro primitives.\n :param tuple model_args: optional args taken by `model`.\n :param dict model_kwargs: optional kwargs taken by `model`.\n :param dict transforms: Optional dictionary that specifies a transform\n for a sample site with constrained support to unconstrained space. The\n transform should be invertible, and implement `log_abs_det_jacobian`.\n If not specified and the model has sites with constrained support,\n automatic transformations will be applied, as specified in\n :mod:`torch.distributions.constraint_registry`.\n :param int max_plate_nesting: Optional bound on max number of nested\n :func:`pyro.plate` contexts. This is required if model contains\n discrete sample sites that can be enumerated over in parallel.\n :param bool jit_compile: Optional parameter denoting whether to use\n the PyTorch JIT to trace the log density computation, and use this\n optimized executable trace in the integrator.\n :param dict jit_options: A dictionary contains optional arguments for\n :func:`torch.jit.trace` function.\n :param bool ignore_jit_warnings: Flag to ignore warnings from the JIT\n tracer when ``jit_compile=True``. Default is False.\n :param int num_chains: Number of parallel chains. 
If `num_chains > 1`,\n the returned `initial_params` will be a list with `num_chains` elements.\n :returns: a tuple of (`initial_params`, `potential_fn`, `transforms`, `prototype_trace`)\n \"\"\"\n # XXX `transforms` domains are sites' supports\n # FIXME: find a good pattern to deal with `transforms` arg\n if transforms is None:\n automatic_transform_enabled = True\n transforms = {}\n else:\n automatic_transform_enabled = False\n if max_plate_nesting is None:\n max_plate_nesting = _guess_max_plate_nesting(model, model_args, model_kwargs)\n # Wrap model in `poutine.enum` to enumerate over discrete latent sites.\n # No-op if model does not have any discrete latents.\n model = poutine.enum(config_enumerate(model),\n first_available_dim=-1 - max_plate_nesting)\n model_trace = poutine.trace(model).get_trace(*model_args, **model_kwargs)\n has_enumerable_sites = False\n prototype_samples = {}\n for name, node in model_trace.iter_stochastic_nodes():\n if isinstance(node[\"fn\"], _Subsample):\n continue\n if node[\"fn\"].has_enumerate_support:\n has_enumerable_sites = True\n continue\n # we need to detach here because this sample can be a leaf variable,\n # so we can't change its requires_grad flag to calculate its grad in\n # velocity_verlet\n prototype_samples[name] = node[\"value\"].detach()\n if automatic_transform_enabled:\n transforms[name] = biject_to(node[\"fn\"].support).inv\n\n trace_prob_evaluator = TraceEinsumEvaluator(model_trace,\n has_enumerable_sites,\n max_plate_nesting)\n prototype_params = {k: transforms[k](v) for k, v in prototype_samples.items()}\n\n pe_maker = _PEMaker(model, model_args, model_kwargs, trace_prob_evaluator, transforms)\n\n # Note that we deliberately do not exercise jit compilation here so as to\n # enable potential_fn to be picklable (a torch._C.Function cannot be pickled).\n init_params = _get_init_params(model, model_args, model_kwargs, transforms,\n pe_maker.get_potential_fn(), prototype_params, num_chains=num_chains)\n potential_fn = pe_maker.get_potential_fn(jit_compile, skip_jit_warnings, jit_options)\n return init_params, potential_fn, transforms, model_trace\n\n\ndef _safe(fn):\n \"\"\"\n Safe version of utilities in the :mod:`pyro.ops.stats` module. Wrapped\n functions return `NaN` tensors instead of throwing exceptions.\n\n :param fn: stats function from :mod:`pyro.ops.stats` module.\n \"\"\"\n @functools.wraps(fn)\n def wrapped(sample, *args, **kwargs):\n try:\n val = fn(sample, *args, **kwargs)\n except Exception:\n warnings.warn(tb.format_exc())\n val = torch.full(sample.shape[2:], float(\"nan\"),\n dtype=sample.dtype, device=sample.device)\n return val\n\n return wrapped\n\n\ndef diagnostics(samples, group_by_chain=True):\n \"\"\"\n Gets diagnostics statistics such as effective sample size and\n split Gelman-Rubin using the samples drawn from the posterior\n distribution.\n\n :param dict samples: dictionary of samples keyed by site name.\n :param bool group_by_chain: If True, each variable in `samples`\n will be treated as having shape `num_chains x num_samples x sample_shape`.\n Otherwise, the corresponding shape will be `num_samples x sample_shape`\n (i.e. 
without chain dimension).\n :return: dictionary of diagnostic stats for each sample site.\n \"\"\"\n diagnostics = {}\n for site, support in samples.items():\n if not group_by_chain:\n support = support.unsqueeze(0)\n site_stats = OrderedDict()\n site_stats[\"n_eff\"] = _safe(stats.effective_sample_size)(support)\n site_stats[\"r_hat\"] = stats.split_gelman_rubin(support)\n diagnostics[site] = site_stats\n return diagnostics\n\n\ndef summary(samples, prob=0.9, group_by_chain=True):\n \"\"\"\n Returns a summary table displaying diagnostics of ``samples`` from the\n posterior. The diagnostics displayed are mean, standard deviation, median,\n the 90% Credibility Interval, :func:`~pyro.ops.stats.effective_sample_size`,\n :func:`~pyro.ops.stats.split_gelman_rubin`.\n\n :param dict samples: dictionary of samples keyed by site name.\n :param float prob: the probability mass of samples within the credibility interval.\n :param bool group_by_chain: If True, each variable in `samples`\n will be treated as having shape `num_chains x num_samples x sample_shape`.\n Otherwise, the corresponding shape will be `num_samples x sample_shape`\n (i.e. without chain dimension).\n \"\"\"\n if not group_by_chain:\n samples = {k: v.unsqueeze(0) for k, v in samples.items()}\n\n summary_dict = {}\n for name, value in samples.items():\n value_flat = torch.reshape(value, (-1,) + value.shape[2:])\n mean = value_flat.mean(dim=0)\n std = value_flat.std(dim=0)\n median = value_flat.median(dim=0)[0]\n hpdi = stats.hpdi(value_flat, prob=prob)\n n_eff = _safe(stats.effective_sample_size)(value)\n r_hat = stats.split_gelman_rubin(value)\n hpd_lower = '{:.1f}%'.format(50 * (1 - prob))\n hpd_upper = '{:.1f}%'.format(50 * (1 + prob))\n summary_dict[name] = OrderedDict([(\"mean\", mean), (\"std\", std), (\"median\", median),\n (hpd_lower, hpdi[0]), (hpd_upper, hpdi[1]),\n (\"n_eff\", n_eff), (\"r_hat\", r_hat)])\n return summary_dict\n\n\ndef print_summary(samples, prob=0.9, group_by_chain=True):\n \"\"\"\n Prints a summary table displaying diagnostics of ``samples`` from the\n posterior. The diagnostics displayed are mean, standard deviation, median,\n the 90% Credibility Interval, :func:`~pyro.ops.stats.effective_sample_size`,\n :func:`~pyro.ops.stats.split_gelman_rubin`.\n\n :param dict samples: dictionary of samples keyed by site name.\n :param float prob: the probability mass of samples within the credibility interval.\n :param bool group_by_chain: If True, each variable in `samples`\n will be treated as having shape `num_chains x num_samples x sample_shape`.\n Otherwise, the corresponding shape will be `num_samples x sample_shape`\n (i.e. 
without chain dimension).\n \"\"\"\n summary_dict = summary(samples, prob, group_by_chain)\n\n row_names = {k: k + '[' + ','.join(map(lambda x: str(x - 1), v.shape[2:])) + ']'\n for k, v in samples.items()}\n max_len = max(max(map(lambda x: len(x), row_names.values())), 10)\n name_format = '{:>' + str(max_len) + '}'\n header_format = name_format + ' {:>9}' * 7\n columns = [''] + list(list(summary_dict.values())[0].keys())\n\n print()\n print(header_format.format(*columns))\n\n row_format = name_format + ' {:>9.2f}' * 7\n for name, stats_dict in summary_dict.items():\n shape = stats_dict[\"mean\"].shape\n if len(shape) == 0:\n print(row_format.format(name, *stats_dict.values()))\n else:\n for idx in product(*map(range, shape)):\n idx_str = '[{}]'.format(','.join(map(str, idx)))\n print(row_format.format(name + idx_str, *[v[idx] for v in stats_dict.values()]))\n print()\n\n\ndef _predictive_sequential(model, posterior_samples, model_args, model_kwargs,\n num_samples, sample_sites, return_trace=False):\n collected = []\n samples = [{k: v[i] for k, v in posterior_samples.items()} for i in range(num_samples)]\n for i in range(num_samples):\n trace = poutine.trace(poutine.condition(model, samples[i])).get_trace(*model_args, **model_kwargs)\n if return_trace:\n collected.append(trace)\n else:\n collected.append({site: trace.nodes[site]['value'] for site in sample_sites})\n\n return collected if return_trace else {site: torch.stack([s[site] for s in collected])\n for site in sample_sites}\n\n\ndef predictive(model, posterior_samples, *args, **kwargs):\n \"\"\"\n .. warning::\n This function is deprecated and will be removed in a future release.\n Use the :class:`~pyro.infer.predictive.Predictive` class instead.\n\n Run model by sampling latent parameters from `posterior_samples`, and return\n values at sample sites from the forward run. By default, only sites not contained in\n `posterior_samples` are returned. This can be modified by changing the `return_sites`\n keyword argument.\n\n :param model: Python callable containing Pyro primitives.\n :param dict posterior_samples: dictionary of samples from the posterior.\n :param args: model arguments.\n :param kwargs: model kwargs; and other keyword arguments (see below).\n\n :Keyword Arguments:\n * **num_samples** (``int``) - number of samples to draw from the predictive distribution.\n This argument has no effect if ``posterior_samples`` is non-empty, in which case, the\n leading dimension size of samples in ``posterior_samples`` is used.\n * **return_sites** (``list``) - sites to return; by default only sample sites not present\n in `posterior_samples` are returned.\n * **return_trace** (``bool``) - whether to return the full trace. Note that this is vectorized\n over `num_samples`.\n * **parallel** (``bool``) - predict in parallel by wrapping the existing model\n in an outermost `plate` messenger. Note that this requires that the model has\n all batch dims correctly annotated via :class:`~pyro.plate`. Default is `False`.\n\n :return: dict of samples from the predictive distribution, or a single vectorized\n `trace` (if `return_trace=True`).\n \"\"\"\n warnings.warn('The `mcmc.predictive` function is deprecated and will be removed in '\n 'a future release. 
Use the `pyro.infer.Predictive` class instead.',\n FutureWarning)\n num_samples = kwargs.pop('num_samples', None)\n return_sites = kwargs.pop('return_sites', None)\n return_trace = kwargs.pop('return_trace', False)\n parallel = kwargs.pop('parallel', False)\n\n max_plate_nesting = _guess_max_plate_nesting(model, args, kwargs)\n model_trace = prune_subsample_sites(poutine.trace(model).get_trace(*args, **kwargs))\n reshaped_samples = {}\n\n for name, sample in posterior_samples.items():\n\n batch_size, sample_shape = sample.shape[0], sample.shape[1:]\n\n if num_samples is None:\n num_samples = batch_size\n\n elif num_samples != batch_size:\n warnings.warn(\"Sample's leading dimension size {} is different from the \"\n \"provided {} num_samples argument. Defaulting to {}.\"\n .format(batch_size, num_samples, batch_size), UserWarning)\n num_samples = batch_size\n\n sample = sample.reshape((num_samples,) + (1,) * (max_plate_nesting - len(sample_shape)) + sample_shape)\n reshaped_samples[name] = sample\n\n if num_samples is None:\n raise ValueError(\"No sample sites in model to infer `num_samples`.\")\n\n return_site_shapes = {}\n for site in model_trace.stochastic_nodes + model_trace.observation_nodes:\n site_shape = (num_samples,) + model_trace.nodes[site]['value'].shape\n if return_sites:\n if site in return_sites:\n return_site_shapes[site] = site_shape\n else:\n if site not in reshaped_samples:\n return_site_shapes[site] = site_shape\n\n if not parallel:\n return _predictive_sequential(model, posterior_samples, args, kwargs, num_samples,\n return_site_shapes.keys(), return_trace)\n\n def _vectorized_fn(fn):\n \"\"\"\n Wraps a callable inside an outermost :class:`~pyro.plate` to parallelize\n sampling from the posterior predictive.\n\n :param fn: arbitrary callable containing Pyro primitives.\n :return: wrapped callable.\n \"\"\"\n\n def wrapped_fn(*args, **kwargs):\n with pyro.plate(\"_num_predictive_samples\", num_samples, dim=-max_plate_nesting-1):\n return fn(*args, **kwargs)\n\n return wrapped_fn\n\n trace = poutine.trace(poutine.condition(_vectorized_fn(model), reshaped_samples))\\\n .get_trace(*args, **kwargs)\n\n if return_trace:\n return trace\n\n predictions = {}\n for site, shape in return_site_shapes.items():\n value = trace.nodes[site]['value']\n if value.numel() < reduce((lambda x, y: x * y), shape):\n predictions[site] = value.expand(shape)\n else:\n predictions[site] = value.reshape(shape)\n\n return predictions\n" ]
[ [ "torch.jit.trace", "torch.reshape", "torch.tensor", "torch.isfinite", "torch.stack", "torch.distributions.biject_to" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SaoirseARM/model-optimization
[ "a396089bca13ab9d38d533406dff717b6694b0d6" ]
[ "tensorflow_model_optimization/python/core/clustering/keras/cluster_integration_test.py" ]
[ "# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"End-to-end tests for keras clustering API.\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom absl.testing import parameterized\nfrom tensorflow.python.keras import keras_parameterized\n\nfrom tensorflow_model_optimization.python.core.clustering.keras import cluster\nfrom tensorflow_model_optimization.python.core.clustering.keras import cluster_config\n\nkeras = tf.keras\nlayers = keras.layers\ntest = tf.test\n\nCentroidInitialization = cluster_config.CentroidInitialization\n\nclass ClusterIntegrationTest(test.TestCase, parameterized.TestCase):\n \"\"\"Integration tests for clustering.\"\"\"\n\n @keras_parameterized.run_all_keras_modes\n def testValuesRemainClusteredAfterTraining(self):\n \"\"\"\n Verifies that training a clustered model does not destroy the clusters.\n \"\"\"\n number_of_clusters = 10\n original_model = keras.Sequential([\n layers.Dense(2, input_shape=(2,)),\n layers.Dense(2),\n ])\n\n clustered_model = cluster.cluster_weights(\n original_model,\n number_of_clusters=number_of_clusters,\n cluster_centroids_init=CentroidInitialization.LINEAR\n )\n\n clustered_model.compile(\n loss=keras.losses.categorical_crossentropy,\n optimizer='adam',\n metrics=['accuracy']\n )\n\n def dataset_generator():\n x_train = np.array([\n [0, 1],\n [2, 0],\n [0, 3],\n [4, 1],\n [5, 1],\n ])\n y_train = np.array([\n [0, 1],\n [1, 0],\n [1, 0],\n [0, 1],\n [0, 1],\n ])\n for x, y in zip(x_train, y_train):\n yield np.array([x]), np.array([y])\n\n clustered_model.fit_generator(dataset_generator(), steps_per_epoch=1)\n stripped_model = cluster.strip_clustering(clustered_model)\n weights_as_list = stripped_model.get_weights()[0].reshape(-1,).tolist()\n unique_weights = set(weights_as_list)\n self.assertLessEqual(len(unique_weights), number_of_clusters)\n\n\nif __name__ == '__main__':\n test.main()\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zhangyimi/Research
[ "94519a72e7936c77f62a31709634b72c09aabf74" ]
[ "CV/VehicleCounting/vehicle_counting/hausdorff_dist.py" ]
[ "import numpy as np\nimport numba\nimport distances\nfrom inspect import getmembers\n\ndef _find_available_functions(module_name):\n all_members = getmembers(module_name)\n available_functions = [member[0] for member in all_members if isinstance(member[1], numba.targets.registry.CPUDispatcher)]\n return available_functions\n\[email protected](nopython=True, fastmath=True)\ndef _hausdorff(XA, XB, distance_function):\n nA = XA.shape[0]\n nB = XB.shape[0]\n cmax = 0.\n for i in range(nA):\n cmin = np.inf\n for j in range(nB):\n d = distance_function(XA[i,:], XB[j,:])\n if d<cmin:\n cmin = d\n if cmin<cmax:\n break\n if cmin>cmax and np.inf>cmin:\n cmax = cmin\n '''\n\tfor j in range(nB):\n\t\tcmin = np.inf\n\t\tfor i in range(nA):\n\t\t\td = distance_function(XA[i,:], XB[j,:])\n\t\t\tif d<cmin:\n\t\t\t\tcmin = d\n\t\t\tif cmin<cmax:\n\t\t\t\tbreak\n\t\tif cmin>cmax and np.inf>cmin:\n\t\t\tcmax = cmin\n '''\n return cmax\n\ndef hausdorff_distance(XA, XB, distance='euclidean'):\n assert distance in _find_available_functions(distances), \\\n 'distance is not an implemented function'\n assert type(XA) is np.ndarray and type(XB) is np.ndarray, \\\n 'arrays must be of type numpy.ndarray'\n assert np.issubdtype(XA.dtype, np.number) and np.issubdtype(XA.dtype, np.number), \\\n 'the arrays data type must be numeric'\n assert XA.ndim == 2 and XB.ndim == 2, \\\n 'arrays must be 2-dimensional'\n assert XA.shape[1] == XB.shape[1], \\\n 'arrays must have equal number of columns'\n if distance == 'haversine':\n assert XA.shape[1] >= 2, 'haversine distance requires at least 2 coordinates per point (lat, lng)'\n assert XB.shape[1] >= 2, 'haversine distance requires at least 2 coordinates per point (lat, lng)'\n distance_function = getattr(distances, distance)\n return _hausdorff(XA, XB, distance_function)\n" ]
[ [ "numpy.issubdtype" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ekirving/qpbrute
[ "38a9555d9c0724403c68f40d29478be446a4ec7e" ]
[ "qpbrute/qpbayes.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCompare all fitted models to each other using Bayes factors from admixture_graph\n\"\"\"\n__author__ = \"Evan K. Irving-Pease\"\n__copyright__ = \"Copyright 2018\"\n__email__ = \"[email protected]\"\n__license__ = \"MIT\"\n\nimport argparse\nimport glob\nimport itertools\nimport os\nimport re\nimport sys\nfrom collections import defaultdict\nfrom datetime import timedelta\nfrom time import time\n\nimport pandas as pd\nimport pathos.multiprocessing as mp\n\nfrom qpbrute.consts import (\n MCMC_NUM_BURN,\n CPU_CORES_HIGH,\n MCMC_NUM_CHAINS,\n MCMC_NUM_TEMPS,\n MCMC_NUM_ITERS,\n CPU_CORES_MAX,\n FOLDERS,\n)\nfrom qpbrute.utils import run_cmd\n\n\nclass QPBayes:\n def __init__(\n self,\n geno,\n snp,\n ind,\n prefix,\n nodes,\n outgroup,\n chains,\n heated,\n iterations,\n burnin,\n verbose,\n threads,\n ):\n \"\"\"\n Initialise the object attributes\n \"\"\"\n self.geno_file = geno\n self.snp_file = snp\n self.ind_file = ind\n self.prefix = prefix\n self.nodes = nodes\n self.outgroup = outgroup\n self.mcmc_chains = int(chains)\n self.mcmc_heated = int(heated)\n self.mcmc_iters = int(float(iterations))\n self.mcmc_burn = int(float(burnin))\n self.verbose = verbose\n self.threads = int(threads)\n self.code_dir = os.path.abspath(os.path.dirname(__file__))\n\n # sanity check the outgroup is not in the node list\n if outgroup in nodes:\n self.nodes.remove(outgroup)\n\n # TODO save the successful graphs in a json file\n # find all the PDFs, and extract their graph names\n self.graphs = [\n re.search(r\"a[0-9]-(.+).pdf\", pdf).group(1)\n for pdf in glob.glob(f\"{prefix}/pdf/{prefix}*\")\n ]\n\n # make sure all the output folders exits\n for folder in FOLDERS:\n os.makedirs(f\"{prefix}/{folder}\", exist_ok=True)\n\n # self.dot_path = f\"{prefix}/graphs/{prefix}\"\n self.dstat_par = f\"{prefix}/dstats/{prefix}.par\"\n self.dstat_csv = f\"{prefix}/dstats/{prefix}.csv\"\n self.dstat_log = f\"{prefix}/dstats/{prefix}.log\"\n self.dstat_tests = f\"{prefix}/dstats/{prefix}.tests\"\n self.bayes_log = f\"{prefix}/{prefix}.bayes.log\"\n\n # clean up the log file\n if os.path.exists(self.bayes_log):\n os.remove(self.bayes_log)\n\n # open the log file for writing\n self.log_handle = open(self.bayes_log, \"a\")\n\n def log(self, message):\n \"\"\"\n Handle message logging to file/stdout.\n \"\"\"\n # send message to the log file\n print(message, file=self.log_handle)\n self.log_handle.flush()\n\n if self.verbose:\n # echo to stdout\n print(message)\n sys.stdout.flush()\n\n def calculate_dstats(self):\n \"\"\"\n Use `qpDstat` to calculate D-stats for all possible permutations of populations.\n\n See https://github.com/DReichLab/AdmixTools/blob/master/README.Dstatistics\n \"\"\"\n\n if os.path.isfile(self.dstat_csv):\n # only run once\n return\n\n # get all the samples, grouped by population\n samples = defaultdict(list)\n with open(self.ind_file, \"r\") as fin:\n for line in fin.readlines():\n sample, gender, population = line.split()\n samples[population].append(sample)\n\n # compose the list of all 3-way tests (we're doing outgroup D-stats)\n tests = set()\n for x, y, z in itertools.permutations(self.nodes, 3):\n tests.add((self.outgroup, x, y, z))\n\n # write all the tests to disk\n with open(self.dstat_tests, \"w\") as fout:\n fout.writelines(\" \".join(test) + \"\\n\" for test in tests)\n\n # compose the config settings\n config = [\n \"genotypename: {}\".format(self.geno_file),\n \"snpname: {}\".format(self.snp_file),\n \"indivname: 
{}\".format(self.ind_file),\n \"popfilename: {}\".format(\n self.dstat_tests\n ), # Program will run the method for all listed 4-way tests\n \"blgsize: 0.005\", # TODO parameterize\n \"f4mode: YES\", # TODO f4 statistics not D-stats are computed\n ]\n\n # the params to be defined in a .par file\n with open(self.dstat_par, \"w\") as fout:\n fout.write(\"\\n\".join(config))\n\n self.log(\n \"INFO: There are {:,} D-stat tests to compute for {} populations.\".format(\n len(tests), len(self.nodes)\n )\n )\n\n # run qpDstat\n log = run_cmd([\"qpDstat\", \"-p\", self.dstat_par])\n\n # save the log file\n with open(self.dstat_log, \"w\") as fout:\n fout.write(log)\n\n results = list()\n columns = [\"W\", \"X\", \"Y\", \"Z\", \"D\", \"Z.value\"]\n\n # parse the results from the log file\n for line in log.splitlines():\n if \"result:\" in line:\n results.append(dict(zip(columns, line.split()[1:7])))\n\n # convert to DataFrame and save to disk\n pd.DataFrame(results, columns=columns).to_csv(self.dstat_csv, index=False)\n\n def calculate_bayes_factors(self):\n \"\"\"\n Use `admixturegraph` to calculate Bayes factors for all fitted graphs.\n\n See https://github.com/mailund/admixture_graph\n \"\"\"\n self.log(\n \"INFO: There are {:,} graphs to compute Bayes factors for.\".format(\n len(self.graphs)\n )\n )\n\n if len(self.graphs) == 0:\n sys.exit()\n\n if self.threads > 1:\n # compute the model likelihoods\n pool = mp.ProcessingPool(self.threads)\n pool.map(self.model_likelihood, self.graphs)\n else:\n # compute likelihoods without multi-threading\n for graph in self.graphs:\n self.model_likelihood(graph)\n\n def model_likelihood(self, graph):\n \"\"\"\n Run the MCMC to calculate the model likelihoods\n \"\"\"\n log_file = f\"{self.prefix}/bayes/{self.prefix}-{graph}-likelihood.log\"\n\n if not os.path.isfile(\n f\"{self.prefix}/bayes/{self.prefix}-{graph}-burn-gelman.pdf\"\n ):\n # only run once\n run_cmd(\n [\n \"Rscript\",\n f\"{self.code_dir}/rscript/model_likelihood.R\",\n self.prefix,\n graph,\n self.dstat_csv,\n self.mcmc_chains,\n self.mcmc_heated,\n self.mcmc_iters,\n self.mcmc_burn,\n ],\n env={\"OMP_NUM_THREADS\": \"1\"},\n stdout=open(log_file, \"w\"),\n )\n\n self.log(\"INFO: Bayes factor done for graph {}\".format(graph))\n\n def find_best_model(self):\n \"\"\"\n Compare Bayes factors to find the best fitting model.\n \"\"\"\n log_file = f\"{self.prefix}/bayes/{self.prefix}-bayes.log\"\n\n run_cmd(\n [\n \"Rscript\",\n f\"{self.code_dir}/rscript/bayes_factors.R\",\n self.prefix,\n self.mcmc_burn,\n ],\n stdout=open(log_file, \"w\"),\n )\n\n\ndef calculate_bayes_factors(\n geno,\n snp,\n ind,\n prefix,\n nodes,\n outgroup,\n chains,\n heated,\n iterations,\n burnin,\n verbose=True,\n threads=CPU_CORES_HIGH,\n):\n \"\"\"\n Find the best fitting graph by calculating the Bayes factors for each model found by QPBrute.\n \"\"\"\n start = time()\n\n # instantiate the class\n qpb = QPBayes(\n geno,\n snp,\n ind,\n prefix,\n nodes,\n outgroup,\n chains,\n heated,\n iterations,\n burnin,\n verbose,\n threads,\n )\n\n if qpb.mcmc_burn >= qpb.mcmc_iters:\n raise RuntimeError(\n \"ERROR: MCMC burn in must be less than the number of iterations\"\n )\n\n # calculate outgroup D-stats for all 3-way permutations of the populations\n qpb.calculate_dstats()\n\n # calculate Bayes factors for all the fitted graphs, using the pre-computed D-stats\n qpb.calculate_bayes_factors()\n\n # compare Bayes factors to find the best fitting model\n qpb.find_best_model()\n\n qpb.log(\n \"INFO: Calculating Bayes factors 
took: {}\".format(\n timedelta(seconds=time() - start)\n )\n )\n\n\ndef qpbayes():\n # parse the command line arguments\n parser = argparse.ArgumentParser(\n description=\"Compare all fitted models to each other using Bayes factors.\"\n )\n parser.add_argument(\n \"--geno\",\n help=\"Input genotype file (eigenstrat format)\",\n metavar=\"example.geno\",\n required=True,\n )\n parser.add_argument(\n \"--snp\",\n help=\"Input snp file (eigenstrat format)\",\n metavar=\"example.snp\",\n required=True,\n )\n parser.add_argument(\n \"--ind\",\n help=\"Input indiv file (eigenstrat format)\",\n metavar=\"example.ind\",\n required=True,\n )\n parser.add_argument(\n \"--prefix\", help=\"Output prefix\", metavar=\"example\", required=True\n )\n parser.add_argument(\n \"--pops\",\n nargs=\"+\",\n help=\"List of populations\",\n metavar=(\"A\", \"B\"),\n required=True,\n )\n parser.add_argument(\n \"--out\", help=\"Outgroup population\", metavar=\"OUT\", required=True\n )\n parser.add_argument(\n \"--chains\",\n help=\"Number of replicate MCMC chains to run (default: %s)\" % MCMC_NUM_CHAINS,\n metavar=\"NUM\",\n default=MCMC_NUM_CHAINS,\n )\n parser.add_argument(\n \"--heated\",\n help=\"Number of heated chains per replicate (default: %s)\" % MCMC_NUM_TEMPS,\n metavar=\"NUM\",\n default=MCMC_NUM_TEMPS,\n )\n parser.add_argument(\n \"--iterations\",\n help=\"Number of MCMC iterations per chain (default: %.1e)\" % MCMC_NUM_ITERS,\n metavar=\"NUM\",\n default=MCMC_NUM_ITERS,\n )\n parser.add_argument(\n \"--burnin\",\n help=\"Number of MCMC iterations to burnin (default: %.1e)\" % MCMC_NUM_BURN,\n metavar=\"NUM\",\n default=MCMC_NUM_BURN,\n )\n parser.add_argument(\n \"--threads\",\n help=\"Number of threads to use (default: %s)\" % CPU_CORES_MAX,\n metavar=\"NUM\",\n default=CPU_CORES_MAX,\n )\n\n argv = parser.parse_args()\n\n calculate_bayes_factors(\n argv.geno,\n argv.snp,\n argv.ind,\n argv.prefix,\n argv.pops,\n argv.out,\n argv.chains,\n argv.heated,\n argv.iterations,\n argv.burnin,\n threads=argv.threads,\n )\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
Sinaxist/mne-python
[ "33146156f2660f122ecc04fa0d5b3fd3c34b549e" ]
[ "mne/utils.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"Some utility functions.\"\"\"\nfrom __future__ import print_function\n\n# Authors: Alexandre Gramfort <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport atexit\nfrom distutils.version import LooseVersion\nfrom functools import wraps\nimport ftplib\nfrom functools import partial\nimport hashlib\nimport inspect\nimport json\nimport logging\nfrom math import log, ceil\nimport multiprocessing\nimport operator\nimport os\nimport os.path as op\nimport platform\nimport shutil\nfrom shutil import rmtree\nfrom string import Formatter\nimport subprocess\nimport sys\nimport tempfile\nimport time\nimport traceback\nimport warnings\nimport webbrowser\n\nimport numpy as np\nfrom scipy import linalg, sparse\n\nfrom .externals.six.moves import urllib\nfrom .externals.six import string_types, StringIO, BytesIO, integer_types\nfrom .externals.decorator import decorator\n\nfrom .fixes import _get_args\n\nlogger = logging.getLogger('mne') # one selection here used across mne-python\nlogger.propagate = False # don't propagate (in case of multiple imports)\n\n\ndef _memory_usage(*args, **kwargs):\n if isinstance(args[0], tuple):\n args[0][0](*args[0][1], **args[0][2])\n elif not isinstance(args[0], int): # can be -1 for current use\n args[0]()\n return [-1]\n\n\ntry:\n from memory_profiler import memory_usage\nexcept ImportError:\n memory_usage = _memory_usage\n\n\ndef nottest(f):\n \"\"\"Mark a function as not a test (decorator).\"\"\"\n f.__test__ = False\n return f\n\n\n# # # WARNING # # #\n# This list must also be updated in doc/_templates/class.rst if it is\n# changed here!\n_doc_special_members = ('__contains__', '__getitem__', '__iter__', '__len__',\n '__call__', '__add__', '__sub__', '__mul__', '__div__',\n '__neg__', '__hash__')\n\n###############################################################################\n# RANDOM UTILITIES\n\n\ndef _ensure_int(x, name='unknown', must_be='an int'):\n \"\"\"Ensure a variable is an integer.\"\"\"\n # This is preferred over numbers.Integral, see:\n # https://github.com/scipy/scipy/pull/7351#issuecomment-299713159\n try:\n x = int(operator.index(x))\n except TypeError:\n raise TypeError('%s must be %s, got %s' % (name, must_be, type(x)))\n return x\n\n\ndef _pl(x):\n \"\"\"Determine if plural should be used.\"\"\"\n len_x = x if isinstance(x, (integer_types, np.generic)) else len(x)\n return '' if len_x == 1 else 's'\n\n\ndef _explain_exception(start=-1, stop=None, prefix='> '):\n \"\"\"Explain an exception.\"\"\"\n # start=-1 means \"only the most recent caller\"\n etype, value, tb = sys.exc_info()\n string = traceback.format_list(traceback.extract_tb(tb)[start:stop])\n string = (''.join(string).split('\\n') +\n traceback.format_exception_only(etype, value))\n string = ':\\n' + prefix + ('\\n' + prefix).join(string)\n return string\n\n\ndef _get_call_line(in_verbose=False):\n \"\"\"Get the call line from within a function.\"\"\"\n # XXX Eventually we could auto-triage whether in a `verbose` decorated\n # function or not.\n # NB This probably only works for functions that are undecorated,\n # or decorated by `verbose`.\n back = 2 if not in_verbose else 4\n call_frame = inspect.getouterframes(inspect.currentframe())[back][0]\n context = inspect.getframeinfo(call_frame).code_context\n context = 'unknown' if context is None else context[0].strip()\n return context\n\n\ndef _sort_keys(x):\n \"\"\"Sort and return keys of dict.\"\"\"\n keys = list(x.keys()) # note: not thread-safe\n idx = np.argsort([str(k) for k in keys])\n keys 
= [keys[ii] for ii in idx]\n return keys\n\n\ndef object_hash(x, h=None):\n \"\"\"Hash a reasonable python object.\n\n Parameters\n ----------\n x : object\n Object to hash. Can be anything comprised of nested versions of:\n {dict, list, tuple, ndarray, str, bytes, float, int, None}.\n h : hashlib HASH object | None\n Optional, object to add the hash to. None creates an MD5 hash.\n\n Returns\n -------\n digest : int\n The digest resulting from the hash.\n \"\"\"\n if h is None:\n h = hashlib.md5()\n if hasattr(x, 'keys'):\n # dict-like types\n keys = _sort_keys(x)\n for key in keys:\n object_hash(key, h)\n object_hash(x[key], h)\n elif isinstance(x, bytes):\n # must come before \"str\" below\n h.update(x)\n elif isinstance(x, (string_types, float, int, type(None))):\n h.update(str(type(x)).encode('utf-8'))\n h.update(str(x).encode('utf-8'))\n elif isinstance(x, np.ndarray):\n x = np.asarray(x)\n h.update(str(x.shape).encode('utf-8'))\n h.update(str(x.dtype).encode('utf-8'))\n h.update(x.tostring())\n elif hasattr(x, '__len__'):\n # all other list-like types\n h.update(str(type(x)).encode('utf-8'))\n for xx in x:\n object_hash(xx, h)\n else:\n raise RuntimeError('unsupported type: %s (%s)' % (type(x), x))\n return int(h.hexdigest(), 16)\n\n\ndef object_size(x):\n \"\"\"Estimate the size of a reasonable python object.\n\n Parameters\n ----------\n x : object\n Object to approximate the size of.\n Can be anything comprised of nested versions of:\n {dict, list, tuple, ndarray, str, bytes, float, int, None}.\n\n Returns\n -------\n size : int\n The estimated size in bytes of the object.\n \"\"\"\n # Note: this will not process object arrays properly (since those only)\n # hold references\n if isinstance(x, (bytes, string_types, int, float, type(None))):\n size = sys.getsizeof(x)\n elif isinstance(x, np.ndarray):\n # On newer versions of NumPy, just doing sys.getsizeof(x) works,\n # but on older ones you always get something small :(\n size = sys.getsizeof(np.array([])) + x.nbytes\n elif isinstance(x, np.generic):\n size = x.nbytes\n elif isinstance(x, dict):\n size = sys.getsizeof(x)\n for key, value in x.items():\n size += object_size(key)\n size += object_size(value)\n elif isinstance(x, (list, tuple)):\n size = sys.getsizeof(x) + sum(object_size(xx) for xx in x)\n elif sparse.isspmatrix_csc(x) or sparse.isspmatrix_csr(x):\n size = sum(sys.getsizeof(xx)\n for xx in [x, x.data, x.indices, x.indptr])\n else:\n raise RuntimeError('unsupported type: %s (%s)' % (type(x), x))\n return size\n\n\ndef object_diff(a, b, pre=''):\n \"\"\"Compute all differences between two python variables.\n\n Parameters\n ----------\n a : object\n Currently supported: dict, list, tuple, ndarray, int, str, bytes,\n float, StringIO, BytesIO.\n b : object\n Must be same type as x1.\n pre : str\n String to prepend to each line.\n\n Returns\n -------\n diffs : str\n A string representation of the differences.\n \"\"\"\n out = ''\n if type(a) != type(b):\n out += pre + ' type mismatch (%s, %s)\\n' % (type(a), type(b))\n elif isinstance(a, dict):\n k1s = _sort_keys(a)\n k2s = _sort_keys(b)\n m1 = set(k2s) - set(k1s)\n if len(m1):\n out += pre + ' left missing keys %s\\n' % (m1)\n for key in k1s:\n if key not in k2s:\n out += pre + ' right missing key %s\\n' % key\n else:\n out += object_diff(a[key], b[key], pre + '[%s]' % repr(key))\n elif isinstance(a, (list, tuple)):\n if len(a) != len(b):\n out += pre + ' length mismatch (%s, %s)\\n' % (len(a), len(b))\n else:\n for ii, (xx1, xx2) in enumerate(zip(a, b)):\n out += 
object_diff(xx1, xx2, pre + '[%s]' % ii)\n elif isinstance(a, (string_types, int, float, bytes)):\n if a != b:\n out += pre + ' value mismatch (%s, %s)\\n' % (a, b)\n elif a is None:\n if b is not None:\n out += pre + ' left is None, right is not (%s)\\n' % (b)\n elif isinstance(a, np.ndarray):\n if not np.array_equal(a, b):\n out += pre + ' array mismatch\\n'\n elif isinstance(a, (StringIO, BytesIO)):\n if a.getvalue() != b.getvalue():\n out += pre + ' StringIO mismatch\\n'\n elif sparse.isspmatrix(a):\n # sparsity and sparse type of b vs a already checked above by type()\n if b.shape != a.shape:\n out += pre + (' sparse matrix a and b shape mismatch'\n '(%s vs %s)' % (a.shape, b.shape))\n else:\n c = a - b\n c.eliminate_zeros()\n if c.nnz > 0:\n out += pre + (' sparse matrix a and b differ on %s '\n 'elements' % c.nnz)\n else:\n raise RuntimeError(pre + ': unsupported type %s (%s)' % (type(a), a))\n return out\n\n\ndef check_random_state(seed):\n \"\"\"Turn seed into a np.random.RandomState instance.\n\n If seed is None, return the RandomState singleton used by np.random.\n If seed is an int, return a new RandomState instance seeded with seed.\n If seed is already a RandomState instance, return it.\n Otherwise raise ValueError.\n \"\"\"\n if seed is None or seed is np.random:\n return np.random.mtrand._rand\n if isinstance(seed, (int, np.integer)):\n return np.random.RandomState(seed)\n if isinstance(seed, np.random.RandomState):\n return seed\n raise ValueError('%r cannot be used to seed a numpy.random.RandomState'\n ' instance' % seed)\n\n\ndef split_list(l, n):\n \"\"\"Split list in n (approx) equal pieces.\"\"\"\n n = int(n)\n sz = len(l) // n\n for i in range(n - 1):\n yield l[i * sz:(i + 1) * sz]\n yield l[(n - 1) * sz:]\n\n\ndef create_chunks(sequence, size):\n \"\"\"Generate chunks from a sequence.\n\n Parameters\n ----------\n sequence : iterable\n Any iterable object\n size : int\n The chunksize to be returned\n \"\"\"\n return (sequence[p:p + size] for p in range(0, len(sequence), size))\n\n\ndef sum_squared(X):\n \"\"\"Compute norm of an array.\n\n Parameters\n ----------\n X : array\n Data whose norm must be found\n\n Returns\n -------\n value : float\n Sum of squares of the input array X\n \"\"\"\n X_flat = X.ravel(order='F' if np.isfortran(X) else 'C')\n return np.dot(X_flat, X_flat)\n\n\ndef warn(message, category=RuntimeWarning):\n \"\"\"Emit a warning with trace outside the mne namespace.\n\n This function takes arguments like warnings.warn, and sends messages\n using both ``warnings.warn`` and ``logger.warn``. Warnings can be\n generated deep within nested function calls. In order to provide a\n more helpful warning, this function traverses the stack until it\n reaches a frame outside the ``mne`` namespace that caused the error.\n\n Parameters\n ----------\n message : str\n Warning message.\n category : instance of Warning\n The warning class. 
Defaults to ``RuntimeWarning``.\n \"\"\"\n import mne\n root_dir = op.dirname(mne.__file__)\n frame = None\n if logger.level <= logging.WARN:\n last_fname = ''\n frame = inspect.currentframe()\n while frame:\n fname = frame.f_code.co_filename\n lineno = frame.f_lineno\n # in verbose dec\n if fname == '<string>' and last_fname == 'utils.py':\n last_fname = fname\n frame = frame.f_back\n continue\n # treat tests as scripts\n # and don't capture unittest/case.py (assert_raises)\n if not (fname.startswith(root_dir) or\n ('unittest' in fname and 'case' in fname)) or \\\n op.basename(op.dirname(fname)) == 'tests':\n break\n last_fname = op.basename(fname)\n frame = frame.f_back\n del frame\n # We need to use this instead of warn(message, category, stacklevel)\n # because we move out of the MNE stack, so warnings won't properly\n # recognize the module name (and our warnings.simplefilter will fail)\n warnings.warn_explicit(message, category, fname, lineno,\n 'mne', globals().get('__warningregistry__', {}))\n logger.warning(message)\n\n\ndef check_fname(fname, filetype, endings, endings_err=()):\n \"\"\"Enforce MNE filename conventions.\n\n Parameters\n ----------\n fname : str\n Name of the file.\n filetype : str\n Type of file. e.g., ICA, Epochs etc.\n endings : tuple\n Acceptable endings for the filename.\n endings_err : tuple\n Obligatory possible endings for the filename.\n \"\"\"\n if len(endings_err) > 0 and not fname.endswith(endings_err):\n print_endings = ' or '.join([', '.join(endings_err[:-1]),\n endings_err[-1]])\n raise IOError('The filename (%s) for file type %s must end with %s'\n % (fname, filetype, print_endings))\n print_endings = ' or '.join([', '.join(endings[:-1]), endings[-1]])\n if not fname.endswith(endings):\n warn('This filename (%s) does not conform to MNE naming conventions. '\n 'All %s files should end with %s'\n % (fname, filetype, print_endings))\n\n\nclass WrapStdOut(object):\n \"\"\"Dynamically wrap to sys.stdout.\n\n This makes packages that monkey-patch sys.stdout (e.g.doctest,\n sphinx-gallery) work properly.\n \"\"\"\n\n def __getattr__(self, name): # noqa: D105\n # Even more ridiculous than this class, this must be sys.stdout (not\n # just stdout) in order for this to work (tested on OSX and Linux)\n if hasattr(sys.stdout, name):\n return getattr(sys.stdout, name)\n else:\n raise AttributeError(\"'file' object has not attribute '%s'\" % name)\n\n\nclass _TempDir(str):\n \"\"\"Create and auto-destroy temp dir.\n\n This is designed to be used with testing modules. Instances should be\n defined inside test functions. 
Instances defined at module level can not\n guarantee proper destruction of the temporary directory.\n\n When used at module level, the current use of the __del__() method for\n cleanup can fail because the rmtree function may be cleaned up before this\n object (an alternative could be using the atexit module instead).\n \"\"\"\n\n def __new__(self): # noqa: D105\n new = str.__new__(self, tempfile.mkdtemp(prefix='tmp_mne_tempdir_'))\n return new\n\n def __init__(self): # noqa: D102\n self._path = self.__str__()\n\n def __del__(self): # noqa: D105\n rmtree(self._path, ignore_errors=True)\n\n\ndef estimate_rank(data, tol='auto', return_singular=False, norm=True):\n \"\"\"Estimate the rank of data.\n\n This function will normalize the rows of the data (typically\n channels or vertices) such that non-zero singular values\n should be close to one.\n\n Parameters\n ----------\n data : array\n Data to estimate the rank of (should be 2-dimensional).\n tol : float | str\n Tolerance for singular values to consider non-zero in\n calculating the rank. The singular values are calculated\n in this method such that independent data are expected to\n have singular value around one. Can be 'auto' to use the\n same thresholding as ``scipy.linalg.orth``.\n return_singular : bool\n If True, also return the singular values that were used\n to determine the rank.\n norm : bool\n If True, data will be scaled by their estimated row-wise norm.\n Else data are assumed to be scaled. Defaults to True.\n\n Returns\n -------\n rank : int\n Estimated rank of the data.\n s : array\n If return_singular is True, the singular values that were\n thresholded to determine the rank are also returned.\n \"\"\"\n data = data.copy() # operate on a copy\n if norm is True:\n norms = _compute_row_norms(data)\n data /= norms[:, np.newaxis]\n s = linalg.svd(data, compute_uv=False, overwrite_a=True)\n if isinstance(tol, string_types):\n if tol != 'auto':\n raise ValueError('tol must be \"auto\" or float')\n eps = np.finfo(float).eps\n tol = np.max(data.shape) * np.amax(s) * eps\n tol = float(tol)\n rank = np.sum(s > tol)\n if return_singular is True:\n return rank, s\n else:\n return rank\n\n\ndef _compute_row_norms(data):\n \"\"\"Compute scaling based on estimated norm.\"\"\"\n norms = np.sqrt(np.sum(data ** 2, axis=1))\n norms[norms == 0] = 1.0\n return norms\n\n\ndef _reject_data_segments(data, reject, flat, decim, info, tstep):\n \"\"\"Reject data segments using peak-to-peak amplitude.\"\"\"\n from .epochs import _is_good\n from .io.pick import channel_indices_by_type\n\n data_clean = np.empty_like(data)\n idx_by_type = channel_indices_by_type(info)\n step = int(ceil(tstep * info['sfreq']))\n if decim is not None:\n step = int(ceil(step / float(decim)))\n this_start = 0\n this_stop = 0\n drop_inds = []\n for first in range(0, data.shape[1], step):\n last = first + step\n data_buffer = data[:, first:last]\n if data_buffer.shape[1] < (last - first):\n break # end of the time segment\n if _is_good(data_buffer, info['ch_names'], idx_by_type, reject,\n flat, ignore_chs=info['bads']):\n this_stop = this_start + data_buffer.shape[1]\n data_clean[:, this_start:this_stop] = data_buffer\n this_start += data_buffer.shape[1]\n else:\n logger.info(\"Artifact detected in [%d, %d]\" % (first, last))\n drop_inds.append((first, last))\n data = data_clean[:, :this_stop]\n if not data.any():\n raise RuntimeError('No clean segment found. 
Please '\n 'consider updating your rejection '\n 'thresholds.')\n return data, drop_inds\n\n\ndef _get_inst_data(inst):\n \"\"\"Get data view from MNE object instance like Raw, Epochs or Evoked.\"\"\"\n from .io.base import BaseRaw\n from .epochs import BaseEpochs\n from . import Evoked\n from .time_frequency.tfr import _BaseTFR\n\n if isinstance(inst, (BaseRaw, BaseEpochs, Evoked, _BaseTFR)):\n if not inst.preload:\n inst.load_data()\n return inst._data\n else:\n raise TypeError('The argument must be an instance of Raw, Epochs, '\n 'Evoked, EpochsTFR or AverageTFR, got {0}.'.format(\n type(inst)))\n\n\nclass _FormatDict(dict):\n \"\"\"Help pformat() work properly.\"\"\"\n\n def __missing__(self, key):\n return \"{\" + key + \"}\"\n\n\ndef pformat(temp, **fmt):\n \"\"\"Format a template string partially.\n\n Examples\n --------\n >>> pformat(\"{a}_{b}\", a='x')\n 'x_{b}'\n \"\"\"\n formatter = Formatter()\n mapping = _FormatDict(fmt)\n return formatter.vformat(temp, (), mapping)\n\n\n###############################################################################\n# DECORATORS\n\n# Following deprecated class copied from scikit-learn\n\n# force show of DeprecationWarning even on python 2.7\nwarnings.filterwarnings('always', category=DeprecationWarning, module='mne')\n\n\nclass deprecated(object):\n \"\"\"Mark a function or class as deprecated (decorator).\n\n Issue a warning when the function is called/the class is instantiated and\n adds a warning to the docstring.\n\n The optional extra argument will be appended to the deprecation message\n and the docstring. Note: to use this with the default value for extra, put\n in an empty of parentheses::\n\n >>> from mne.utils import deprecated\n >>> deprecated() # doctest: +ELLIPSIS\n <mne.utils.deprecated object at ...>\n\n >>> @deprecated()\n ... 
def some_function(): pass\n\n\n Parameters\n ----------\n extra: string\n To be added to the deprecation messages.\n \"\"\"\n\n # Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,\n # but with many changes.\n\n # scikit-learn will not import on all platforms b/c it can be\n # sklearn or scikits.learn, so a self-contained example is used above\n\n def __init__(self, extra=''): # noqa: D102\n self.extra = extra\n\n def __call__(self, obj): # noqa: D105\n \"\"\"Call.\n\n Parameters\n ----------\n obj : object\n Object to call.\n \"\"\"\n if isinstance(obj, type):\n return self._decorate_class(obj)\n else:\n return self._decorate_fun(obj)\n\n def _decorate_class(self, cls):\n msg = \"Class %s is deprecated\" % cls.__name__\n if self.extra:\n msg += \"; %s\" % self.extra\n\n # FIXME: we should probably reset __new__ for full generality\n init = cls.__init__\n\n def deprecation_wrapped(*args, **kwargs):\n warnings.warn(msg, category=DeprecationWarning)\n return init(*args, **kwargs)\n cls.__init__ = deprecation_wrapped\n\n deprecation_wrapped.__name__ = '__init__'\n deprecation_wrapped.__doc__ = self._update_doc(init.__doc__)\n deprecation_wrapped.deprecated_original = init\n\n return cls\n\n def _decorate_fun(self, fun):\n \"\"\"Decorate function fun.\"\"\"\n msg = \"Function %s is deprecated\" % fun.__name__\n if self.extra:\n msg += \"; %s\" % self.extra\n\n def deprecation_wrapped(*args, **kwargs):\n warnings.warn(msg, category=DeprecationWarning)\n return fun(*args, **kwargs)\n\n deprecation_wrapped.__name__ = fun.__name__\n deprecation_wrapped.__dict__ = fun.__dict__\n deprecation_wrapped.__doc__ = self._update_doc(fun.__doc__)\n\n return deprecation_wrapped\n\n def _update_doc(self, olddoc):\n newdoc = \".. warning:: DEPRECATED\"\n if self.extra:\n newdoc = \"%s: %s\" % (newdoc, self.extra)\n if olddoc:\n newdoc = \"%s\\n\\n%s\" % (newdoc, olddoc)\n return newdoc\n\n\n@decorator\ndef verbose(function, *args, **kwargs):\n \"\"\"Verbose decorator to allow functions to override log-level.\n\n This decorator is used to set the verbose level during a function or method\n call, such as :func:`mne.compute_covariance`. The `verbose` keyword\n argument can be 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL', True (an\n alias for 'INFO'), or False (an alias for 'WARNING'). 
To set the global\n verbosity level for all functions, use :func:`mne.set_log_level`.\n\n Parameters\n ----------\n function : function\n Function to be decorated by setting the verbosity level.\n\n Returns\n -------\n dec : function\n The decorated function\n\n Examples\n --------\n You can use the ``verbose`` argument to set the verbose level on the fly::\n >>> import mne\n >>> cov = mne.compute_raw_covariance(raw, verbose='WARNING') # doctest: +SKIP\n >>> cov = mne.compute_raw_covariance(raw, verbose='INFO') # doctest: +SKIP\n Using up to 49 segments\n Number of samples used : 5880\n [done]\n\n See Also\n --------\n set_log_level\n set_config\n \"\"\" # noqa: E501\n arg_names = _get_args(function)\n default_level = verbose_level = None\n if len(arg_names) > 0 and arg_names[0] == 'self':\n default_level = getattr(args[0], 'verbose', None)\n if 'verbose' in arg_names:\n verbose_level = args[arg_names.index('verbose')]\n elif 'verbose' in kwargs:\n verbose_level = kwargs.pop('verbose')\n\n # This ensures that object.method(verbose=None) will use object.verbose\n verbose_level = default_level if verbose_level is None else verbose_level\n\n if verbose_level is not None:\n # set it back if we get an exception\n with use_log_level(verbose_level):\n return function(*args, **kwargs)\n return function(*args, **kwargs)\n\n\nclass use_log_level(object):\n \"\"\"Context handler for logging level.\n\n Parameters\n ----------\n level : int\n The level to use.\n \"\"\"\n\n def __init__(self, level): # noqa: D102\n self.level = level\n\n def __enter__(self): # noqa: D105\n self.old_level = set_log_level(self.level, True)\n\n def __exit__(self, *args): # noqa: D105\n set_log_level(self.old_level)\n\n\n@nottest\ndef slow_test(f):\n \"\"\"Mark slow tests (decorator).\"\"\"\n f.slow_test = True\n return f\n\n\n@nottest\ndef ultra_slow_test(f):\n \"\"\"Mark ultra slow tests (decorator).\"\"\"\n f.ultra_slow_test = True\n f.slow_test = True\n return f\n\n\ndef has_nibabel(vox2ras_tkr=False):\n \"\"\"Determine if nibabel is installed.\n\n Parameters\n ----------\n vox2ras_tkr : bool\n If True, require nibabel has vox2ras_tkr support.\n\n Returns\n -------\n has : bool\n True if the user has nibabel.\n \"\"\"\n try:\n import nibabel\n out = True\n if vox2ras_tkr: # we need MGHHeader to have vox2ras_tkr param\n out = (getattr(getattr(getattr(nibabel, 'MGHImage', 0),\n 'header_class', 0),\n 'get_vox2ras_tkr', None) is not None)\n return out\n except ImportError:\n return False\n\n\ndef has_mne_c():\n \"\"\"Check for MNE-C.\"\"\"\n return 'MNE_ROOT' in os.environ\n\n\ndef has_freesurfer():\n \"\"\"Check for Freesurfer.\"\"\"\n return 'FREESURFER_HOME' in os.environ\n\n\ndef requires_nibabel(vox2ras_tkr=False):\n \"\"\"Check for nibabel.\"\"\"\n extra = ' with vox2ras_tkr support' if vox2ras_tkr else ''\n return np.testing.dec.skipif(not has_nibabel(vox2ras_tkr),\n 'Requires nibabel%s' % extra)\n\n\ndef buggy_mkl_svd(function):\n \"\"\"Decorate tests that make calls to SVD and intermittently fail.\"\"\"\n @wraps(function)\n def dec(*args, **kwargs):\n try:\n return function(*args, **kwargs)\n except np.linalg.LinAlgError as exp:\n if 'SVD did not converge' in str(exp):\n from nose.plugins.skip import SkipTest\n msg = 'Intel MKL SVD convergence error detected, skipping test'\n warn(msg)\n raise SkipTest(msg)\n raise\n return dec\n\n\ndef requires_version(library, min_version):\n \"\"\"Check for a library version.\"\"\"\n return np.testing.dec.skipif(not check_version(library, min_version),\n 'Requires %s version >= 
%s'\n % (library, min_version))\n\n\ndef requires_module(function, name, call=None):\n \"\"\"Skip a test if package is not available (decorator).\"\"\"\n call = ('import %s' % name) if call is None else call\n try:\n from nose.plugins.skip import SkipTest\n except ImportError:\n SkipTest = AssertionError\n\n @wraps(function)\n def dec(*args, **kwargs): # noqa: D102\n try:\n exec(call) in globals(), locals()\n except Exception as exc:\n raise SkipTest('Test %s skipped, requires %s. Got exception (%s)'\n % (function.__name__, name, exc))\n return function(*args, **kwargs)\n return dec\n\n\ndef copy_doc(source):\n \"\"\"Copy the docstring from another function (decorator).\n\n The docstring of the source function is prepepended to the docstring of the\n function wrapped by this decorator.\n\n This is useful when inheriting from a class and overloading a method. This\n decorator can be used to copy the docstring of the original method.\n\n Parameters\n ----------\n source : function\n Function to copy the docstring from\n\n Returns\n -------\n wrapper : function\n The decorated function\n\n Examples\n --------\n >>> class A:\n ... def m1():\n ... '''Docstring for m1'''\n ... pass\n >>> class B (A):\n ... @copy_doc(A.m1)\n ... def m1():\n ... ''' this gets appended'''\n ... pass\n >>> print(B.m1.__doc__)\n Docstring for m1 this gets appended\n \"\"\"\n def wrapper(func):\n if source.__doc__ is None or len(source.__doc__) == 0:\n raise ValueError('Cannot copy docstring: docstring was empty.')\n doc = source.__doc__\n if func.__doc__ is not None:\n doc += func.__doc__\n func.__doc__ = doc\n return func\n return wrapper\n\n\ndef copy_function_doc_to_method_doc(source):\n \"\"\"Use the docstring from a function as docstring for a method.\n\n The docstring of the source function is prepepended to the docstring of the\n function wrapped by this decorator. Additionally, the first parameter\n specified in the docstring of the source function is removed in the new\n docstring.\n\n This decorator is useful when implementing a method that just calls a\n function. This pattern is prevalent in for example the plotting functions\n of MNE.\n\n Parameters\n ----------\n source : function\n Function to copy the docstring from\n\n Returns\n -------\n wrapper : function\n The decorated method\n\n Examples\n --------\n >>> def plot_function(object, a, b):\n ... '''Docstring for plotting function.\n ...\n ... Parameters\n ... ----------\n ... object : instance of object\n ... The object to plot\n ... a : int\n ... Some parameter\n ... b : int\n ... Some parameter\n ... '''\n ... pass\n ...\n >>> class A:\n ... @copy_function_doc_to_method_doc(plot_function)\n ... def plot(self, a, b):\n ... '''\n ... Notes\n ... -----\n ... .. versionadded:: 0.13.0\n ... '''\n ... plot_function(self, a, b)\n >>> print(A.plot.__doc__)\n Docstring for plotting function.\n <BLANKLINE>\n Parameters\n ----------\n a : int\n Some parameter\n b : int\n Some parameter\n <BLANKLINE>\n Notes\n -----\n .. 
versionadded:: 0.13.0\n <BLANKLINE>\n\n Notes\n -----\n The parsing performed is very basic and will break easily on docstrings\n that are not formatted exactly according to the ``numpydoc`` standard.\n Always inspect the resulting docstring when using this decorator.\n \"\"\"\n def wrapper(func):\n doc = source.__doc__.split('\\n')\n\n # Find parameter block\n for line, text in enumerate(doc[:-2]):\n if (text.strip() == 'Parameters' and\n doc[line + 1].strip() == '----------'):\n parameter_block = line\n break\n else:\n # No parameter block found\n raise ValueError('Cannot copy function docstring: no parameter '\n 'block found. To simply copy the docstring, use '\n 'the @copy_doc decorator instead.')\n\n # Find first parameter\n for line, text in enumerate(doc[parameter_block:], parameter_block):\n if ':' in text:\n first_parameter = line\n parameter_indentation = len(text) - len(text.lstrip(' '))\n break\n else:\n raise ValueError('Cannot copy function docstring: no parameters '\n 'found. To simply copy the docstring, use the '\n '@copy_doc decorator instead.')\n\n # Find end of first parameter\n for line, text in enumerate(doc[first_parameter + 1:],\n first_parameter + 1):\n # Ignore empty lines\n if len(text.strip()) == 0:\n continue\n\n line_indentation = len(text) - len(text.lstrip(' '))\n if line_indentation <= parameter_indentation:\n # Reach end of first parameter\n first_parameter_end = line\n\n # Of only one parameter is defined, remove the Parameters\n # heading as well\n if ':' not in text:\n first_parameter = parameter_block\n\n break\n else:\n # End of docstring reached\n first_parameter_end = line\n first_parameter = parameter_block\n\n # Copy the docstring, but remove the first parameter\n doc = ('\\n'.join(doc[:first_parameter]) + '\\n' +\n '\\n'.join(doc[first_parameter_end:]))\n if func.__doc__ is not None:\n doc += func.__doc__\n func.__doc__ = doc\n return func\n return wrapper\n\n\n_pandas_call = \"\"\"\nimport pandas\nversion = LooseVersion(pandas.__version__)\nif version < '0.8.0':\n raise ImportError\n\"\"\"\n\n_sklearn_call = \"\"\"\nrequired_version = '0.14'\nimport sklearn\nversion = LooseVersion(sklearn.__version__)\nif version < required_version:\n raise ImportError\n\"\"\"\n\n_sklearn_0_15_call = \"\"\"\nrequired_version = '0.15'\nimport sklearn\nversion = LooseVersion(sklearn.__version__)\nif version < required_version:\n raise ImportError\n\"\"\"\n\n_mayavi_call = \"\"\"\nwith warnings.catch_warnings(record=True): # traits\n from mayavi import mlab\nmlab.options.backend = 'test'\n\"\"\"\n\n_mne_call = \"\"\"\nif not has_mne_c():\n raise ImportError\n\"\"\"\n\n_fs_call = \"\"\"\nif not has_freesurfer():\n raise ImportError\n\"\"\"\n\n_n2ft_call = \"\"\"\nif 'NEUROMAG2FT_ROOT' not in os.environ:\n raise ImportError\n\"\"\"\n\n_fs_or_ni_call = \"\"\"\nif not has_nibabel() and not has_freesurfer():\n raise ImportError\n\"\"\"\n\nrequires_pandas = partial(requires_module, name='pandas', call=_pandas_call)\nrequires_sklearn = partial(requires_module, name='sklearn', call=_sklearn_call)\nrequires_sklearn_0_15 = partial(requires_module, name='sklearn',\n call=_sklearn_0_15_call)\nrequires_mayavi = partial(requires_module, name='mayavi', call=_mayavi_call)\nrequires_mne = partial(requires_module, name='MNE-C', call=_mne_call)\nrequires_freesurfer = partial(requires_module, name='Freesurfer',\n call=_fs_call)\nrequires_neuromag2ft = partial(requires_module, name='neuromag2ft',\n call=_n2ft_call)\nrequires_fs_or_nibabel = partial(requires_module, name='nibabel or 
Freesurfer',\n call=_fs_or_ni_call)\n\nrequires_tvtk = partial(requires_module, name='TVTK',\n call='from tvtk.api import tvtk')\nrequires_statsmodels = partial(requires_module, name='statsmodels')\nrequires_pysurfer = partial(requires_module, name='PySurfer',\n call=\"\"\"import warnings\nwith warnings.catch_warnings(record=True):\n from surfer import Brain\"\"\")\nrequires_PIL = partial(requires_module, name='PIL',\n call='from PIL import Image')\nrequires_good_network = partial(\n requires_module, name='good network connection',\n call='if int(os.environ.get(\"MNE_SKIP_NETWORK_TESTS\", 0)):\\n'\n ' raise ImportError')\nrequires_ftp = partial(\n requires_module, name='ftp downloading capability',\n call='if int(os.environ.get(\"MNE_SKIP_FTP_TESTS\", 0)):\\n'\n ' raise ImportError')\nrequires_nitime = partial(requires_module, name='nitime')\nrequires_h5py = partial(requires_module, name='h5py')\nrequires_numpydoc = partial(requires_module, name='numpydoc')\n\n\ndef check_version(library, min_version):\n r\"\"\"Check minimum library version required.\n\n Parameters\n ----------\n library : str\n The library name to import. Must have a ``__version__`` property.\n min_version : str\n The minimum version string. Anything that matches\n ``'(\\d+ | [a-z]+ | \\.)'``. Can also be empty to skip version\n check (just check for library presence).\n\n Returns\n -------\n ok : bool\n True if the library exists with at least the specified version.\n \"\"\"\n ok = True\n try:\n library = __import__(library)\n except ImportError:\n ok = False\n else:\n if min_version:\n this_version = LooseVersion(library.__version__)\n if this_version < min_version:\n ok = False\n return ok\n\n\ndef _check_mayavi_version(min_version='4.3.0'):\n \"\"\"Check mayavi version.\"\"\"\n if not check_version('mayavi', min_version):\n raise RuntimeError(\"Need mayavi >= %s\" % min_version)\n\n\ndef _check_pyface_backend():\n \"\"\"Check the currently selected Pyface backend.\n\n Returns\n -------\n backend : str\n Name of the backend.\n result : 0 | 1 | 2\n 0: the backend has been tested and works.\n 1: the backend has not been tested.\n 2: the backend not been tested.\n\n Notes\n -----\n See also http://docs.enthought.com/pyface/.\n \"\"\"\n try:\n from traits.trait_base import ETSConfig\n except ImportError:\n return None, 2\n\n backend = ETSConfig.toolkit\n if backend == 'qt4':\n status = 0\n else:\n status = 1\n return backend, status\n\n\ndef _import_mlab():\n \"\"\"Quietly import mlab.\"\"\"\n with warnings.catch_warnings(record=True):\n from mayavi import mlab\n return mlab\n\n\n@verbose\ndef run_subprocess(command, verbose=None, *args, **kwargs):\n \"\"\"Run command using subprocess.Popen.\n\n Run command and wait for command to complete. If the return code was zero\n then return, otherwise raise CalledProcessError.\n By default, this will also add stdout= and stderr=subproces.PIPE\n to the call to Popen to suppress printing to the terminal.\n\n Parameters\n ----------\n command : list of str | str\n Command to run as subprocess (see subprocess.Popen documentation).\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more). 
Defaults to\n self.verbose.\n *args, **kwargs : arguments\n Additional arguments to pass to subprocess.Popen.\n\n Returns\n -------\n stdout : str\n Stdout returned by the process.\n stderr : str\n Stderr returned by the process.\n \"\"\"\n for stdxxx, sys_stdxxx in (['stderr', sys.stderr],\n ['stdout', sys.stdout]):\n if stdxxx not in kwargs:\n kwargs[stdxxx] = subprocess.PIPE\n elif kwargs[stdxxx] is sys_stdxxx:\n if isinstance(sys_stdxxx, StringIO):\n # nose monkey patches sys.stderr and sys.stdout to StringIO\n kwargs[stdxxx] = subprocess.PIPE\n else:\n kwargs[stdxxx] = sys_stdxxx\n\n # Check the PATH environment variable. If run_subprocess() is to be called\n # frequently this should be refactored so as to only check the path once.\n env = kwargs.get('env', os.environ)\n if any(p.startswith('~') for p in env['PATH'].split(os.pathsep)):\n warn('Your PATH environment variable contains at least one path '\n 'starting with a tilde (\"~\") character. Such paths are not '\n 'interpreted correctly from within Python. It is recommended '\n 'that you use \"$HOME\" instead of \"~\".')\n if isinstance(command, string_types):\n command_str = command\n else:\n command_str = ' '.join(command)\n logger.info(\"Running subprocess: %s\" % command_str)\n try:\n p = subprocess.Popen(command, *args, **kwargs)\n except Exception:\n if isinstance(command, string_types):\n command_name = command.split()[0]\n else:\n command_name = command[0]\n logger.error('Command not found: %s' % command_name)\n raise\n stdout_, stderr = p.communicate()\n stdout_ = '' if stdout_ is None else stdout_.decode('utf-8')\n stderr = '' if stderr is None else stderr.decode('utf-8')\n\n if stdout_.strip():\n logger.info(\"stdout:\\n%s\" % stdout_)\n if stderr.strip():\n logger.info(\"stderr:\\n%s\" % stderr)\n\n output = (stdout_, stderr)\n if p.returncode:\n print(output)\n err_fun = subprocess.CalledProcessError.__init__\n if 'output' in _get_args(err_fun):\n raise subprocess.CalledProcessError(p.returncode, command, output)\n else:\n raise subprocess.CalledProcessError(p.returncode, command)\n\n return output\n\n\n###############################################################################\n# LOGGING\n\ndef set_log_level(verbose=None, return_old_level=False):\n \"\"\"Set the logging level.\n\n Parameters\n ----------\n verbose : bool, str, int, or None\n The verbosity of messages to print. If a str, it can be either DEBUG,\n INFO, WARNING, ERROR, or CRITICAL. 
Note that these are for\n convenience and are equivalent to passing in logging.DEBUG, etc.\n For bool, True is the same as 'INFO', False is the same as 'WARNING'.\n If None, the environment variable MNE_LOGGING_LEVEL is read, and if\n it doesn't exist, defaults to INFO.\n return_old_level : bool\n If True, return the old verbosity level.\n \"\"\"\n if verbose is None:\n verbose = get_config('MNE_LOGGING_LEVEL', 'INFO')\n elif isinstance(verbose, bool):\n if verbose is True:\n verbose = 'INFO'\n else:\n verbose = 'WARNING'\n if isinstance(verbose, string_types):\n verbose = verbose.upper()\n logging_types = dict(DEBUG=logging.DEBUG, INFO=logging.INFO,\n WARNING=logging.WARNING, ERROR=logging.ERROR,\n CRITICAL=logging.CRITICAL)\n if verbose not in logging_types:\n raise ValueError('verbose must be of a valid type')\n verbose = logging_types[verbose]\n logger = logging.getLogger('mne')\n old_verbose = logger.level\n logger.setLevel(verbose)\n return (old_verbose if return_old_level else None)\n\n\ndef set_log_file(fname=None, output_format='%(message)s', overwrite=None):\n \"\"\"Set the log to print to a file.\n\n Parameters\n ----------\n fname : str, or None\n Filename of the log to print to. If None, stdout is used.\n To suppress log outputs, use set_log_level('WARN').\n output_format : str\n Format of the output messages. See the following for examples:\n\n https://docs.python.org/dev/howto/logging.html\n\n e.g., \"%(asctime)s - %(levelname)s - %(message)s\".\n overwrite : bool | None\n Overwrite the log file (if it exists). Otherwise, statements\n will be appended to the log (default). None is the same as False,\n but additionally raises a warning to notify the user that log\n entries will be appended.\n \"\"\"\n logger = logging.getLogger('mne')\n handlers = logger.handlers\n for h in handlers:\n # only remove our handlers (get along nicely with nose)\n if isinstance(h, (logging.FileHandler, logging.StreamHandler)):\n if isinstance(h, logging.FileHandler):\n h.close()\n logger.removeHandler(h)\n if fname is not None:\n if op.isfile(fname) and overwrite is None:\n # Don't use warn() here because we just want to\n # emit a warnings.warn here (not logger.warn)\n warnings.warn('Log entries will be appended to the file. 
Use '\n 'overwrite=False to avoid this message in the '\n 'future.', RuntimeWarning, stacklevel=2)\n overwrite = False\n mode = 'w' if overwrite else 'a'\n lh = logging.FileHandler(fname, mode=mode)\n else:\n \"\"\" we should just be able to do:\n lh = logging.StreamHandler(sys.stdout)\n but because doctests uses some magic on stdout, we have to do this:\n \"\"\"\n lh = logging.StreamHandler(WrapStdOut())\n\n lh.setFormatter(logging.Formatter(output_format))\n # actually add the stream handler\n logger.addHandler(lh)\n\n\nclass catch_logging(object):\n \"\"\"Store logging.\n\n This will remove all other logging handlers, and return the handler to\n stdout when complete.\n \"\"\"\n\n def __enter__(self): # noqa: D105\n self._data = StringIO()\n self._lh = logging.StreamHandler(self._data)\n self._lh.setFormatter(logging.Formatter('%(message)s'))\n for lh in logger.handlers:\n logger.removeHandler(lh)\n logger.addHandler(self._lh)\n return self._data\n\n def __exit__(self, *args): # noqa: D105\n logger.removeHandler(self._lh)\n set_log_file(None)\n\n\n###############################################################################\n# CONFIG / PREFS\n\ndef get_subjects_dir(subjects_dir=None, raise_error=False):\n \"\"\"Safely use subjects_dir input to return SUBJECTS_DIR.\n\n Parameters\n ----------\n subjects_dir : str | None\n If a value is provided, return subjects_dir. Otherwise, look for\n SUBJECTS_DIR config and return the result.\n raise_error : bool\n If True, raise a KeyError if no value for SUBJECTS_DIR can be found\n (instead of returning None).\n\n Returns\n -------\n value : str | None\n The SUBJECTS_DIR value.\n \"\"\"\n if subjects_dir is None:\n subjects_dir = get_config('SUBJECTS_DIR', raise_error=raise_error)\n return subjects_dir\n\n\n_temp_home_dir = None\n\n\ndef _get_extra_data_path(home_dir=None):\n \"\"\"Get path to extra data (config, tables, etc.).\"\"\"\n global _temp_home_dir\n if home_dir is None:\n home_dir = os.environ.get('_MNE_FAKE_HOME_DIR')\n if home_dir is None:\n # this has been checked on OSX64, Linux64, and Win32\n if 'nt' == os.name.lower():\n home_dir = os.getenv('APPDATA')\n else:\n # This is a more robust way of getting the user's home folder on\n # Linux platforms (not sure about OSX, Unix or BSD) than checking\n # the HOME environment variable. If the user is running some sort\n # of script that isn't launched via the command line (e.g. a script\n # launched via Upstart) then the HOME environment variable will\n # not be set.\n if os.getenv('MNE_DONTWRITE_HOME', '') == 'true':\n if _temp_home_dir is None:\n _temp_home_dir = tempfile.mkdtemp()\n atexit.register(partial(shutil.rmtree, _temp_home_dir,\n ignore_errors=True))\n home_dir = _temp_home_dir\n else:\n home_dir = os.path.expanduser('~')\n\n if home_dir is None:\n raise ValueError('mne-python config file path could '\n 'not be determined, please report this '\n 'error to mne-python developers')\n\n return op.join(home_dir, '.mne')\n\n\ndef get_config_path(home_dir=None):\n r\"\"\"Get path to standard mne-python config file.\n\n Parameters\n ----------\n home_dir : str | None\n The folder that contains the .mne config folder.\n If None, it is found automatically.\n\n Returns\n -------\n config_path : str\n The path to the mne-python configuration file. On windows, this\n will be '%APPDATA%\\.mne\\mne-python.json'. 
On every other\n system, this will be ~/.mne/mne-python.json.\n \"\"\"\n val = op.join(_get_extra_data_path(home_dir=home_dir),\n 'mne-python.json')\n return val\n\n\ndef set_cache_dir(cache_dir):\n \"\"\"Set the directory to be used for temporary file storage.\n\n This directory is used by joblib to store memmapped arrays,\n which reduces memory requirements and speeds up parallel\n computation.\n\n Parameters\n ----------\n cache_dir: str or None\n Directory to use for temporary file storage. None disables\n temporary file storage.\n \"\"\"\n if cache_dir is not None and not op.exists(cache_dir):\n raise IOError('Directory %s does not exist' % cache_dir)\n\n set_config('MNE_CACHE_DIR', cache_dir, set_env=False)\n\n\ndef set_memmap_min_size(memmap_min_size):\n \"\"\"Set the minimum size for memmaping of arrays for parallel processing.\n\n Parameters\n ----------\n memmap_min_size: str or None\n Threshold on the minimum size of arrays that triggers automated memory\n mapping for parallel processing, e.g., '1M' for 1 megabyte.\n Use None to disable memmaping of large arrays.\n \"\"\"\n if memmap_min_size is not None:\n if not isinstance(memmap_min_size, string_types):\n raise ValueError('\\'memmap_min_size\\' has to be a string.')\n if memmap_min_size[-1] not in ['K', 'M', 'G']:\n raise ValueError('The size has to be given in kilo-, mega-, or '\n 'gigabytes, e.g., 100K, 500M, 1G.')\n\n set_config('MNE_MEMMAP_MIN_SIZE', memmap_min_size, set_env=False)\n\n\n# List the known configuration values\nknown_config_types = (\n 'MNE_BROWSE_RAW_SIZE',\n 'MNE_CACHE_DIR',\n 'MNE_COREG_COPY_ANNOT',\n 'MNE_COREG_GUESS_MRI_SUBJECT',\n 'MNE_COREG_HEAD_HIGH_RES',\n 'MNE_COREG_HEAD_OPACITY',\n 'MNE_COREG_PREPARE_BEM',\n 'MNE_COREG_SCALE_LABELS',\n 'MNE_COREG_SCENE_HEIGHT',\n 'MNE_COREG_SCENE_WIDTH',\n 'MNE_COREG_SUBJECTS_DIR',\n 'MNE_CUDA_IGNORE_PRECISION',\n 'MNE_DATA',\n 'MNE_DATASETS_BRAINSTORM_PATH',\n 'MNE_DATASETS_EEGBCI_PATH',\n 'MNE_DATASETS_MEGSIM_PATH',\n 'MNE_DATASETS_MISC_PATH',\n 'MNE_DATASETS_MTRF_PATH',\n 'MNE_DATASETS_SAMPLE_PATH',\n 'MNE_DATASETS_SOMATO_PATH',\n 'MNE_DATASETS_MULTIMODAL_PATH',\n 'MNE_DATASETS_SPM_FACE_DATASETS_TESTS',\n 'MNE_DATASETS_SPM_FACE_PATH',\n 'MNE_DATASETS_TESTING_PATH',\n 'MNE_DATASETS_VISUAL_92_CATEGORIES_PATH',\n 'MNE_DATASETS_FIELDTRIP_CMC_PATH',\n 'MNE_FORCE_SERIAL',\n 'MNE_KIT2FIFF_STIM_CHANNELS',\n 'MNE_KIT2FIFF_STIM_CHANNEL_CODING',\n 'MNE_KIT2FIFF_STIM_CHANNEL_SLOPE',\n 'MNE_KIT2FIFF_STIM_CHANNEL_THRESHOLD',\n 'MNE_LOGGING_LEVEL',\n 'MNE_MEMMAP_MIN_SIZE',\n 'MNE_SKIP_FTP_TESTS',\n 'MNE_SKIP_NETWORK_TESTS',\n 'MNE_SKIP_TESTING_DATASET_TESTS',\n 'MNE_STIM_CHANNEL',\n 'MNE_USE_CUDA',\n 'MNE_SKIP_FS_FLASH_CALL',\n 'SUBJECTS_DIR',\n)\n\n# These allow for partial matches, e.g. 'MNE_STIM_CHANNEL_1' is okay key\nknown_config_wildcards = (\n 'MNE_STIM_CHANNEL',\n)\n\n\ndef _load_config(config_path, raise_error=False):\n \"\"\"Safely load a config file.\"\"\"\n with open(config_path, 'r') as fid:\n try:\n config = json.load(fid)\n except ValueError:\n # No JSON object could be decoded --> corrupt file?\n msg = ('The MNE-Python config file (%s) is not a valid JSON '\n 'file and might be corrupted' % config_path)\n if raise_error:\n raise RuntimeError(msg)\n warn(msg)\n config = dict()\n return config\n\n\ndef get_config(key=None, default=None, raise_error=False, home_dir=None):\n \"\"\"Read MNE-Python preferences from environment or config file.\n\n Parameters\n ----------\n key : None | str\n The preference key to look for. 
The os evironment is searched first,\n then the mne-python config file is parsed.\n If None, all the config parameters present in environment variables or\n the path are returned.\n default : str | None\n Value to return if the key is not found.\n raise_error : bool\n If True, raise an error if the key is not found (instead of returning\n default).\n home_dir : str | None\n The folder that contains the .mne config folder.\n If None, it is found automatically.\n\n Returns\n -------\n value : dict | str | None\n The preference key value.\n\n See Also\n --------\n set_config\n \"\"\"\n if key is not None and not isinstance(key, string_types):\n raise TypeError('key must be a string')\n\n # first, check to see if key is in env\n if key is not None and key in os.environ:\n return os.environ[key]\n\n # second, look for it in mne-python config file\n config_path = get_config_path(home_dir=home_dir)\n if not op.isfile(config_path):\n config = {}\n else:\n config = _load_config(config_path)\n\n if key is None:\n # update config with environment variables\n env_keys = (set(config).union(known_config_types).\n intersection(os.environ))\n config.update({key: os.environ[key] for key in env_keys})\n return config\n elif raise_error is True and key not in config:\n meth_1 = 'os.environ[\"%s\"] = VALUE' % key\n meth_2 = 'mne.utils.set_config(\"%s\", VALUE, set_env=True)' % key\n raise KeyError('Key \"%s\" not found in environment or in the '\n 'mne-python config file: %s '\n 'Try either:'\n ' %s for a temporary solution, or:'\n ' %s for a permanent one. You can also '\n 'set the environment variable before '\n 'running python.'\n % (key, config_path, meth_1, meth_2))\n else:\n return config.get(key, default)\n\n\ndef set_config(key, value, home_dir=None, set_env=True):\n \"\"\"Set a MNE-Python preference key in the config file and environment.\n\n Parameters\n ----------\n key : str | None\n The preference key to set. If None, a tuple of the valid\n keys is returned, and ``value`` and ``home_dir`` are ignored.\n value : str | None\n The value to assign to the preference key. If None, the key is\n deleted.\n home_dir : str | None\n The folder that contains the .mne config folder.\n If None, it is found automatically.\n set_env : bool\n If True (default), update :data:`os.environ` in addition to\n updating the MNE-Python config file.\n\n See Also\n --------\n get_config\n \"\"\"\n if key is None:\n return known_config_types\n if not isinstance(key, string_types):\n raise TypeError('key must be a string')\n # While JSON allow non-string types, we allow users to override config\n # settings using env, which are strings, so we enforce that here\n if not isinstance(value, string_types) and value is not None:\n raise TypeError('value must be a string or None')\n if key not in known_config_types and not \\\n any(k in key for k in known_config_wildcards):\n warn('Setting non-standard config type: \"%s\"' % key)\n\n # Read all previous values\n config_path = get_config_path(home_dir=home_dir)\n if op.isfile(config_path):\n config = _load_config(config_path, raise_error=True)\n else:\n config = dict()\n logger.info('Attempting to create new mne-python configuration '\n 'file:\\n%s' % config_path)\n if value is None:\n config.pop(key, None)\n if set_env and key in os.environ:\n del os.environ[key]\n else:\n config[key] = value\n if set_env:\n os.environ[key] = value\n\n # Write all values. 
This may fail if the default directory is not\n # writeable.\n directory = op.dirname(config_path)\n if not op.isdir(directory):\n os.mkdir(directory)\n with open(config_path, 'w') as fid:\n json.dump(config, fid, sort_keys=True, indent=0)\n\n\nclass ProgressBar(object):\n \"\"\"Generate a command-line progressbar.\n\n Parameters\n ----------\n max_value : int | iterable\n Maximum value of process (e.g. number of samples to process, bytes to\n download, etc.). If an iterable is given, then `max_value` will be set\n to the length of this iterable.\n initial_value : int\n Initial value of process, useful when resuming process from a specific\n value, defaults to 0.\n mesg : str\n Message to include at end of progress bar.\n max_chars : int\n Number of characters to use for progress bar (be sure to save some room\n for the message and % complete as well).\n progress_character : char\n Character in the progress bar that indicates the portion completed.\n spinner : bool\n Show a spinner. Useful for long-running processes that may not\n increment the progress bar very often. This provides the user with\n feedback that the progress has not stalled.\n\n Example\n -------\n >>> progress = ProgressBar(13000)\n >>> progress.update(3000) # doctest: +SKIP\n [......... ] 23.07692 |\n >>> progress.update(6000) # doctest: +SKIP\n [.................. ] 46.15385 |\n\n >>> progress = ProgressBar(13000, spinner=True)\n >>> progress.update(3000) # doctest: +SKIP\n [......... ] 23.07692 |\n >>> progress.update(6000) # doctest: +SKIP\n [.................. ] 46.15385 /\n \"\"\"\n\n spinner_symbols = ['|', '/', '-', '\\\\']\n template = '\\r[{0}{1}] {2:.05f} {3} {4} '\n\n def __init__(self, max_value, initial_value=0, mesg='', max_chars=40,\n progress_character='.', spinner=False,\n verbose_bool=True): # noqa: D102\n self.cur_value = initial_value\n if isinstance(max_value, (float, int)):\n self.max_value = max_value\n self.iterable = None\n else:\n # input is an iterable\n self.max_value = len(max_value)\n self.iterable = max_value\n self.mesg = mesg\n self.max_chars = max_chars\n self.progress_character = progress_character\n self.spinner = spinner\n self.spinner_index = 0\n self.n_spinner = len(self.spinner_symbols)\n self._do_print = verbose_bool\n\n def update(self, cur_value, mesg=None):\n \"\"\"Update progressbar with current value of process.\n\n Parameters\n ----------\n cur_value : number\n Current value of process. Should be <= max_value (but this is not\n enforced). The percent of the progressbar will be computed as\n (cur_value / max_value) * 100\n mesg : str\n Message to display to the right of the progressbar. If None, the\n last message provided will be used. To clear the current message,\n pass a null string, ''.\n \"\"\"\n # Ensure floating-point division so we can get fractions of a percent\n # for the progressbar.\n self.cur_value = cur_value\n progress = min(float(self.cur_value) / self.max_value, 1.)\n num_chars = int(progress * self.max_chars)\n num_left = self.max_chars - num_chars\n\n # Update the message\n if mesg is not None:\n if mesg == 'file_sizes':\n mesg = '(%s / %s)' % (sizeof_fmt(self.cur_value),\n sizeof_fmt(self.max_value))\n self.mesg = mesg\n\n # The \\r tells the cursor to return to the beginning of the line rather\n # than starting a new line. 
This allows us to have a progressbar-style\n # display in the console window.\n bar = self.template.format(self.progress_character * num_chars,\n ' ' * num_left,\n progress * 100,\n self.spinner_symbols[self.spinner_index],\n self.mesg)\n # Force a flush because sometimes when using bash scripts and pipes,\n # the output is not printed until after the program exits.\n if self._do_print:\n sys.stdout.write(bar)\n sys.stdout.flush()\n # Increament the spinner\n if self.spinner:\n self.spinner_index = (self.spinner_index + 1) % self.n_spinner\n\n def update_with_increment_value(self, increment_value, mesg=None):\n \"\"\"Update progressbar with an increment.\n\n Parameters\n ----------\n increment_value : int\n Value of the increment of process. The percent of the progressbar\n will be computed as\n (self.cur_value + increment_value / max_value) * 100\n mesg : str\n Message to display to the right of the progressbar. If None, the\n last message provided will be used. To clear the current message,\n pass a null string, ''.\n \"\"\"\n self.cur_value += increment_value\n self.update(self.cur_value, mesg)\n\n def __iter__(self):\n \"\"\"Iterate to auto-increment the pbar with 1.\"\"\"\n if self.iterable is None:\n raise ValueError(\"Must give an iterable to be used in a loop.\")\n for obj in self.iterable:\n yield obj\n self.update_with_increment_value(1)\n\n\ndef _get_ftp(url, temp_file_name, initial_size, file_size, verbose_bool):\n \"\"\"Safely (resume a) download to a file from FTP.\"\"\"\n # Adapted from: https://pypi.python.org/pypi/fileDownloader.py\n # but with changes\n\n parsed_url = urllib.parse.urlparse(url)\n file_name = os.path.basename(parsed_url.path)\n server_path = parsed_url.path.replace(file_name, \"\")\n unquoted_server_path = urllib.parse.unquote(server_path)\n\n data = ftplib.FTP()\n if parsed_url.port is not None:\n data.connect(parsed_url.hostname, parsed_url.port)\n else:\n data.connect(parsed_url.hostname)\n data.login()\n if len(server_path) > 1:\n data.cwd(unquoted_server_path)\n data.sendcmd(\"TYPE I\")\n data.sendcmd(\"REST \" + str(initial_size))\n down_cmd = \"RETR \" + file_name\n assert file_size == data.size(file_name)\n progress = ProgressBar(file_size, initial_value=initial_size,\n max_chars=40, spinner=True, mesg='file_sizes',\n verbose_bool=verbose_bool)\n\n # Callback lambda function that will be passed the downloaded data\n # chunk and will write it to file and update the progress bar\n mode = 'ab' if initial_size > 0 else 'wb'\n with open(temp_file_name, mode) as local_file:\n def chunk_write(chunk):\n return _chunk_write(chunk, local_file, progress)\n data.retrbinary(down_cmd, chunk_write)\n data.close()\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n\ndef _get_http(url, temp_file_name, initial_size, file_size, verbose_bool):\n \"\"\"Safely (resume a) download to a file from http(s).\"\"\"\n # Actually do the reading\n req = urllib.request.Request(url)\n if initial_size > 0:\n req.headers['Range'] = 'bytes=%s-' % (initial_size,)\n try:\n response = urllib.request.urlopen(req)\n except Exception:\n # There is a problem that may be due to resuming, some\n # servers may not support the \"Range\" header. Switch\n # back to complete download method\n logger.info('Resuming download failed (server '\n 'rejected the request). 
Attempting to '\n 'restart downloading the entire file.')\n del req.headers['Range']\n response = urllib.request.urlopen(req)\n total_size = int(response.headers.get('Content-Length', '1').strip())\n if initial_size > 0 and file_size == total_size:\n logger.info('Resuming download failed (resume file size '\n 'mismatch). Attempting to restart downloading the '\n 'entire file.')\n initial_size = 0\n total_size += initial_size\n if total_size != file_size:\n raise RuntimeError('URL could not be parsed properly')\n mode = 'ab' if initial_size > 0 else 'wb'\n progress = ProgressBar(total_size, initial_value=initial_size,\n max_chars=40, spinner=True, mesg='file_sizes',\n verbose_bool=verbose_bool)\n chunk_size = 8192 # 2 ** 13\n with open(temp_file_name, mode) as local_file:\n while True:\n t0 = time.time()\n chunk = response.read(chunk_size)\n dt = time.time() - t0\n if dt < 0.005:\n chunk_size *= 2\n elif dt > 0.1 and chunk_size > 8192:\n chunk_size = chunk_size // 2\n if not chunk:\n if verbose_bool:\n sys.stdout.write('\\n')\n sys.stdout.flush()\n break\n local_file.write(chunk)\n progress.update_with_increment_value(len(chunk),\n mesg='file_sizes')\n\n\ndef _chunk_write(chunk, local_file, progress):\n \"\"\"Write a chunk to file and update the progress bar.\"\"\"\n local_file.write(chunk)\n progress.update_with_increment_value(len(chunk))\n\n\n@verbose\ndef _fetch_file(url, file_name, print_destination=True, resume=True,\n hash_=None, timeout=10., verbose=None):\n \"\"\"Load requested file, downloading it if needed or requested.\n\n Parameters\n ----------\n url: string\n The url of file to be downloaded.\n file_name: string\n Name, along with the path, of where downloaded file will be saved.\n print_destination: bool, optional\n If true, destination of where file was saved will be printed after\n download finishes.\n resume: bool, optional\n If true, try to resume partially downloaded files.\n hash_ : str | None\n The hash of the file to check. 
If None, no checking is\n performed.\n timeout : float\n The URL open timeout.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n \"\"\"\n # Adapted from NISL:\n # https://github.com/nisl/tutorial/blob/master/nisl/datasets.py\n if hash_ is not None and (not isinstance(hash_, string_types) or\n len(hash_) != 32):\n raise ValueError('Bad hash value given, should be a 32-character '\n 'string:\\n%s' % (hash_,))\n temp_file_name = file_name + \".part\"\n verbose_bool = (logger.level <= 20) # 20 is info\n try:\n # Check file size and displaying it alongside the download url\n u = urllib.request.urlopen(url, timeout=timeout)\n u.close()\n # this is necessary to follow any redirects\n url = u.geturl()\n u = urllib.request.urlopen(url, timeout=timeout)\n try:\n file_size = int(u.headers.get('Content-Length', '1').strip())\n finally:\n u.close()\n del u\n logger.info('Downloading data from %s (%s)\\n'\n % (url, sizeof_fmt(file_size)))\n\n # Triage resume\n if not os.path.exists(temp_file_name):\n resume = False\n if resume:\n with open(temp_file_name, 'rb', buffering=0) as local_file:\n local_file.seek(0, 2)\n initial_size = local_file.tell()\n del local_file\n else:\n initial_size = 0\n # This should never happen if our functions work properly\n if initial_size > file_size:\n raise RuntimeError('Local file (%s) is larger than remote '\n 'file (%s), cannot resume download'\n % (sizeof_fmt(initial_size),\n sizeof_fmt(file_size)))\n\n scheme = urllib.parse.urlparse(url).scheme\n fun = _get_http if scheme in ('http', 'https') else _get_ftp\n fun(url, temp_file_name, initial_size, file_size, verbose_bool)\n\n # check md5sum\n if hash_ is not None:\n logger.info('Verifying download hash.')\n md5 = md5sum(temp_file_name)\n if hash_ != md5:\n raise RuntimeError('Hash mismatch for downloaded file %s, '\n 'expected %s but got %s'\n % (temp_file_name, hash_, md5))\n shutil.move(temp_file_name, file_name)\n if print_destination is True:\n logger.info('File saved as %s.\\n' % file_name)\n except Exception:\n logger.error('Error while fetching file %s.'\n ' Dataset fetching aborted.' 
% url)\n raise\n\n\ndef sizeof_fmt(num):\n \"\"\"Turn number of bytes into human-readable str.\n\n Parameters\n ----------\n num : int\n The number of bytes.\n\n Returns\n -------\n size : str\n The size in human-readable format.\n \"\"\"\n units = ['bytes', 'kB', 'MB', 'GB', 'TB', 'PB']\n decimals = [0, 0, 1, 2, 2, 2]\n if num > 1:\n exponent = min(int(log(num, 1024)), len(units) - 1)\n quotient = float(num) / 1024 ** exponent\n unit = units[exponent]\n num_decimals = decimals[exponent]\n format_string = '{0:.%sf} {1}' % (num_decimals)\n return format_string.format(quotient, unit)\n if num == 0:\n return '0 bytes'\n if num == 1:\n return '1 byte'\n\n\nclass SizeMixin(object):\n \"\"\"Estimate MNE object sizes.\"\"\"\n\n @property\n def _size(self):\n \"\"\"Estimate the object size.\"\"\"\n try:\n size = object_size(self.info)\n except Exception:\n warn('Could not get size for self.info')\n return -1\n if hasattr(self, 'data'):\n size += object_size(self.data)\n elif hasattr(self, '_data'):\n size += object_size(self._data)\n return size\n\n def __hash__(self):\n \"\"\"Hash the object.\n\n Returns\n -------\n hash : int\n The hash\n \"\"\"\n from .evoked import Evoked\n from .epochs import BaseEpochs\n from .io.base import BaseRaw\n if isinstance(self, Evoked):\n return object_hash(dict(info=self.info, data=self.data))\n elif isinstance(self, (BaseEpochs, BaseRaw)):\n if not self.preload:\n raise RuntimeError('Cannot hash %s unless data are loaded'\n % self.__class__.__name__)\n return object_hash(dict(info=self.info, data=self._data))\n else:\n raise RuntimeError('Hashing unknown object type: %s' % type(self))\n\n\ndef _url_to_local_path(url, path):\n \"\"\"Mirror a url path in a local destination (keeping folder structure).\"\"\"\n destination = urllib.parse.urlparse(url).path\n # First char should be '/', and it needs to be discarded\n if len(destination) < 2 or destination[0] != '/':\n raise ValueError('Invalid URL')\n destination = os.path.join(path,\n urllib.request.url2pathname(destination)[1:])\n return destination\n\n\ndef _get_stim_channel(stim_channel, info, raise_error=True):\n \"\"\"Determine the appropriate stim_channel.\n\n First, 'MNE_STIM_CHANNEL', 'MNE_STIM_CHANNEL_1', 'MNE_STIM_CHANNEL_2', etc.\n are read. 
If these are not found, it will fall back to 'STI 014' if\n present, then fall back to the first channel of type 'stim', if present.\n\n Parameters\n ----------\n stim_channel : str | list of str | None\n The stim channel selected by the user.\n info : instance of Info\n An information structure containing information about the channels.\n\n Returns\n -------\n stim_channel : str | list of str\n The name of the stim channel(s) to use\n \"\"\"\n if stim_channel is not None:\n if not isinstance(stim_channel, list):\n if not isinstance(stim_channel, string_types):\n raise TypeError('stim_channel must be a str, list, or None')\n stim_channel = [stim_channel]\n if not all(isinstance(s, string_types) for s in stim_channel):\n raise TypeError('stim_channel list must contain all strings')\n return stim_channel\n\n stim_channel = list()\n ch_count = 0\n ch = get_config('MNE_STIM_CHANNEL')\n while(ch is not None and ch in info['ch_names']):\n stim_channel.append(ch)\n ch_count += 1\n ch = get_config('MNE_STIM_CHANNEL_%d' % ch_count)\n if ch_count > 0:\n return stim_channel\n\n if 'STI101' in info['ch_names']: # combination channel for newer systems\n return ['STI101']\n if 'STI 014' in info['ch_names']: # for older systems\n return ['STI 014']\n\n from .io.pick import pick_types\n stim_channel = pick_types(info, meg=False, ref_meg=False, stim=True)\n if len(stim_channel) > 0:\n stim_channel = [info['ch_names'][ch_] for ch_ in stim_channel]\n elif raise_error:\n raise ValueError(\"No stim channels found. Consider specifying them \"\n \"manually using the 'stim_channel' parameter.\")\n return stim_channel\n\n\ndef _check_fname(fname, overwrite=False, must_exist=False):\n \"\"\"Check for file existence.\"\"\"\n if not isinstance(fname, string_types):\n raise TypeError('file name is not a string')\n if must_exist and not op.isfile(fname):\n raise IOError('File \"%s\" does not exist' % fname)\n if op.isfile(fname):\n if not overwrite:\n raise IOError('Destination file exists. Please use option '\n '\"overwrite=True\" to force overwriting.')\n elif overwrite != 'read':\n logger.info('Overwriting existing file.')\n\n\ndef _check_subject(class_subject, input_subject, raise_error=True):\n \"\"\"Get subject name from class.\"\"\"\n if input_subject is not None:\n if not isinstance(input_subject, string_types):\n raise ValueError('subject input must be a string')\n else:\n return input_subject\n elif class_subject is not None:\n if not isinstance(class_subject, string_types):\n raise ValueError('Neither subject input nor class subject '\n 'attribute was a string')\n else:\n return class_subject\n else:\n if raise_error is True:\n raise ValueError('Neither subject input nor class subject '\n 'attribute was a string')\n return None\n\n\ndef _check_preload(inst, msg):\n \"\"\"Ensure data are preloaded.\"\"\"\n from .epochs import BaseEpochs\n\n name = 'raw'\n if isinstance(inst, BaseEpochs):\n name = 'epochs'\n if not inst.preload:\n raise RuntimeError(msg + ' requires %s data to be loaded. Use '\n 'preload=True (or string) in the constructor or '\n '%s.load_data().' 
% (name, name))\n\n\ndef _check_pandas_installed():\n \"\"\"Aux function.\"\"\"\n try:\n import pandas as pd\n return pd\n except ImportError:\n raise RuntimeError('For this method to work the Pandas library is'\n ' required.')\n\n\ndef _check_pandas_index_arguments(index, defaults):\n \"\"\"Check pandas index arguments.\"\"\"\n if not any(isinstance(index, k) for k in (list, tuple)):\n index = [index]\n invalid_choices = [e for e in index if e not in defaults]\n if invalid_choices:\n options = [', '.join(e) for e in [invalid_choices, defaults]]\n raise ValueError('[%s] is not an valid option. Valid index'\n 'values are \\'None\\' or %s' % tuple(options))\n\n\ndef _clean_names(names, remove_whitespace=False, before_dash=True):\n \"\"\"Remove white-space on topo matching.\n\n This function handles different naming\n conventions for old VS new VectorView systems (`remove_whitespace`).\n Also it allows to remove system specific parts in CTF channel names\n (`before_dash`).\n\n Usage\n -----\n # for new VectorView (only inside layout)\n ch_names = _clean_names(epochs.ch_names, remove_whitespace=True)\n\n # for CTF\n ch_names = _clean_names(epochs.ch_names, before_dash=True)\n\n \"\"\"\n cleaned = []\n for name in names:\n if ' ' in name and remove_whitespace:\n name = name.replace(' ', '')\n if '-' in name and before_dash:\n name = name.split('-')[0]\n if name.endswith('_virtual'):\n name = name[:-8]\n cleaned.append(name)\n\n return cleaned\n\n\ndef _check_type_picks(picks):\n \"\"\"Guarantee type integrity of picks.\"\"\"\n err_msg = 'picks must be None, a list or an array of integers'\n if picks is None:\n pass\n elif isinstance(picks, list):\n if not all(isinstance(i, int) for i in picks):\n raise ValueError(err_msg)\n picks = np.array(picks)\n elif isinstance(picks, np.ndarray):\n if not picks.dtype.kind == 'i':\n raise ValueError(err_msg)\n else:\n raise ValueError(err_msg)\n return picks\n\n\n@nottest\ndef run_tests_if_main(measure_mem=False):\n \"\"\"Run tests in a given file if it is run as a script.\"\"\"\n local_vars = inspect.currentframe().f_back.f_locals\n if not local_vars.get('__name__', '') == '__main__':\n return\n # we are in a \"__main__\"\n try:\n import faulthandler\n faulthandler.enable()\n except Exception:\n pass\n with warnings.catch_warnings(record=True): # memory_usage internal dep.\n mem = int(round(max(memory_usage(-1)))) if measure_mem else -1\n if mem >= 0:\n print('Memory consumption after import: %s' % mem)\n t0 = time.time()\n peak_mem, peak_name = mem, 'import'\n max_elapsed, elapsed_name = 0, 'N/A'\n count = 0\n for name in sorted(list(local_vars.keys()), key=lambda x: x.lower()):\n val = local_vars[name]\n if name.startswith('_'):\n continue\n elif callable(val) and name.startswith('test'):\n count += 1\n doc = val.__doc__.strip() if val.__doc__ else name\n sys.stdout.write('%s ... 
' % doc)\n sys.stdout.flush()\n try:\n t1 = time.time()\n if measure_mem:\n with warnings.catch_warnings(record=True): # dep warn\n mem = int(round(max(memory_usage((val, (), {})))))\n else:\n val()\n mem = -1\n if mem >= peak_mem:\n peak_mem, peak_name = mem, name\n mem = (', mem: %s MB' % mem) if mem >= 0 else ''\n elapsed = int(round(time.time() - t1))\n if elapsed >= max_elapsed:\n max_elapsed, elapsed_name = elapsed, name\n sys.stdout.write('time: %0.3f sec%s\\n' % (elapsed, mem))\n sys.stdout.flush()\n except Exception as err:\n if 'skiptest' in err.__class__.__name__.lower():\n sys.stdout.write('SKIP (%s)\\n' % str(err))\n sys.stdout.flush()\n else:\n raise\n elapsed = int(round(time.time() - t0))\n sys.stdout.write('Total: %s tests\\n• %0.3f sec (%0.3f sec for %s)\\n• '\n 'Peak memory %s MB (%s)\\n'\n % (count, elapsed, max_elapsed, elapsed_name, peak_mem,\n peak_name))\n\n\nclass ArgvSetter(object):\n \"\"\"Temporarily set sys.argv.\"\"\"\n\n def __init__(self, args=(), disable_stdout=True,\n disable_stderr=True): # noqa: D102\n self.argv = list(('python',) + args)\n self.stdout = StringIO() if disable_stdout else sys.stdout\n self.stderr = StringIO() if disable_stderr else sys.stderr\n\n def __enter__(self): # noqa: D105\n self.orig_argv = sys.argv\n sys.argv = self.argv\n self.orig_stdout = sys.stdout\n sys.stdout = self.stdout\n self.orig_stderr = sys.stderr\n sys.stderr = self.stderr\n return self\n\n def __exit__(self, *args): # noqa: D105\n sys.argv = self.orig_argv\n sys.stdout = self.orig_stdout\n sys.stderr = self.orig_stderr\n\n\nclass SilenceStdout(object):\n \"\"\"Silence stdout.\"\"\"\n\n def __enter__(self): # noqa: D105\n self.stdout = sys.stdout\n sys.stdout = StringIO()\n return self\n\n def __exit__(self, *args): # noqa: D105\n sys.stdout = self.stdout\n\n\ndef md5sum(fname, block_size=1048576): # 2 ** 20\n \"\"\"Calculate the md5sum for a file.\n\n Parameters\n ----------\n fname : str\n Filename.\n block_size : int\n Block size to use when reading.\n\n Returns\n -------\n hash_ : str\n The hexadecimal digest of the hash.\n \"\"\"\n md5 = hashlib.md5()\n with open(fname, 'rb') as fid:\n while True:\n data = fid.read(block_size)\n if not data:\n break\n md5.update(data)\n return md5.hexdigest()\n\n\ndef create_slices(start, stop, step=None, length=1):\n \"\"\"Generate slices of time indexes.\n\n Parameters\n ----------\n start : int\n Index where first slice should start.\n stop : int\n Index where last slice should maximally end.\n length : int\n Number of time sample included in a given slice.\n step: int | None\n Number of time samples separating two slices.\n If step = None, step = length.\n\n Returns\n -------\n slices : list\n List of slice objects.\n \"\"\"\n # default parameters\n if step is None:\n step = length\n\n # slicing\n slices = [slice(t, t + length, 1) for t in\n range(start, stop - length + 1, step)]\n return slices\n\n\ndef _time_mask(times, tmin=None, tmax=None, sfreq=None, raise_error=True):\n \"\"\"Safely find sample boundaries.\"\"\"\n orig_tmin = tmin\n orig_tmax = tmax\n tmin = -np.inf if tmin is None else tmin\n tmax = np.inf if tmax is None else tmax\n if not np.isfinite(tmin):\n tmin = times[0]\n if not np.isfinite(tmax):\n tmax = times[-1]\n if sfreq is not None:\n # Push to a bit past the nearest sample boundary first\n sfreq = float(sfreq)\n tmin = int(round(tmin * sfreq)) / sfreq - 0.5 / sfreq\n tmax = int(round(tmax * sfreq)) / sfreq + 0.5 / sfreq\n if raise_error and tmin > tmax:\n raise ValueError('tmin (%s) must be less than 
or equal to tmax (%s)'\n % (orig_tmin, orig_tmax))\n mask = (times >= tmin)\n mask &= (times <= tmax)\n if raise_error and not mask.any():\n raise ValueError('No samples remain when using tmin=%s and tmax=%s '\n '(original time bounds are [%s, %s])'\n % (orig_tmin, orig_tmax, times[0], times[-1]))\n return mask\n\n\ndef random_permutation(n_samples, random_state=None):\n \"\"\"Emulate the randperm matlab function.\n\n It returns a vector containing a random permutation of the\n integers between 0 and n_samples-1. It returns the same random numbers\n than randperm matlab function whenever the random_state is the same\n as the matlab's random seed.\n\n This function is useful for comparing against matlab scripts\n which use the randperm function.\n\n Note: the randperm(n_samples) matlab function generates a random\n sequence between 1 and n_samples, whereas\n random_permutation(n_samples, random_state) function generates\n a random sequence between 0 and n_samples-1, that is:\n randperm(n_samples) = random_permutation(n_samples, random_state) - 1\n\n Parameters\n ----------\n n_samples : int\n End point of the sequence to be permuted (excluded, i.e., the end point\n is equal to n_samples-1)\n random_state : int | None\n Random seed for initializing the pseudo-random number generator.\n\n Returns\n -------\n randperm : ndarray, int\n Randomly permuted sequence between 0 and n-1.\n \"\"\"\n rng = check_random_state(random_state)\n idx = rng.rand(n_samples)\n randperm = np.argsort(idx)\n return randperm\n\n\ndef compute_corr(x, y):\n \"\"\"Compute pearson correlations between a vector and a matrix.\"\"\"\n if len(x) == 0 or len(y) == 0:\n raise ValueError('x or y has zero length')\n X = np.array(x, float)\n Y = np.array(y, float)\n X -= X.mean(0)\n Y -= Y.mean(0)\n x_sd = X.std(0, ddof=1)\n # if covariance matrix is fully expanded, Y needs a\n # transpose / broadcasting else Y is correct\n y_sd = Y.std(0, ddof=1)[:, None if X.shape == Y.shape else Ellipsis]\n return (np.dot(X.T, Y) / float(len(X) - 1)) / (x_sd * y_sd)\n\n\ndef grand_average(all_inst, interpolate_bads=True, drop_bads=True):\n \"\"\"Make grand average of a list evoked or AverageTFR data.\n\n For evoked data, the function interpolates bad channels based on\n `interpolate_bads` parameter. If `interpolate_bads` is True, the grand\n average file will contain good channels and the bad channels interpolated\n from the good MEG/EEG channels.\n For AverageTFR data, the function takes the subset of channels not marked\n as bad in any of the instances.\n\n The grand_average.nave attribute will be equal to the number\n of evoked datasets used to calculate the grand average.\n\n Note: Grand average evoked should not be used for source localization.\n\n Parameters\n ----------\n all_inst : list of Evoked or AverageTFR data\n The evoked datasets.\n interpolate_bads : bool\n If True, bad MEG and EEG channels are interpolated. Ignored for\n AverageTFR.\n drop_bads : bool\n If True, drop all bad channels marked as bad in any data set.\n If neither interpolate_bads nor drop_bads is True, in the output file,\n every channel marked as bad in at least one of the input files will be\n marked as bad, but no interpolation or dropping will be performed.\n\n Returns\n -------\n grand_average : Evoked | AverageTFR\n The grand average data. Same type as input.\n\n Notes\n -----\n .. 
versionadded:: 0.11.0\n \"\"\"\n # check if all elements in the given list are evoked data\n from .evoked import Evoked\n from .time_frequency import AverageTFR\n from .channels.channels import equalize_channels\n if not all(isinstance(inst, (Evoked, AverageTFR)) for inst in all_inst):\n raise ValueError(\"Not all input elements are Evoked or AverageTFR\")\n\n # Copy channels to leave the original evoked datasets intact.\n all_inst = [inst.copy() for inst in all_inst]\n\n # Interpolates if necessary\n if isinstance(all_inst[0], Evoked):\n if interpolate_bads:\n all_inst = [inst.interpolate_bads() if len(inst.info['bads']) > 0\n else inst for inst in all_inst]\n equalize_channels(all_inst) # apply equalize_channels\n from .evoked import combine_evoked as combine\n else: # isinstance(all_inst[0], AverageTFR):\n from .time_frequency.tfr import combine_tfr as combine\n\n if drop_bads:\n bads = list(set((b for inst in all_inst for b in inst.info['bads'])))\n if bads:\n for inst in all_inst:\n inst.drop_channels(bads)\n\n # make grand_average object using combine_[evoked/tfr]\n grand_average = combine(all_inst, weights='equal')\n # change the grand_average.nave to the number of Evokeds\n grand_average.nave = len(all_inst)\n # change comment field\n grand_average.comment = \"Grand average (n = %d)\" % grand_average.nave\n return grand_average\n\n\ndef _get_root_dir():\n \"\"\"Get as close to the repo root as possible.\"\"\"\n root_dir = op.abspath(op.dirname(__file__))\n up_dir = op.join(root_dir, '..')\n if op.isfile(op.join(up_dir, 'setup.py')) and all(\n op.isdir(op.join(up_dir, x)) for x in ('mne', 'examples', 'doc')):\n root_dir = op.abspath(up_dir)\n return root_dir\n\n\ndef sys_info(fid=None, show_paths=False):\n \"\"\"Print the system information for debugging.\n\n This function is useful for printing system information\n to help triage bugs.\n\n Parameters\n ----------\n fid : file-like | None\n The file to write to. 
Will be passed to :func:`print()`.\n Can be None to use :data:`sys.stdout`.\n show_paths : bool\n If True, print paths for each module.\n\n Examples\n --------\n Running this function with no arguments prints an output that is\n useful when submitting bug reports::\n\n >>> import mne\n >>> mne.sys_info() # doctest: +SKIP\n Platform: Linux-4.2.0-27-generic-x86_64-with-Ubuntu-15.10-wily\n Python: 2.7.10 (default, Oct 14 2015, 16:09:02) [GCC 5.2.1 20151010]\n Executable: /usr/bin/python\n\n mne: 0.12.dev0\n numpy: 1.12.0.dev0+ec5bd81 {lapack=mkl_rt, blas=mkl_rt}\n scipy: 0.18.0.dev0+3deede3\n matplotlib: 1.5.1+1107.g1fa2697\n\n sklearn: 0.18.dev0\n nibabel: 2.1.0dev\n mayavi: 4.3.1\n pycuda: 2015.1.3\n skcuda: 0.5.2\n pandas: 0.17.1+25.g547750a\n\n \"\"\" # noqa: E501\n ljust = 15\n out = 'Platform:'.ljust(ljust) + platform.platform() + '\\n'\n out += 'Python:'.ljust(ljust) + str(sys.version).replace('\\n', ' ') + '\\n'\n out += 'Executable:'.ljust(ljust) + sys.executable + '\\n'\n out += 'CPU:'.ljust(ljust) + ('%s: %s cores\\n' %\n (platform.processor(),\n multiprocessing.cpu_count()))\n out += 'Memory:'.ljust(ljust)\n try:\n import psutil\n except ImportError:\n out += 'Unavailable (requires \"psutil\" package)'\n else:\n out += '%0.1f GB\\n' % (psutil.virtual_memory().total / float(2 ** 30),)\n out += '\\n'\n old_stdout = sys.stdout\n capture = StringIO()\n try:\n sys.stdout = capture\n np.show_config()\n finally:\n sys.stdout = old_stdout\n lines = capture.getvalue().split('\\n')\n libs = []\n for li, line in enumerate(lines):\n for key in ('lapack', 'blas'):\n if line.startswith('%s_opt_info' % key):\n libs += ['%s=' % key +\n lines[li + 1].split('[')[1].split(\"'\")[1]]\n libs = ', '.join(libs)\n version_texts = dict(pycuda='VERSION_TEXT')\n for mod_name in ('mne', 'numpy', 'scipy', 'matplotlib', '',\n 'sklearn', 'nibabel', 'mayavi', 'pycuda', 'skcuda',\n 'pandas'):\n if mod_name == '':\n out += '\\n'\n continue\n out += ('%s:' % mod_name).ljust(ljust)\n try:\n mod = __import__(mod_name)\n except Exception:\n out += 'Not found\\n'\n else:\n version = getattr(mod, version_texts.get(mod_name, '__version__'))\n extra = (' (%s)' % op.dirname(mod.__file__)) if show_paths else ''\n if mod_name == 'numpy':\n extra = ' {%s}%s' % (libs, extra)\n out += '%s%s\\n' % (version, extra)\n print(out, end='', file=fid)\n\n\nclass ETSContext(object):\n \"\"\"Add more meaningful message to errors generated by ETS Toolkit.\"\"\"\n\n def __enter__(self): # noqa: D105\n pass\n\n def __exit__(self, type, value, traceback): # noqa: D105\n if isinstance(value, SystemExit) and value.code.\\\n startswith(\"This program needs access to the screen\"):\n value.code += (\"\\nThis can probably be solved by setting \"\n \"ETS_TOOLKIT=qt4. 
On bash, type\\n\\n $ export \"\n \"ETS_TOOLKIT=qt4\\n\\nand run the command again.\")\n\n\ndef open_docs(kind=None, version=None):\n \"\"\"Launch a new web browser tab with the MNE documentation.\n\n Parameters\n ----------\n kind : str | None\n Can be \"api\" (default), \"tutorials\", or \"examples\".\n The default can be changed by setting the configuration value\n MNE_DOCS_KIND.\n version : str | None\n Can be \"stable\" (default) or \"dev\".\n The default can be changed by setting the configuration value\n MNE_DOCS_VERSION.\n \"\"\"\n if kind is None:\n kind = get_config('MNE_DOCS_KIND', 'api')\n help_dict = dict(api='python_reference.html', tutorials='tutorials.html',\n examples='auto_examples/index.html')\n if kind not in help_dict:\n raise ValueError('kind must be one of %s, got %s'\n % (sorted(help_dict.keys()), kind))\n kind = help_dict[kind]\n if version is None:\n version = get_config('MNE_DOCS_VERSION', 'stable')\n versions = ('stable', 'dev')\n if version not in versions:\n raise ValueError('version must be one of %s, got %s'\n % (version, versions))\n webbrowser.open_new_tab('https://martinos.org/mne/%s/%s' % (version, kind))\n" ]
[ [ "scipy.sparse.isspmatrix", "numpy.dot", "numpy.amax", "scipy.linalg.svd", "numpy.isfinite", "numpy.array_equal", "numpy.asarray", "numpy.empty_like", "numpy.isfortran", "numpy.finfo", "numpy.max", "numpy.show_config", "scipy.sparse.isspmatrix_csc", "numpy.argsort", "scipy.sparse.isspmatrix_csr", "numpy.array", "numpy.sum", "numpy.random.RandomState" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.14", "0.15", "0.12", "0.10" ], "tensorflow": [] } ]
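The record above embeds an MNE-Python-style utility module whose docstring for check_version(library, min_version) describes importing a library by name and comparing its __version__ against a minimum. The standalone sketch below is not part of the dataset record; it mirrors the logic shown in that record's code and assumes a Python where distutils.version.LooseVersion (the comparator the embedded code relies on) is still importable.

from distutils.version import LooseVersion  # removed in Python 3.12; a newer codebase would use packaging.version

def check_version(library, min_version):
    # Import the library by name; an empty min_version only checks presence,
    # otherwise the library's __version__ must be at least min_version.
    try:
        mod = __import__(library)
    except ImportError:
        return False
    if min_version and LooseVersion(mod.__version__) < min_version:
        return False
    return True

print(check_version('json', ''))        # stdlib module, presence check only -> True
print(check_version('numpy', '1.8'))    # True only if NumPy >= 1.8 is importable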
miguelamendez/SignalPerceptron
[ "0e50f1b8e1c7f35675ccd2687f000d6534f70364" ]
[ "SP/ml/archs/config.py" ]
[ "\"\"\"\n Description: Config file to build DL Architectures.\n \n \"\"\"\n#Internal libraries\nimport os\nimport sys\nfull_path = os.path.dirname(os.path.realpath(__file__))\nprint(\"archs_config\",full_path)\nsys.path.append(os.path.dirname(os.path.dirname(full_path)))\n\n#Importing architecture libraries.\nprint(\"Importing all architecture libraries:\")\n#SP architectures\nfrom ml.archs.sp import baselines as sp\nfrom ml.archs.sp import asp_baselines as asp\n\n#RL architectures\n#from ml.archs.rl import su as su\n\n#Classifiers architectures\n#from ml.archs.classifiers\n\n#Generator architectures\n#from ml.archs.generators\n\n#Predictors architectures\n#from ml.archs.predictors\n\n#Other architectures\n#from ml.archs.other\n\n\n#External libraries\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\ndata={\n \"rl\":{\n \"ident\":{\"arch\":su.Ident,\"parameters\":None}\n ,\"rand\":{\"arch\":su.Rand,\"parameters\":None}\n ,\"simple_ac\":{\"arch\":su.Simple_FSP_AC,\"parameters\":{\"enc\":512,\"actor\":256,\"critic\":128},\"values\":[\"samples\",\"samples_grad\",\"values\"]}\n ,\"simple_enc\":{\"arch\":su.Simple_FSP_Enc,\"parameters\":512}\n ,\"squared_image_enc\":{\"arch\":su.Squared_Image_Enc,\"parameters\":{}}\n ,\"rnn\":{\"arch\":su.RNN,\"parameters\":{}}\n ,\"prob_model\":{\"arch\":su.ProbModel,\"parameters\":{}}\n ,\"df_model\":{\"arch\":su.DensFuncModel,\"parameters\":{}}\n ,\"mv_p_model\":{\"arch\":su.MultivarProbModel,\"parameters\":{}}\n ,\"mv_p_model_v2\":{\"arch\":su.MultivarProbModelv2,\"parameters\":{}}\n ,\"simple_edge\":{\"arch\":su.SimpleEdge,\"parameters\":{}}\n },\n \"sp\":{\n \"sp\":{\"arch\":sp.SP_pytorch,\"parameters\":{}}\n ,\"lsp\":{\"arch\":sp.RSP_pytorch,\"parameters\":{}}\n ,\"rsp\":{\"arch\":sp.RSP_pytorch,\"parameters\":{}}\n ,\"fsp\":{\"arch\":sp.FSP_pytorch,\"parameters\":{}}\n ,\"att_fsp\":{\"arch\":asp.Att_FSP,\"parameters\":{}}\n ,\"2d_fsp\":{\"arch\":asp.FSP_2D,\"parameters\":{}}\n ,\"mv_fsp\":{\"arch\":asp.MultivarFSP,\"parameters\":{}}\n ,\"att_lsp\":{\"arch\":None,\"parameters\":{}}\n ,\"2d_lsp\":{\"arch\":None,\"parameters\":{}}\n ,\"mv_lsp\":{\"arch\":None,\"parameters\":{}}\n },\n \"classifiers\":{\n },\n \"generators\":{\n },\n \"predictors\":{\n \"GRU\":{\"arch\":nn.GRUCell,\"parameters\":[]}\n },\n \"dummy\":{\n \"ident\":{\"arch\":lambda x: torch.tensor(x),\"parameters\":{}},\n \"rand\":{\"arch\":lambda x:torch.tensor(x),\"parameters\":{}}\n }\n}\n" ]
[ [ "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
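The record above (SP/ml/archs/config.py) builds a nested dict named data, keyed first by model family ("sp", "rl", "predictors", "dummy", ...) and then by architecture name, with each entry carrying an "arch" callable plus default "parameters". The record does not show how that dict is consumed, so the lookup helper below is an assumption; the stand-in registry copies only the record's "dummy" family, the one family whose entries do not depend on the commented-out su import.

import torch  # the record's config module itself imports torch

# Stand-in with the same shape as the record's `data` dict (only the "dummy" family).
registry = {
    "dummy": {
        "ident": {"arch": lambda x: torch.tensor(x), "parameters": {}},
        "rand": {"arch": lambda x: torch.tensor(x), "parameters": {}},
    },
}

def build(config, family, name, *args):
    # Hypothetical lookup: fetch the entry and call its "arch" with the given args.
    entry = config[family][name]
    return entry["arch"](*args), entry["parameters"]

model_out, params = build(registry, "dummy", "ident", [1.0, 2.0])
print(model_out, params)   # tensor([1., 2.]) {}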
HarryPham0123/FPT_data_centric_competition
[ "3fa1e0ac48fdae2649b639229d9a74f75e461878" ]
[ "Data_Competition/utils/loss.py" ]
[ "\"\"\"\nSource: YOLOv5 🚀 by Ultralytics https://github.com/ultralytics/yolov5\n\nLoss functions\n\"\"\"\n\nimport torch\nimport torch.nn as nn\n\nfrom utils.metrics import bbox_iou\nfrom utils.torch_utils import is_parallel\n\n\ndef smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441\n # return positive, negative label smoothing BCE targets\n return 1.0 - 0.5 * eps, 0.5 * eps\n\n\nclass BCEBlurWithLogitsLoss(nn.Module):\n # BCEwithLogitLoss() with reduced missing label effects.\n def __init__(self, alpha=0.05):\n super(BCEBlurWithLogitsLoss, self).__init__()\n self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss()\n self.alpha = alpha\n\n def forward(self, pred, true):\n loss = self.loss_fcn(pred, true)\n pred = torch.sigmoid(pred) # prob from logits\n dx = pred - true # reduce only missing label effects\n # dx = (pred - true).abs() # reduce missing label and false label effects\n alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4))\n loss *= alpha_factor\n return loss.mean()\n\n\nclass FocalLoss(nn.Module):\n # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)\n def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):\n super(FocalLoss, self).__init__()\n self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()\n self.gamma = gamma\n self.alpha = alpha\n self.reduction = loss_fcn.reduction\n self.loss_fcn.reduction = 'none' # required to apply FL to each element\n\n def forward(self, pred, true):\n loss = self.loss_fcn(pred, true)\n # p_t = torch.exp(-loss)\n # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability\n\n # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py\n pred_prob = torch.sigmoid(pred) # prob from logits\n p_t = true * pred_prob + (1 - true) * (1 - pred_prob)\n alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)\n modulating_factor = (1.0 - p_t) ** self.gamma\n loss *= alpha_factor * modulating_factor\n\n if self.reduction == 'mean':\n return loss.mean()\n elif self.reduction == 'sum':\n return loss.sum()\n else: # 'none'\n return loss\n\n\nclass QFocalLoss(nn.Module):\n # Wraps Quality focal loss around existing loss_fcn(), i.e. 
criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)\n def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):\n super(QFocalLoss, self).__init__()\n self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()\n self.gamma = gamma\n self.alpha = alpha\n self.reduction = loss_fcn.reduction\n self.loss_fcn.reduction = 'none' # required to apply FL to each element\n\n def forward(self, pred, true):\n loss = self.loss_fcn(pred, true)\n\n pred_prob = torch.sigmoid(pred) # prob from logits\n alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)\n modulating_factor = torch.abs(true - pred_prob) ** self.gamma\n loss *= alpha_factor * modulating_factor\n\n if self.reduction == 'mean':\n return loss.mean()\n elif self.reduction == 'sum':\n return loss.sum()\n else: # 'none'\n return loss\n\n\nclass ComputeLoss:\n # Compute losses\n def __init__(self, model, autobalance=False):\n self.sort_obj_iou = False\n device = next(model.parameters()).device # get model device\n h = model.hyp # hyperparameters\n\n # Define criteria\n BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))\n BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))\n\n # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3\n self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets\n\n # Focal loss\n g = h['fl_gamma'] # focal loss gamma\n if g > 0:\n BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)\n\n det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module\n self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02]) # P3-P7\n self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index\n self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance\n for k in 'na', 'nc', 'nl', 'anchors':\n setattr(self, k, getattr(det, k))\n\n def __call__(self, p, targets): # predictions, targets, model\n device = targets.device\n lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)\n tcls, tbox, indices, anchors = self.build_targets(p, targets) # targets\n\n # Losses\n for i, pi in enumerate(p): # layer index, layer predictions\n b, a, gj, gi = indices[i] # image, anchor, gridy, gridx\n tobj = torch.zeros_like(pi[..., 0], device=device) # target obj\n\n n = b.shape[0] # number of targets\n if n:\n ps = pi[b, a, gj, gi] # prediction subset corresponding to targets\n\n # Regression\n pxy = ps[:, :2].sigmoid() * 2. 
- 0.5\n pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]\n pbox = torch.cat((pxy, pwh), 1) # predicted box\n iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True) # iou(prediction, target)\n lbox += (1.0 - iou).mean() # iou loss\n\n # Objectness\n score_iou = iou.detach().clamp(0).type(tobj.dtype)\n if self.sort_obj_iou:\n sort_id = torch.argsort(score_iou)\n b, a, gj, gi, score_iou = b[sort_id], a[sort_id], gj[sort_id], gi[sort_id], score_iou[sort_id]\n tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * score_iou # iou ratio\n\n # Classification\n if self.nc > 1: # cls loss (only if multiple classes)\n t = torch.full_like(ps[:, 5:], self.cn, device=device) # targets\n t[range(n), tcls[i]] = self.cp\n lcls += self.BCEcls(ps[:, 5:], t) # BCE\n\n # Append targets to text file\n # with open('targets.txt', 'a') as file:\n # [file.write('%11.5g ' * 4 % tuple(x) + '\\n') for x in torch.cat((txy[i], twh[i]), 1)]\n\n obji = self.BCEobj(pi[..., 4], tobj)\n lobj += obji * self.balance[i] # obj loss\n if self.autobalance:\n self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()\n\n if self.autobalance:\n self.balance = [x / self.balance[self.ssi] for x in self.balance]\n lbox *= self.hyp['box']\n lobj *= self.hyp['obj']\n lcls *= self.hyp['cls']\n bs = tobj.shape[0] # batch size\n\n return (lbox + lobj + lcls) * bs, torch.cat((lbox, lobj, lcls)).detach()\n\n def build_targets(self, p, targets):\n # Build targets for compute_loss(), input targets(image,class,x,y,w,h)\n na, nt = self.na, targets.shape[0] # number of anchors, targets\n tcls, tbox, indices, anch = [], [], [], []\n gain = torch.ones(7, device=targets.device) # normalized to gridspace gain\n ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt)\n targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices\n\n g = 0.5 # bias\n off = torch.tensor([[0, 0],\n [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m\n # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm\n ], device=targets.device).float() * g # offsets\n\n for i in range(self.nl):\n anchors = self.anchors[i]\n gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain\n\n # Match targets to anchors\n t = targets * gain\n if nt:\n # Matches\n r = t[:, :, 4:6] / anchors[:, None] # wh ratio\n j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t'] # compare\n # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))\n t = t[j] # filter\n\n # Offsets\n gxy = t[:, 2:4] # grid xy\n gxi = gain[[2, 3]] - gxy # inverse\n j, k = ((gxy % 1. < g) & (gxy > 1.)).T\n l, m = ((gxi % 1. < g) & (gxi > 1.)).T\n j = torch.stack((torch.ones_like(j), j, k, l, m))\n t = t.repeat((5, 1, 1))[j]\n offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]\n else:\n t = targets[0]\n offsets = 0\n\n # Define\n b, c = t[:, :2].long().T # image, class\n gxy = t[:, 2:4] # grid xy\n gwh = t[:, 4:6] # grid wh\n gij = (gxy - offsets).long()\n gi, gj = gij.T # grid xy indices\n\n # Append\n a = t[:, 6].long() # anchor indices\n indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices\n tbox.append(torch.cat((gxy - gij, gwh), 1)) # box\n anch.append(anchors[a]) # anchors\n tcls.append(c) # class\n\n return tcls, tbox, indices, anch\n" ]
[ [ "torch.abs", "torch.sigmoid", "torch.ones", "torch.max", "torch.zeros", "torch.cat", "torch.zeros_like", "torch.tensor", "torch.exp", "torch.nn.BCEWithLogitsLoss", "torch.arange", "torch.full_like", "torch.argsort", "torch.ones_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
leotam/MONAI
[ "866d53df3f754e25fb4635abeb3f27cdaaa718cd" ]
[ "monai/transforms/intensity/dictionary.py" ]
[ "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nA collection of dictionary-based wrappers around the \"vanilla\" transforms for intensity adjustment\ndefined in :py:class:`monai.transforms.intensity.array`.\n\nClass names are ended with 'd' to denote dictionary-based transforms.\n\"\"\"\n\nfrom collections.abc import Iterable\nfrom typing import Any, Dict, Hashable, List, Mapping, Optional, Sequence, Tuple, Union\n\nimport numpy as np\nimport torch\n\nfrom monai.config import DtypeLike, KeysCollection\nfrom monai.transforms.compose import MapTransform, Randomizable\nfrom monai.transforms.intensity.array import (\n AdjustContrast,\n GaussianSharpen,\n GaussianSmooth,\n MaskIntensity,\n NormalizeIntensity,\n ScaleIntensity,\n ScaleIntensityRange,\n ScaleIntensityRangePercentiles,\n ShiftIntensity,\n ThresholdIntensity,\n)\nfrom monai.utils import dtype_torch_to_numpy, ensure_tuple_rep, ensure_tuple_size\n\n__all__ = [\n \"RandGaussianNoised\",\n \"ShiftIntensityd\",\n \"RandShiftIntensityd\",\n \"ScaleIntensityd\",\n \"RandScaleIntensityd\",\n \"NormalizeIntensityd\",\n \"ThresholdIntensityd\",\n \"ScaleIntensityRanged\",\n \"AdjustContrastd\",\n \"RandAdjustContrastd\",\n \"ScaleIntensityRangePercentilesd\",\n \"MaskIntensityd\",\n \"GaussianSmoothd\",\n \"RandGaussianSmoothd\",\n \"GaussianSharpend\",\n \"RandGaussianSharpend\",\n \"RandHistogramShiftd\",\n \"RandGaussianNoiseD\",\n \"RandGaussianNoiseDict\",\n \"ShiftIntensityD\",\n \"ShiftIntensityDict\",\n \"RandShiftIntensityD\",\n \"RandShiftIntensityDict\",\n \"ScaleIntensityD\",\n \"ScaleIntensityDict\",\n \"RandScaleIntensityD\",\n \"RandScaleIntensityDict\",\n \"NormalizeIntensityD\",\n \"NormalizeIntensityDict\",\n \"ThresholdIntensityD\",\n \"ThresholdIntensityDict\",\n \"ScaleIntensityRangeD\",\n \"ScaleIntensityRangeDict\",\n \"AdjustContrastD\",\n \"AdjustContrastDict\",\n \"RandAdjustContrastD\",\n \"RandAdjustContrastDict\",\n \"ScaleIntensityRangePercentilesD\",\n \"ScaleIntensityRangePercentilesDict\",\n \"MaskIntensityD\",\n \"MaskIntensityDict\",\n \"GaussianSmoothD\",\n \"GaussianSmoothDict\",\n \"RandGaussianSmoothD\",\n \"RandGaussianSmoothDict\",\n \"GaussianSharpenD\",\n \"GaussianSharpenDict\",\n \"RandGaussianSharpenD\",\n \"RandGaussianSharpenDict\",\n \"RandHistogramShiftD\",\n \"RandHistogramShiftDict\",\n]\n\n\nclass RandGaussianNoised(Randomizable, MapTransform):\n \"\"\"\n Dictionary-based version :py:class:`monai.transforms.RandGaussianNoise`.\n Add Gaussian noise to image. 
This transform assumes all the expected fields have same shape.\n\n Args:\n keys: keys of the corresponding items to be transformed.\n See also: :py:class:`monai.transforms.compose.MapTransform`\n prob: Probability to add Gaussian noise.\n mean: Mean or “centre” of the distribution.\n std: Standard deviation (spread) of distribution.\n \"\"\"\n\n def __init__(\n self, keys: KeysCollection, prob: float = 0.1, mean: Union[Sequence[float], float] = 0.0, std: float = 0.1\n ) -> None:\n super().__init__(keys)\n self.prob = prob\n self.mean = ensure_tuple_rep(mean, len(self.keys))\n self.std = std\n self._do_transform = False\n self._noise: List[np.ndarray] = []\n\n def randomize(self, im_shape: Sequence[int]) -> None:\n self._do_transform = self.R.random() < self.prob\n self._noise.clear()\n for m in self.mean:\n self._noise.append(self.R.normal(m, self.R.uniform(0, self.std), size=im_shape))\n\n def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:\n d = dict(data)\n\n image_shape = d[self.keys[0]].shape # image shape from the first data key\n self.randomize(image_shape)\n if len(self._noise) != len(self.keys):\n raise AssertionError\n if not self._do_transform:\n return d\n for noise, key in zip(self._noise, self.keys):\n dtype = dtype_torch_to_numpy(d[key].dtype) if isinstance(d[key], torch.Tensor) else d[key].dtype\n d[key] = d[key] + noise.astype(dtype)\n return d\n\n\nclass ShiftIntensityd(MapTransform):\n \"\"\"\n Dictionary-based wrapper of :py:class:`monai.transforms.ShiftIntensity`.\n \"\"\"\n\n def __init__(self, keys: KeysCollection, offset: float) -> None:\n \"\"\"\n Args:\n keys: keys of the corresponding items to be transformed.\n See also: :py:class:`monai.transforms.compose.MapTransform`\n offset: offset value to shift the intensity of image.\n \"\"\"\n super().__init__(keys)\n self.shifter = ShiftIntensity(offset)\n\n def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:\n d = dict(data)\n for key in self.keys:\n d[key] = self.shifter(d[key])\n return d\n\n\nclass RandShiftIntensityd(Randomizable, MapTransform):\n \"\"\"\n Dictionary-based version :py:class:`monai.transforms.RandShiftIntensity`.\n \"\"\"\n\n def __init__(self, keys: KeysCollection, offsets: Union[Tuple[float, float], float], prob: float = 0.1) -> None:\n \"\"\"\n Args:\n keys: keys of the corresponding items to be transformed.\n See also: :py:class:`monai.transforms.compose.MapTransform`\n offsets: offset range to randomly shift.\n if single number, offset value is picked from (-offsets, offsets).\n prob: probability of rotating.\n (Default 0.1, with 10% probability it returns a rotated array.)\n \"\"\"\n super().__init__(keys)\n\n if isinstance(offsets, (int, float)):\n self.offsets = (min(-offsets, offsets), max(-offsets, offsets))\n else:\n if len(offsets) != 2:\n raise AssertionError(\"offsets should be a number or pair of numbers.\")\n self.offsets = (min(offsets), max(offsets))\n\n self.prob = prob\n self._do_transform = False\n\n def randomize(self, data: Optional[Any] = None) -> None:\n self._offset = self.R.uniform(low=self.offsets[0], high=self.offsets[1])\n self._do_transform = self.R.random() < self.prob\n\n def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:\n d = dict(data)\n self.randomize()\n if not self._do_transform:\n return d\n shifter = ShiftIntensity(self._offset)\n for key in self.keys:\n d[key] = shifter(d[key])\n return d\n\n\nclass ScaleIntensityd(MapTransform):\n \"\"\"\n 
Dictionary-based wrapper of :py:class:`monai.transforms.ScaleIntensity`.\n Scale the intensity of input image to the given value range (minv, maxv).\n If `minv` and `maxv` not provided, use `factor` to scale image by ``v = v * (1 + factor)``.\n \"\"\"\n\n def __init__(\n self, keys: KeysCollection, minv: float = 0.0, maxv: float = 1.0, factor: Optional[float] = None\n ) -> None:\n \"\"\"\n Args:\n keys: keys of the corresponding items to be transformed.\n See also: :py:class:`monai.transforms.compose.MapTransform`\n minv: minimum value of output data.\n maxv: maximum value of output data.\n factor: factor scale by ``v = v * (1 + factor)``.\n\n \"\"\"\n super().__init__(keys)\n self.scaler = ScaleIntensity(minv, maxv, factor)\n\n def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:\n d = dict(data)\n for key in self.keys:\n d[key] = self.scaler(d[key])\n return d\n\n\nclass RandScaleIntensityd(Randomizable, MapTransform):\n \"\"\"\n Dictionary-based version :py:class:`monai.transforms.RandScaleIntensity`.\n \"\"\"\n\n def __init__(self, keys: KeysCollection, factors: Union[Tuple[float, float], float], prob: float = 0.1) -> None:\n \"\"\"\n Args:\n keys: keys of the corresponding items to be transformed.\n See also: :py:class:`monai.transforms.compose.MapTransform`\n factors: factor range to randomly scale by ``v = v * (1 + factor)``.\n if single number, factor value is picked from (-factors, factors).\n prob: probability of rotating.\n (Default 0.1, with 10% probability it returns a rotated array.)\n\n \"\"\"\n super().__init__(keys)\n\n if isinstance(factors, (int, float)):\n self.factors = (min(-factors, factors), max(-factors, factors))\n else:\n if len(factors) != 2:\n raise AssertionError(\"factors should be a number or pair of numbers.\")\n self.factors = (min(factors), max(factors))\n\n self.prob = prob\n self._do_transform = False\n\n def randomize(self, data: Optional[Any] = None) -> None:\n self.factor = self.R.uniform(low=self.factors[0], high=self.factors[1])\n self._do_transform = self.R.random() < self.prob\n\n def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:\n d = dict(data)\n self.randomize()\n if not self._do_transform:\n return d\n scaler = ScaleIntensity(minv=None, maxv=None, factor=self.factor)\n for key in self.keys:\n d[key] = scaler(d[key])\n return d\n\n\nclass NormalizeIntensityd(MapTransform):\n \"\"\"\n Dictionary-based wrapper of :py:class:`monai.transforms.NormalizeIntensity`.\n This transform can normalize only non-zero values or entire image, and can also calculate\n mean and std on each channel separately.\n\n Args:\n keys: keys of the corresponding items to be transformed.\n See also: monai.transforms.MapTransform\n subtrahend: the amount to subtract by (usually the mean)\n divisor: the amount to divide by (usually the standard deviation)\n nonzero: whether only normalize non-zero values.\n channel_wise: if using calculated mean and std, calculate on each channel separately\n or calculate on the entire image directly.\n dtype: output data type, defaut to float32.\n \"\"\"\n\n def __init__(\n self,\n keys: KeysCollection,\n subtrahend: Optional[np.ndarray] = None,\n divisor: Optional[np.ndarray] = None,\n nonzero: bool = False,\n channel_wise: bool = False,\n dtype: DtypeLike = np.float32,\n ) -> None:\n super().__init__(keys)\n self.normalizer = NormalizeIntensity(subtrahend, divisor, nonzero, channel_wise, dtype)\n\n def __call__(self, data: Mapping[Hashable, np.ndarray]) -> 
Dict[Hashable, np.ndarray]:\n d = dict(data)\n for key in self.keys:\n d[key] = self.normalizer(d[key])\n return d\n\n\nclass ThresholdIntensityd(MapTransform):\n \"\"\"\n Dictionary-based wrapper of :py:class:`monai.transforms.ThresholdIntensity`.\n\n Args:\n keys: keys of the corresponding items to be transformed.\n See also: monai.transforms.MapTransform\n threshold: the threshold to filter intensity values.\n above: filter values above the threshold or below the threshold, default is True.\n cval: value to fill the remaining parts of the image, default is 0.\n \"\"\"\n\n def __init__(self, keys: KeysCollection, threshold: float, above: bool = True, cval: float = 0.0) -> None:\n super().__init__(keys)\n self.filter = ThresholdIntensity(threshold, above, cval)\n\n def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:\n d = dict(data)\n for key in self.keys:\n d[key] = self.filter(d[key])\n return d\n\n\nclass ScaleIntensityRanged(MapTransform):\n \"\"\"\n Dictionary-based wrapper of :py:class:`monai.transforms.ScaleIntensityRange`.\n\n Args:\n keys: keys of the corresponding items to be transformed.\n See also: monai.transforms.MapTransform\n a_min: intensity original range min.\n a_max: intensity original range max.\n b_min: intensity target range min.\n b_max: intensity target range max.\n clip: whether to perform clip after scaling.\n \"\"\"\n\n def __init__(\n self, keys: KeysCollection, a_min: float, a_max: float, b_min: float, b_max: float, clip: bool = False\n ) -> None:\n super().__init__(keys)\n self.scaler = ScaleIntensityRange(a_min, a_max, b_min, b_max, clip)\n\n def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:\n d = dict(data)\n for key in self.keys:\n d[key] = self.scaler(d[key])\n return d\n\n\nclass AdjustContrastd(MapTransform):\n \"\"\"\n Dictionary-based wrapper of :py:class:`monai.transforms.AdjustContrast`.\n Changes image intensity by gamma. Each pixel/voxel intensity is updated as:\n\n `x = ((x - min) / intensity_range) ^ gamma * intensity_range + min`\n\n Args:\n keys: keys of the corresponding items to be transformed.\n See also: monai.transforms.MapTransform\n gamma: gamma value to adjust the contrast as function.\n \"\"\"\n\n def __init__(self, keys: KeysCollection, gamma: float) -> None:\n super().__init__(keys)\n self.adjuster = AdjustContrast(gamma)\n\n def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:\n d = dict(data)\n for key in self.keys:\n d[key] = self.adjuster(d[key])\n return d\n\n\nclass RandAdjustContrastd(Randomizable, MapTransform):\n \"\"\"\n Dictionary-based version :py:class:`monai.transforms.RandAdjustContrast`.\n Randomly changes image intensity by gamma. 
Each pixel/voxel intensity is updated as:\n\n `x = ((x - min) / intensity_range) ^ gamma * intensity_range + min`\n\n Args:\n keys: keys of the corresponding items to be transformed.\n See also: monai.transforms.MapTransform\n prob: Probability of adjustment.\n gamma: Range of gamma values.\n If single number, value is picked from (0.5, gamma), default is (0.5, 4.5).\n \"\"\"\n\n def __init__(\n self, keys: KeysCollection, prob: float = 0.1, gamma: Union[Tuple[float, float], float] = (0.5, 4.5)\n ) -> None:\n super().__init__(keys)\n self.prob: float = prob\n\n if isinstance(gamma, (int, float)):\n if gamma <= 0.5:\n raise AssertionError(\n \"if gamma is single number, must greater than 0.5 and value is picked from (0.5, gamma)\"\n )\n self.gamma = (0.5, gamma)\n else:\n if len(gamma) != 2:\n raise AssertionError(\"gamma should be a number or pair of numbers.\")\n self.gamma = (min(gamma), max(gamma))\n\n self._do_transform = False\n self.gamma_value: Optional[float] = None\n\n def randomize(self, data: Optional[Any] = None) -> None:\n self._do_transform = self.R.random_sample() < self.prob\n self.gamma_value = self.R.uniform(low=self.gamma[0], high=self.gamma[1])\n\n def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:\n d = dict(data)\n self.randomize()\n if self.gamma_value is None:\n raise AssertionError\n if not self._do_transform:\n return d\n adjuster = AdjustContrast(self.gamma_value)\n for key in self.keys:\n d[key] = adjuster(d[key])\n return d\n\n\nclass ScaleIntensityRangePercentilesd(MapTransform):\n \"\"\"\n Dictionary-based wrapper of :py:class:`monai.transforms.ScaleIntensityRangePercentiles`.\n\n Args:\n keys: keys of the corresponding items to be transformed.\n See also: monai.transforms.MapTransform\n lower: lower percentile.\n upper: upper percentile.\n b_min: intensity target range min.\n b_max: intensity target range max.\n clip: whether to perform clip after scaling.\n relative: whether to scale to the corresponding percentiles of [b_min, b_max]\n \"\"\"\n\n def __init__(\n self,\n keys: KeysCollection,\n lower: float,\n upper: float,\n b_min: float,\n b_max: float,\n clip: bool = False,\n relative: bool = False,\n ) -> None:\n super().__init__(keys)\n self.scaler = ScaleIntensityRangePercentiles(lower, upper, b_min, b_max, clip, relative)\n\n def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:\n d = dict(data)\n for key in self.keys:\n d[key] = self.scaler(d[key])\n return d\n\n\nclass MaskIntensityd(MapTransform):\n \"\"\"\n Dictionary-based wrapper of :py:class:`monai.transforms.MaskIntensity`.\n\n Args:\n keys: keys of the corresponding items to be transformed.\n See also: :py:class:`monai.transforms.compose.MapTransform`\n mask_data: if mask data is single channel, apply to every channel\n of input image. if multiple channels, the channel number must\n match input data. 
mask_data will be converted to `bool` values\n by `mask_data > 0` before applying transform to input image.\n if None, will extract the mask data from input data based on `mask_key`.\n mask_key: the key to extract mask data from input dictionary, only works\n when `mask_data` is None.\n\n \"\"\"\n\n def __init__(\n self,\n keys: KeysCollection,\n mask_data: Optional[np.ndarray] = None,\n mask_key: Optional[str] = None,\n ) -> None:\n super().__init__(keys)\n self.converter = MaskIntensity(mask_data)\n self.mask_key = mask_key if mask_data is None else None\n\n def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:\n d = dict(data)\n for key in self.keys:\n d[key] = self.converter(d[key], d[self.mask_key]) if self.mask_key is not None else self.converter(d[key])\n return d\n\n\nclass GaussianSmoothd(MapTransform):\n \"\"\"\n Dictionary-based wrapper of :py:class:`monai.transforms.GaussianSmooth`.\n\n Args:\n keys: keys of the corresponding items to be transformed.\n See also: :py:class:`monai.transforms.compose.MapTransform`\n sigma: if a list of values, must match the count of spatial dimensions of input data,\n and apply every value in the list to 1 spatial dimension. if only 1 value provided,\n use it for all spatial dimensions.\n approx: discrete Gaussian kernel type, available options are \"erf\", \"sampled\", and \"scalespace\".\n see also :py:meth:`monai.networks.layers.GaussianFilter`.\n\n \"\"\"\n\n def __init__(self, keys: KeysCollection, sigma: Union[Sequence[float], float], approx: str = \"erf\") -> None:\n super().__init__(keys)\n self.converter = GaussianSmooth(sigma, approx=approx)\n\n def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:\n d = dict(data)\n for key in self.keys:\n d[key] = self.converter(d[key])\n return d\n\n\nclass RandGaussianSmoothd(Randomizable, MapTransform):\n \"\"\"\n Dictionary-based wrapper of :py:class:`monai.transforms.GaussianSmooth`.\n\n Args:\n keys: keys of the corresponding items to be transformed.\n See also: :py:class:`monai.transforms.compose.MapTransform`\n sigma_x: randomly select sigma value for the first spatial dimension.\n sigma_y: randomly select sigma value for the second spatial dimension if have.\n sigma_z: randomly select sigma value for the third spatial dimension if have.\n approx: discrete Gaussian kernel type, available options are \"erf\", \"sampled\", and \"scalespace\".\n see also :py:meth:`monai.networks.layers.GaussianFilter`.\n prob: probability of Gaussian smooth.\n\n \"\"\"\n\n def __init__(\n self,\n keys: KeysCollection,\n sigma_x: Tuple[float, float] = (0.25, 1.5),\n sigma_y: Tuple[float, float] = (0.25, 1.5),\n sigma_z: Tuple[float, float] = (0.25, 1.5),\n approx: str = \"erf\",\n prob: float = 0.1,\n ) -> None:\n super().__init__(keys)\n self.sigma_x = sigma_x\n self.sigma_y = sigma_y\n self.sigma_z = sigma_z\n self.approx = approx\n self.prob = prob\n self._do_transform = False\n\n def randomize(self, data: Optional[Any] = None) -> None:\n self._do_transform = self.R.random_sample() < self.prob\n self.x = self.R.uniform(low=self.sigma_x[0], high=self.sigma_x[1])\n self.y = self.R.uniform(low=self.sigma_y[0], high=self.sigma_y[1])\n self.z = self.R.uniform(low=self.sigma_z[0], high=self.sigma_z[1])\n\n def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:\n d = dict(data)\n self.randomize()\n if not self._do_transform:\n return d\n for key in self.keys:\n sigma = ensure_tuple_size(tup=(self.x, self.y, self.z), 
dim=d[key].ndim - 1)\n d[key] = GaussianSmooth(sigma=sigma, approx=self.approx)(d[key])\n return d\n\n\nclass GaussianSharpend(MapTransform):\n \"\"\"\n Dictionary-based wrapper of :py:class:`monai.transforms.GaussianSharpen`.\n\n Args:\n keys: keys of the corresponding items to be transformed.\n See also: :py:class:`monai.transforms.compose.MapTransform`\n sigma1: sigma parameter for the first gaussian kernel. if a list of values, must match the count\n of spatial dimensions of input data, and apply every value in the list to 1 spatial dimension.\n if only 1 value provided, use it for all spatial dimensions.\n sigma2: sigma parameter for the second gaussian kernel. if a list of values, must match the count\n of spatial dimensions of input data, and apply every value in the list to 1 spatial dimension.\n if only 1 value provided, use it for all spatial dimensions.\n alpha: weight parameter to compute the final result.\n approx: discrete Gaussian kernel type, available options are \"erf\", \"sampled\", and \"scalespace\".\n see also :py:meth:`monai.networks.layers.GaussianFilter`.\n\n \"\"\"\n\n def __init__(\n self,\n keys: KeysCollection,\n sigma1: Union[Sequence[float], float] = 3.0,\n sigma2: Union[Sequence[float], float] = 1.0,\n alpha: float = 30.0,\n approx: str = \"erf\",\n ) -> None:\n super().__init__(keys)\n self.converter = GaussianSharpen(sigma1, sigma2, alpha, approx=approx)\n\n def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:\n d = dict(data)\n for key in self.keys:\n d[key] = self.converter(d[key])\n return d\n\n\nclass RandGaussianSharpend(Randomizable, MapTransform):\n \"\"\"\n Dictionary-based wrapper of :py:class:`monai.transforms.GaussianSharpen`.\n\n Args:\n keys: keys of the corresponding items to be transformed.\n See also: :py:class:`monai.transforms.compose.MapTransform`\n sigma1_x: randomly select sigma value for the first spatial dimension of first gaussian kernel.\n sigma1_y: randomly select sigma value for the second spatial dimension(if have) of first gaussian kernel.\n sigma1_z: randomly select sigma value for the third spatial dimension(if have) of first gaussian kernel.\n sigma2_x: randomly select sigma value for the first spatial dimension of second gaussian kernel.\n if only 1 value `X` provided, it must be smaller than `sigma1_x` and randomly select from [X, sigma1_x].\n sigma2_y: randomly select sigma value for the second spatial dimension(if have) of second gaussian kernel.\n if only 1 value `Y` provided, it must be smaller than `sigma1_y` and randomly select from [Y, sigma1_y].\n sigma2_z: randomly select sigma value for the third spatial dimension(if have) of second gaussian kernel.\n if only 1 value `Z` provided, it must be smaller than `sigma1_z` and randomly select from [Z, sigma1_z].\n alpha: randomly select weight parameter to compute the final result.\n approx: discrete Gaussian kernel type, available options are \"erf\", \"sampled\", and \"scalespace\".\n see also :py:meth:`monai.networks.layers.GaussianFilter`.\n prob: probability of Gaussian sharpen.\n\n \"\"\"\n\n def __init__(\n self,\n keys: KeysCollection,\n sigma1_x: Tuple[float, float] = (0.5, 1.0),\n sigma1_y: Tuple[float, float] = (0.5, 1.0),\n sigma1_z: Tuple[float, float] = (0.5, 1.0),\n sigma2_x: Union[Tuple[float, float], float] = 0.5,\n sigma2_y: Union[Tuple[float, float], float] = 0.5,\n sigma2_z: Union[Tuple[float, float], float] = 0.5,\n alpha: Tuple[float, float] = (10.0, 30.0),\n approx: str = \"erf\",\n prob: float = 0.1,\n ):\n 
super().__init__(keys)\n self.sigma1_x = sigma1_x\n self.sigma1_y = sigma1_y\n self.sigma1_z = sigma1_z\n self.sigma2_x = sigma2_x\n self.sigma2_y = sigma2_y\n self.sigma2_z = sigma2_z\n self.alpha = alpha\n self.approx = approx\n self.prob = prob\n self._do_transform = False\n\n def randomize(self, data: Optional[Any] = None) -> None:\n self._do_transform = self.R.random_sample() < self.prob\n self.x1 = self.R.uniform(low=self.sigma1_x[0], high=self.sigma1_x[1])\n self.y1 = self.R.uniform(low=self.sigma1_y[0], high=self.sigma1_y[1])\n self.z1 = self.R.uniform(low=self.sigma1_z[0], high=self.sigma1_z[1])\n sigma2_x = (self.sigma2_x, self.x1) if not isinstance(self.sigma2_x, Iterable) else self.sigma2_x\n sigma2_y = (self.sigma2_y, self.y1) if not isinstance(self.sigma2_y, Iterable) else self.sigma2_y\n sigma2_z = (self.sigma2_z, self.z1) if not isinstance(self.sigma2_z, Iterable) else self.sigma2_z\n self.x2 = self.R.uniform(low=sigma2_x[0], high=sigma2_x[1])\n self.y2 = self.R.uniform(low=sigma2_y[0], high=sigma2_y[1])\n self.z2 = self.R.uniform(low=sigma2_z[0], high=sigma2_z[1])\n self.a = self.R.uniform(low=self.alpha[0], high=self.alpha[1])\n\n def __call__(self, data):\n d = dict(data)\n self.randomize()\n if not self._do_transform:\n return d\n for key in self.keys:\n sigma1 = ensure_tuple_size(tup=(self.x1, self.y1, self.z1), dim=d[key].ndim - 1)\n sigma2 = ensure_tuple_size(tup=(self.x2, self.y2, self.z2), dim=d[key].ndim - 1)\n d[key] = GaussianSharpen(sigma1=sigma1, sigma2=sigma2, alpha=self.a, approx=self.approx)(d[key])\n return d\n\n\nclass RandHistogramShiftd(Randomizable, MapTransform):\n \"\"\"\n Dictionary-based version :py:class:`monai.transforms.RandHistogramShift`.\n Apply random nonlinear transform the the image's intensity histogram.\n\n Args:\n keys: keys of the corresponding items to be transformed.\n See also: monai.transforms.MapTransform\n num_control_points: number of control points governing the nonlinear intensity mapping.\n a smaller number of control points allows for larger intensity shifts. 
if two values provided, number of\n control points selecting from range (min_value, max_value).\n prob: probability of histogram shift.\n \"\"\"\n\n def __init__(\n self, keys: KeysCollection, num_control_points: Union[Tuple[int, int], int] = 10, prob: float = 0.1\n ) -> None:\n super().__init__(keys)\n if isinstance(num_control_points, int):\n if num_control_points <= 2:\n raise AssertionError(\"num_control_points should be greater than or equal to 3\")\n self.num_control_points = (num_control_points, num_control_points)\n else:\n if len(num_control_points) != 2:\n raise AssertionError(\"num_control points should be a number or a pair of numbers\")\n if min(num_control_points) <= 2:\n raise AssertionError(\"num_control_points should be greater than or equal to 3\")\n self.num_control_points = (min(num_control_points), max(num_control_points))\n self.prob = prob\n self._do_transform = False\n\n def randomize(self, data: Optional[Any] = None) -> None:\n self._do_transform = self.R.random() < self.prob\n num_control_point = self.R.randint(self.num_control_points[0], self.num_control_points[1] + 1)\n self.reference_control_points = np.linspace(0, 1, num_control_point)\n self.floating_control_points = np.copy(self.reference_control_points)\n for i in range(1, num_control_point - 1):\n self.floating_control_points[i] = self.R.uniform(\n self.floating_control_points[i - 1], self.floating_control_points[i + 1]\n )\n\n def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:\n d = dict(data)\n self.randomize()\n if not self._do_transform:\n return d\n for key in self.keys:\n img_min, img_max = d[key].min(), d[key].max()\n reference_control_points_scaled = self.reference_control_points * (img_max - img_min) + img_min\n floating_control_points_scaled = self.floating_control_points * (img_max - img_min) + img_min\n dtype = d[key].dtype\n d[key] = np.interp(d[key], reference_control_points_scaled, floating_control_points_scaled).astype(dtype)\n return d\n\n\nRandGaussianNoiseD = RandGaussianNoiseDict = RandGaussianNoised\nShiftIntensityD = ShiftIntensityDict = ShiftIntensityd\nRandShiftIntensityD = RandShiftIntensityDict = RandShiftIntensityd\nScaleIntensityD = ScaleIntensityDict = ScaleIntensityd\nRandScaleIntensityD = RandScaleIntensityDict = RandScaleIntensityd\nNormalizeIntensityD = NormalizeIntensityDict = NormalizeIntensityd\nThresholdIntensityD = ThresholdIntensityDict = ThresholdIntensityd\nScaleIntensityRangeD = ScaleIntensityRangeDict = ScaleIntensityRanged\nAdjustContrastD = AdjustContrastDict = AdjustContrastd\nRandAdjustContrastD = RandAdjustContrastDict = RandAdjustContrastd\nScaleIntensityRangePercentilesD = ScaleIntensityRangePercentilesDict = ScaleIntensityRangePercentilesd\nMaskIntensityD = MaskIntensityDict = MaskIntensityd\nGaussianSmoothD = GaussianSmoothDict = GaussianSmoothd\nRandGaussianSmoothD = RandGaussianSmoothDict = RandGaussianSmoothd\nGaussianSharpenD = GaussianSharpenDict = GaussianSharpend\nRandGaussianSharpenD = RandGaussianSharpenDict = RandGaussianSharpend\nRandHistogramShiftD = RandHistogramShiftDict = RandHistogramShiftd\n" ]
[ [ "numpy.copy", "numpy.interp", "numpy.linspace" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tsmbland/pims
[ "4f51b31f6a5bac41f4c680c7db1396f00838369e" ]
[ "pims/ffmpeg_reader.py" ]
[ "# The MIT License (MIT)\n#\n# Copyright (c) 2014 Zulko\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# The MIT License (MIT)\n# [OSI Approved License]\n#\n# https://github.com/Zulko/moviepy\n#\n# source files:\n# moviepy/conf.py\n# moviepy/video/io/ffmpeg_reader.py\n# moviepy/tools.py\n#\n# Files heavily edited by PIMS contributors\n# January 2014\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport six\n\nimport re\nimport subprocess as sp\nimport sys\nimport os\n\nimport numpy as np\n\nfrom pims.base_frames import FramesSequence\nfrom pims.frame import Frame\n\n\ntry:\n from subprocess import DEVNULL # py3k\nexcept ImportError:\n DEVNULL = open(os.devnull, 'wb')\n\n\ndef try_ffmpeg(FFMPEG_BINARY):\n try:\n proc = sp.Popen([FFMPEG_BINARY],\n stdout=sp.PIPE,\n stderr=sp.PIPE)\n proc.wait()\n except:\n return False\n else:\n return True\n\n\n# Name (and location if needed) of the FFMPEG binary. It will be\n# \"ffmpeg\" on linux, certainly \"ffmpeg.exe\" on windows, else any path.\nFFMPEG_BINARY_SUGGESTIONS = ['ffmpeg', 'ffmpeg.exe']\n\nFFMPEG_BINARY = None\nfor name in FFMPEG_BINARY_SUGGESTIONS:\n if try_ffmpeg(name):\n FFMPEG_BINARY = name\n break\n\n\ndef available():\n return FFMPEG_BINARY is not None\n\n_pix_fmt_dict = {'rgb24': 3,\n 'rgba': 4}\n\n\nclass FFmpegVideoReader(FramesSequence):\n \"\"\"Read images from the frames of a standard video file into an\n iterable object that returns images as numpy arrays.\n\n This reader, based on ffmpeg, should be able to read most video\n files\n\n Parameters\n ----------\n filename : string\n pix_fmt : string, optional\n Expected number of color channels. Either 'rgb24' or 'rgba'.\n Defaults to 'rgba'.\n use_cache : boolean, optional\n Saves time if the file was previously opened. Defaults to True.\n Set to False if the file may have changed since it was\n last read.\n\n Examples\n --------\n >>> video = FFmpegVideoReader('video.avi') # or .mov, etc.\n >>> imshow(video[0]) # Show the first frame.\n >>> imshow(video[-1]) # Show the last frame.\n >>> imshow(video[1][0:10, 0:10]) # Show one corner of the second frame.\n\n >>> for frame in video[:]:\n ... # Do something with every frame.\n\n >>> for frame in video[10:20]:\n ... # Do something with frames 10-20.\n\n >>> for frame in video[[5, 7, 13]]:\n ... 
# Do something with frames 5, 7, and 13.\n\n >>> frame_count = len(video) # Number of frames in video\n >>> frame_shape = video.frame_shape # Pixel dimensions of video\n\n \"\"\"\n def __init__(self, filename, pix_fmt=\"rgb24\", use_cache=True):\n\n self.filename = filename\n self.pix_fmt = pix_fmt\n self._initialize(use_cache)\n try:\n self.depth = _pix_fmt_dict[pix_fmt]\n except KeyError:\n raise ValueError(\"invalid pixel format\")\n w, h = self._size\n self._stride = self.depth*w*h\n\n def _initialize(self, use_cache):\n \"\"\" Opens the file, creates the pipe. \"\"\"\n\n buffer_filename = '{0}.pims_buffer'.format(self.filename)\n meta_filename = '{0}.pims_meta'.format(self.filename)\n\n cmd = [FFMPEG_BINARY, '-i', self.filename,\n '-f', 'image2pipe',\n \"-pix_fmt\", self.pix_fmt,\n '-vcodec', 'rawvideo', '-']\n proc = sp.Popen(cmd, stdin=sp.PIPE,\n stdout=sp.PIPE,\n stderr=sp.PIPE)\n\n print(\"Decoding video file...\")\n\n if (os.path.isfile(buffer_filename) and os.path.isfile(meta_filename)\n and use_cache):\n print(\"Reusing buffer from previous opening of this video.\")\n self.data_buffer = open(buffer_filename, 'rb')\n self.metafile = open(meta_filename, 'r')\n self._len = int(self.metafile.readline())\n w = int(self.metafile.readline())\n h = int(self.metafile.readline())\n self._size = [w, h]\n return\n\n self.data_buffer = open(buffer_filename, 'wb')\n self.metafile = open(meta_filename, 'w')\n print (\"Decoding video file. This is slow, but only the first time.\")\n sys.stdout.flush()\n CHUNKSIZE = 2**14 # utterly arbitrary\n while True:\n try:\n chunk = proc.stdout.read(CHUNKSIZE)\n if len(chunk) == 0:\n break\n self.data_buffer.write(chunk)\n except EOFError:\n break\n self.data_buffer.close()\n self.data_buffer = open(buffer_filename, 'rb')\n\n self._process_ffmpeg_stderr(proc.stderr.read())\n\n proc.terminate()\n for std in proc.stdin, proc.stdout, proc.stderr:\n std.close()\n\n def _process_ffmpeg_stderr(self, stderr, verbose=False):\n if verbose:\n print(stderr)\n\n lines = stderr.splitlines()\n if \"No such file or directory\" in lines[-1]:\n raise IOError(\"%s not found ! Wrong path ?\" % self.filename)\n\n # get the output lines that describe the video\n line = [l for l in lines if ' Video: ' in l][0]\n # logic to parse all of the MD goes here\n\n # get the size, of the form 460x320 (w x h)\n match = re.search(\" [0-9]*x[0-9]*(,| )\", line)\n self._size = map(int, line[match.start():match.end()-1].split('x'))\n # this needs to be more robust\n self._len = int(lines[-2].split()[1])\n self.metafile.write('{0}\\n'.format(self._len))\n self.metafile.write('{0}\\n'.format(self._size[0]))\n self.metafile.write('{0}\\n'.format(self._size[1]))\n self.metafile.close()\n\n def __len__(self):\n return self._len\n\n @property\n def frame_shape(self):\n return self._size\n\n def get_frame(self, j):\n self.data_buffer.seek(self._stride*j)\n s = self.data_buffer.read(self._stride)\n w, h = self._size\n result = np.frombuffer(s,\n dtype='uint8').reshape((h, w, self.depth))\n return Frame(result, frame_no=j)\n\n @property\n def pixel_type(self):\n raise np.uint8\n\n @classmethod\n def class_exts(cls):\n return set(['mov', 'avi', 'webm'])\n\n def __repr__(self):\n # May be overwritten by subclasses\n return \"\"\"<Frames>\nSource: {filename}\nLength: {count} frames\nFrame Shape: {frame_shape!r}\nPixel Format: {pix_fmt}\"\"\".format(frame_shape=self.frame_shape,\n count=len(self),\n filename=self.filename,\n pix_fmt=self.pix_fmt)\n" ]
[ [ "numpy.frombuffer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
konykwj/TANAGRA
[ "72180991e89d748d937ad192f0e386bc474fc453" ]
[ "tanagra/NMF_Analysis_Functions.py" ]
[ "#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Oct 3 21:31:48 2019\r\n\r\n@author: Bill Konyk\r\n\r\nThis contains all the functions needed to execute the main NMF Analysis strategy as contained in the NMF_Analysis class.\r\n\r\nThe process follows the method described in https://arxiv.org/pdf/1702.07186.pdf\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport scipy.sparse\r\nfrom sklearn.decomposition import NMF\r\nimport sklearn.preprocessing\r\nimport scipy\r\n\r\n'''\r\nModifications to H that ensure each topic is mapped to a unit vector in the term space.\r\n'''\r\ndef norm_fun(vector):\r\n \"\"\"\r\n Calculates the norm of a vector\r\n \r\n Parameters\r\n ----------\r\n vector : np array\r\n Some vector\r\n \r\n Returns\r\n -------\r\n norm : float\r\n Norm of the vector\r\n \"\"\"\r\n \r\n return np.linalg.norm(vector)\r\n\r\ndef b_mat(H):\r\n \"\"\"\r\n Defines the B matrix so that H is normalized to unit length. THis exploits the fact that H B B_inv W = H W\r\n Note that B is diagonal, so the inverse is simple to define and calculate\r\n \r\n Parameters\r\n ----------\r\n H : np array\r\n H matrix from the NMF process\r\n \r\n Returns\r\n -------\r\n B : np array\r\n B matrix\r\n B_inv : np array\r\n Inverse B matrix\r\n \"\"\"\r\n \r\n num_topics = np.shape(H)[0]\r\n B = np.zeros((num_topics,num_topics), dtype = float) #Create matrices\r\n B_inv = np.zeros((num_topics,num_topics), dtype = float) #Create inverse matrix\r\n \r\n for topic in range(num_topics):\r\n norm = norm_fun(H[topic])\r\n B[topic,topic] = 1/norm\r\n B_inv[topic,topic] = norm\r\n \r\n return B, B_inv\r\n\r\n\r\ndef run_ensemble_NMF_strategy(num_topics, num_folds, num_runs, doc_term_matrix):\r\n \r\n \"\"\"\r\n Main function to process text using NMF.\r\n This implements the method described in https://arxiv.org/pdf/1702.07186.pdf\r\n It also normalizes the H matrix so that each topic has a norm of length 1\r\n \r\n \r\n Parameters\r\n ----------\r\n num_topics : int\r\n Number of topics to generate\r\n num_folds : int\r\n Number of times to partition the set of documents. 
In each run one of the folds will randomly be excluded\r\n num_runs : int\r\n Number of times to run NMF\r\n doc_term_matrix : np.array\r\n Vectorized document-term matrix from preprocessing\r\n \r\n Returns\r\n -------\r\n ensemble_W : sparse matrix\r\n Sparse form of the W matrix\r\n ensemble_H : sparse matrix\r\n Sparse form of the H matrix\r\n \"\"\"\r\n \r\n #Identify number of documents\r\n num_docs = doc_term_matrix.shape[0]\r\n\r\n #Defines the number of elements in each fold and ensures that the total sums correctly\r\n fold_sizes = (num_docs // num_folds) * np.ones(num_folds, dtype=np.int)\r\n fold_sizes[:num_docs % num_folds] += 1\r\n \r\n #Creates a list that will save all the final H matrices for the last NMF application.\r\n H_list = []\r\n \r\n #For every run over all folds\r\n for run in range(num_runs):\r\n doc_ids = np.arange(num_docs)\r\n np.random.shuffle(doc_ids)\r\n \r\n current_fold = 0 \r\n for fold, fold_size in enumerate(fold_sizes):\r\n #Updates the currentfold in the process\r\n start, stop = current_fold, current_fold+fold_size\r\n current_fold = stop\r\n \r\n #Removes the current fold\r\n sample_ids = list(doc_ids)\r\n for id in doc_ids[start:stop]:\r\n sample_ids.remove(id)\r\n \r\n \r\n sample_doc_ids = []\r\n for doc_index in sample_ids:\r\n sample_doc_ids.append(doc_ids[doc_index])\r\n \r\n S = doc_term_matrix[sample_ids,:]\r\n S = scipy.sparse.csr_matrix(S)\r\n \r\n model = NMF( init=\"nndsvd\", n_components = num_topics ) \r\n W = model.fit_transform( doc_term_matrix )\r\n H = model.components_ \r\n H_list.append(H)\r\n \r\n H = 0.0\r\n W = 0.0\r\n model = 0.0\r\n \r\n M = np.vstack(H_list)\r\n \r\n model = NMF( init=\"nndsvd\", n_components = num_topics )\r\n W = model.fit_transform(M)\r\n ensemble_H = model.components_ \r\n \r\n HT = sklearn.preprocessing.normalize( ensemble_H.T, \"l2\", axis=0 )\r\n \r\n ensemble_W = doc_term_matrix.dot(HT)\r\n \r\n #Updating the W and H matrices to normalize H.\r\n B,B_inv = b_mat(ensemble_H)\r\n ensemble_H = np.matmul(B,ensemble_H)\r\n ensemble_W = np.matmul(ensemble_W, B_inv)\r\n \r\n print(num_topics, 'th topic analyzed')\r\n \r\n return ensemble_W, ensemble_H" ]
[ [ "sklearn.decomposition.NMF", "numpy.arange", "numpy.matmul", "numpy.linalg.norm", "numpy.random.shuffle", "numpy.ones", "scipy.sparse.csr_matrix", "numpy.shape", "numpy.zeros", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
jl45621/SLM-Lab
[ "42c48af308dfe36401990aca3795bc481cf28c17" ]
[ "slm_lab/lib/util.py" ]
[ "from contextlib import contextmanager\nfrom datetime import datetime\nfrom importlib import reload\nfrom slm_lab import ROOT_DIR, EVAL_MODES\nimport cv2\nimport json\nimport numpy as np\nimport operator\nimport os\nimport pandas as pd\nimport pydash as ps\nimport regex as re\nimport subprocess\nimport sys\nimport torch\nimport torch.multiprocessing as mp\nimport ujson\nimport yaml\n\nNUM_CPUS = mp.cpu_count()\nFILE_TS_FORMAT = '%Y_%m_%d_%H%M%S'\nRE_FILE_TS = re.compile(r'(\\d{4}_\\d{2}_\\d{2}_\\d{6})')\nSPACE_PATH = ['agent', 'agent_space', 'aeb_space', 'env_space', 'env']\n\n\nclass LabJsonEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n elif isinstance(obj, (np.ndarray, pd.Series)):\n return obj.tolist()\n else:\n return str(obj)\n\n\ndef calc_ts_diff(ts2, ts1):\n '''\n Calculate the time from tss ts1 to ts2\n @param {str} ts2 Later ts in the FILE_TS_FORMAT\n @param {str} ts1 Earlier ts in the FILE_TS_FORMAT\n @returns {str} delta_t in %H:%M:%S format\n @example\n\n ts1 = '2017_10_17_084739'\n ts2 = '2017_10_17_084740'\n ts_diff = util.calc_ts_diff(ts2, ts1)\n # => '0:00:01'\n '''\n delta_t = datetime.strptime(ts2, FILE_TS_FORMAT) - datetime.strptime(ts1, FILE_TS_FORMAT)\n return str(delta_t)\n\n\ndef cast_df(val):\n '''missing pydash method to cast value as DataFrame'''\n if isinstance(val, pd.DataFrame):\n return val\n return pd.DataFrame(val)\n\n\ndef cast_list(val):\n '''missing pydash method to cast value as list'''\n if ps.is_list(val):\n return val\n else:\n return [val]\n\n\ndef clear_periodic_ckpt(prepath):\n '''Clear periodic (with -epi) ckpt files in prepath'''\n if '-epi' in prepath:\n run_cmd(f'rm {prepath}*')\n\n\ndef concat_batches(batches):\n '''\n Concat batch objects from body.memory.sample() into one batch, when all bodies experience similar envs\n Also concat any nested epi sub-batches into flat batch\n {k: arr1} + {k: arr2} = {k: arr1 + arr2}\n '''\n # if is nested, then is episodic\n is_episodic = isinstance(batches[0]['dones'][0], (list, np.ndarray))\n concat_batch = {}\n for k in batches[0]:\n datas = []\n for batch in batches:\n data = batch[k]\n if is_episodic: # make into plain batch instead of nested\n data = np.concatenate(data)\n datas.append(data)\n concat_batch[k] = np.concatenate(datas)\n return concat_batch\n\n\ndef cond_multiget(arr, idxs):\n '''Get multi-idxs from an array depending if it's a python list or np.array'''\n if isinstance(arr, list):\n return np.array(operator.itemgetter(*idxs)(arr))\n else:\n return arr[idxs]\n\n\ndef count_nonan(arr):\n try:\n return np.count_nonzero(~np.isnan(arr))\n except Exception:\n return len(filter_nonan(arr))\n\n\ndef downcast_float32(df):\n '''Downcast any float64 col to float32 to allow safer pandas comparison'''\n for col in df.columns:\n if df[col].dtype == 'float':\n df[col] = df[col].astype('float32')\n return df\n\n\ndef find_ckpt(prepath):\n '''Find the ckpt-lorem-ipsum in a string and return lorem-ipsum'''\n if 'ckpt' in prepath:\n ckpt_str = ps.find(prepath.split('_'), lambda s: s.startswith('ckpt'))\n ckpt = ckpt_str.replace('ckpt-', '')\n else:\n ckpt = None\n return ckpt\n\n\ndef flatten_dict(obj, delim='.'):\n '''Missing pydash method to flatten dict'''\n nobj = {}\n for key, val in obj.items():\n if ps.is_dict(val) and not ps.is_empty(val):\n strip = flatten_dict(val, delim)\n for k, v in strip.items():\n nobj[key + delim + k] = v\n elif ps.is_list(val) and not ps.is_empty(val) and 
ps.is_dict(val[0]):\n for idx, v in enumerate(val):\n nobj[key + delim + str(idx)] = v\n if ps.is_object(v):\n nobj = flatten_dict(nobj, delim)\n else:\n nobj[key] = val\n return nobj\n\n\ndef filter_nonan(arr):\n '''Filter to np array with no nan'''\n try:\n return arr[~np.isnan(arr)]\n except Exception:\n mixed_type = []\n for v in arr:\n if not gen_isnan(v):\n mixed_type.append(v)\n return np.array(mixed_type, dtype=arr.dtype)\n\n\ndef fix_multi_index_dtype(df):\n '''Restore aeb multi_index dtype from string to int, when read from file'''\n df.columns = pd.MultiIndex.from_tuples([(int(x[0]), int(x[1]), int(x[2]), x[3]) for x in df.columns])\n return df\n\n\ndef nanflatten(arr):\n '''Flatten np array while ignoring nan, like np.nansum etc.'''\n flat_arr = arr.reshape(-1)\n return filter_nonan(flat_arr)\n\n\ndef gen_isnan(v):\n '''Check isnan for general type (np.isnan is only operable on np type)'''\n try:\n return np.isnan(v).all()\n except Exception:\n return v is None\n\n\ndef get_df_aeb_list(session_df):\n '''Get the aeb list for session_df for iterating.'''\n aeb_list = sorted(ps.uniq([(a, e, b) for a, e, b, col in session_df.columns.tolist()]))\n return aeb_list\n\n\ndef get_aeb_shape(aeb_list):\n return np.amax(aeb_list, axis=0) + 1\n\n\ndef get_class_name(obj, lower=False):\n '''Get the class name of an object'''\n class_name = obj.__class__.__name__\n if lower:\n class_name = class_name.lower()\n return class_name\n\n\ndef get_class_attr(obj):\n '''Get the class attr of an object as dict'''\n attr_dict = {}\n for k, v in obj.__dict__.items():\n if hasattr(v, '__dict__') or ps.is_tuple(v):\n val = str(v)\n else:\n val = v\n attr_dict[k] = val\n return attr_dict\n\n\ndef get_file_ext(data_path):\n '''get the `.ext` of file.ext'''\n return os.path.splitext(data_path)[-1]\n\n\ndef get_fn_list(a_cls):\n '''\n Get the callable, non-private functions of a class\n @returns {[*str]} A list of strings of fn names\n '''\n fn_list = ps.filter_(dir(a_cls), lambda fn: not fn.endswith('__') and callable(getattr(a_cls, fn)))\n return fn_list\n\n\ndef get_git_sha():\n return subprocess.check_output(['git', 'rev-parse', 'HEAD'], close_fds=True, cwd=ROOT_DIR).decode().strip()\n\n\ndef get_lab_mode():\n return os.environ.get('lab_mode')\n\n\ndef get_prepath(spec, info_space, unit='experiment'):\n spec_name = spec['name']\n predir = f'data/{spec_name}_{info_space.experiment_ts}'\n prename = f'{spec_name}'\n trial_index = info_space.get('trial')\n session_index = info_space.get('session')\n t_str = '' if trial_index is None else f'_t{trial_index}'\n s_str = '' if session_index is None else f'_s{session_index}'\n if unit == 'trial':\n prename += t_str\n elif unit == 'session':\n prename += f'{t_str}{s_str}'\n ckpt = ps.get(info_space, 'ckpt')\n if ckpt is not None:\n prename += f'_ckpt-{ckpt}'\n prepath = f'{predir}/{prename}'\n return prepath\n\n\ndef get_ts(pattern=FILE_TS_FORMAT):\n '''\n Get current ts, defaults to format used for filename\n @param {str} pattern To format the ts\n @returns {str} ts\n @example\n\n util.get_ts()\n # => '2017_10_17_084739'\n '''\n ts_obj = datetime.now()\n ts = ts_obj.strftime(pattern)\n assert RE_FILE_TS.search(ts)\n return ts\n\n\ndef guard_data_a(cls, data_a, data_name):\n '''Guard data_a in case if it scalar, create a data_a and fill.'''\n if np.isscalar(data_a):\n new_data_a, = s_get(cls, 'aeb_space').init_data_s([data_name], a=cls.a)\n for eb, body in ndenumerate_nonan(cls.body_a):\n new_data_a[eb] = data_a\n data_a = new_data_a\n return data_a\n\n\ndef 
in_eval_lab_modes():\n '''Check if lab_mode is one of EVAL_MODES'''\n return get_lab_mode() in EVAL_MODES\n\n\ndef is_jupyter():\n '''Check if process is in Jupyter kernel'''\n try:\n get_ipython().config\n return True\n except NameError:\n return False\n return False\n\n\n@contextmanager\ndef ctx_lab_mode(lab_mode):\n '''\n Creates context to run method with a specific lab_mode\n @example\n with util.ctx_lab_mode('eval'):\n run_eval()\n '''\n prev_lab_mode = os.environ.get('lab_mode')\n os.environ['lab_mode'] = lab_mode\n yield\n if prev_lab_mode is None:\n del os.environ['lab_mode']\n else:\n os.environ['lab_mode'] = prev_lab_mode\n\n\ndef monkey_patch(base_cls, extend_cls):\n '''Monkey patch a base class with methods from extend_cls'''\n ext_fn_list = get_fn_list(extend_cls)\n for fn in ext_fn_list:\n setattr(base_cls, fn, getattr(extend_cls, fn))\n\n\ndef ndenumerate_nonan(arr):\n '''Generic ndenumerate for np.ndenumerate with only not gen_isnan values'''\n return (idx_v for idx_v in np.ndenumerate(arr) if not gen_isnan(idx_v[1]))\n\n\ndef nonan_all(v):\n '''Generic np.all that also returns false if array is all np.nan'''\n return bool(np.all(v) and ~np.all(np.isnan(v)))\n\n\ndef parallelize_fn(fn, args, num_cpus=NUM_CPUS):\n '''\n Parallelize a method fn, args and return results with order preserved per args.\n fn should take only a single arg.\n @returns {list} results Order preserved output from fn.\n '''\n pool = mp.Pool(num_cpus, maxtasksperchild=1)\n results = pool.map(fn, args)\n pool.close()\n pool.join()\n return results\n\n\ndef prepath_split(prepath):\n '''\n Split prepath into useful names. Works with predir (prename will be None)\n prepath: data/dqn_pong_2018_12_02_082510/dqn_pong_t0_s0\n predir: data/dqn_pong_2018_12_02_082510\n prefolder: dqn_pong_2018_12_02_082510\n prename: dqn_pong_t0_s0\n spec_name: dqn_pong\n experiment_ts: 2018_12_02_082510\n ckpt: ckpt-best of dqn_pong_t0_s0_ckpt-best if available\n '''\n prepath = prepath.strip('_')\n tail = prepath.split('data/')[-1]\n ckpt = find_ckpt(tail)\n if ckpt is not None: # separate ckpt\n tail = tail.replace(f'_ckpt-{ckpt}', '')\n if '/' in tail: # tail = prefolder/prename\n prefolder, prename = tail.split('/')\n else:\n prefolder, prename = tail, None\n predir = f'data/{prefolder}'\n spec_name = RE_FILE_TS.sub('', prefolder).strip('_')\n experiment_ts = RE_FILE_TS.findall(prefolder)[0]\n return predir, prefolder, prename, spec_name, experiment_ts, ckpt\n\n\ndef prepath_to_idxs(prepath):\n '''Extract trial index and session index from prepath if available'''\n _, _, prename, spec_name, _, _ = prepath_split(prepath)\n idxs_tail = prename.replace(spec_name, '').strip('_')\n idxs_strs = ps.compact(idxs_tail.split('_')[:2])\n if ps.is_empty(idxs_strs):\n return None, None\n tidx = idxs_strs[0]\n assert tidx.startswith('t')\n trial_index = int(tidx.strip('t'))\n if len(idxs_strs) == 1: # has session\n session_index = None\n else:\n sidx = idxs_strs[1]\n assert sidx.startswith('s')\n session_index = int(sidx.strip('s'))\n return trial_index, session_index\n\n\ndef prepath_to_spec(prepath):\n '''Create spec from prepath such that it returns the same prepath with info_space'''\n predir, _, prename, _, _, _ = prepath_split(prepath)\n sidx_res = re.search('_s\\d+', prename)\n if sidx_res: # replace the _s0 if any\n prename = prename.replace(sidx_res[0], '')\n spec_path = f'{predir}/{prename}_spec.json'\n # read the spec of prepath\n spec = read(spec_path)\n return spec\n\n\ndef prepath_to_info_space(prepath):\n '''Create 
info_space from prepath such that it returns the same prepath with spec'''\n from slm_lab.experiment.monitor import InfoSpace\n _, _, _, _, experiment_ts, ckpt = prepath_split(prepath)\n trial_index, session_index = prepath_to_idxs(prepath)\n # create info_space for prepath\n info_space = InfoSpace()\n info_space.experiment_ts = experiment_ts\n info_space.ckpt = ckpt\n info_space.set('experiment', 0)\n info_space.set('trial', trial_index)\n info_space.set('session', session_index)\n return info_space\n\n\ndef prepath_to_spec_info_space(prepath):\n '''\n Given a prepath, read the correct spec and craete the info_space that will return the same prepath\n This is used for lab_mode: enjoy\n example: data/a2c_cartpole_2018_06_13_220436/a2c_cartpole_t0_s0\n '''\n spec = prepath_to_spec(prepath)\n info_space = prepath_to_info_space(prepath)\n check_prepath = get_prepath(spec, info_space, unit='session')\n assert check_prepath in prepath, f'{check_prepath}, {prepath}'\n return spec, info_space\n\n\ndef read(data_path, **kwargs):\n '''\n Universal data reading method with smart data parsing\n - {.csv} to DataFrame\n - {.json} to dict, list\n - {.yml} to dict\n - {*} to str\n @param {str} data_path The data path to read from\n @returns {data} The read data in sensible format\n @example\n\n data_df = util.read('test/fixture/lib/util/test_df.csv')\n # => <DataFrame>\n\n data_dict = util.read('test/fixture/lib/util/test_dict.json')\n data_dict = util.read('test/fixture/lib/util/test_dict.yml')\n # => <dict>\n\n data_list = util.read('test/fixture/lib/util/test_list.json')\n # => <list>\n\n data_str = util.read('test/fixture/lib/util/test_str.txt')\n # => <str>\n '''\n data_path = smart_path(data_path)\n try:\n assert os.path.isfile(data_path)\n except AssertionError:\n raise FileNotFoundError(data_path)\n ext = get_file_ext(data_path)\n if ext == '.csv':\n data = read_as_df(data_path, **kwargs)\n else:\n data = read_as_plain(data_path, **kwargs)\n return data\n\n\ndef read_as_df(data_path, **kwargs):\n '''Submethod to read data as DataFrame'''\n ext = get_file_ext(data_path)\n data = pd.read_csv(data_path, **kwargs)\n return data\n\n\ndef read_as_plain(data_path, **kwargs):\n '''Submethod to read data as plain type'''\n open_file = open(data_path, 'r')\n ext = get_file_ext(data_path)\n if ext == '.json':\n data = ujson.load(open_file, **kwargs)\n elif ext == '.yml':\n data = yaml.load(open_file, **kwargs)\n else:\n data = open_file.read()\n open_file.close()\n return data\n\n\ndef run_cmd(cmd):\n '''Run shell command'''\n print(f'+ {cmd}')\n proc = subprocess.Popen(cmd, cwd=ROOT_DIR, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)\n return proc\n\n\ndef run_cmd_wait(proc):\n '''Wait on a running process created by util.run_cmd and print its stdout'''\n for line in proc.stdout:\n print(line.decode(), end='')\n output = proc.communicate()[0]\n if proc.returncode != 0:\n raise subprocess.CalledProcessError(proc.args, proc.returncode, output)\n else:\n return output\n\n\ndef s_get(cls, attr_path):\n '''\n Method to get attribute across space via inferring agent <-> env paths.\n @example\n self.agent.agent_space.aeb_space.clock\n # equivalently\n util.s_get(self, 'aeb_space.clock')\n '''\n from_class_name = get_class_name(cls, lower=True)\n from_idx = ps.find_index(SPACE_PATH, lambda s: from_class_name in (s, s.replace('_', '')))\n from_idx = max(from_idx, 0)\n attr_path = attr_path.split('.')\n to_idx = SPACE_PATH.index(attr_path[0])\n assert -1 not in (from_idx, to_idx)\n if 
from_idx < to_idx:\n path_link = SPACE_PATH[from_idx: to_idx]\n else:\n path_link = ps.reverse(SPACE_PATH[to_idx: from_idx])\n\n res = cls\n for attr in path_link + attr_path:\n if not (get_class_name(res, lower=True) in (attr, attr.replace('_', ''))):\n res = getattr(res, attr)\n return res\n\n\ndef self_desc(cls):\n '''Method to get self description, used at init.'''\n desc_list = [f'{get_class_name(cls)}:']\n for k, v in get_class_attr(cls).items():\n if k == 'spec':\n desc_v = v['name']\n elif ps.is_dict(v) or ps.is_dict(ps.head(v)):\n desc_v = to_json(v)\n else:\n desc_v = v\n desc_list.append(f'- {k} = {desc_v}')\n desc = '\\n'.join(desc_list)\n return desc\n\n\ndef session_df_to_data(session_df):\n '''\n Convert a multi_index session_df (df) with column levels (a,e,b,col) to session_data[aeb] = aeb_df\n @example\n\n session_df = util.read(filepath, header=[0, 1, 2, 3])\n session_data = util.session_df_to_data(session_df)\n '''\n session_data = {}\n fix_multi_index_dtype(session_df)\n aeb_list = get_df_aeb_list(session_df)\n for aeb in aeb_list:\n aeb_df = session_df.loc[:, aeb]\n aeb_df.reset_index(inplace=True, drop=True) # guard for eval append-row\n session_data[aeb] = aeb_df\n return session_data\n\n\ndef set_attr(obj, attr_dict, keys=None):\n '''Set attribute of an object from a dict'''\n if keys is not None:\n attr_dict = ps.pick(attr_dict, keys)\n for attr, val in attr_dict.items():\n setattr(obj, attr, val)\n return obj\n\n\ndef set_rand_seed(random_seed, env_space):\n '''Set all the module random seeds'''\n torch.cuda.manual_seed_all(random_seed)\n torch.manual_seed(random_seed)\n np.random.seed(random_seed)\n envs = env_space.envs if hasattr(env_space, 'envs') else [env_space]\n for env in envs:\n try:\n env.u_env.seed(random_seed)\n except Exception as e:\n pass\n\n\ndef set_logger(spec, info_space, logger, unit=None):\n '''Set the logger for a lab unit give its spec and info_space'''\n os.environ['PREPATH'] = get_prepath(spec, info_space, unit=unit)\n reload(logger) # to set session-specific logger\n\n\ndef _sizeof(obj, seen=None):\n '''Recursively finds size of objects'''\n size = sys.getsizeof(obj)\n if seen is None:\n seen = set()\n obj_id = id(obj)\n if obj_id in seen:\n return 0\n # Important mark as seen *before* entering recursion to gracefully handle\n # self-referential objects\n seen.add(obj_id)\n if isinstance(obj, dict):\n size += sum([_sizeof(v, seen) for v in obj.values()])\n size += sum([_sizeof(k, seen) for k in obj.keys()])\n elif hasattr(obj, '__dict__'):\n size += _sizeof(obj.__dict__, seen)\n elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):\n size += sum([_sizeof(i, seen) for i in obj])\n return size\n\n\ndef sizeof(obj, divisor=1e6):\n '''Return the size of object, in MB by default'''\n return _sizeof(obj) / divisor\n\n\ndef smart_path(data_path, as_dir=False):\n '''\n Resolve data_path into abspath with fallback to join from ROOT_DIR\n @param {str} data_path The input data path to resolve\n @param {bool} as_dir Whether to return as dirname\n @returns {str} The normalized absolute data_path\n @example\n\n util.smart_path('slm_lab/lib')\n # => '/Users/ANON/Documents/slm_lab/slm_lab/lib'\n\n util.smart_path('/tmp')\n # => '/tmp'\n '''\n if not os.path.isabs(data_path):\n abs_path = os.path.abspath(data_path)\n if os.path.exists(abs_path):\n data_path = abs_path\n else:\n data_path = os.path.join(ROOT_DIR, data_path)\n if as_dir:\n data_path = os.path.dirname(data_path)\n return os.path.normpath(data_path)\n\n\ndef 
to_json(d, indent=2):\n '''Shorthand method for stringify JSON with indent'''\n return json.dumps(d, indent=indent, cls=LabJsonEncoder)\n\n\ndef to_render():\n return get_lab_mode() in ('dev', 'enjoy') and os.environ.get('RENDER', 'true') == 'true'\n\n\ndef to_torch_batch(batch, device, is_episodic):\n '''Mutate a batch (dict) to make its values from numpy into PyTorch tensor'''\n for k in batch:\n if is_episodic: # for episodic format\n batch[k] = np.concatenate(batch[k])\n elif ps.is_list(batch[k]):\n batch[k] = np.array(batch[k])\n batch[k] = torch.from_numpy(batch[k].astype('float32')).to(device)\n return batch\n\n\ndef try_set_cuda_id(spec, info_space):\n '''Use trial and session id to hash and modulo cuda device count for a cuda_id to maximize device usage. Sets the net_spec for the base Net class to pick up.'''\n # Don't trigger any cuda call if not using GPU. Otherwise will break multiprocessing on machines with CUDA.\n # see issues https://github.com/pytorch/pytorch/issues/334 https://github.com/pytorch/pytorch/issues/3491 https://github.com/pytorch/pytorch/issues/9996\n for agent_spec in spec['agent']:\n if not agent_spec['net'].get('gpu'):\n return\n trial_idx = info_space.get('trial') or 0\n session_idx = info_space.get('session') or 0\n job_idx = trial_idx * spec['meta']['max_session'] + session_idx\n job_idx += int(os.environ.get('CUDA_ID_OFFSET', 0))\n device_count = torch.cuda.device_count()\n if device_count == 0:\n cuda_id = None\n else:\n cuda_id = job_idx % device_count\n\n for agent_spec in spec['agent']:\n agent_spec['net']['cuda_id'] = cuda_id\n\n\ndef write(data, data_path):\n '''\n Universal data writing method with smart data parsing\n - {.csv} from DataFrame\n - {.json} from dict, list\n - {.yml} from dict\n - {*} from str(*)\n @param {*} data The data to write\n @param {str} data_path The data path to write to\n @returns {data_path} The data path written to\n @example\n\n data_path = util.write(data_df, 'test/fixture/lib/util/test_df.csv')\n\n data_path = util.write(data_dict, 'test/fixture/lib/util/test_dict.json')\n data_path = util.write(data_dict, 'test/fixture/lib/util/test_dict.yml')\n\n data_path = util.write(data_list, 'test/fixture/lib/util/test_list.json')\n\n data_path = util.write(data_str, 'test/fixture/lib/util/test_str.txt')\n '''\n data_path = smart_path(data_path)\n data_dir = os.path.dirname(data_path)\n os.makedirs(data_dir, exist_ok=True)\n ext = get_file_ext(data_path)\n if ext == '.csv':\n write_as_df(data, data_path)\n else:\n write_as_plain(data, data_path)\n return data_path\n\n\ndef write_as_df(data, data_path):\n '''Submethod to write data as DataFrame'''\n df = cast_df(data)\n ext = get_file_ext(data_path)\n df.to_csv(data_path)\n return data_path\n\n\ndef write_as_plain(data, data_path):\n '''Submethod to write data as plain type'''\n open_file = open(data_path, 'w')\n ext = get_file_ext(data_path)\n if ext == '.json':\n json.dump(data, open_file, indent=2, cls=LabJsonEncoder)\n elif ext == '.yml':\n yaml.dump(data, open_file)\n else:\n open_file.write(str(data))\n open_file.close()\n return data_path\n\n\n# Atari image transformation\n\ndef grayscale_image(im):\n return cv2.cvtColor(im, cv2.COLOR_RGB2GRAY)\n\n\ndef resize_image(im, w_h):\n return cv2.resize(im, w_h, interpolation=cv2.INTER_AREA)\n\n\ndef crop_image(im):\n '''Crop away the unused top-bottom game borders of Atari'''\n return im[18:102, :]\n\n\ndef normalize_image(im):\n '''Normalizing image by dividing max value 255'''\n # NOTE: beware in its application, may cause 
loss to be 255 times lower due to smaller input values\n return np.divide(im, 255.0)\n\n\ndef nature_transform_image(im):\n '''\n Image preprocessing from the paper \"Playing Atari with Deep Reinforcement Learning, 2013, Mnih et al\"\n Takes an RGB image and converts it to grayscale, downsizes to 110 x 84 and crops to square 84 x 84 without the game border\n '''\n im = grayscale_image(im)\n im = resize_image(im, (84, 110))\n im = crop_image(im)\n return im\n\n\ndef openai_transform_image(im):\n '''\n Image transformation using OpenAI's baselines method: greyscale, resize\n Instead of cropping as done in nature_transform_image(), this resizes and stretches the image.\n '''\n im = grayscale_image(im)\n im = resize_image(im, (84, 84))\n return im\n\n\ndef transform_image(im, method='openai'):\n '''Apply image transformation using nature or openai method'''\n if method == 'nature':\n return nature_transform_image(im)\n elif method == 'openai':\n return openai_transform_image(im)\n else:\n raise ValueError('method must be one of: nature, openai')\n\n\ndef debug_image(im):\n '''Use this method to render image the agent sees; waits for a key press before continuing'''\n cv2.imshow('image', im)\n cv2.waitKey(0)\n\n\ndef mpl_debug_image(im):\n '''Uses matplotlib to plot image with bigger size, axes, and false color on greyscaled images'''\n import matplotlib.pyplot as plt\n plt.figure()\n plt.imshow(im)\n plt.show()\n" ]
[ [ "matplotlib.pyplot.imshow", "numpy.amax", "pandas.DataFrame", "numpy.concatenate", "numpy.all", "torch.cuda.manual_seed_all", "numpy.divide", "pandas.read_csv", "matplotlib.pyplot.figure", "numpy.isnan", "torch.multiprocessing.Pool", "numpy.ndenumerate", "torch.cuda.device_count", "numpy.array", "matplotlib.pyplot.show", "numpy.random.seed", "torch.manual_seed", "torch.multiprocessing.cpu_count", "numpy.isscalar" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
sebi06/czitools
[ "3fed073d5e56db0aaebe87f0e38af80b0724f005" ]
[ "examples/scripts/test_zenheatmap.py" ]
[ "# -*- coding: utf-8 -*-\n\n#################################################################\n# File : test_zenheatmap.py\n# Version : 0.1\n# Author : sebi06\n# Date : 04.12.2021\n#\n# Disclaimer: This code is purely experimental. Feel free to\n# use it at your own risk.\n#\n#################################################################\n\nimport pandas as pd\nfrom czitools import visutools as vt\nimport numpy as np\nimport plotly.graph_objects as go\nimport matplotlib.pyplot as plt\nfrom plotly.subplots import make_subplots\nfrom itertools import product\n\nfilename = r\"../../experimental/Full Plate 20x_heatmap.csv\"\n#filename = \"fixed endpoint 3C 25 384well_Entire Carrier.csv\"\n\n# get the separator and use it to read the CSV data table\nseparator = vt.check_separator(filename)\nprint('Separator used:', separator)\n\n# read the CSV table containing all the single object data for\ndf = pd.read_csv(filename, sep=separator)\n\n# get the size of the wellplate\nplatetype = df.shape[0] - 1\nprint(\"Type of WellPlate: \", platetype)\n\nnum_rows, num_cols = vt.getrowandcolumn(platetype=platetype)\nprint(\"Number Rows - Cols:\", num_rows, num_cols)\n\n# correct decimal separator of the CSV table\ndf = df.apply(lambda x: x.astype(str).str.replace(',', '.'))\n\n# replace nan inside units with empty string\ndf = df.applymap(lambda x: np.nan if x == \" \" else x)\n\n# get headers and make names readable\nnew_headers = vt.clean_zenia_headernames(df, match=\"::\")\n\n# rename columns with \"cleaned\" header names\ndf = vt.rename_columns(df, new_headers, verbose=False)\n\n# check for the existence of well category\nif \"ImageSceneCategoryName\" in new_headers:\n category_exist = True\nelse:\n category_exist = False\n df.insert(1, \"ImageSceneCategoryName\", [\" \"] * (platetype + 1))\n\n# get the updated headers, units, number of parameters etc.\nheaders = list(df.columns.values)\n\n# get a list of the used \"units\" for the measured parameters\nunits = list(df.loc[0, :])[2:]\n\n# replace empty string with NaN\nunits = list(map(lambda x: \"\" if x == \"nan\" else x, units))\n\n\nnumparams = len(headers) - 2\nparams = headers[2:]\n\n# use meaningful categories for wells\nfor w in range(platetype):\n ch = df.iloc[w + 1, 1]\n if ch == \" \":\n df.iloc[w + 1, 1] = None\n if pd.isnull(ch):\n df.iloc[w + 1, 1] = \"default\"\n\n# get well categories\nwell_categories = {}\nfor w in range(platetype):\n wellid = df.iloc[w+1, 0]\n category = df.iloc[w+1, 1]\n well_categories[wellid] = df.iloc[w+1, 1]\n \n# create heatmap dictionary with empty array based on the platetype\nheatmap_dict = {}\nfor p in range(0, numparams):\n heatmap_dict[params[p]] = np.full([num_rows, num_cols], np.nan)\n\n# loop over all measured parameters\nfor p in params:\n for i in range(platetype):\n\n # read values\n well = df.iloc[i+1, 0]\n\n # get row and col index from well string\n rowindex, colindex = vt.get_wellID(well)\n\n # read the actual value measure for a specific well\n value = df[p][i+1]\n\n # store the value inside the correct heatmap\n heatmap_dict[p][rowindex, colindex] = value\n \n # convert numpy array into pd.dataframe with labels\n heatmap_dict[p] = vt.convert_array_to_heatmap(heatmap_dict[p],\n num_rows,\n num_cols)\n\n\np2d = 1\n\n# created a single heatmap using matplotlib\nsavename = vt.showheatmap(heatmap_dict[params[p2d]], params[p2d],\n fontsize_title=14,\n fontsize_label=12,\n colormap='Blues',\n linecolor='black',\n linewidth=1.0,\n save=True,\n robust=True\n )\n\n# create the figure of a single heatmap 
plot using plotly\nxaxis_template = dict(constrain=\"domain\",\n side=\"top\",\n #autorange=False,\n showgrid=False,\n zeroline=False,\n showticklabels=True,\n #scaleanchor=\"x\",\n scaleratio=1,\n )\n\nyaxis_template = dict(constrain=\"domain\",\n side=\"left\",\n autorange=\"reversed\",\n showgrid=False,\n zeroline=False,\n showticklabels=True,\n scaleanchor=\"x\",\n scaleratio=1,\n )\n\nfig = vt.create_heatmap_plotly(heatmap_dict[params[p2d]],\n params[p2d],\n units[p2d],\n xaxis_template=xaxis_template,\n yaxis_template=yaxis_template,\n showscale=True,\n colorscale=\"Viridis\")\n\n# display the figure\nfig.show()\n\n# save the figure\nfig.write_html(\"test.html\")\n\nplotgrid, deletelast = vt.determine_plotgrid(numparams, columns=2)\n\nsavename = vt.showheatmap_all(heatmap_dict, plotgrid,\n fontsize_title=14,\n fontsize_label=12,\n colormap='Blues',\n linecolor='black',\n linewidth=1.0,\n save=False,\n robust=True,\n deletelast=deletelast\n )\n\nplt.show()\n\n\n# create titles for subplots\nsubtitles = []\nfor st, un in zip(params, units):\n subtitles.append(st + \" [\" + un + \"]\")\n\n\n# subplots with plotly\nfig2 = make_subplots(rows=plotgrid[0],\n cols=plotgrid[1],\n shared_yaxes=False,\n shared_xaxes=False,\n subplot_titles=subtitles,\n horizontal_spacing=0.01,\n vertical_spacing=0.1,\n )\n# create zero-based counter for subplots\nplotid = -1\n\n# cycle heatmaps heatmaps\nfor r, c in product(range(plotgrid[0]), range(plotgrid[1])):\n plotid = plotid + 1\n\n # check if is a parameter, e.g. a 2x3 grid but only 5 parameters\n if plotid < numparams:\n\n # get the desired heatmap from the dictionary containing all heatmaps\n heatmap_test = heatmap_dict[params[plotid]]\n\n # create dictionary with XYZ data for the heatmap\n xyz = vt.df_to_plotly(heatmap_test)\n\n # create the data for the individual heatmaps\n data = vt.create_heatmap_data(xyz,\n colorscale=\"Viridis\",\n showscale=False,\n unit=units[plotid])\n\n # add the individual subplots\n fig2.add_trace(data, row=r+1, col=c+1)\n\n\n# style x and y axis for all subplots by iterating over them\nfor i in range(len(params)):\n\n fig2.layout[f'yaxis{i + 1}'].update(dict(showgrid=False,\n #side=\"top\",\n constrain=\"domain\",\n autorange='reversed',\n scaleanchor=\"x\",\n scaleratio=1\n )\n )\n\n fig2.layout[f'xaxis{i + 1}'].update(dict(showgrid=False,\n #side=\"bottom\",\n constrain=\"domain\",\n #autorange='reversed',\n scaleanchor=\"x\",\n scaleratio=1\n )\n )\n\n# display the figure\nfig2.show()\n\n# save the figure\nfig2.write_html(\"test2.html\")\n\n" ]
[ [ "pandas.isnull", "pandas.read_csv", "matplotlib.pyplot.show", "numpy.full" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
onesandzeroes/pandas
[ "22d982a8afdef3c438c9c93dfe5299cc5ca07de2" ]
[ "pandas/sparse/tests/test_array.py" ]
[ "from pandas.compat import range\nimport re\nimport operator\nimport warnings\n\nfrom numpy import nan\nimport numpy as np\n\nfrom pandas import _np_version_under1p8\nfrom pandas.sparse.api import SparseArray, SparseSeries\nfrom pandas._sparse import IntIndex\nfrom pandas.util.testing import assert_almost_equal, assertRaisesRegexp\nimport pandas.util.testing as tm\n\n\nclass TestSparseArray(tm.TestCase):\n _multiprocess_can_split_ = True\n\n def setUp(self):\n self.arr_data = np.array([nan, nan, 1, 2, 3, nan, 4, 5, nan, 6])\n self.arr = SparseArray(self.arr_data)\n self.zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0)\n\n def test_constructor_dtype(self):\n arr = SparseArray([np.nan, 1, 2, np.nan])\n self.assertEqual(arr.dtype, np.float64)\n self.assertTrue(np.isnan(arr.fill_value))\n\n arr = SparseArray([np.nan, 1, 2, np.nan], fill_value=0)\n self.assertEqual(arr.dtype, np.float64)\n self.assertEqual(arr.fill_value, 0)\n\n arr = SparseArray([0, 1, 2, 4], dtype=np.float64)\n self.assertEqual(arr.dtype, np.float64)\n self.assertTrue(np.isnan(arr.fill_value))\n\n arr = SparseArray([0, 1, 2, 4], dtype=np.int64)\n self.assertEqual(arr.dtype, np.int64)\n self.assertEqual(arr.fill_value, 0)\n\n arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=np.int64)\n self.assertEqual(arr.dtype, np.int64)\n self.assertEqual(arr.fill_value, 0)\n\n arr = SparseArray([0, 1, 2, 4], dtype=None)\n self.assertEqual(arr.dtype, np.int64)\n self.assertEqual(arr.fill_value, 0)\n\n arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=None)\n self.assertEqual(arr.dtype, np.int64)\n self.assertEqual(arr.fill_value, 0)\n\n def test_constructor_object_dtype(self):\n # GH 11856\n arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object)\n self.assertEqual(arr.dtype, np.object)\n self.assertTrue(np.isnan(arr.fill_value))\n\n arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object,\n fill_value='A')\n self.assertEqual(arr.dtype, np.object)\n self.assertEqual(arr.fill_value, 'A')\n\n def test_constructor_spindex_dtype(self):\n arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]))\n tm.assert_sp_array_equal(arr, SparseArray([np.nan, 1, 2, np.nan]))\n self.assertEqual(arr.dtype, np.float64)\n self.assertTrue(np.isnan(arr.fill_value))\n\n arr = SparseArray(data=[1, 2, 3],\n sparse_index=IntIndex(4, [1, 2, 3]),\n dtype=np.int64, fill_value=0)\n exp = SparseArray([0, 1, 2, 3], dtype=np.int64, fill_value=0)\n tm.assert_sp_array_equal(arr, exp)\n self.assertEqual(arr.dtype, np.int64)\n self.assertEqual(arr.fill_value, 0)\n\n arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),\n fill_value=0, dtype=np.int64)\n exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=np.int64)\n tm.assert_sp_array_equal(arr, exp)\n self.assertEqual(arr.dtype, np.int64)\n self.assertEqual(arr.fill_value, 0)\n\n arr = SparseArray(data=[1, 2, 3],\n sparse_index=IntIndex(4, [1, 2, 3]),\n dtype=None, fill_value=0)\n exp = SparseArray([0, 1, 2, 3], dtype=None)\n tm.assert_sp_array_equal(arr, exp)\n self.assertEqual(arr.dtype, np.int64)\n self.assertEqual(arr.fill_value, 0)\n\n # scalar input\n arr = SparseArray(data=1, sparse_index=IntIndex(1, [0]), dtype=None)\n exp = SparseArray([1], dtype=None)\n tm.assert_sp_array_equal(arr, exp)\n self.assertEqual(arr.dtype, np.int64)\n self.assertEqual(arr.fill_value, 0)\n\n arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),\n fill_value=0, dtype=None)\n exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=None)\n tm.assert_sp_array_equal(arr, exp)\n 
self.assertEqual(arr.dtype, np.int64)\n self.assertEqual(arr.fill_value, 0)\n\n def test_sparseseries_roundtrip(self):\n # GH 13999\n for kind in ['integer', 'block']:\n for fill in [1, np.nan, 0]:\n arr = SparseArray([np.nan, 1, np.nan, 2, 3], kind=kind,\n fill_value=fill)\n res = SparseArray(SparseSeries(arr))\n tm.assert_sp_array_equal(arr, res)\n\n arr = SparseArray([0, 0, 0, 1, 1, 2], dtype=np.int64,\n kind=kind, fill_value=fill)\n res = SparseArray(SparseSeries(arr), dtype=np.int64)\n tm.assert_sp_array_equal(arr, res)\n\n res = SparseArray(SparseSeries(arr))\n tm.assert_sp_array_equal(arr, res)\n\n for fill in [True, False, np.nan]:\n arr = SparseArray([True, False, True, True], dtype=np.bool,\n kind=kind, fill_value=fill)\n res = SparseArray(SparseSeries(arr))\n tm.assert_sp_array_equal(arr, res)\n\n res = SparseArray(SparseSeries(arr))\n tm.assert_sp_array_equal(arr, res)\n\n def test_get_item(self):\n\n self.assertTrue(np.isnan(self.arr[1]))\n self.assertEqual(self.arr[2], 1)\n self.assertEqual(self.arr[7], 5)\n\n self.assertEqual(self.zarr[0], 0)\n self.assertEqual(self.zarr[2], 1)\n self.assertEqual(self.zarr[7], 5)\n\n errmsg = re.compile(\"bounds\")\n assertRaisesRegexp(IndexError, errmsg, lambda: self.arr[11])\n assertRaisesRegexp(IndexError, errmsg, lambda: self.arr[-11])\n self.assertEqual(self.arr[-1], self.arr[len(self.arr) - 1])\n\n def test_take(self):\n self.assertTrue(np.isnan(self.arr.take(0)))\n self.assertTrue(np.isscalar(self.arr.take(2)))\n\n # np.take in < 1.8 doesn't support scalar indexing\n if not _np_version_under1p8:\n self.assertEqual(self.arr.take(2), np.take(self.arr_data, 2))\n self.assertEqual(self.arr.take(6), np.take(self.arr_data, 6))\n\n exp = SparseArray(np.take(self.arr_data, [2, 3]))\n tm.assert_sp_array_equal(self.arr.take([2, 3]), exp)\n\n exp = SparseArray(np.take(self.arr_data, [0, 1, 2]))\n tm.assert_sp_array_equal(self.arr.take([0, 1, 2]), exp)\n\n def test_take_fill_value(self):\n data = np.array([1, np.nan, 0, 3, 0])\n sparse = SparseArray(data, fill_value=0)\n\n exp = SparseArray(np.take(data, [0]), fill_value=0)\n tm.assert_sp_array_equal(sparse.take([0]), exp)\n\n exp = SparseArray(np.take(data, [1, 3, 4]), fill_value=0)\n tm.assert_sp_array_equal(sparse.take([1, 3, 4]), exp)\n\n def test_take_negative(self):\n exp = SparseArray(np.take(self.arr_data, [-1]))\n tm.assert_sp_array_equal(self.arr.take([-1]), exp)\n\n exp = SparseArray(np.take(self.arr_data, [-4, -3, -2]))\n tm.assert_sp_array_equal(self.arr.take([-4, -3, -2]), exp)\n\n def test_bad_take(self):\n assertRaisesRegexp(IndexError, \"bounds\", lambda: self.arr.take(11))\n self.assertRaises(IndexError, lambda: self.arr.take(-11))\n\n def test_take_invalid_kwargs(self):\n msg = r\"take\\(\\) got an unexpected keyword argument 'foo'\"\n tm.assertRaisesRegexp(TypeError, msg, self.arr.take,\n [2, 3], foo=2)\n\n msg = \"the 'out' parameter is not supported\"\n tm.assertRaisesRegexp(ValueError, msg, self.arr.take,\n [2, 3], out=self.arr)\n\n msg = \"the 'mode' parameter is not supported\"\n tm.assertRaisesRegexp(ValueError, msg, self.arr.take,\n [2, 3], mode='clip')\n\n def test_take_filling(self):\n # similar tests as GH 12631\n sparse = SparseArray([np.nan, np.nan, 1, np.nan, 4])\n result = sparse.take(np.array([1, 0, -1]))\n expected = SparseArray([np.nan, np.nan, 4])\n tm.assert_sp_array_equal(result, expected)\n\n # fill_value\n result = sparse.take(np.array([1, 0, -1]), fill_value=True)\n expected = SparseArray([np.nan, np.nan, np.nan])\n tm.assert_sp_array_equal(result, 
expected)\n\n # allow_fill=False\n result = sparse.take(np.array([1, 0, -1]),\n allow_fill=False, fill_value=True)\n expected = SparseArray([np.nan, np.nan, 4])\n tm.assert_sp_array_equal(result, expected)\n\n msg = ('When allow_fill=True and fill_value is not None, '\n 'all indices must be >= -1')\n with tm.assertRaisesRegexp(ValueError, msg):\n sparse.take(np.array([1, 0, -2]), fill_value=True)\n with tm.assertRaisesRegexp(ValueError, msg):\n sparse.take(np.array([1, 0, -5]), fill_value=True)\n\n with tm.assertRaises(IndexError):\n sparse.take(np.array([1, -6]))\n with tm.assertRaises(IndexError):\n sparse.take(np.array([1, 5]))\n with tm.assertRaises(IndexError):\n sparse.take(np.array([1, 5]), fill_value=True)\n\n def test_take_filling_fill_value(self):\n # same tests as GH 12631\n sparse = SparseArray([np.nan, 0, 1, 0, 4], fill_value=0)\n result = sparse.take(np.array([1, 0, -1]))\n expected = SparseArray([0, np.nan, 4], fill_value=0)\n tm.assert_sp_array_equal(result, expected)\n\n # fill_value\n result = sparse.take(np.array([1, 0, -1]), fill_value=True)\n expected = SparseArray([0, np.nan, 0], fill_value=0)\n tm.assert_sp_array_equal(result, expected)\n\n # allow_fill=False\n result = sparse.take(np.array([1, 0, -1]),\n allow_fill=False, fill_value=True)\n expected = SparseArray([0, np.nan, 4], fill_value=0)\n tm.assert_sp_array_equal(result, expected)\n\n msg = ('When allow_fill=True and fill_value is not None, '\n 'all indices must be >= -1')\n with tm.assertRaisesRegexp(ValueError, msg):\n sparse.take(np.array([1, 0, -2]), fill_value=True)\n with tm.assertRaisesRegexp(ValueError, msg):\n sparse.take(np.array([1, 0, -5]), fill_value=True)\n\n with tm.assertRaises(IndexError):\n sparse.take(np.array([1, -6]))\n with tm.assertRaises(IndexError):\n sparse.take(np.array([1, 5]))\n with tm.assertRaises(IndexError):\n sparse.take(np.array([1, 5]), fill_value=True)\n\n def test_take_filling_all_nan(self):\n sparse = SparseArray([np.nan, np.nan, np.nan, np.nan, np.nan])\n result = sparse.take(np.array([1, 0, -1]))\n expected = SparseArray([np.nan, np.nan, np.nan])\n tm.assert_sp_array_equal(result, expected)\n\n result = sparse.take(np.array([1, 0, -1]), fill_value=True)\n expected = SparseArray([np.nan, np.nan, np.nan])\n tm.assert_sp_array_equal(result, expected)\n\n with tm.assertRaises(IndexError):\n sparse.take(np.array([1, -6]))\n with tm.assertRaises(IndexError):\n sparse.take(np.array([1, 5]))\n with tm.assertRaises(IndexError):\n sparse.take(np.array([1, 5]), fill_value=True)\n\n def test_set_item(self):\n def setitem():\n self.arr[5] = 3\n\n def setslice():\n self.arr[1:5] = 2\n\n assertRaisesRegexp(TypeError, \"item assignment\", setitem)\n assertRaisesRegexp(TypeError, \"item assignment\", setslice)\n\n def test_constructor_from_too_large_array(self):\n assertRaisesRegexp(TypeError, \"expected dimension <= 1 data\",\n SparseArray, np.arange(10).reshape((2, 5)))\n\n def test_constructor_from_sparse(self):\n res = SparseArray(self.zarr)\n self.assertEqual(res.fill_value, 0)\n assert_almost_equal(res.sp_values, self.zarr.sp_values)\n\n def test_constructor_copy(self):\n cp = SparseArray(self.arr, copy=True)\n cp.sp_values[:3] = 0\n self.assertFalse((self.arr.sp_values[:3] == 0).any())\n\n not_copy = SparseArray(self.arr)\n not_copy.sp_values[:3] = 0\n self.assertTrue((self.arr.sp_values[:3] == 0).all())\n\n def test_constructor_bool(self):\n # GH 10648\n data = np.array([False, False, True, True, False, False])\n arr = SparseArray(data, fill_value=False, dtype=bool)\n\n 
self.assertEqual(arr.dtype, bool)\n tm.assert_numpy_array_equal(arr.sp_values, np.array([True, True]))\n tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))\n tm.assert_numpy_array_equal(arr.sp_index.indices,\n np.array([2, 3], np.int32))\n\n for dense in [arr.to_dense(), arr.values]:\n self.assertEqual(dense.dtype, bool)\n tm.assert_numpy_array_equal(dense, data)\n\n def test_constructor_bool_fill_value(self):\n arr = SparseArray([True, False, True], dtype=None)\n self.assertEqual(arr.dtype, np.bool)\n self.assertFalse(arr.fill_value)\n\n arr = SparseArray([True, False, True], dtype=np.bool)\n self.assertEqual(arr.dtype, np.bool)\n self.assertFalse(arr.fill_value)\n\n arr = SparseArray([True, False, True], dtype=np.bool, fill_value=True)\n self.assertEqual(arr.dtype, np.bool)\n self.assertTrue(arr.fill_value)\n\n def test_constructor_float32(self):\n # GH 10648\n data = np.array([1., np.nan, 3], dtype=np.float32)\n arr = SparseArray(data, dtype=np.float32)\n\n self.assertEqual(arr.dtype, np.float32)\n tm.assert_numpy_array_equal(arr.sp_values,\n np.array([1, 3], dtype=np.float32))\n tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))\n tm.assert_numpy_array_equal(arr.sp_index.indices,\n np.array([0, 2], dtype=np.int32))\n\n for dense in [arr.to_dense(), arr.values]:\n self.assertEqual(dense.dtype, np.float32)\n self.assert_numpy_array_equal(dense, data)\n\n def test_astype(self):\n res = self.arr.astype('f8')\n res.sp_values[:3] = 27\n self.assertFalse((self.arr.sp_values[:3] == 27).any())\n\n msg = \"unable to coerce current fill_value nan to int64 dtype\"\n with tm.assertRaisesRegexp(ValueError, msg):\n self.arr.astype('i8')\n\n arr = SparseArray([0, np.nan, 0, 1])\n with tm.assertRaisesRegexp(ValueError, msg):\n arr.astype('i8')\n\n arr = SparseArray([0, np.nan, 0, 1], fill_value=0)\n msg = \"Cannot convert NA to integer\"\n with tm.assertRaisesRegexp(ValueError, msg):\n arr.astype('i8')\n\n def test_astype_all(self):\n vals = np.array([1, 2, 3])\n arr = SparseArray(vals, fill_value=1)\n\n types = [np.float64, np.float32, np.int64,\n np.int32, np.int16, np.int8]\n for typ in types:\n res = arr.astype(typ)\n self.assertEqual(res.dtype, typ)\n self.assertEqual(res.sp_values.dtype, typ)\n\n tm.assert_numpy_array_equal(res.values, vals.astype(typ))\n\n def test_set_fill_value(self):\n arr = SparseArray([1., np.nan, 2.], fill_value=np.nan)\n arr.fill_value = 2\n self.assertEqual(arr.fill_value, 2)\n\n arr = SparseArray([1, 0, 2], fill_value=0, dtype=np.int64)\n arr.fill_value = 2\n self.assertEqual(arr.fill_value, 2)\n\n # coerces to int\n msg = \"unable to set fill_value 3\\\\.1 to int64 dtype\"\n with tm.assertRaisesRegexp(ValueError, msg):\n arr.fill_value = 3.1\n\n msg = \"unable to set fill_value nan to int64 dtype\"\n with tm.assertRaisesRegexp(ValueError, msg):\n arr.fill_value = np.nan\n\n arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool)\n arr.fill_value = True\n self.assertTrue(arr.fill_value)\n\n # coerces to bool\n msg = \"unable to set fill_value 0 to bool dtype\"\n with tm.assertRaisesRegexp(ValueError, msg):\n arr.fill_value = 0\n\n msg = \"unable to set fill_value nan to bool dtype\"\n with tm.assertRaisesRegexp(ValueError, msg):\n arr.fill_value = np.nan\n\n # invalid\n msg = \"fill_value must be a scalar\"\n for val in [[1, 2, 3], np.array([1, 2]), (1, 2, 3)]:\n with tm.assertRaisesRegexp(ValueError, msg):\n arr.fill_value = val\n\n def test_copy_shallow(self):\n arr2 = self.arr.copy(deep=False)\n\n def _get_base(values):\n base = 
values.base\n while base.base is not None:\n base = base.base\n return base\n\n assert (_get_base(arr2) is _get_base(self.arr))\n\n def test_values_asarray(self):\n assert_almost_equal(self.arr.values, self.arr_data)\n assert_almost_equal(self.arr.to_dense(), self.arr_data)\n assert_almost_equal(self.arr.sp_values, np.asarray(self.arr))\n\n def test_to_dense(self):\n vals = np.array([1, np.nan, np.nan, 3, np.nan])\n res = SparseArray(vals).to_dense()\n tm.assert_numpy_array_equal(res, vals)\n\n res = SparseArray(vals, fill_value=0).to_dense()\n tm.assert_numpy_array_equal(res, vals)\n\n vals = np.array([1, np.nan, 0, 3, 0])\n res = SparseArray(vals).to_dense()\n tm.assert_numpy_array_equal(res, vals)\n\n res = SparseArray(vals, fill_value=0).to_dense()\n tm.assert_numpy_array_equal(res, vals)\n\n vals = np.array([np.nan, np.nan, np.nan, np.nan, np.nan])\n res = SparseArray(vals).to_dense()\n tm.assert_numpy_array_equal(res, vals)\n\n res = SparseArray(vals, fill_value=0).to_dense()\n tm.assert_numpy_array_equal(res, vals)\n\n # see gh-14647\n with tm.assert_produces_warning(FutureWarning,\n check_stacklevel=False):\n SparseArray(vals).to_dense(fill=2)\n\n def test_getitem(self):\n def _checkit(i):\n assert_almost_equal(self.arr[i], self.arr.values[i])\n\n for i in range(len(self.arr)):\n _checkit(i)\n _checkit(-i)\n\n def test_getslice(self):\n result = self.arr[:-3]\n exp = SparseArray(self.arr.values[:-3])\n tm.assert_sp_array_equal(result, exp)\n\n result = self.arr[-4:]\n exp = SparseArray(self.arr.values[-4:])\n tm.assert_sp_array_equal(result, exp)\n\n # two corner cases from Series\n result = self.arr[-12:]\n exp = SparseArray(self.arr)\n tm.assert_sp_array_equal(result, exp)\n\n result = self.arr[:-12]\n exp = SparseArray(self.arr.values[:0])\n tm.assert_sp_array_equal(result, exp)\n\n def test_getslice_tuple(self):\n dense = np.array([np.nan, 0, 3, 4, 0, 5, np.nan, np.nan, 0])\n\n sparse = SparseArray(dense)\n res = sparse[4:, ]\n exp = SparseArray(dense[4:, ])\n tm.assert_sp_array_equal(res, exp)\n\n sparse = SparseArray(dense, fill_value=0)\n res = sparse[4:, ]\n exp = SparseArray(dense[4:, ], fill_value=0)\n tm.assert_sp_array_equal(res, exp)\n\n with tm.assertRaises(IndexError):\n sparse[4:, :]\n\n with tm.assertRaises(IndexError):\n # check numpy compat\n dense[4:, :]\n\n def test_binary_operators(self):\n data1 = np.random.randn(20)\n data2 = np.random.randn(20)\n data1[::2] = np.nan\n data2[::3] = np.nan\n\n arr1 = SparseArray(data1)\n arr2 = SparseArray(data2)\n\n data1[::2] = 3\n data2[::3] = 3\n farr1 = SparseArray(data1, fill_value=3)\n farr2 = SparseArray(data2, fill_value=3)\n\n def _check_op(op, first, second):\n res = op(first, second)\n exp = SparseArray(op(first.values, second.values),\n fill_value=first.fill_value)\n tm.assertIsInstance(res, SparseArray)\n assert_almost_equal(res.values, exp.values)\n\n res2 = op(first, second.values)\n tm.assertIsInstance(res2, SparseArray)\n tm.assert_sp_array_equal(res, res2)\n\n res3 = op(first.values, second)\n tm.assertIsInstance(res3, SparseArray)\n tm.assert_sp_array_equal(res, res3)\n\n res4 = op(first, 4)\n tm.assertIsInstance(res4, SparseArray)\n\n # ignore this if the actual op raises (e.g. 
pow)\n try:\n exp = op(first.values, 4)\n exp_fv = op(first.fill_value, 4)\n assert_almost_equal(res4.fill_value, exp_fv)\n assert_almost_equal(res4.values, exp)\n except ValueError:\n pass\n\n def _check_inplace_op(op):\n tmp = arr1.copy()\n self.assertRaises(NotImplementedError, op, tmp, arr2)\n\n with np.errstate(all='ignore'):\n bin_ops = [operator.add, operator.sub, operator.mul,\n operator.truediv, operator.floordiv, operator.pow]\n for op in bin_ops:\n _check_op(op, arr1, arr2)\n _check_op(op, farr1, farr2)\n\n inplace_ops = ['iadd', 'isub', 'imul', 'itruediv', 'ifloordiv',\n 'ipow']\n for op in inplace_ops:\n _check_inplace_op(getattr(operator, op))\n\n def test_pickle(self):\n def _check_roundtrip(obj):\n unpickled = self.round_trip_pickle(obj)\n tm.assert_sp_array_equal(unpickled, obj)\n\n _check_roundtrip(self.arr)\n _check_roundtrip(self.zarr)\n\n def test_generator_warnings(self):\n sp_arr = SparseArray([1, 2, 3])\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings(action='always',\n category=DeprecationWarning)\n warnings.filterwarnings(action='always',\n category=PendingDeprecationWarning)\n for _ in sp_arr:\n pass\n assert len(w) == 0\n\n def test_fillna(self):\n s = SparseArray([1, np.nan, np.nan, 3, np.nan])\n res = s.fillna(-1)\n exp = SparseArray([1, -1, -1, 3, -1], fill_value=-1, dtype=np.float64)\n tm.assert_sp_array_equal(res, exp)\n\n s = SparseArray([1, np.nan, np.nan, 3, np.nan], fill_value=0)\n res = s.fillna(-1)\n exp = SparseArray([1, -1, -1, 3, -1], fill_value=0, dtype=np.float64)\n tm.assert_sp_array_equal(res, exp)\n\n s = SparseArray([1, np.nan, 0, 3, 0])\n res = s.fillna(-1)\n exp = SparseArray([1, -1, 0, 3, 0], fill_value=-1, dtype=np.float64)\n tm.assert_sp_array_equal(res, exp)\n\n s = SparseArray([1, np.nan, 0, 3, 0], fill_value=0)\n res = s.fillna(-1)\n exp = SparseArray([1, -1, 0, 3, 0], fill_value=0, dtype=np.float64)\n tm.assert_sp_array_equal(res, exp)\n\n s = SparseArray([np.nan, np.nan, np.nan, np.nan])\n res = s.fillna(-1)\n exp = SparseArray([-1, -1, -1, -1], fill_value=-1, dtype=np.float64)\n tm.assert_sp_array_equal(res, exp)\n\n s = SparseArray([np.nan, np.nan, np.nan, np.nan], fill_value=0)\n res = s.fillna(-1)\n exp = SparseArray([-1, -1, -1, -1], fill_value=0, dtype=np.float64)\n tm.assert_sp_array_equal(res, exp)\n\n # float dtype's fill_value is np.nan, replaced by -1\n s = SparseArray([0., 0., 0., 0.])\n res = s.fillna(-1)\n exp = SparseArray([0., 0., 0., 0.], fill_value=-1)\n tm.assert_sp_array_equal(res, exp)\n\n # int dtype shouldn't have missing. No changes.\n s = SparseArray([0, 0, 0, 0])\n self.assertEqual(s.dtype, np.int64)\n self.assertEqual(s.fill_value, 0)\n res = s.fillna(-1)\n tm.assert_sp_array_equal(res, s)\n\n s = SparseArray([0, 0, 0, 0], fill_value=0)\n self.assertEqual(s.dtype, np.int64)\n self.assertEqual(s.fill_value, 0)\n res = s.fillna(-1)\n exp = SparseArray([0, 0, 0, 0], fill_value=0)\n tm.assert_sp_array_equal(res, exp)\n\n # fill_value can be nan if there is no missing hole.\n # only fill_value will be changed\n s = SparseArray([0, 0, 0, 0], fill_value=np.nan)\n self.assertEqual(s.dtype, np.int64)\n self.assertTrue(np.isnan(s.fill_value))\n res = s.fillna(-1)\n exp = SparseArray([0, 0, 0, 0], fill_value=-1)\n tm.assert_sp_array_equal(res, exp)\n\n def test_fillna_overlap(self):\n s = SparseArray([1, np.nan, np.nan, 3, np.nan])\n # filling with existing value doesn't replace existing value with\n # fill_value, i.e. 
existing 3 remains in sp_values\n res = s.fillna(3)\n exp = np.array([1, 3, 3, 3, 3], dtype=np.float64)\n tm.assert_numpy_array_equal(res.to_dense(), exp)\n\n s = SparseArray([1, np.nan, np.nan, 3, np.nan], fill_value=0)\n res = s.fillna(3)\n exp = SparseArray([1, 3, 3, 3, 3], fill_value=0, dtype=np.float64)\n tm.assert_sp_array_equal(res, exp)\n\n\nclass TestSparseArrayAnalytics(tm.TestCase):\n def test_sum(self):\n data = np.arange(10).astype(float)\n out = SparseArray(data).sum()\n self.assertEqual(out, 45.0)\n\n data[5] = np.nan\n out = SparseArray(data, fill_value=2).sum()\n self.assertEqual(out, 40.0)\n\n out = SparseArray(data, fill_value=np.nan).sum()\n self.assertEqual(out, 40.0)\n\n def test_numpy_sum(self):\n data = np.arange(10).astype(float)\n out = np.sum(SparseArray(data))\n self.assertEqual(out, 45.0)\n\n data[5] = np.nan\n out = np.sum(SparseArray(data, fill_value=2))\n self.assertEqual(out, 40.0)\n\n out = np.sum(SparseArray(data, fill_value=np.nan))\n self.assertEqual(out, 40.0)\n\n msg = \"the 'dtype' parameter is not supported\"\n tm.assertRaisesRegexp(ValueError, msg, np.sum,\n SparseArray(data), dtype=np.int64)\n\n msg = \"the 'out' parameter is not supported\"\n tm.assertRaisesRegexp(ValueError, msg, np.sum,\n SparseArray(data), out=out)\n\n def test_cumsum(self):\n data = np.arange(10).astype(float)\n out = SparseArray(data).cumsum()\n expected = SparseArray(data.cumsum())\n tm.assert_sp_array_equal(out, expected)\n\n # TODO: gh-12855 - return a SparseArray here\n data[5] = np.nan\n out = SparseArray(data, fill_value=2).cumsum()\n self.assertNotIsInstance(out, SparseArray)\n tm.assert_numpy_array_equal(out, data.cumsum())\n\n out = SparseArray(data, fill_value=np.nan).cumsum()\n expected = SparseArray(np.array([\n 0, 1, 3, 6, 10, np.nan, 16, 23, 31, 40]))\n tm.assert_sp_array_equal(out, expected)\n\n def test_numpy_cumsum(self):\n data = np.arange(10).astype(float)\n out = np.cumsum(SparseArray(data))\n expected = SparseArray(data.cumsum())\n tm.assert_sp_array_equal(out, expected)\n\n # TODO: gh-12855 - return a SparseArray here\n data[5] = np.nan\n out = np.cumsum(SparseArray(data, fill_value=2))\n self.assertNotIsInstance(out, SparseArray)\n tm.assert_numpy_array_equal(out, data.cumsum())\n\n out = np.cumsum(SparseArray(data, fill_value=np.nan))\n expected = SparseArray(np.array([\n 0, 1, 3, 6, 10, np.nan, 16, 23, 31, 40]))\n tm.assert_sp_array_equal(out, expected)\n\n msg = \"the 'dtype' parameter is not supported\"\n tm.assertRaisesRegexp(ValueError, msg, np.cumsum,\n SparseArray(data), dtype=np.int64)\n\n msg = \"the 'out' parameter is not supported\"\n tm.assertRaisesRegexp(ValueError, msg, np.cumsum,\n SparseArray(data), out=out)\n\n def test_mean(self):\n data = np.arange(10).astype(float)\n out = SparseArray(data).mean()\n self.assertEqual(out, 4.5)\n\n data[5] = np.nan\n out = SparseArray(data).mean()\n self.assertEqual(out, 40.0 / 9)\n\n def test_numpy_mean(self):\n data = np.arange(10).astype(float)\n out = np.mean(SparseArray(data))\n self.assertEqual(out, 4.5)\n\n data[5] = np.nan\n out = np.mean(SparseArray(data))\n self.assertEqual(out, 40.0 / 9)\n\n msg = \"the 'dtype' parameter is not supported\"\n tm.assertRaisesRegexp(ValueError, msg, np.mean,\n SparseArray(data), dtype=np.int64)\n\n msg = \"the 'out' parameter is not supported\"\n tm.assertRaisesRegexp(ValueError, msg, np.mean,\n SparseArray(data), out=out)\n\n def test_ufunc(self):\n # GH 13853 make sure ufunc is applied to fill_value\n sparse = SparseArray([1, np.nan, 2, np.nan, -2])\n 
result = SparseArray([1, np.nan, 2, np.nan, 2])\n tm.assert_sp_array_equal(abs(sparse), result)\n tm.assert_sp_array_equal(np.abs(sparse), result)\n\n sparse = SparseArray([1, -1, 2, -2], fill_value=1)\n result = SparseArray([1, 2, 2], sparse_index=sparse.sp_index,\n fill_value=1)\n tm.assert_sp_array_equal(abs(sparse), result)\n tm.assert_sp_array_equal(np.abs(sparse), result)\n\n sparse = SparseArray([1, -1, 2, -2], fill_value=-1)\n result = SparseArray([1, 2, 2], sparse_index=sparse.sp_index,\n fill_value=1)\n tm.assert_sp_array_equal(abs(sparse), result)\n tm.assert_sp_array_equal(np.abs(sparse), result)\n\n sparse = SparseArray([1, np.nan, 2, np.nan, -2])\n result = SparseArray(np.sin([1, np.nan, 2, np.nan, -2]))\n tm.assert_sp_array_equal(np.sin(sparse), result)\n\n sparse = SparseArray([1, -1, 2, -2], fill_value=1)\n result = SparseArray(np.sin([1, -1, 2, -2]), fill_value=np.sin(1))\n tm.assert_sp_array_equal(np.sin(sparse), result)\n\n sparse = SparseArray([1, -1, 0, -2], fill_value=0)\n result = SparseArray(np.sin([1, -1, 0, -2]), fill_value=np.sin(0))\n tm.assert_sp_array_equal(np.sin(sparse), result)\n\n def test_ufunc_args(self):\n # GH 13853 make sure ufunc is applied to fill_value, including its arg\n sparse = SparseArray([1, np.nan, 2, np.nan, -2])\n result = SparseArray([2, np.nan, 3, np.nan, -1])\n tm.assert_sp_array_equal(np.add(sparse, 1), result)\n\n sparse = SparseArray([1, -1, 2, -2], fill_value=1)\n result = SparseArray([2, 0, 3, -1], fill_value=2)\n tm.assert_sp_array_equal(np.add(sparse, 1), result)\n\n sparse = SparseArray([1, -1, 0, -2], fill_value=0)\n result = SparseArray([2, 0, 1, -1], fill_value=1)\n tm.assert_sp_array_equal(np.add(sparse, 1), result)\n\n\nif __name__ == '__main__':\n import nose\n nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],\n exit=False)\n" ]
[ [ "pandas.util.testing.assertIsInstance", "numpy.take", "numpy.asarray", "pandas.util.testing.assert_produces_warning", "numpy.random.randn", "pandas.sparse.api.SparseSeries", "pandas.util.testing.assert_numpy_array_equal", "numpy.arange", "numpy.sin", "pandas._sparse.IntIndex", "numpy.isnan", "pandas.util.testing.assert_almost_equal", "pandas.util.testing.assert_sp_array_equal", "numpy.errstate", "numpy.array", "numpy.abs", "pandas.util.testing.assertRaisesRegexp", "pandas.sparse.api.SparseArray", "pandas.util.testing.assertRaises", "numpy.add" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.19" ], "scipy": [], "tensorflow": [] } ]
chok68/video-scene-inventory
[ "fad046aebe824834cc891e9b81b7a300debf7409" ]
[ "scenedetect/detectors/motion_detector.py" ]
[ "# -*- coding: utf-8 -*-\n#\n# PySceneDetect: Python-Based Video Scene Detector\n# ---------------------------------------------------------------\n# [ Site: http://www.bcastell.com/projects/pyscenedetect/ ]\n# [ Github: https://github.com/Breakthrough/PySceneDetect/ ]\n# [ Documentation: http://pyscenedetect.readthedocs.org/ ]\n#\n# Copyright (C) 2012-2018 Brandon Castellano <http://www.bcastell.com>.\n#\n# PySceneDetect is licensed under the BSD 2-Clause License; see the included\n# LICENSE file, or visit one of the following pages for details:\n# - https://github.com/Breakthrough/PySceneDetect/\n# - http://www.bcastell.com/projects/pyscenedetect/\n#\n# This software uses Numpy, OpenCV, click, pytest, mkvmerge, and ffmpeg. See\n# the included LICENSE-* files, or one of the above URLs for more information.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\n# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n\n# Third-Party Library Imports\nimport cv2\nimport numpy\n\n\nfrom scenedetect.scene_detector import SceneDetector\n\nclass MotionDetector(SceneDetector):\n \"\"\"Detects motion events in scenes containing a static background.\n\n Uses background subtraction followed by noise removal (via morphological\n opening) to generate a frame score compared against the set threshold.\n\n Attributes:\n threshold: floating point value compared to each frame's score, which\n represents average intensity change per pixel (lower values are\n more sensitive to motion changes). Default 0.5, must be > 0.0.\n num_frames_post_scene: Number of frames to include in each motion\n event after the frame score falls below the threshold, adding any\n subsequent motion events to the same scene.\n kernel_size: Size of morphological opening kernel for noise removal.\n Setting to -1 (default) will auto-compute based on video resolution\n (typically 3 for SD, 5-7 for HD). 
Must be an odd integer > 1.\n \"\"\"\n def __init__(self, threshold = 0.50, num_frames_post_scene = 30,\n kernel_size = -1):\n \"\"\"Initializes motion-based scene detector object.\"\"\"\n # Requires porting to v0.5 API.\n raise NotImplementedError()\n\n self.threshold = float(threshold)\n self.num_frames_post_scene = int(num_frames_post_scene)\n\n self.kernel_size = int(kernel_size)\n if self.kernel_size < 0:\n # Set kernel size when process_frame first runs based on\n # video resolution (480p = 3x3, 720p = 5x5, 1080p = 7x7).\n pass\n\n self.bg_subtractor = cv2.createBackgroundSubtractorMOG2( \n detectShadows = False )\n\n self.last_frame_score = 0.0\n\n self.in_motion_event = False\n self.first_motion_frame_index = -1\n self.last_motion_frame_index = -1\n self.cli_name = 'detect-motion'\n return\n\n def process_frame(self, frame_num, frame_img, frame_metrics, scene_list):\n\n # Value to return indiciating if a scene cut was found or not.\n cut_detected = False\n\n frame_grayscale = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n masked_frame = self.bg_subtractor.apply(frame_grayscale)\n\n kernel = numpy.ones((self.kernel_size, self.kernel_size), numpy.uint8)\n filtered_frame = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)\n\n frame_score = numpy.sum(filtered_frame) / float( \n filtered_frame.shape[0] * filtered_frame.shape[1] )\n\n return cut_detected\n\n def post_process(self, scene_list, frame_num):\n \"\"\"Writes the last scene if the video ends while in a motion event.\n \"\"\"\n\n # If the last fade detected was a fade out, we add a corresponding new\n # scene break to indicate the end of the scene. This is only done for\n # fade-outs, as a scene cut is already added when a fade-in is found.\n\n if self.in_motion_event:\n # Write new scene based on first and last motion event frames.\n pass\n return self.in_motion_event\n\n\n" ]
[ [ "numpy.sum", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
balisujohn/adversarial-patch-vgg16
[ "63d092e3059afa09df13d360acae5c8567cdeed9" ]
[ "test_adversarial_patch.py" ]
[ "import json\nfrom random import randrange\nimport random\nimport os\n\n\nimport matplotlib.pyplot as plt\nimport torch\nimport torchvision\nimport torchvision.models as models\nfrom torchvision import transforms, datasets\nimport torch.nn as nn\nimport torch.nn.functional as functional\nimport torch.optim as optim\nimport tqdm \nfrom tqdm import trange\nimport numpy as np\n\n\n\nimport helper\nimport label_tags\nfrom train_adversarial_patch import n_most_likely, freeze_model\n\n\nSAMPLES = 100\n#CORRECT_CLASS = 859 # toaster\n#CORRECT_CLASS = 145 # king penguin\nCORRECT_CLASS = 74 # garden spider\nRENDER = True\n\nimage = None\ntransform = transforms.Compose([transforms.Resize(255),\n transforms.CenterCrop(224),\n transforms.ToTensor()])\n\ndataset = datasets.ImageFolder(os.path.join('training_images'), transform = transform )\n\ndataloader = torch.utils.data.DataLoader(dataset, batch_size = 1, shuffle = True)\n\nimages, labels = next(iter(dataloader))\n\n\n\nwith open(\"adversarial_patch.json\", 'r') as json_file:\n patch = torch.Tensor(np.array(json.load(json_file)['image'])).view(200,200,3).swapaxes(0,2)\n patch_map = torch.ones((3,200,200))\n \n if RENDER:\n helper.imshow(patch, normalize=False)\n plt.show()\n\n\n vgg16 = models.vgg16(pretrained=True)\n freeze_model(vgg16)\n vgg16.eval()\n\n correct_count = 0\n\n\n for i in range(SAMPLES):\n print(f\"Sample {str(i)}\")\n images, _ = next(iter(dataloader))\n x = torch.clone(images[0].view(1,3,224,224))\n \n\n if RENDER:\n helper.imshow(x.view(3,224,224).detach(), normalize = False)\n plt.show()\n \n result = vgg16.forward(x)\n print(\"predictions before patch added:\")\n n_most_likely(result)\n\n\n\n patch_size = randrange(120,200)\n split_upper = 224 - patch_size\n patch_map = torch.ones((patch_size,patch_size,3))\n for i in range(patch_size):\n for c in range(patch_size):\n if ((i-(patch_size/2.0))**2) + ((c-(patch_size/2.0))**2) > ((patch_size/2.0)**2):\n patch_map[i,c,:] = 0\n\n\n v_split_index = randrange(0,split_upper)\n v_before_padding = v_split_index\n v_after_padding = 224 - (v_split_index + patch_size)\n\n h_split_index = randrange(0,split_upper)\n h_before_padding = h_split_index\n h_after_padding = 224 - (h_split_index + patch_size)\n\n\n x = x.swapaxes(1,3)\n resized_fixed_input = torchvision.transforms.functional.rotate(torchvision.transforms.functional.resize(patch, (patch_size, patch_size)), randrange(-45,45),interpolation =torchvision.transforms.InterpolationMode.NEAREST)\n padded_patch = torchvision.transforms.functional.pad(resized_fixed_input.swapaxes(0,2).view(1,patch_size,patch_size,3).swapaxes(1,3),padding = (h_before_padding,v_before_padding,h_after_padding,v_after_padding)).swapaxes(1,3)\n sample_patch_map = torchvision.transforms.functional.pad(patch_map.view(1,patch_size,patch_size,3).swapaxes(1,3),padding = (h_before_padding,v_before_padding,h_after_padding,v_after_padding)).swapaxes(1,3)\n x = torch.where(sample_patch_map > 0, padded_patch , x) \n\n \n \n\n if RENDER:\n helper.imshow(x.swapaxes(1,3).view(3,224,224).detach(), normalize = False)\n plt.show()\n\n x = torchvision.transforms.functional.adjust_brightness(x.view(224,224,3).swapaxes(0,2),random.uniform(0.8,1.2))\n x = torchvision.transforms.functional.adjust_contrast(x,random.uniform(0.5,2))\n x = torchvision.transforms.functional.adjust_saturation(x,random.uniform(0.2,1.3))\n x = torchvision.transforms.functional.adjust_hue(x,random.uniform(-0.1,0.1)).view(1,3,224,224)\n\n\n\n result = vgg16.forward(x)\n print(\"predictions after patch 
added:\")\n n_most_likely(result)\n if np.argmax(result).item() == CORRECT_CLASS:\n correct_count += 1.0\n\n if RENDER:\n helper.imshow(x.view(3,224,224).detach(), normalize = False)\n plt.show()\n print(\"Percent Correct: \" + str(100.0 * (correct_count/SAMPLES)))\n" ]
[ [ "torch.ones", "torch.utils.data.DataLoader", "numpy.argmax", "torch.where", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
TICCLAT/ticcl-output-reader
[ "9474533092f6438053d660fd57b645a41b0f9345" ]
[ "tests/test_extension.py" ]
[ "import ticcl_output_reader as m\nfrom unittest import TestCase\nimport numpy as np\n\n\nclass ExampleTest(TestCase):\n\n def test_example1(self):\n self.assertEqual(4, m.example1([4, 5, 6]))\n\n def test_example2(self):\n x = np.array([[0., 1.], [2., 3.]])\n res = np.array([[2., 3.], [4., 5.]])\n y = m.example2(x)\n np.testing.assert_allclose(y, res, 1e-12)\n\n def test_vectorize(self):\n x1 = np.array([[0, 1], [2, 3]])\n x2 = np.array([0, 1])\n res = np.array([[ 1. , 1.381773290676036],\n [ 1.909297426825682, 0.681422313928007]])\n y = m.vectorize_example1(x1, x2)\n np.testing.assert_allclose(y, res, 1e-12)\n\n def test_readme_example1(self):\n v = np.arange(15).reshape(3, 5)\n y = m.readme_example1(v)\n np.testing.assert_allclose(y, 1.2853996391883833, 1e-12)\n\n" ]
[ [ "numpy.arange", "numpy.array", "numpy.testing.assert_allclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fuying-wang/tianshou
[ "866e35d550fe9aaaff5492f463eb59d9196efc83" ]
[ "test/continuous/test_sac_with_il.py" ]
[ "import os\nimport gym\nimport torch\nimport pprint\nimport argparse\nimport numpy as np\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom tianshou.env import DummyVectorEnv\nfrom tianshou.utils.net.common import Net\nfrom tianshou.trainer import offpolicy_trainer\nfrom tianshou.data import Collector, ReplayBuffer\nfrom tianshou.policy import SACPolicy, ImitationPolicy\nfrom tianshou.utils.net.continuous import Actor, ActorProb, Critic\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--task', type=str, default='Pendulum-v0')\n parser.add_argument('--seed', type=int, default=0)\n parser.add_argument('--buffer-size', type=int, default=20000)\n parser.add_argument('--actor-lr', type=float, default=3e-4)\n parser.add_argument('--critic-lr', type=float, default=1e-3)\n parser.add_argument('--il-lr', type=float, default=1e-3)\n parser.add_argument('--gamma', type=float, default=0.99)\n parser.add_argument('--tau', type=float, default=0.005)\n parser.add_argument('--alpha', type=float, default=0.2)\n parser.add_argument('--epoch', type=int, default=20)\n parser.add_argument('--step-per-epoch', type=int, default=2400)\n parser.add_argument('--collect-per-step', type=int, default=10)\n parser.add_argument('--batch-size', type=int, default=128)\n parser.add_argument('--layer-num', type=int, default=1)\n parser.add_argument('--training-num', type=int, default=8)\n parser.add_argument('--test-num', type=int, default=100)\n parser.add_argument('--logdir', type=str, default='log')\n parser.add_argument('--render', type=float, default=0.)\n parser.add_argument('--rew-norm', type=int, default=1)\n parser.add_argument('--ignore-done', type=int, default=1)\n parser.add_argument('--n-step', type=int, default=4)\n parser.add_argument(\n '--device', type=str,\n default='cuda' if torch.cuda.is_available() else 'cpu')\n args = parser.parse_known_args()[0]\n return args\n\n\ndef test_sac_with_il(args=get_args()):\n torch.set_num_threads(1) # we just need only one thread for NN\n env = gym.make(args.task)\n if args.task == 'Pendulum-v0':\n env.spec.reward_threshold = -250\n args.state_shape = env.observation_space.shape or env.observation_space.n\n args.action_shape = env.action_space.shape or env.action_space.n\n args.max_action = env.action_space.high[0]\n # you can also use tianshou.env.SubprocVectorEnv\n # train_envs = gym.make(args.task)\n train_envs = DummyVectorEnv(\n [lambda: gym.make(args.task) for _ in range(args.training_num)])\n # test_envs = gym.make(args.task)\n test_envs = DummyVectorEnv(\n [lambda: gym.make(args.task) for _ in range(args.test_num)])\n # seed\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n train_envs.seed(args.seed)\n test_envs.seed(args.seed)\n # model\n net = Net(args.layer_num, args.state_shape, device=args.device)\n actor = ActorProb(\n net, args.action_shape, args.max_action, args.device, unbounded=True\n ).to(args.device)\n actor_optim = torch.optim.Adam(actor.parameters(), lr=args.actor_lr)\n net_c1 = Net(args.layer_num, args.state_shape,\n args.action_shape, concat=True, device=args.device)\n critic1 = Critic(net_c1, args.device).to(args.device)\n critic1_optim = torch.optim.Adam(critic1.parameters(), lr=args.critic_lr)\n net_c2 = Net(args.layer_num, args.state_shape,\n args.action_shape, concat=True, device=args.device)\n critic2 = Critic(net_c2, args.device).to(args.device)\n critic2_optim = torch.optim.Adam(critic2.parameters(), lr=args.critic_lr)\n policy = SACPolicy(\n actor, actor_optim, critic1, critic1_optim, 
critic2, critic2_optim,\n action_range=[env.action_space.low[0], env.action_space.high[0]],\n tau=args.tau, gamma=args.gamma, alpha=args.alpha,\n reward_normalization=args.rew_norm,\n ignore_done=args.ignore_done,\n estimation_step=args.n_step)\n # collector\n train_collector = Collector(\n policy, train_envs, ReplayBuffer(args.buffer_size))\n test_collector = Collector(policy, test_envs)\n # train_collector.collect(n_step=args.buffer_size)\n # log\n log_path = os.path.join(args.logdir, args.task, 'sac')\n writer = SummaryWriter(log_path)\n\n def save_fn(policy):\n torch.save(policy.state_dict(), os.path.join(log_path, 'policy.pth'))\n\n def stop_fn(mean_rewards):\n return mean_rewards >= env.spec.reward_threshold\n\n # trainer\n result = offpolicy_trainer(\n policy, train_collector, test_collector, args.epoch,\n args.step_per_epoch, args.collect_per_step, args.test_num,\n args.batch_size, stop_fn=stop_fn, save_fn=save_fn, writer=writer)\n assert stop_fn(result['best_reward'])\n if __name__ == '__main__':\n pprint.pprint(result)\n # Let's watch its performance!\n env = gym.make(args.task)\n policy.eval()\n collector = Collector(policy, env)\n result = collector.collect(n_episode=1, render=args.render)\n print(f'Final reward: {result[\"rew\"]}, length: {result[\"len\"]}')\n\n # here we define an imitation collector with a trivial policy\n policy.eval()\n if args.task == 'Pendulum-v0':\n env.spec.reward_threshold = -300 # lower the goal\n net = Actor(\n Net(1, args.state_shape, device=args.device),\n args.action_shape, args.max_action, args.device\n ).to(args.device)\n optim = torch.optim.Adam(net.parameters(), lr=args.il_lr)\n il_policy = ImitationPolicy(net, optim, mode='continuous')\n il_test_collector = Collector(\n il_policy,\n DummyVectorEnv(\n [lambda: gym.make(args.task) for _ in range(args.test_num)])\n )\n train_collector.reset()\n result = offpolicy_trainer(\n il_policy, train_collector, il_test_collector, args.epoch,\n args.step_per_epoch // 5, args.collect_per_step, args.test_num,\n args.batch_size, stop_fn=stop_fn, save_fn=save_fn, writer=writer)\n assert stop_fn(result['best_reward'])\n if __name__ == '__main__':\n pprint.pprint(result)\n # Let's watch its performance!\n env = gym.make(args.task)\n il_policy.eval()\n collector = Collector(il_policy, env)\n result = collector.collect(n_episode=1, render=args.render)\n print(f'Final reward: {result[\"rew\"]}, length: {result[\"len\"]}')\n\n\nif __name__ == '__main__':\n test_sac_with_il()\n" ]
[ [ "numpy.random.seed", "torch.manual_seed", "torch.set_num_threads", "torch.utils.tensorboard.SummaryWriter", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
DarkstartsUp/stock_predict_with_LSTM
[ "42dc56954b5262d6e50d1552505f1e87741d6256" ]
[ "data_download.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@Author: jifeng\r\n@File Create: 20200606\r\n@Last Modify: 20200606\r\n@Function: from tushare get stock data, generate array\r\n\"\"\"\r\n\r\nimport tushare as ts\r\nimport pandas as pd\r\nimport time\r\nimport numpy as np\r\nts.set_token('33aeeaf3e4e6b1cac85a6035f0adf9fe0efb9e386d9c20ad0d3e4b81')\r\npro = ts.pro_api()\r\n\r\nhs300 = list(set(pro.index_weight(index_code='399300.SZ', start_date='20160101', end_date='20170101')['con_code']))\r\n\r\nroe_hs300 = pd.DataFrame()\r\neps_hs300 = pd.DataFrame()\r\nfor stock in hs300:\r\n data = pro.query('fina_indicator', ts_code=stock, start_date='20160101', end_date='20170801')\r\n roe_hs300[stock] = data['roe']\r\n eps_hs300[stock] = data['eps']\r\n time.sleep(1)\r\n \r\n \r\nsort_stock_list = list((roe_hs300.loc[5].argsort()+eps_hs300.loc[5].argsort()).argsort().sort_values().index)\r\ndata = [[0 for i in range(18)] for j in range(18)]\r\n\r\nfor stock in sort_stock_list:\r\n df = pro.query('daily', ts_code=stock, start_date='20160101', end_date='20200101')\r\n df.to_csv(stock+'.csv')\r\ndt = pd.DataFrame()\r\nfor stock in sort_stock_list:\r\n print(stock)\r\n df = pd.read_csv(stock+'.csv',index_col=2)\r\n dt[stock] = df['close']\r\n print(df.head(1))\r\nt=0 \r\nfor i in range(36-1):\r\n for j in range(i+1):\r\n k = i-j\r\n if k<18 and k>=0 and j<18:\r\n #df = pro.query('daily', ts_code=sort_stock_list[t], start_date='20160101', end_date='20200101')\r\n #df_price = df['close']\r\n t = t+1\r\n data[j][k]=dt[sort_stock_list[t]]\r\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
po3navy/tf-quant-finance
[ "cd63262d94a1aaeeeb33f37709a25b14d4d993a6" ]
[ "tf_quant_finance/experimental/pricing_platform/framework/rate_instruments/cashflow_streams.py" ]
[ "# Lint as: python3\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Cashflow streams objects.\"\"\"\n\nfrom typing import Optional, Tuple, Callable, Any, List, Union\n\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nfrom tf_quant_finance import datetime as dateslib\nfrom tf_quant_finance.experimental.pricing_platform.framework.core import curve_types as curve_types_lib\nfrom tf_quant_finance.experimental.pricing_platform.framework.core import processed_market_data as pmd\nfrom tf_quant_finance.experimental.pricing_platform.framework.core import types\nfrom tf_quant_finance.experimental.pricing_platform.framework.market_data import rate_curve\nfrom tf_quant_finance.experimental.pricing_platform.framework.market_data import utils as market_data_utils\nfrom tf_quant_finance.experimental.pricing_platform.framework.rate_instruments import coupon_specs\nfrom tf_quant_finance.experimental.pricing_platform.instrument_protos import period_pb2\nfrom tf_quant_finance.math import pad\n\n\n_CurveType = curve_types_lib.CurveType\n\n\nclass FixedCashflowStream:\n \"\"\"Represents a batch of fixed stream of cashflows.\"\"\"\n\n def __init__(self,\n coupon_spec: coupon_specs.FixedCouponSpecs,\n discount_curve_type: _CurveType,\n start_date: types.DateTensor = None,\n end_date: types.DateTensor = None,\n discount_curve_mask: types.IntTensor = None,\n first_coupon_date: Optional[types.DateTensor] = None,\n penultimate_coupon_date: Optional[types.DateTensor] = None,\n schedule_fn: Optional[Callable[..., Any]] = None,\n schedule: Optional[types.DateTensor] = None,\n dtype: Optional[types.Dtype] = None,\n name: Optional[str] = None):\n \"\"\"Initializes a batch of fixed cashflow streams.\n\n Args:\n coupon_spec: An instance of `FixedCouponSpecs` specifying the\n details of the coupon payment for the cashflow stream.\n discount_curve_type: An instance of `CurveType` or a list of those.\n If supplied as a list and `discount_curve_mask` is not supplied,\n the size of the list should be the same as the number of priced\n instruments. Defines discount curves for the instruments.\n start_date: A `DateTensor` of `batch_shape` specifying the starting dates\n of the accrual of the first coupon of the cashflow stream. The shape of\n the input correspond to the number of streams being created.\n When passed as an integet `Tensor`, should be of shape\n `batch_shape + [3]` and contain `[year, month, day]` for each date.\n Either this of `schedule` should be supplied\n Default value: `None`\n end_date: A `DateTensor` of `batch_shape`specifying the end dates for\n accrual of the last coupon in each cashflow stream. 
The shape of the\n input should be the same as that of `start_date`.\n Either this of `schedule` should be supplied\n When passed as an integet `Tensor`, should be of shape\n `batch_shape + [3]` and contain `[year, month, day]` for each date.\n Default value: `None`\n discount_curve_mask: An optional integer `Tensor` of values ranging from\n `0` to `len(discount_curve_type) - 1` and of shape `batch_shape`.\n Identifies a mapping between `discount_curve_type` list and the\n underlying instruments.\n Default value: `None`.\n first_coupon_date: An optional `DateTensor` specifying the payment dates\n of the first coupon of the cashflow stream. Use this input for cashflows\n with irregular first coupon. Should be of the same shape as\n `start_date`.\n When passed as an integet `Tensor`, should be of shape\n `batch_shape + [3]` and contain `[year, month, day]` for each date.\n Default value: None which implies regular first coupon.\n penultimate_coupon_date: An optional `DateTensor` specifying the payment\n dates of the penultimate (next to last) coupon of the cashflow\n stream. Use this input for cashflows with irregular last coupon.\n Should be of the same shape as `end_date`.\n When passed as an integet `Tensor`, should be of shape\n `batch_shape + [3]` and contain `[year, month, day]` for each date.\n Default value: None which implies regular last coupon.\n schedule_fn: A callable that accepts `start_date`, `end_date`,\n `coupon_frequency`, `settlement_days`, `first_coupon_date`, and\n `penultimate_coupon_date` as `Tensor`s and returns coupon payment\n days.\n Default value: `None`.\n schedule: A `DateTensor` of coupon payment dates including the start and\n end dates of the cashflows.\n Default value: `None`.\n dtype: `tf.Dtype` of the input and output real `Tensor`s.\n Default value: None which maps to the default dtype inferred by\n TensorFlow.\n name: Python str. 
The name to give to the ops created by this class.\n Default value: `None` which maps to 'fixed_cashflow_stream'.\n \"\"\"\n self._name = name or \"fixed_cashflow_stream\"\n\n with tf.name_scope(self._name):\n curve_list = to_list(discount_curve_type)\n [\n self._discount_curve_type,\n self._mask\n ] = process_curve_types(curve_list, discount_curve_mask)\n\n if schedule is None:\n if (start_date is None) or (end_date is None):\n raise ValueError(\"If `schedule` is not supplied both \"\n \"`start_date` and `end_date` should be supplied\")\n if isinstance(start_date, tf.Tensor):\n self._start_date = dateslib.dates_from_tensor(\n start_date)\n else:\n self._start_date = dateslib.convert_to_date_tensor(\n start_date)\n if isinstance(start_date, tf.Tensor):\n self._end_date = dateslib.dates_from_tensor(\n end_date)\n else:\n self._end_date = dateslib.convert_to_date_tensor(\n end_date)\n self._first_coupon_date = first_coupon_date\n self._penultimate_coupon_date = penultimate_coupon_date\n if self._first_coupon_date is not None:\n if isinstance(start_date, tf.Tensor):\n self._first_coupon_date = dateslib.dates_from_tensor(\n first_coupon_date)\n else:\n self._first_coupon_date = dateslib.convert_to_date_tensor(\n first_coupon_date)\n if self._penultimate_coupon_date is not None:\n if isinstance(start_date, tf.Tensor):\n self._penultimate_coupon_date = dateslib.dates_from_tensor(\n penultimate_coupon_date)\n else:\n self._penultimate_coupon_date = dateslib.convert_to_date_tensor(\n penultimate_coupon_date)\n\n # Update coupon frequency\n coupon_frequency = _get_attr(coupon_spec, \"coupon_frequency\")\n if isinstance(coupon_frequency, period_pb2.Period):\n coupon_frequency = market_data_utils.get_period(\n _get_attr(coupon_spec, \"coupon_frequency\"))\n if isinstance(coupon_frequency, (list, tuple)):\n coupon_frequency = market_data_utils.period_from_list(\n *_get_attr(coupon_spec, \"coupon_frequency\"))\n if isinstance(coupon_frequency, dict):\n coupon_frequency = market_data_utils.period_from_dict(\n _get_attr(coupon_spec, \"coupon_frequency\"))\n\n businessday_rule = coupon_spec.businessday_rule\n # Business day roll convention and the end of month flag\n roll_convention, eom = market_data_utils.get_business_day_convention(\n businessday_rule)\n\n notional = tf.convert_to_tensor(\n _get_attr(coupon_spec, \"notional_amount\"),\n dtype=dtype,\n name=\"notional\")\n self._dtype = dtype or notional.dtype\n fixed_rate = tf.convert_to_tensor(_get_attr(coupon_spec, \"fixed_rate\"),\n dtype=self._dtype,\n name=\"fixed_rate\")\n # TODO(b/160446193): Calendar is ignored and weekends only is used\n calendar = dateslib.create_holiday_calendar(\n weekend_mask=dateslib.WeekendMask.SATURDAY_SUNDAY)\n daycount_fn = market_data_utils.get_daycount_fn(\n _get_attr(coupon_spec, \"daycount_convention\"), self._dtype)\n\n self._settlement_days = tf.convert_to_tensor(\n _get_attr(coupon_spec, \"settlement_days\"),\n dtype=tf.int32,\n name=\"settlement_days\")\n\n if schedule is not None:\n if isinstance(start_date, tf.Tensor):\n coupon_dates = dateslib.dates_from_tensor(schedule)\n else:\n coupon_dates = dateslib.convert_to_date_tensor(schedule)\n # Extract starting date for the cashflow\n self._start_date = coupon_dates[..., 0]\n elif schedule_fn is None:\n coupon_dates = _generate_schedule(\n start_date=self._start_date,\n end_date=self._end_date,\n coupon_frequency=coupon_frequency,\n roll_convention=roll_convention,\n calendar=calendar,\n settlement_days=self._settlement_days,\n end_of_month=eom,\n 
first_coupon_date=self._first_coupon_date,\n penultimate_coupon_date=self._penultimate_coupon_date)\n # Extract starting date for the cashflow\n self._start_date = coupon_dates[..., 0]\n else:\n if first_coupon_date is not None:\n first_coupon_date = self._first_coupon_date.to_tensor()\n if penultimate_coupon_date is not None:\n penultimate_coupon_date = self._penultimate_coupon_date.to_tensor()\n coupon_dates = schedule_fn(\n start_date=self._start_date.to_tensor(),\n end_date=self._end_date.to_tensor(),\n coupon_frequency=coupon_frequency.quantity(),\n settlement_days=self._settlement_days,\n first_coupon_date=first_coupon_date,\n penultimate_coupon_date=penultimate_coupon_date)\n\n # Convert to DateTensor if the result comes from a tf.function\n coupon_dates = dateslib.convert_to_date_tensor(coupon_dates)\n\n self._batch_shape = tf.shape(coupon_dates.ordinal())[:-1]\n payment_dates = coupon_dates[..., 1:]\n\n daycount_fractions = daycount_fn(\n start_date=coupon_dates[..., :-1],\n end_date=coupon_dates[..., 1:])\n\n coupon_rate = tf.expand_dims(fixed_rate, axis=-1)\n\n self._num_cashflows = tf.shape(payment_dates.ordinal())[-1]\n self._payment_dates = payment_dates\n self._notional = notional\n self._daycount_fractions = daycount_fractions\n self._coupon_rate = coupon_rate\n self._calendar = coupon_rate\n self._fixed_rate = tf.convert_to_tensor(fixed_rate, dtype=self._dtype)\n self._daycount_fn = daycount_fn\n\n def daycount_fn(self) -> Callable[..., Any]:\n return self._daycount_fn\n\n @property\n def daycount_fractions(self) -> types.FloatTensor:\n return self._daycount_fractions\n\n @property\n def fixed_rate(self) -> types.FloatTensor:\n return self._fixed_rate\n\n @property\n def notional(self) -> types.FloatTensor:\n return self._notional\n\n @property\n def discount_curve_type(self) -> _CurveType:\n return self._discount_curve_type\n\n @property\n def batch_shape(self) -> types.StringTensor:\n return self._batch_shape\n\n @property\n def cashflow_dates(self) -> types.DateTensor:\n return self._payment_dates\n\n def cashflows(self,\n market: pmd.ProcessedMarketData,\n name: Optional[str] = None\n ) -> Tuple[types.DateTensor, types.FloatTensor]:\n \"\"\"Returns cashflows for the fixed leg.\n\n Args:\n market: An instance of `ProcessedMarketData`.\n name: Python str. The name to give to the ops created by this function.\n Default value: `None` which maps to 'cashflows'.\n\n Returns:\n A tuple of two `Tensor`s of shape `batch_shape + [num_cashflows]` and\n containing the dates and the corresponding cashflows price for each\n stream based on the input market data.\n \"\"\"\n name = name or (self._name + \"_cashflows\")\n with tf.name_scope(name):\n valuation_date = dateslib.convert_to_date_tensor(market.date)\n future_cashflows = tf.cast(self._payment_dates >= valuation_date,\n dtype=self._dtype)\n # self._notional is of shape [batch_shape], so broadcasting is needed\n notional = tf.expand_dims(self._notional, axis=-1)\n # Cashflow present values.\n cashflows = notional * (\n future_cashflows * self._daycount_fractions * self._coupon_rate)\n return self._payment_dates, cashflows\n\n def price(self,\n market: pmd.ProcessedMarketData,\n name: Optional[str] = None):\n \"\"\"Returns the present value of the stream on the valuation date.\n\n Args:\n market: An instance of `ProcessedMarketData`.\n name: Python str. 
The name to give to the ops created by this function.\n Default value: `None` which maps to 'price'.\n\n Returns:\n A `Tensor` of shape `batch_shape` containing the modeled price of each\n stream based on the input market data.\n \"\"\"\n name = name or (self._name + \"_price\")\n with tf.name_scope(name):\n discount_curve = get_discount_curve(\n self._discount_curve_type, market, self._mask)\n discount_factors = discount_curve.discount_factor(\n self._payment_dates)\n _, cashflows = self.cashflows(market)\n # Cashflow present values\n cashflow_pvs = (cashflows * discount_factors)\n return tf.math.reduce_sum(cashflow_pvs, axis=1)\n\n\nclass FloatingCashflowStream:\n \"\"\"Represents a batch of cashflows indexed to a floating rate.\"\"\"\n\n def __init__(self,\n coupon_spec: coupon_specs.FloatCouponSpecs,\n discount_curve_type: _CurveType,\n start_date: types.DateTensor = None,\n end_date: types.DateTensor = None,\n discount_curve_mask: types.IntTensor = None,\n rate_index_curves: curve_types_lib.RateIndexCurve = None,\n reference_mask: types.IntTensor = None,\n first_coupon_date: Optional[types.DateTensor] = None,\n penultimate_coupon_date: Optional[types.DateTensor] = None,\n schedule_fn: Optional[Callable[..., Any]] = None,\n schedule: Optional[types.DateTensor] = None,\n dtype: Optional[types.Dtype] = None,\n name: Optional[str] = None):\n \"\"\"Initializes a batch of floating cashflow streams.\n\n Args:\n coupon_spec: An instance of `FloatCouponSpecs` specifying the\n details of the coupon payment for the cashflow stream.\n discount_curve_type: An instance of `CurveType` or a list of those.\n If supplied as a list and `discount_curve_mask` is not supplied,\n the size of the list should be the same as the number of priced\n instruments. Defines discount curves for the instruments.\n start_date: A `DateTensor` of `batch_shape` specifying the starting dates\n of the accrual of the first coupon of the cashflow stream. The shape of\n the input correspond to the number of streams being created.\n Either this of `schedule` should be supplied.\n When passed as an integet `Tensor`, should be of shape\n `batch_shape + [3]` and contain `[year, month, day]` for each date.\n Default value: `None`\n end_date: A `DateTensor` of `batch_shape`specifying the end dates for\n accrual of the last coupon in each cashflow stream. The shape of the\n input should be the same as that of `start_date`.\n Either this of `schedule` should be supplied.\n When passed as an integet `Tensor`, should be of shape\n `batch_shape + [3]` and contain `[year, month, day]` for each date.\n Default value: `None`\n discount_curve_mask: An optional integer `Tensor` of values ranging from\n `0` to `len(discount_curve_type) - 1` and of shape `batch_shape`.\n Identifies a mapping between `discount_curve_type` list and the\n underlying instruments.\n Default value: `None`.\n rate_index_curves: An instance of `RateIndexCurve` or a list of those.\n If supplied as a list and `reference_mask` is not supplid,\n the size of the list should be the same as the number of priced\n instruments. Defines the index curves for each instrument. 
If not\n supplied, `coupon_spec.floating_rate_type` is used to identify the\n curves.\n Default value: `None`.\n reference_mask: An optional integer `Tensor` of values ranging from\n `0` to `len(rate_index_curves) - 1` and of shape `batch_shape`.\n Identifies a mapping between `rate_index_curves` list and the underlying\n instruments.\n Default value: `None`.\n first_coupon_date: An optional `DateTensor` specifying the payment dates\n of the first coupon of the cashflow stream. Use this input for cashflows\n with irregular first coupon. Should be of the same shape as\n `start_date`.\n When passed as an integet `Tensor`, should be of shape\n `batch_shape + [3]` and contain `[year, month, day]` for each date.\n Default value: None which implies regular first coupon.\n penultimate_coupon_date: An optional `DateTensor` specifying the payment\n dates of the penultimate (next to last) coupon of the cashflow\n stream. Use this input for cashflows with irregular last coupon.\n Should be of the same shape as `end_date`.\n When passed as an integet `Tensor`, should be of shape\n `batch_shape + [3]` and contain `[year, month, day]` for each date.\n Default value: None which implies regular last coupon.\n schedule_fn: A callable that accepts `start_date`, `end_date`,\n `coupon_frequency`, `settlement_days`, `first_coupon_date`, and\n `penultimate_coupon_date` as `Tensor`s and returns coupon payment\n days.\n Default value: `None`.\n schedule: A `DateTensor` of coupon payment dates including the start and\n end dates of the cashflows.\n Default value: `None`.\n dtype: `tf.Dtype` of the input and output real `Tensor`s.\n Default value: None which maps to the default dtype inferred by\n TensorFlow.\n name: Python str. The name to give to the ops created by this class.\n Default value: `None` which maps to 'floating_cashflow_stream'.\n \"\"\"\n\n self._name = name or \"floating_cashflow_stream\"\n with tf.name_scope(self._name):\n curve_list = to_list(discount_curve_type)\n [\n self._discount_curve_type,\n self._mask\n ] = process_curve_types(curve_list, discount_curve_mask)\n self._first_coupon_date = None\n self._penultimate_coupon_date = None\n if schedule is None:\n if (start_date is None) or (end_date is None):\n raise ValueError(\"If `schedule` is not supplied both \"\n \"`start_date` and `end_date` should be supplied\")\n\n if schedule is None:\n if isinstance(start_date, tf.Tensor):\n self._start_date = dateslib.dates_from_tensor(\n start_date)\n else:\n self._start_date = dateslib.convert_to_date_tensor(\n start_date)\n if isinstance(start_date, tf.Tensor):\n self._end_date = dateslib.dates_from_tensor(\n end_date)\n else:\n self._end_date = dateslib.convert_to_date_tensor(\n end_date)\n self._first_coupon_date = first_coupon_date\n self._penultimate_coupon_date = penultimate_coupon_date\n if self._first_coupon_date is not None:\n if isinstance(start_date, tf.Tensor):\n self._first_coupon_date = dateslib.dates_from_tensor(\n first_coupon_date)\n else:\n self._first_coupon_date = dateslib.convert_to_date_tensor(\n first_coupon_date)\n if self._penultimate_coupon_date is not None:\n if isinstance(start_date, tf.Tensor):\n self._penultimate_coupon_date = dateslib.dates_from_tensor(\n penultimate_coupon_date)\n else:\n self._penultimate_coupon_date = dateslib.convert_to_date_tensor(\n penultimate_coupon_date)\n # Ignored and weekends only is used\n calendar = dateslib.create_holiday_calendar(\n weekend_mask=dateslib.WeekendMask.SATURDAY_SUNDAY)\n # Convert coupon and reset frequencies to 
PeriodTensor\n coupon_frequency = _get_attr(coupon_spec, \"coupon_frequency\")\n # Update coupon frequency\n if isinstance(coupon_frequency, period_pb2.Period):\n coupon_frequency = market_data_utils.get_period(\n _get_attr(coupon_spec, \"coupon_frequency\"))\n if isinstance(coupon_frequency, (list, tuple)):\n coupon_frequency = market_data_utils.period_from_list(\n *_get_attr(coupon_spec, \"coupon_frequency\"))\n if isinstance(coupon_frequency, dict):\n coupon_frequency = market_data_utils.period_from_dict(\n _get_attr(coupon_spec, \"coupon_frequency\"))\n # Update reset frequency\n reset_frequency = _get_attr(coupon_spec, \"reset_frequency\")\n if isinstance(reset_frequency, period_pb2.Period):\n reset_frequency = market_data_utils.get_period(\n _get_attr(coupon_spec, \"reset_frequency\"))\n if isinstance(reset_frequency, (list, tuple)):\n reset_frequency = market_data_utils.period_from_list(\n *_get_attr(coupon_spec, \"reset_frequency\"))\n if isinstance(reset_frequency, dict):\n reset_frequency = market_data_utils.period_from_dict(\n _get_attr(coupon_spec, \"reset_frequency\"))\n self._reset_frequency = reset_frequency\n businessday_rule = _get_attr(coupon_spec, \"businessday_rule\")\n roll_convention, eom = market_data_utils.get_business_day_convention(\n businessday_rule)\n notional = tf.convert_to_tensor(\n _get_attr(coupon_spec, \"notional_amount\"),\n dtype=dtype,\n name=\"notional\")\n self._dtype = dtype or notional.dtype\n\n daycount_convention = _get_attr(coupon_spec, \"daycount_convention\")\n\n daycount_fn = market_data_utils.get_daycount_fn(\n _get_attr(coupon_spec, \"daycount_convention\"), self._dtype)\n self._daycount_convention = daycount_convention\n\n self._settlement_days = tf.convert_to_tensor(\n _get_attr(coupon_spec, \"settlement_days\"),\n dtype=tf.int32,\n name=\"settlement_days\")\n spread = tf.convert_to_tensor(_get_attr(coupon_spec, \"spread\"),\n dtype=self._dtype,\n name=\"spread\")\n if schedule is not None:\n if isinstance(start_date, tf.Tensor):\n coupon_dates = dateslib.dates_from_tensor(schedule)\n else:\n coupon_dates = dateslib.convert_to_date_tensor(schedule)\n # Extract starting date for the cashflow\n self._start_date = coupon_dates[..., 0]\n elif schedule_fn is None:\n coupon_dates = _generate_schedule(\n start_date=self._start_date,\n end_date=self._end_date,\n coupon_frequency=coupon_frequency,\n roll_convention=roll_convention,\n calendar=calendar,\n settlement_days=self._settlement_days,\n end_of_month=eom,\n first_coupon_date=self._first_coupon_date,\n penultimate_coupon_date=self._penultimate_coupon_date)\n # Extract starting date for the cashflow\n self._start_date = coupon_dates[..., 0]\n else:\n if first_coupon_date is not None:\n first_coupon_date = self._first_coupon_date.to_tensor()\n if penultimate_coupon_date is not None:\n penultimate_coupon_date = self._penultimate_coupon_date.to_tensor()\n coupon_dates = schedule_fn(\n start_date=self._start_date.to_tensor(),\n end_date=self._end_date.to_tensor(),\n coupon_frequency=coupon_frequency.quantity(),\n settlement_days=self._settlement_days,\n first_coupon_date=first_coupon_date,\n penultimate_coupon_date=penultimate_coupon_date)\n # Convert to DateTensor if the result comes from a tf.function\n coupon_dates = dateslib.convert_to_date_tensor(coupon_dates)\n # Extract batch shape\n self._batch_shape = tf.shape(coupon_dates.ordinal())[:-1]\n\n accrual_start_dates = coupon_dates[..., :-1]\n\n coupon_start_dates = coupon_dates[..., :-1]\n coupon_end_dates = coupon_dates[..., 1:]\n\n 
accrual_end_dates = accrual_start_dates + reset_frequency.expand_dims(\n axis=-1)\n\n # Adjust for irregular coupons\n accrual_end_dates = dateslib.DateTensor.concat(\n [coupon_end_dates[..., :1],\n accrual_end_dates[..., 1:-1],\n coupon_end_dates[..., -1:]], axis=-1)\n daycount_fractions = daycount_fn(\n start_date=coupon_start_dates,\n end_date=coupon_end_dates)\n\n self._num_cashflows = tf.shape(daycount_fractions)[-1]\n self._coupon_start_dates = coupon_start_dates\n self._coupon_end_dates = coupon_end_dates\n self._accrual_start_date = accrual_start_dates\n self._accrual_end_date = accrual_end_dates\n self._notional = notional\n self._daycount_fractions = daycount_fractions\n self._spread = spread\n self._currency = _get_attr(coupon_spec, \"currency\")\n self._daycount_fn = daycount_fn\n # Construct the reference curve object\n # Extract all rate_curves\n self._floating_rate_type = to_list(\n _get_attr(coupon_spec, \"floating_rate_type\"))\n self._currency = to_list(self._currency)\n if rate_index_curves is None:\n rate_index_curves = []\n for currency, floating_rate_type in zip(self._currency,\n self._floating_rate_type):\n rate_index_curves.append(curve_types_lib.RateIndexCurve(\n currency=currency, index=floating_rate_type))\n [\n self._reference_curve_type,\n self._reference_mask\n ] = process_curve_types(rate_index_curves, reference_mask)\n\n def daycount_fn(self) -> Callable[..., Any]:\n return self._daycount_fn\n\n @property\n def notional(self) -> types.FloatTensor:\n return self._notional\n\n @property\n def discount_curve_type(self) -> _CurveType:\n return self._discount_curve_type\n\n @property\n def reference_curve_type(self) -> _CurveType:\n return self._reference_curve_type\n\n @property\n def batch_shape(self) -> types.StringTensor:\n return self._batch_shape\n\n @property\n def daycount_fractions(self) -> types.FloatTensor:\n return self._daycount_fractions\n\n @property\n def cashflow_dates(self) -> types.DateTensor:\n return self._coupon_end_dates\n\n @property\n def coupon_start_dates(self) -> types.DateTensor:\n return self._coupon_start_dates\n\n @property\n def coupon_end_dates(self) -> types.DateTensor:\n return self._coupon_end_dates\n\n def forward_rates(self,\n market: pmd.ProcessedMarketData,\n name: Optional[str] = None\n ) -> Tuple[types.DateTensor, types.FloatTensor]:\n \"\"\"Returns forward rates for the floating leg.\n\n Args:\n market: An instance of `ProcessedMarketData`.\n name: Python str. 
The name to give to the ops created by this function.\n Default value: `None` which maps to 'forward_rates'.\n\n Returns:\n A tuple of two `Tensor`s of shape `batch_shape + [num_cashflows]`\n containing the dates and the corresponding forward rates for each stream\n based on the input market data.\n \"\"\"\n name = name or (self._name + \"_forward_rates\")\n with tf.name_scope(name):\n reference_curve = get_discount_curve(\n self._reference_curve_type, market, self._reference_mask)\n valuation_date = dateslib.convert_to_date_tensor(market.date)\n\n # Previous fixing date\n coupon_start_date_ord = self._coupon_start_dates.ordinal()\n coupon_end_date_ord = self._coupon_end_dates.ordinal()\n valuation_date_ord = valuation_date.ordinal()\n batch_shape = tf.shape(coupon_start_date_ord)[:-1]\n # Broadcast valuation date batch shape for tf.searchsorted\n valuation_date_ord += tf.expand_dims(\n tf.zeros(batch_shape, dtype=tf.int32), axis=-1)\n ind = tf.maximum(tf.searchsorted(coupon_start_date_ord,\n valuation_date_ord) - 1, 0)\n # Fixings are assumed to be the same as coupon start dates\n # TODO(b/177047910): add fixing settlement dates.\n # Shape `batch_shape + [1]`\n fixing_dates_ord = tf.gather(\n coupon_start_date_ord, ind,\n batch_dims=len(coupon_start_date_ord.shape) - 1)\n fixing_end_dates_ord = tf.gather(\n coupon_end_date_ord, ind,\n batch_dims=len(coupon_start_date_ord.shape) - 1)\n fixing_dates = dateslib.dates_from_ordinals(fixing_dates_ord)\n fixing_end_dates = dateslib.dates_from_ordinals(fixing_end_dates_ord)\n # Get fixings. Shape batch_shape + [1]\n past_fixing = _get_fixings(\n fixing_dates,\n fixing_end_dates,\n self._reference_curve_type,\n self._reference_mask,\n market)\n print(\"past_fixing: \", past_fixing)\n forward_rates = reference_curve.forward_rate(\n self._accrual_start_date,\n self._accrual_end_date,\n day_count_fraction=self._daycount_fractions)\n # Shape batch_shape + [num_cashflows]\n forward_rates = tf.where(self._daycount_fractions > 0., forward_rates,\n tf.zeros_like(forward_rates))\n # If coupon end date is before the valuation date, the payment is in the\n # past. If valuation date is between coupon start date and coupon end\n # date, then the rate has been fixed but not paid. Otherwise the rate is\n # not fixed and should be read from the curve.\n # Shape batch_shape + [num_cashflows]\n forward_rates = tf.where(\n self._coupon_end_dates < valuation_date,\n tf.constant(0, dtype=self._dtype),\n tf.where(self._coupon_start_dates >= valuation_date,\n forward_rates, past_fixing))\n return self._coupon_end_dates, forward_rates\n\n def cashflows(self,\n market: pmd.ProcessedMarketData,\n name: Optional[str] = None\n ) -> Tuple[types.DateTensor, types.FloatTensor]:\n \"\"\"Returns cashflows for the floating leg.\n\n Args:\n market: An instance of `ProcessedMarketData`.\n name: Python str. 
The name to give to the ops created by this function.\n Default value: `None` which maps to 'cashflows'.\n\n Returns:\n A tuple of two `Tensor`s of shape `batch_shape + [num_cashflows]` and\n containing the dates and the corresponding cashflows price for each\n stream based on the input market data.\n \"\"\"\n name = name or (self._name + \"_cashflows\")\n with tf.name_scope(name):\n _, forward_rates = self.forward_rates(market)\n\n coupon_rate = forward_rates + tf.expand_dims(\n self._spread, axis=-1)\n # self._notion is of shape [batch_shape], so broadcasting is needed\n notional = tf.expand_dims(self._notional, axis=-1)\n\n cashflows = notional * (\n self._daycount_fractions * coupon_rate)\n return self._coupon_end_dates, cashflows\n\n def price(self,\n market: pmd.ProcessedMarketData,\n name: Optional[str] = None) -> types.FloatTensor:\n \"\"\"Returns the present value of the stream on the valuation date.\n\n Args:\n market: An instance of `ProcessedMarketData`.\n name: Python str. The name to give to the ops created by this function.\n Default value: `None` which maps to 'price'.\n\n Returns:\n A `Tensor` of shape `batch_shape` containing the modeled price of each\n stream based on the input market data.\n \"\"\"\n\n name = name or (self._name + \"_price\")\n with tf.name_scope(name):\n discount_curve = get_discount_curve(\n self._discount_curve_type, market, self._mask)\n discount_factors = discount_curve.discount_factor(self._coupon_end_dates)\n _, cashflows = self.cashflows(market)\n # Cashflows present values\n cashflow_pvs = cashflows * discount_factors\n return tf.math.reduce_sum(cashflow_pvs, axis=1)\n\n\ndef _generate_schedule(\n start_date: dateslib.DateTensor,\n end_date: dateslib.DateTensor,\n coupon_frequency: dateslib.PeriodTensor,\n calendar: dateslib.HolidayCalendar,\n roll_convention: dateslib.BusinessDayConvention,\n settlement_days: tf.Tensor,\n end_of_month: bool = False,\n first_coupon_date: Optional[dateslib.DateTensor] = None,\n penultimate_coupon_date: Optional[dateslib.DateTensor] = None) -> tf.Tensor:\n \"\"\"Method to generate coupon dates.\n\n Args:\n start_date: Starting dates of schedule.\n end_date: End dates of the schedule.\n coupon_frequency: A `PeriodTensor` specifying the frequency of coupon\n payments.\n calendar: calendar: An instance of `BankHolidays`.\n roll_convention: Business day roll convention of the schedule.\n settlement_days: An integer `Tensor` with the shape compatible with\n `start_date` and `end_date` specifying the number of settlement days.\n end_of_month: Python `bool`. If `True`, shifts all dates in schedule to\n the ends of corresponding months, if `start_date` or `end_date` (\n depending on `backward`) is at the end of a month. The shift is applied\n before applying `roll_convention`.\n first_coupon_date: First day of the irregular coupon, if any.\n penultimate_coupon_date: Penultimate day of the coupon, if any.\n\n Returns:\n A `DateTensor` containing the generated date schedule of shape\n `batch_shape + [max_num_coupon_days]`, where `max_num_coupon_days` is the\n number of coupon days for the longest living swap in the batch. 
The coupon\n days for the rest of the swaps are padded with their final coupon day.\n \"\"\"\n if first_coupon_date is not None and penultimate_coupon_date is not None:\n raise ValueError(\"Only first or last coupon dates can be specified \"\n \" for an irregular coupon.\")\n start_date = first_coupon_date or start_date\n # Adjust with settlement days\n start_date = calendar.add_business_days(\n start_date, settlement_days,\n roll_convention=roll_convention)\n if penultimate_coupon_date is None:\n backward = False\n else:\n backward = True\n end_date = end_date or penultimate_coupon_date\n # Adjust with settlement days\n end_date = calendar.add_business_days(\n end_date, settlement_days,\n roll_convention=roll_convention)\n coupon_dates = dateslib.PeriodicSchedule(\n start_date=start_date,\n end_date=end_date,\n tenor=coupon_frequency,\n roll_convention=roll_convention,\n backward=backward,\n end_of_month=end_of_month).dates()\n # Add the regular coupons\n coupon_dates = dateslib.DateTensor.concat(\n [start_date.expand_dims(-1),\n coupon_dates,\n end_date.expand_dims(-1)], axis=-1)\n return coupon_dates\n\n\ndef get_discount_curve(\n discount_curve_types: List[Union[curve_types_lib.RiskFreeCurve,\n curve_types_lib.RateIndexCurve]],\n market: pmd.ProcessedMarketData,\n mask: List[int]) -> rate_curve.RateCurve:\n \"\"\"Builds a batched discount curve.\n\n Given a list of discount curve an integer mask, creates a discount curve\n object to compute discount factors against the list of discount curves.\n\n #### Example\n ```none\n curve_types = [RiskFreeCurve(\"USD\"), RiskFreeCurve(\"AUD\")]\n # A mask to price a batch of 7 instruments with the corresponding discount\n # curves [\"USD\", \"AUD\", \"AUD\", \"AUD\" \"USD\", \"USD\", \"AUD\"].\n mask = [0, 1, 1, 1, 0, 0, 1]\n market = MarketDataDict(...)\n get_discount_curve(curve_types, market, mask)\n # Returns a RateCurve object that can compute a discount factors for a\n # batch of 7 dates.\n ```\n\n Args:\n discount_curve_types: A list of curve types.\n market: an instance of the processed market data.\n mask: An integer mask.\n\n Returns:\n An instance of `RateCurve`.\n \"\"\"\n discount_curves = [market.yield_curve(curve_type)\n for curve_type in discount_curve_types]\n discounts = []\n dates = []\n interpolation_method = None\n interpolate_rates = None\n for curve in discount_curves:\n discount, date = curve.discount_factors_and_dates()\n discounts.append(discount)\n dates.append(date)\n interpolation_method = curve.interpolation_method\n interpolate_rates = curve.interpolate_rates\n\n all_discounts = tf.stack(pad.pad_tensors(discounts), axis=0)\n all_dates = pad.pad_date_tensors(dates)\n all_dates = dateslib.DateTensor.stack(dates, axis=0)\n prepare_discounts = tf.gather(all_discounts, mask)\n prepare_dates = dateslib.dates_from_ordinals(\n tf.gather(all_dates.ordinal(), mask))\n # All curves are assumed to have the same interpolation method\n # TODO(b/168411153): Extend to the case with multiple curve configs.\n discount_curve = rate_curve.RateCurve(\n prepare_dates, prepare_discounts, market.date,\n interpolator=interpolation_method,\n interpolate_rates=interpolate_rates)\n return discount_curve\n\n\ndef _get_fixings(start_dates,\n end_dates,\n reference_curve_types,\n reference_mask,\n market):\n \"\"\"Computes fixings for a list of reference curves.\"\"\"\n num_curves = len(reference_curve_types)\n if num_curves > 1:\n # For each curve get corresponding cashflow indices\n split_indices = [tf.squeeze(tf.where(tf.equal(reference_mask, 
i)), -1)\n for i in range(num_curves)]\n else:\n split_indices = [0]\n fixings = []\n start_dates_ordinal = start_dates.ordinal()\n end_dates_ordinal = end_dates.ordinal()\n for idx, reference_curve_type in zip(split_indices, reference_curve_types):\n if num_curves > 1:\n # Get all dates corresponding to the reference curve\n start_date = dateslib.dates_from_ordinals(\n tf.gather(start_dates_ordinal, idx))\n end_date = dateslib.dates_from_ordinals(\n tf.gather(end_dates_ordinal, idx))\n else:\n start_date = start_dates\n end_date = end_dates\n fixing, fixing_daycount = market.fixings(start_date, reference_curve_type)\n if fixing_daycount is not None:\n fixing_daycount = market_data_utils.get_daycount_fn(\n fixing_daycount, dtype=market.dtype)\n year_fraction = fixing_daycount(start_date=start_date, end_date=end_date)\n else:\n year_fraction = 0.0\n fixings.append(\n fixing * year_fraction)\n fixings = pad.pad_tensors(fixings)\n all_indices = tf.concat(split_indices, axis=0)\n all_fixings = tf.concat(fixings, axis=0)\n if num_curves > 1:\n return tf.gather(all_fixings, tf.argsort(all_indices))\n else:\n return all_fixings\n\n\ndef process_curve_types(\n curve_types: List[Union[curve_types_lib.RiskFreeCurve,\n curve_types_lib.RateIndexCurve]],\n mask=None\n ) -> Tuple[\n List[Union[curve_types_lib.RiskFreeCurve,\n curve_types_lib.RateIndexCurve]],\n List[int]]:\n \"\"\"Extracts unique curves and computes an integer mask.\n\n #### Example\n ```python\n curve_types = [RiskFreeCurve(\"USD\"), RiskFreeCurve(\"AUD\"),\n RiskFreeCurve(\"USD\")]\n process_curve_types(curve_types)\n # Returns [RiskFreeCurve(\"AUD\"), RiskFreeCurve(\"USD\")], [1, 0, 1]\n ```\n Args:\n curve_types: A list of either `RiskFreeCurve` or `RateIndexCurve`.\n mask: An optional integer mask for the sorted curve type sequence. 
If\n supplied, the function returns does not do anything and returns\n `(curve_types, mask)`.\n\n Returns:\n A Tuple of `(curve_list, mask)` where `curve_list` is a list of unique\n curves in `curve_types` and `mask` is a list of integers which is the\n mask for `curve_types`.\n \"\"\"\n def _get_signature(curve):\n \"\"\"Converts curve infromation to a string.\"\"\"\n if isinstance(curve, curve_types_lib.RiskFreeCurve):\n return curve.currency.value\n elif isinstance(curve, curve_types_lib.RateIndexCurve):\n return (curve.currency.value + \"_\" + curve.index.type.value\n + \"_\" + \"_\".join(curve.index.source)\n + \"_\" + \"_\".join(curve.index.name))\n else:\n raise ValueError(f\"{type(curve)} is not supported.\")\n curve_list = to_list(curve_types)\n if mask is not None:\n return curve_list, mask\n curve_hash = [_get_signature(curve_type) for curve_type in curve_list]\n hash_discount_map = {\n _get_signature(curve_type): curve_type for curve_type in curve_list}\n mask, mask_map, num_unique_discounts = create_mask(curve_hash)\n discount_curve_types = [\n hash_discount_map[mask_map[i]]\n for i in range(num_unique_discounts)]\n return discount_curve_types, mask\n\n\ndef create_mask(x):\n \"\"\"Given a list of object creates integer mask for unique values in the list.\n\n Args:\n x: 1-d numpy array.\n\n Returns:\n A tuple of three objects:\n * A list of integers that is the mask for `x`,\n * A dictionary map between entries of `x` and the list\n * The number of unique elements.\n \"\"\"\n # For example, create_mask([\"USD\", \"AUD\", \"USD\"]) returns\n # a list [1, 0, 1], a map {0: \"AUD\", 1: \"USD\"} and the number of unique\n # elements which is 2.\n # Note that elements of `x` are being sorted\n unique = np.unique(x)\n num_unique_elems = len(unique)\n keys = range(num_unique_elems)\n d = dict(zip(unique, keys))\n mask_map = dict(zip(keys, unique))\n return [d[el] for el in x], mask_map, num_unique_elems\n\n\ndef to_list(x):\n \"\"\"Converts input to a list if necessary.\"\"\"\n if isinstance(x, (list, tuple)):\n return x\n else:\n return [x]\n\n\ndef _get_attr(obj, key):\n if isinstance(obj, dict):\n return obj[key]\n else:\n return obj.__getattribute__(key)\n\n\n__all__ = [\"FixedCashflowStream\", \"FloatingCashflowStream\"]\n" ]
[ [ "tensorflow.compat.v2.zeros_like", "tensorflow.compat.v2.argsort", "tensorflow.compat.v2.searchsorted", "tensorflow.compat.v2.equal", "numpy.unique", "tensorflow.compat.v2.concat", "tensorflow.compat.v2.name_scope", "tensorflow.compat.v2.cast", "tensorflow.compat.v2.where", "tensorflow.compat.v2.math.reduce_sum", "tensorflow.compat.v2.shape", "tensorflow.compat.v2.convert_to_tensor", "tensorflow.compat.v2.expand_dims", "tensorflow.compat.v2.gather", "tensorflow.compat.v2.zeros", "tensorflow.compat.v2.constant" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
eric8607242/OSNASLib
[ "2e758a9e5d9e03eecb9c4cc0e2e6a8ec38cf7052" ]
[ "search_space/sgnas/sgnas_model.py" ]
[ "import numpy as np\n\nimport torch.nn as nn\n\nfrom ..base_model import BaseModel\nfrom ..block_builder import get_block\n\nclass SGNASModel(BaseModel):\n def _construct_stage_layers(self, architecture, bn_momentum, bn_track_running_stats, *args, **kwargs):\n \"\"\" Construct searched layers in entire search stage.\n\n Return:\n stages (nn.Sequential)\n \"\"\"\n split_architecture = np.split(architecture, len(self.macro_cfg[\"search\"]))\n\n stages = []\n for l_cfg, block_idxs in zip(self.macro_cfg[\"search\"], split_architecture):\n in_channels, out_channels, stride = l_cfg\n\n kernel_size_list = []\n for block_idx in block_idxs:\n block_type, kernel_size, se, activation, kwargs = self.micro_cfg[block_idx]\n if kernel_size == 0:\n continue\n kernel_size_list.append(kernel_size)\n\n layer = get_block(block_type=\"mixconv\",\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n stride=stride,\n activation=\"relu\",\n se=False,\n bn_momentum=bn_momentum,\n bn_track_running_stats=bn_track_running_stats,\n **{\"kernel_size_list\": kernel_size_list,\n \"expansion_rate\": len(kernel_size_list)})\n stages.append(layer)\n\n stages = nn.Sequential(*stages)\n return stages\n\n\n" ]
[ [ "torch.nn.Sequential" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dataiku/dss-plugin-decision-tree-builder
[ "5bc53e8331e8f2e94b5e0c52fd720ebf6ea499f1" ]
[ "python-tests/test_score.py" ]
[ "import pandas as pd\nfrom dku_idtb_scoring.score import add_scoring_columns, get_scored_df_schema\nfrom dku_idtb_decision_tree.tree import ScoringTree\nfrom pytest import raises\n\nnodes = {\n\t\"0\": {\n\t\t\"id\": 0,\n\t\t\"parent_id\": -1,\n\t\t\"treated_as_numerical\": {\"num\": None},\n\t\t\"feature\": None,\n\t\t\"prediction\": \"A\",\n\t\t\"children_ids\": [1,2],\n\t\t\"probabilities\": [[\"A\", 0.664], [\"B\", 0.336]],\n\t\t\"samples\": 500,\n\t\t\"label\": None\n\t},\n\t\"1\": {\n\t\t\"id\": 1,\n\t\t\"parent_id\": 0,\n\t\t\"treated_as_numerical\": {\"num\": None},\n\t\t\"feature\": \"num\",\n\t\t\"end\": 4,\n\t\t\"prediction\": \"A\",\n\t\t\"children_ids\": [],\n\t\t\"probabilities\": [[\"A\", 0.800], [\"B\", 0.200]],\n\t\t\"samples\": 300,\n\t\t\"label\": \"hello there\"\n\t},\n\t\"2\": {\n\t\t\"id\": 2,\n\t\t\"parent_id\": 0,\n\t\t\"treated_as_numerical\": {\"num\": None},\n\t\t\"feature\": \"num\",\n\t\t\"beginning\": 4,\n\t\t\"prediction\": \"A\",\n\t\t\"children_ids\": [3,4],\n\t\t\"probabilities\": [[\"A\", 0.800], [\"B\", 0.200]],\n\t\t\"samples\": 200,\n\t\t\"label\": None\n\t},\n\t\"3\": {\n\t\t\"id\": 3,\n\t\t\"parent_id\": 2,\n\t\t\"treated_as_numerical\": {\"num\": None},\n\t\t\"feature\": \"cat\",\n\t\t\"values\": [\"u\",\"v\"],\n\t\t\"prediction\": \"B\",\n\t\t\"others\": False,\n\t\t\"children_ids\": [],\n\t\t\"probabilities\": [[\"B\", 0.75], [\"A\", 0.25]],\n\t\t\"samples\": 0,\n\t\t\"label\": None\n\t},\n\t\"4\": {\n\t\t\"id\": 4,\n\t\t\"parent_id\": 2,\n\t\t\"treated_as_numerical\": {\"num\": None},\n\t\t\"feature\": \"cat\",\n\t\t\"values\": [\"u\", \"v\"],\n\t\t\"others\": True,\n\t\t\"prediction\": None,\n\t\t\"children_ids\": [],\n\t\t\"probabilities\": None,\n\t\t\"samples\": 200,\n\t\t\"label\": \"general Kenobi\"\n\t}\n}\nfeatures = {\"num\": {\"nr_uses\": 1, \"mean\": 3.5}, \"cat\": {\"nr_uses\": 1}}\ntree = ScoringTree(\"target\", [\"A\", \"B\"], nodes, features)\n\ndef get_input_df():\n\treturn pd.DataFrame([[.2, \"u\", \"A\"],\n\t\t\t\t\t\t[7, pd.np.nan, \"B\"],\n\t\t\t\t\t\t[4, \"u\", \"A\"],\n\t\t\t\t\t\t[3, \"v\", \"A\"],\n\t\t\t\t\t\t[pd.np.nan, \"u\", \"C\"]], columns=(\"num\", \"cat\", \"target\"))\n\ndef test_score():\n\tdf = get_input_df()\n\tadd_scoring_columns(tree, df, True)\n\texpected_df = pd.DataFrame([[.2, \"u\", \"A\", .8, .2, \"A\", \"hello there\"],\n\t\t\t\t\t\t\t\t[7, pd.np.nan, \"B\", pd.np.nan, pd.np.nan, pd.np.nan, \"general Kenobi\"],\n\t\t\t\t\t\t\t\t[4, \"u\", \"A\", .25, .75, \"B\", None],\n\t\t\t\t\t\t\t\t[3, \"v\", \"A\", .8, .2, \"A\", \"hello there\"],\n\t\t\t\t\t\t\t\t[pd.np.nan, \"u\", \"C\", .8, .2, \"A\", \"hello there\"]], columns=(\"num\", \"cat\", \"target\", \"proba_A\", \"proba_B\", \"prediction\", \"label\"))\n\tassert df.equals(expected_df)\n\n\tdf = get_input_df()\n\tadd_scoring_columns(tree, df, False, True, False)\n\texpected_df = pd.DataFrame([[.2, \"u\", \"A\", \"A\", \"hello there\"],\n\t\t\t\t\t\t\t\t[7, pd.np.nan, \"B\", pd.np.nan, \"general Kenobi\"],\n\t\t\t\t\t\t\t\t[4, \"u\", \"A\", \"B\", None],\n\t\t\t\t\t\t\t\t[3, \"v\", \"A\", \"A\", \"hello there\"],\n\t\t\t\t\t\t\t\t[pd.np.nan, \"u\", \"C\", pd.np.nan, \"hello there\"]], columns=(\"num\", \"cat\", \"target\", \"prediction\", \"label\"))\n\tassert df.equals(expected_df)\n\n\tdf = get_input_df()\n\tadd_scoring_columns(tree, df, False, True, True)\n\texpected_df = pd.DataFrame([[.2, \"u\", \"A\", \"A\", True, \"hello there\"],\n\t\t\t\t\t\t\t\t[7, pd.np.nan, \"B\", pd.np.nan, pd.np.nan, \"general Kenobi\"],\n\t\t\t\t\t\t\t\t[4, \"u\", \"A\", 
\"B\", False, None],\n\t\t\t\t\t\t\t\t[3, \"v\", \"A\", \"A\", True, \"hello there\"],\n\t\t\t\t\t\t\t\t[pd.np.nan, \"u\", \"C\", pd.np.nan, pd.np.nan, \"hello there\"]], columns=(\"num\", \"cat\", \"target\", \"prediction\", \"prediction_correct\", \"label\"))\n\tassert df.equals(expected_df)\n\ndef get_input_schema():\n\treturn [{\"type\": \"double\", \"name\": \"num\"}, {\"type\": \"string\", \"name\": \"cat\"}, {\"type\": \"string\", \"name\": \"target\"}]\n\ndef test_scored_df_schema():\n\tschema = get_scored_df_schema(tree, get_input_schema(), None, True)\n\tassert schema == [{\"type\": \"double\", \"name\": \"num\"}, {\"type\": \"string\", \"name\": \"cat\"}, {\"type\": \"string\", \"name\": \"target\"},\n\t\t\t\t\t{\"type\": \"double\", \"name\": \"proba_A\"}, {\"type\": \"double\", \"name\": \"proba_B\"}, {\"type\": \"string\", \"name\": \"prediction\"}, {\"type\": \"string\", \"name\": \"label\"}]\n\tcolumns = []\n\tschema = get_scored_df_schema(tree, get_input_schema(), columns, False, True, False)\n\tassert schema == [{\"type\": \"string\", \"name\": \"prediction\"}, {\"type\": \"string\", \"name\": \"label\"}]\n\tassert columns == [\"prediction\", \"label\"]\n\n\tcolumns = [\"num\"]\n\tschema = get_scored_df_schema(tree, get_input_schema(), columns, False, True, True)\n\tassert schema == [{\"type\": \"double\", \"name\": \"num\"}, {\"type\": \"string\", \"name\": \"prediction\"}, {\"type\": \"boolean\", \"name\": \"prediction_correct\"}, {\"type\": \"string\", \"name\": \"label\"}]\n\tassert columns == [\"num\", \"prediction\", \"prediction_correct\", \"label\"]\n\n\tschema_missing_feature = [{\"type\": \"double\", \"name\": \"num\"}, {\"type\": \"string\", \"name\": \"target\"}]\n\tschema_missing_target = [{\"type\": \"double\", \"name\": \"num\"}, {\"type\": \"string\", \"name\": \"cat\"}]\n\twith raises(ValueError) as e:\n\t\tget_scored_df_schema(tree, schema_missing_feature, None, False)\n\t\tassert e.args[0] == \"The column cat is missing in the input dataset\"\n\twith raises(ValueError) as e:\n\t\tget_scored_df_schema(tree, schema_missing_target, None, False, True, True)\n\t\tassert e.args[0] == \"The target target is missing in the input dataset\"\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
hajiejue/FP-Code
[ "d144336cfd8e70b289a673567f727b9c9abbf9f5" ]
[ "segan/models/generator.py" ]
[ "import torch\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport torch.nn.utils as nnu\nimport torch.nn as nn\nimport random\nimport numpy as np\ntry:\n from core import *\n from modules import *\nexcept ImportError:\n from .core import *\n from .modules import *\n\n# BEWARE: PyTorch >= 0.4.1 REQUIRED\nfrom torch.nn.utils.spectral_norm import spectral_norm\n\nclass GSkip(nn.Module):\n\n def __init__(self, skip_type, size, skip_init, skip_dropout=0,\n merge_mode='sum', kwidth=11, bias=True):\n # skip_init only applies to alpha skips\n super().__init__()\n self.merge_mode = merge_mode\n if skip_type == 'alpha' or skip_type == 'constant':\n if skip_init == 'zero':\n alpha_ = torch.zeros(size)\n elif skip_init == 'randn':\n alpha_ = torch.randn(size)\n elif skip_init == 'one':\n alpha_ = torch.ones(size)\n else:\n raise TypeError('Unrecognized alpha init scheme: ', \n skip_init)\n #if cuda:\n # alpha_ = alpha_.cuda()\n if skip_type == 'alpha':\n self.skip_k = nn.Parameter(alpha_.view(1, -1, 1))\n else:\n # constant, not learnable\n self.skip_k = nn.Parameter(alpha_.view(1, -1, 1))\n self.skip_k.requires_grad = False\n elif skip_type == 'conv':\n if kwidth > 1:\n pad = kwidth // 2\n else:\n pad = 0\n self.skip_k = nn.Conv1d(size, size, kwidth, stride=1,\n padding=pad, bias=bias)\n else:\n raise TypeError('Unrecognized GSkip scheme: ', skip_type)\n self.skip_type = skip_type\n if skip_dropout > 0:\n self.skip_dropout = nn.Dropout(skip_dropout)\n\n def __repr__(self):\n if self.skip_type == 'alpha':\n return self._get_name() + '(Alpha(1))'\n elif self.skip_type == 'constant':\n return self._get_name() + '(Constant(1))'\n else:\n return super().__repr__()\n\n def forward(self, hj, hi):\n if self.skip_type == 'conv':\n sk_h = self.skip_k(hj)\n else:\n skip_k = self.skip_k.repeat(hj.size(0), 1, hj.size(2))\n sk_h = skip_k * hj\n if hasattr(self, 'skip_dropout'):\n sk_h = self.skip_dropout(sk_h)\n if self.merge_mode == 'sum':\n # merge with input hi on current layer\n return sk_h + hi\n elif self.merge_mode == 'concat':\n return torch.cat((hi, sk_h), dim=1)\n else:\n raise TypeError('Unrecognized skip merge mode: ', self.merge_mode)\n\nclass Generator(Model):\n\n def __init__(self, ninputs, fmaps,\n kwidth, poolings, \n dec_fmaps=None,\n dec_kwidth=None,\n dec_poolings=None,\n z_dim=None,\n no_z=False,\n skip=True,\n bias=False,\n skip_init='one',\n skip_dropout=0,\n skip_type='alpha',\n norm_type=None,\n skip_merge='sum',\n skip_kwidth=11,\n name='Generator'):\n super().__init__(name=name)\n self.skip = skip\n self.bias = bias\n self.no_z = no_z\n self.z_dim = z_dim\n self.enc_blocks = nn.ModuleList()\n assert isinstance(fmaps, list), type(fmaps)\n assert isinstance(poolings, list), type(poolings)\n if isinstance(kwidth, int): \n kwidth = [kwidth] * len(fmaps)\n assert isinstance(kwidth, list), type(kwidth)\n skips = {}\n ninp = ninputs\n for pi, (fmap, pool, kw) in enumerate(zip(fmaps, poolings, kwidth),\n start=1):\n if skip and pi < len(fmaps):\n # Make a skip connection for all but last hidden layer\n gskip = GSkip(skip_type, fmap,\n skip_init,\n skip_dropout,\n merge_mode=skip_merge,\n kwidth=skip_kwidth,\n bias=bias)\n l_i = pi - 1\n skips[l_i] = {'alpha':gskip}\n setattr(self, 'alpha_{}'.format(l_i), skips[l_i]['alpha'])\n enc_block = GConv1DBlock(\n ninp, fmap, kw, stride=pool, bias=bias,\n norm_type=norm_type\n )\n self.enc_blocks.append(enc_block)\n ninp = fmap\n\n self.skips = skips\n if not no_z and z_dim is None:\n z_dim = fmaps[-1]\n if not no_z:\n ninp += 
z_dim\n # Ensure we have fmaps, poolings and kwidth ready to decode\n if dec_fmaps is None:\n dec_fmaps = fmaps[::-1][1:] + [1]\n else:\n assert isinstance(dec_fmaps, list), type(dec_fmaps)\n if dec_poolings is None:\n dec_poolings = poolings[:]\n else:\n assert isinstance(dec_poolings, list), type(dec_poolings)\n self.dec_poolings = dec_poolings\n if dec_kwidth is None:\n dec_kwidth = kwidth[:]\n else:\n if isinstance(dec_kwidth, int): \n dec_kwidth = [dec_kwidth] * len(dec_fmaps)\n assert isinstance(dec_kwidth, list), type(dec_kwidth)\n # Build the decoder\n self.dec_blocks = nn.ModuleList()\n for pi, (fmap, pool, kw) in enumerate(zip(dec_fmaps, dec_poolings, \n dec_kwidth),\n start=1):\n if skip and pi > 1 and pool > 1:\n if skip_merge == 'concat':\n ninp *= 2\n\n if pi >= len(dec_fmaps):\n act = 'Tanh'\n else:\n act = None\n if pool > 1:\n dec_block = GDeconv1DBlock(\n ninp, fmap, kw, stride=pool,\n norm_type=norm_type, bias=bias,\n act=act\n )\n else:\n dec_block = GConv1DBlock(\n ninp, fmap, kw, stride=1, \n bias=bias,\n norm_type=norm_type\n )\n self.dec_blocks.append(dec_block)\n ninp = fmap\n\n def forward(self, x, z=None, ret_hid=False):\n hall = {}\n hi = x\n skips = self.skips\n for l_i, enc_layer in enumerate(self.enc_blocks):\n hi, linear_hi = enc_layer(hi, True)\n #print('ENC {} hi size: {}'.format(l_i, hi.size()))\n #print('Adding skip[{}]={}, alpha={}'.format(l_i,\n # hi.size(),\n # hi.size(1)))\n if self.skip and l_i < (len(self.enc_blocks) - 1):\n skips[l_i]['tensor'] = linear_hi\n if ret_hid:\n hall['enc_{}'.format(l_i)] = hi\n if not self.no_z:\n if z is None:\n # make z \n z = torch.randn(hi.size(0), self.z_dim, *hi.size()[2:])\n if hi.is_cuda:\n z = z.to('cuda')\n if len(z.size()) != len(hi.size()):\n raise ValueError('len(z.size) {} != len(hi.size) {}'\n ''.format(len(z.size()), len(hi.size())))\n if not hasattr(self, 'z'):\n self.z = z\n hi = torch.cat((z, hi), dim=1)\n if ret_hid:\n hall['enc_zc'] = hi\n else:\n z = None\n enc_layer_idx = len(self.enc_blocks) - 1\n for l_i, dec_layer in enumerate(self.dec_blocks):\n if self.skip and enc_layer_idx in self.skips and \\\n self.dec_poolings[l_i] > 1:\n skip_conn = skips[enc_layer_idx]\n #hi = self.skip_merge(skip_conn, hi)\n #print('Merging hi {} with skip {} of hj {}'.format(hi.size(),\n # l_i,\n # skip_conn['tensor'].size()))\n hi = skip_conn['alpha'](skip_conn['tensor'], hi)\n #print('DEC in size after skip and z_all: ', hi.size())\n #print('decoding layer {} with input {}'.format(l_i, hi.size()))\n hi = dec_layer(hi)\n #print('decoding layer {} output {}'.format(l_i, hi.size()))\n enc_layer_idx -= 1\n if ret_hid:\n hall['dec_{}'.format(l_i)] = hi\n if ret_hid:\n return hi, hall\n else:\n return hi\n\nclass Generator1D(Model):\n\n def __init__(self, ninputs, enc_fmaps, kwidth,\n activations, lnorm=False, dropout=0.,\n pooling=2, z_dim=256, z_all=False,\n skip=True, skip_blacklist=[],\n dec_activations=None, cuda=False,\n bias=False, aal=False, wd=0.,\n skip_init='one', skip_dropout=0.,\n no_tanh=False, aal_out=False,\n rnn_core=False, linterp=False,\n mlpconv=False, dec_kwidth=None,\n no_z=False,\n skip_type='alpha', \n num_spks=None, multilayer_out=False,\n skip_merge='sum', snorm=False,\n convblock=False, post_skip=False,\n pos_code=False, satt=False,\n dec_fmaps=None, up_poolings=None,\n post_proc=False, out_gate=False, \n linterp_mode='linear', hidden_comb=False, \n big_out_filter=False, z_std=1,\n freeze_enc=False, skip_kwidth=11,\n pad_type='constant'):\n # if num_spks is specified, do onehot coditioners in 
dec stages\n # subract_mean: from output signal, get rif of mean by windows\n # multilayer_out: add some convs in between gblocks in decoder\n super().__init__(name='Generator1D')\n self.dec_kwidth = dec_kwidth\n self.skip_kwidth = skip_kwidth\n self.skip = skip\n self.skip_init = skip_init\n self.skip_dropout = skip_dropout\n self.snorm = snorm\n self.z_dim = z_dim\n self.z_all = z_all\n self.pos_code = pos_code\n self.post_skip = post_skip\n self.big_out_filter = big_out_filter\n self.satt = satt\n self.post_proc = post_proc\n self.pad_type = pad_type\n self.onehot = num_spks is not None\n if self.onehot:\n assert num_spks > 0\n self.num_spks = num_spks\n # do not place any z\n self.no_z = no_z\n self.do_cuda = cuda\n self.wd = wd\n self.no_tanh = no_tanh\n self.skip_blacklist = skip_blacklist\n self.z_std = z_std\n self.freeze_enc = freeze_enc\n self.gen_enc = nn.ModuleList()\n if aal or aal_out:\n # Make cheby1 filter to include into pytorch conv blocks\n from scipy.signal import cheby1, dlti, dimpulse\n system = dlti(*cheby1(8, 0.05, 0.8 / pooling))\n tout, yout = dimpulse(system)\n filter_h = yout[0]\n if aal:\n self.filter_h = filter_h\n else:\n self.filter_h = None\n\n if dec_kwidth is None:\n dec_kwidth = kwidth\n\n if isinstance(activations, str):\n if activations != 'glu':\n activations = getattr(nn, activations)()\n if not isinstance(activations, list):\n activations = [activations] * len(enc_fmaps)\n if not isinstance(pooling, list) or len(pooling) == 1: \n pooling = [pooling] * len(enc_fmaps)\n skips = {}\n # Build Encoder\n for layer_idx, (fmaps, pool, act) in enumerate(zip(enc_fmaps, \n pooling,\n activations)):\n if layer_idx == 0:\n inp = ninputs\n else:\n inp = enc_fmaps[layer_idx - 1]\n if self.skip and layer_idx < (len(enc_fmaps) - 1):\n if layer_idx not in self.skip_blacklist:\n l_i = layer_idx\n gskip = GSkip(skip_type, fmaps,\n skip_init,\n skip_dropout,\n merge_mode=skip_merge,\n cuda=self.do_cuda,\n kwidth=self.skip_kwidth)\n skips[l_i] = {'alpha':gskip}\n setattr(self, 'alpha_{}'.format(l_i), skips[l_i]['alpha'])\n self.gen_enc.append(GBlock(inp, fmaps, kwidth, act,\n padding=None, lnorm=lnorm, \n dropout=dropout, pooling=pool,\n enc=True, bias=bias, \n aal_h=self.filter_h,\n snorm=snorm, convblock=convblock,\n satt=self.satt,\n pad_type=pad_type))\n self.skips = skips\n dec_inp = enc_fmaps[-1]\n if dec_fmaps is None:\n if mlpconv:\n dec_fmaps = enc_fmaps[:-1][::-1] + [16, 8, 1]\n print(dec_fmaps)\n up_poolings = [pooling] * (len(dec_fmaps) - 2) + [1] * 3\n add_activations = [nn.PReLU(16), nn.PReLU(8), nn.PReLU(1)]\n raise NotImplementedError('MLPconv is not useful and should be'\n ' deleted')\n else:\n dec_fmaps = enc_fmaps[:-1][::-1] + [1]\n up_poolings = pooling[::-1]\n #up_poolings = [pooling] * len(dec_fmaps)\n print('up_poolings: ', up_poolings)\n self.up_poolings = up_poolings\n else:\n assert up_poolings is not None\n self.up_poolings = up_poolings\n if rnn_core:\n self.z_all = False\n z_all = False\n # place a bidirectional RNN layer in the core to condition\n # everything to everything AND Z will be the init state of it\n self.rnn_core = nn.LSTM(dec_inp, dec_inp // 2, bidirectional=True,\n batch_first=True)\n else:\n if no_z:\n all_z = False\n else:\n dec_inp += z_dim\n #print(dec_fmaps)\n # Build Decoder\n self.gen_dec = nn.ModuleList()\n\n if dec_activations is None:\n # assign same activations as in Encoder\n dec_activations = [activations[0]] * len(dec_fmaps)\n else:\n if mlpconv:\n dec_activations = dec_activations[:-1]\n dec_activations += 
add_activations\n \n enc_layer_idx = len(enc_fmaps) - 1\n for layer_idx, (fmaps, act) in enumerate(zip(dec_fmaps, \n dec_activations)):\n if skip and layer_idx > 0 and enc_layer_idx not in skip_blacklist \\\n and up_poolings[layer_idx] > 1: \n if skip_merge == 'concat':\n dec_inp *= 2\n print('Added skip conn input of enc idx: {} and size:'\n ' {}'.format(enc_layer_idx, dec_inp))\n\n if z_all and layer_idx > 0:\n dec_inp += z_dim\n\n if self.onehot:\n dec_inp += self.num_spks\n\n if layer_idx >= len(dec_fmaps) - 1:\n if self.no_tanh:\n act = None\n else:\n act = nn.Tanh()\n lnorm = False\n dropout = 0\n if up_poolings[layer_idx] > 1:\n pooling = up_poolings[layer_idx]\n self.gen_dec.append(GBlock(dec_inp,\n fmaps, dec_kwidth, act, \n padding=0, \n lnorm=lnorm,\n dropout=dropout, pooling=pooling, \n enc=False,\n bias=bias,\n linterp=linterp, \n linterp_mode=linterp_mode,\n convblock=convblock, \n comb=hidden_comb,\n pad_type=pad_type))\n else:\n self.gen_dec.append(GBlock(dec_inp,\n fmaps, dec_kwidth, act, \n lnorm=lnorm,\n dropout=dropout, pooling=1,\n padding=0,#kwidth//2,\n enc=True,\n bias=bias,\n convblock=convblock,\n pad_type=pad_type))\n dec_inp = fmaps\n if aal_out:\n # make AAL filter to put in output\n self.aal_out = nn.Conv1d(1, 1, filter_h.shape[0] + 1,\n stride=1, \n padding=filter_h.shape[0] // 2,\n bias=False)\n print('filter_h shape: ', filter_h.shape)\n # apply AAL weights, reshaping impulse response to match\n # in channels and out channels\n aal_t = torch.FloatTensor(filter_h).view(1, 1, -1)\n aal_t = torch.cat((aal_t, torch.zeros(1, 1, 1)), dim=-1)\n self.aal_out.weight.data = aal_t\n print('aal_t size: ', aal_t.size())\n\n if post_proc:\n self.comb_net = PostProcessingCombNet(1, 512)\n if out_gate:\n self.out_gate = OutGate(1, 1)\n if big_out_filter:\n self.out_filter = nn.Conv1d(1, 1, 513, padding=513//2)\n\n \n\n def forward(self, x, z=None, ret_hid=False, spkid=None, \n slice_idx=0, att_weight=0):\n if self.num_spks is not None and spkid is None:\n raise ValueError('Please specify spk ID to network to '\n 'build OH identifier in decoder')\n\n hall = {}\n hi = x\n skips = self.skips\n for l_i, enc_layer in enumerate(self.gen_enc):\n hi, linear_hi = enc_layer(hi, att_weight=att_weight)\n #print('ENC {} hi size: {}'.format(l_i, hi.size()))\n #print('Adding skip[{}]={}, alpha={}'.format(l_i,\n # hi.size(),\n # hi.size(1)))\n if self.skip and l_i < (len(self.gen_enc) - 1):\n if l_i not in self.skip_blacklist:\n if self.post_skip:\n skips[l_i]['tensor'] = hi\n else:\n skips[l_i]['tensor'] = linear_hi\n if ret_hid:\n hall['enc_{}'.format(l_i)] = hi\n if hasattr(self, 'rnn_core'):\n self.z_all = False\n if z is None:\n # make z as initial RNN state forward and backward\n # (2 directions)\n if self.no_z:\n # MAKE DETERMINISTIC ZERO\n h0 = Variable(torch.zeros(2, hi.size(0), hi.size(1)//2))\n else:\n h0 = Variable(self.z_std * torch.randn(2, \n hi.size(0), \n hi.size(1)//2))\n c0 = Variable(torch.zeros(2, hi.size(0), hi.size(1)//2))\n if self.do_cuda:\n h0 = h0.cuda()\n c0 = c0.cuda()\n z = (h0, c0)\n if not hasattr(self, 'z'):\n self.z = z\n # Conv --> RNN\n hi = hi.transpose(1, 2)\n hi, state = self.rnn_core(hi, z)\n # RNN --> Conv\n hi = hi.transpose(1, 2)\n else:\n if not self.no_z:\n if z is None:\n # make z \n z = Variable(self.z_std * torch.randn(hi.size(0), self.z_dim,\n *hi.size()[2:]))\n if len(z.size()) != len(hi.size()):\n raise ValueError('len(z.size) {} != len(hi.size) {}'\n ''.format(len(z.size()), len(hi.size())))\n if self.do_cuda:\n z = z.cuda()\n if not 
hasattr(self, 'z'):\n self.z = z\n #print('Concating z {} and hi {}'.format(z.size(),\n # hi.size()))\n hi = torch.cat((z, hi), dim=1)\n if ret_hid:\n hall['enc_zc'] = hi\n else:\n z = None\n if self.pos_code:\n hi = pos_code(slice_idx, hi)\n # Cut gradient flow in Encoder?\n if self.freeze_enc:\n hi = hi.detach()\n #print('Concated hi|z size: ', hi.size())\n enc_layer_idx = len(self.gen_enc) - 1\n z_up = z\n if self.onehot:\n # make one hot identifier batch\n spk_oh = Variable(torch.zeros(spkid.size(0), \n self.num_spks))\n for bidx in range(spkid.size(0)):\n if len(spkid.size()) == 3:\n spk_id = spkid[bidx, 0].cpu().data[0]\n else:\n spk_id = spkid[bidx].cpu().data[0]\n spk_oh[bidx, spk_id] = 1\n spk_oh = spk_oh.view(spk_oh.size(0), -1, 1)\n if self.do_cuda:\n spk_oh = spk_oh.cuda()\n # Now one-hot is [B, SPKS, 1] ready to be \n # repeated to [B, SPKS, T] depending on layer\n for l_i, dec_layer in enumerate(self.gen_dec):\n if self.skip and enc_layer_idx in self.skips and \\\n self.up_poolings[l_i] > 1:\n skip_conn = skips[enc_layer_idx]\n #hi = self.skip_merge(skip_conn, hi)\n #print('Merging hi {} with skip {} of hj {}'.format(hi.size(),\n # l_i,\n # skip_conn['tensor'].size()))\n hi = skip_conn['alpha'](skip_conn['tensor'], hi)\n if l_i > 0 and self.z_all:\n # concat z in every layer\n z_up = torch.cat((z_up, z_up), dim=2)\n hi = torch.cat((hi, z_up), dim=1)\n if self.onehot:\n # repeat one-hot in time to adjust to concat\n spk_oh_r = spk_oh.repeat(1, 1, hi.size(-1))\n # concat in depth (channels)\n hi = torch.cat((hi, spk_oh_r), dim=1)\n #print('DEC in size after skip and z_all: ', hi.size())\n #print('decoding layer {} with input {}'.format(l_i, hi.size()))\n hi, _ = dec_layer(hi, att_weight=att_weight)\n #print('decoding layer {} output {}'.format(l_i, hi.size()))\n enc_layer_idx -= 1\n if ret_hid:\n hall['dec_{}'.format(l_i)] = hi\n if hasattr(self, 'aal_out'):\n hi = self.aal_out(hi)\n if hasattr(self, 'comb_net'):\n hi = F.tanh(self.comb_net(hi))\n if hasattr(self, 'out_gate'):\n hi = self.out_gate(hi)\n if hasattr(self, 'out_filter'):\n hi = self.out_filter(hi)\n # normalize G output in range within [-1, 1]\n #hi = self.batch_minmax_norm(hi)\n if ret_hid:\n return hi, hall\n else:\n return hi\n\n def batch_minmax_norm(self, x, out_min=-1, out_max=1):\n mins = torch.min(x, dim=2)[0]\n maxs = torch.max(x, dim=2)[0]\n R = (out_max - out_min) / (maxs - mins)\n R = R.unsqueeze(1)\n #print('R size: ', R.size())\n #print('x size: ', x.size())\n #print('mins size: ', mins.size())\n x = R * (x - mins.unsqueeze(1)) + out_min\n #print('norm x size: ', x.size())\n return x\n\n def skip_merge(self, skip_conn, hi):\n # TODO: DEPRECATED WITH NEW SKIP SCHEME\n raise NotImplementedError\n hj = skip_conn['tensor']\n alpha = skip_conn['alpha'].view(1, -1, 1)\n alpha = alpha.repeat(hj.size(0), 1, hj.size(2))\n #print('hi: ', hi.size())\n #print('hj: ', hj.size())\n #print('alpha: ', alpha.size())\n #print('alpha: ', alpha)\n if 'dropout' in skip_conn:\n alpha = skip_conn['dropout'](alpha)\n #print('alpha: ', alpha)\n return hi + alpha * hj\n \nif __name__ == '__main__':\n \"\"\"\n G = Generator1D(1, [64, 128, 256, 512, 1024], \n 31, \n 'ReLU',\n lnorm=False, \n pooling=4,\n z_dim=1024,\n skip_init='randn',\n skip_type='alpha',\n skip_blacklist=[],\n bias=False, cuda=False,\n rnn_core=False, linterp=False,\n dec_kwidth=31)\n \"\"\"\n G = Generator(1, [64, 128, 256, 512, 1024],\n kwidth=31,\n poolings=[4, 4, 4, 4, 4], no_z=True)\n print(G)\n print('G num params: ', G.get_n_params())\n x = 
torch.randn(1, 1, 16384)\n y, hall = G(x, ret_hid=True)\n print(y)\n print(x.size())\n print(y.size())\n #import matplotlib\n #matplotlib.use('Agg')\n #import matplotlib.pyplot as plt\n #plt.imshow(hall['att'].data[0, :, :].numpy())\n #plt.savefig('att_test.png', dpi=200)\n" ]
[ [ "torch.nn.Dropout", "scipy.signal.cheby1", "torch.max", "torch.ones", "torch.cat", "torch.nn.LSTM", "torch.randn", "torch.nn.ModuleList", "torch.min", "torch.zeros", "torch.nn.PReLU", "torch.nn.Tanh", "scipy.signal.dimpulse", "torch.FloatTensor", "torch.nn.Conv1d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] } ]
XavierCHEN34/ClickSEG
[ "0c801cfa5f67f066fdaab28ff8f3afde1cb71ace" ]
[ "scripts/annotations_conversion/coco_lvis.py" ]
[ "import cv2\nimport pickle\nimport numpy as np\nfrom pathlib import Path\nfrom tqdm import tqdm\n\nfrom isegm.data.datasets import LvisDataset, CocoDataset\nfrom isegm.utils.misc import get_bbox_from_mask, get_bbox_iou\nfrom scripts.annotations_conversion.common import get_masks_hierarchy, get_iou, encode_masks\n\n\ndef create_annotations(lvis_path: Path, coco_path: Path, dataset_split='train', min_object_area=80):\n lvis_dataset = LvisDataset(lvis_path, split=dataset_split)\n lvis_samples = lvis_dataset.dataset_samples\n lvis_annotations = lvis_dataset.annotations\n\n coco_dataset = CocoDataset(coco_path, split=dataset_split + '2017')\n\n coco_lvis_mapping = []\n lvis_images = {x['coco_url'].split('/')[-1].split('.')[0]: lvis_indx\n for lvis_indx, x in enumerate(lvis_samples)}\n for indx, coco_sample in enumerate(coco_dataset.dataset_samples):\n lvis_indx = lvis_images.get(coco_sample['file_name'].split('.')[0], None)\n if lvis_indx is not None:\n coco_lvis_mapping.append((indx, lvis_indx))\n\n output_masks_path = lvis_path / dataset_split / 'masks'\n output_masks_path.mkdir(parents=True, exist_ok=True)\n\n hlvis_annotation = dict()\n for coco_indx, lvis_indx in tqdm(coco_lvis_mapping):\n coco_sample = get_coco_sample(coco_dataset, coco_indx)\n\n lvis_info = lvis_samples[lvis_indx]\n lvis_annotation = lvis_annotations[lvis_info['id']]\n empty_mask = np.zeros((lvis_info['height'], lvis_info['width']))\n image_name = lvis_info['coco_url'].split('/')[-1].split('.')[0]\n\n lvis_masks = []\n lvis_bboxes = []\n for obj_annotation in lvis_annotation:\n obj_mask = lvis_dataset.get_mask_from_polygon(obj_annotation, empty_mask)\n obj_mask = obj_mask == 1\n if obj_mask.sum() >= min_object_area:\n lvis_masks.append(obj_mask)\n lvis_bboxes.append(get_bbox_from_mask(obj_mask))\n\n coco_bboxes = []\n coco_masks = []\n for inst_id in coco_sample['instances_info'].keys():\n obj_mask = coco_sample['instances_mask'] == inst_id\n if obj_mask.sum() >= min_object_area:\n coco_masks.append(obj_mask)\n coco_bboxes.append(get_bbox_from_mask(obj_mask))\n\n masks = []\n for coco_j, coco_bbox in enumerate(coco_bboxes):\n for lvis_i, lvis_bbox in enumerate(lvis_bboxes):\n if get_bbox_iou(lvis_bbox, coco_bbox) > 0.70 and \\\n get_iou(lvis_masks[lvis_i], coco_masks[coco_j]) > 0.70:\n break\n else:\n masks.append(coco_masks[coco_j])\n\n for ti, (lvis_mask, lvis_bbox) in enumerate(zip(lvis_masks, lvis_bboxes)):\n for tj_mask, tj_bbox in zip(lvis_masks[ti + 1:], lvis_bboxes[ti + 1:]):\n bbox_iou = get_bbox_iou(lvis_bbox, tj_bbox)\n if bbox_iou > 0.7 and get_iou(lvis_mask, tj_mask) > 0.85:\n break\n else:\n masks.append(lvis_mask)\n\n masks_meta = [(get_bbox_from_mask(x), x.sum()) for x in masks]\n if not masks:\n continue\n\n hierarchy = get_masks_hierarchy(masks, masks_meta)\n\n for obj_id, obj_info in list(hierarchy.items()):\n if obj_info['parent'] is None and len(obj_info['children']) == 0:\n hierarchy[obj_id] = None\n\n merged_mask = np.max(masks, axis=0)\n num_instance_masks = len(masks)\n for obj_id in coco_sample['semantic_info'].keys():\n obj_mask = coco_sample['semantic_map'] == obj_id\n obj_mask = np.logical_and(obj_mask, np.logical_not(merged_mask))\n if obj_mask.sum() > 500:\n masks.append(obj_mask)\n\n hlvis_annotation[image_name] = {\n 'num_instance_masks': num_instance_masks,\n 'hierarchy': hierarchy\n }\n\n with open(output_masks_path / f'{image_name}.pickle', 'wb') as f:\n pickle.dump(encode_masks(masks), f)\n\n with open(lvis_path / dataset_split / 'hannotation.pickle', 'wb') as f:\n 
pickle.dump(hlvis_annotation, f, protocol=pickle.HIGHEST_PROTOCOL)\n\n\ndef get_coco_sample(dataset, index):\n dataset_sample = dataset.dataset_samples[index]\n\n image_path = dataset.images_path / dataset.get_image_name(dataset_sample['file_name'])\n label_path = dataset.labels_path / dataset_sample['file_name']\n\n image = cv2.imread(str(image_path))\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n label = cv2.imread(str(label_path), cv2.IMREAD_UNCHANGED).astype(np.int32)\n label = 256 * 256 * label[:, :, 0] + 256 * label[:, :, 1] + label[:, :, 2]\n\n instance_map = np.full_like(label, 0)\n semantic_map = np.full_like(label, 0)\n semantic_info = dict()\n instances_info = dict()\n for segment in dataset_sample['segments_info']:\n class_id = segment['category_id']\n obj_id = segment['id']\n if class_id not in dataset._things_labels_set:\n semantic_map[label == obj_id] = obj_id\n semantic_info[obj_id] = {'ignore': False}\n continue\n\n instance_map[label == obj_id] = obj_id\n ignore = segment['iscrowd'] == 1\n instances_info[obj_id] = {\n 'ignore': ignore\n }\n\n sample = {\n 'image': image,\n 'instances_mask': instance_map,\n 'instances_info': instances_info,\n 'semantic_map': semantic_map,\n 'semantic_info': semantic_info\n }\n\n return sample\n" ]
[ [ "numpy.full_like", "numpy.max", "numpy.logical_not", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
siddtheshah/text2illustrate
[ "69ad0bf2b05c199626bda716f1af72064c28ef41" ]
[ "t2i/visualizer.py" ]
[ "import cv2\nimport numpy as np\n\nfrom t2i.entity import *\nfrom t2i.script import *\nfrom t2i.assetBook import *\nfrom t2i.endpointResolver import *\nfrom t2i.animate import *\nfrom threading import Lock, Thread\n\nimport sys\n\nCANVAS_WIDTH = 800\nCANVAS_HEIGHT = 600\n\nclass StaticVisualGraph:\n class VisualNode:\n def __init__(self, entity):\n self.entity = entity\n self.isRoot = True\n self.rootOffset = None # Define offsets to be vectors to CENTER of image\n self.otherOffsets = {}\n self.reverse = []\n self.onIsland = False\n\n class Island:\n def __init__(self, root):\n self.root = root\n self.leftExtent = 0\n self.rightExtent = 0\n self.upExtent = 0\n self.downExtent = 0\n self.nodes = []\n if root.entity.eImage.image is not None:\n self.leftExtent = -root.entity.eImage.width/2\n self.rightExtent = root.entity.eImage.width/2\n self.upExtent = root.entity.eImage.height/2\n self.downExtent = -root.entity.eImage.height/2\n\n def getDimensions(self):\n return (self.rightExtent - self.leftExtent, self.upExtent - self.downExtent)\n\n def __repr__(self):\n return '\\n<\\n\\tisland: {0}\\n\\tleft: {1}\\n\\tright: {2}\\n\\tup: {3}\\n\\tdown: {4}\\n>'.format(\n self.root.entity, self.leftExtent, self.rightExtent, self.upExtent, self.downExtent)\n\n def __init__(self, entities):\n # Demotes other entities if they are not roots.\n # If what was once a root was demoted, it is instead no longer a root\n # and the subject gets promoted instead.\n self.map = {}\n for entity in entities:\n self.map[entity.text] = self.VisualNode(entity)\n # print(entity)\n for entity in entities:\n forwards = []\n for obj in entity.objs:\n if obj in entities:\n forwards.append(self.map[obj.text])\n self.map[entity.text].forward = forwards\n self.nodeList = self.map.values()\n self.islands = []\n\n def AssignLocations(self):\n self.GetForwardRootsAndReverse()\n self.NodeToNodeOffsets()\n self.CreateIslands()\n self.ArrangeIslands()\n print(\"SVG Done\")\n\n def GetForwardRootsAndReverse(self):\n for node in self.nodeList:\n for child in node.forward:\n child.isRoot = False\n child.reverse.append(node)\n self.roots = list(filter(lambda x: x.isRoot, self.nodeList))\n\n def NodeToNodeOffsets(self):\n for node in self.nodeList:\n for verb, prep, otherNode in zip(node.entity.baseVerbs, node.entity.preps, node.forward):\n \n offset = self.SelectOffsetTogether(verb, prep)\n print(\"Offset: \")\n print(offset.tolist())\n if not (node.entity.eImage.image is not None and otherNode.entity.eImage.image is not None):\n offset*= SMALL_WIDTH\n else:\n offset*= (node.entity.eImage.width + otherNode.entity.eImage.width)/2\n if offset[2] > 0: \n offset[2] = 1\n elif offset[2] < 0:\n offset[2] = -1 \n node.otherOffsets[otherNode] = offset\n otherNode.otherOffsets[node] = -1*offset #graph is now bidirectional\n\n def SelectOffsetTogether(self, verb, prep):\n offset = np.array([0.0, 0.0, 0.0])\n offset += self.SelectVerbOffset(verb)\n if prep:\n offset += self.SelectPrepOffset(prep)\n # conditional tree\n if verb in endpointResolver.MOTION_OTHER:\n offset = np.array([1.5, 0, -1.0])\n return offset\n\n\n\n\n def SelectVerbOffset(self, verb):\n offset = np.array([0.0, 0.0, 0.0])\n if verb in endpointResolver.MOTION_OTHER:\n offset = np.array([0.1, 0, 1.0])\n elif verb in endpointResolver.MOTION_SELF:\n offset = np.array([1.5, 0, -1.0])\n elif verb in endpointResolver.REGARD:\n offset = np.array([1.5, 0, -1.0])\n elif verb in endpointResolver.USAGE:\n offset = np.array([.1, 0, 1.0])\n elif verb in endpointResolver.SPEAK:\n offset = 
np.array([1, 0, 0.0])\n return offset\n\n def SelectPrepOffset(self, prep):\n # Things look inverted because we want the object's relation to us.\n offset = np.array([0.0, 0.0, 0.0])\n if prep in [\"below\", \"beneath\", \"under\"]: # object is above us, etc\n offset = np.array([0.0, .75, 1.0])\n elif prep in [\"with\", \"to\", \"at\", \"before\"]: # we're at object. push it back a layer\n offset = np.array([.25, 0.0, -1.0])\n elif prep in [\"in\", \"inside\", \"into\", \"within\"]: # object in foreground\n offset = np.array([0.0, 0.0, 1.0])\n elif prep in [\"beside\", \"near\", \"outside\", \"by\", \"nearby\"]: # object nearby, we're more important\n offset = np.array([.25, 0.0, -1.0]) \n elif prep in [\"over\", \"above\", \"atop\", \"on\", \"onto\", \"upon\"]: # object beneath us\n offset = np.array([0.0, -.75, -1.0])\n return offset\n\n def CreateIslands(self):\n nodesResolved = 0\n while len(self.roots) > 0 and nodesResolved < len(self.nodeList):\n root = self.roots.pop(0)\n if root.onIsland:\n continue\n else:\n island = self.Island(root)\n root.rootOffset = np.array([0, 0, 0])\n self.islands.append(island)\n toExplore = set([root])\n visited = set()\n # print(\"New Island\")\n # Big diamonds are extremely unusual, so we won't worry about it.\n while toExplore:\n current = toExplore.pop()\n # print(\"Current: \" + current.entity.text)\n current.onIsland = True\n island.nodes.append(current)\n nodesResolved += 1\n if current not in visited:\n visited.add(current)\n for other in current.otherOffsets:\n if other not in visited:\n # handles depth 1 diamonds\n offset = current.otherOffsets[other] + current.rootOffset\n\n if not other.rootOffset:\n other.rootOffset = offset\n elif SumSq(other.rootOffset) > SumSq(offset):\n other.rootOffset = offset \n # print(\"Other: \" + other.entity.text)\n # print(other.rootOffset)\n if offset[0] - other.entity.eImage.width/2 < island.leftExtent:\n island.leftExtent = offset[0] - other.entity.eImage.width/2\n elif offset[0] + other.entity.eImage.width/2 > island.rightExtent:\n island.rightExtent = offset[0] + other.entity.eImage.width/2\n\n if offset[1] - other.entity.eImage.height/2 < island.downExtent:\n island.downExtent = offset[1] - other.entity.eImage.height/2\n elif offset[1] + other.entity.eImage.height/2 > island.upExtent:\n island.upExtent = offset[1] + other.entity.eImage.height/2\n toExplore.add(other)\n\n def HorizontalBounce(self):\n pass\n\n def ArrangeIslands(self):\n # Naive pack horizontally\n white_width = CANVAS_WIDTH - sum([island.getDimensions()[0] for island in self.islands])\n width_margin = white_width/(len(self.islands) + 1)\n\n grid_col = 0\n root_row = 2*CANVAS_HEIGHT/3 \n for island in self.islands:\n grid_col += width_margin\n root_col = grid_col - island.leftExtent\n for node in island.nodes:\n if node.entity.eImage.image is not None:\n node.entity.eImage.x = (root_col + node.rootOffset[0])\n node.entity.eImage.y = (root_row - node.rootOffset[1])\n node.entity.eImage.layer = node.rootOffset[2]\n print(\"Entity: \", node.entity.text, node.entity.eImage)\n grid_col += island.getDimensions()[0]\n print(\"================ ARRANGED ISLANDS =====================\")\n print(self.islands)\n\n\n\n def SumSq(offset):\n # This function favors denser packing, if possible.\n return sum(x**2 for x in offset)\n\nclass Visualizer:\n def __init__(self, width=CANVAS_WIDTH, height=CANVAS_HEIGHT):\n self.width = width\n self.height = height\n self.assetBook = AssetBook()\n self.script = Script()\n self.animator = Animator()\n 
self.visualScript = [] \n # self.lock_ = Lock()\n\n\n def DrawStoryWithCallback(self, textBody, callBackFunc):\n self.GetAssets(textBody)\n self.StreamScenes(callBackFunc)\n print(\"Finished stream\")\n self.visualScript = []\n self.script = Script()\n\n def GetAssets(self, textBody):\n self.script.processEntities(textBody)\n self.script.ResolveAdjectives()\n self.script.CreateContinuum()\n print(\"============== VISUALIZER ==============\")\n self.visualScript = self.script.continuum\n for entityList in self.visualScript:\n for entity in entityList:\n self.assetBook.attachImageToEntity(entity)\n if entity.eImage.image is None: \n print(\"Could not find image for entity: \" + entity.text)\n\n def StreamScenes(self, callBackFunc):\n # self.lock_.acquire()\n for entityList in self.visualScript:\n self.ArrangeStaticScene(entityList)\n self.ArrangeDynamicScene(entityList)\n # print(entityList)\n callBackFunc(entityList) # Should add in asynchronous processing here\n\n # self.lock_.release()\n\n def ArrangeStaticScene(self, entityList):\n graph = StaticVisualGraph(entityList)\n graph.AssignLocations()\n return entityList\n\n # Set default sizes and positions of \n\n\n def ArrangeDynamicScene(self, entityList):\n # Creates animation objects from animate.py, uses them to parameterize functions\n # which are then attached to the imageEntities\n self.animator.assignAnimations(entityList, 500)\n return entityList\n\n def ServeFileTitleAndMotion(self, textBody):\n self.GetAssets(textBody)\n ret = []\n for entityList in self.visualScript:\n self.ArrangeStaticScene(entityList)\n self.ArrangeDynamicScene(entityList)\n sublist = []\n for entity in entityList:\n if entity.eImage.image is not None:\n print(entity.eImage.path, entity.eImage.layer)\n size = (entity.eImage.width, entity.eImage.height)\n sublist.append((entity.eImage.path, entity.eImage.animateFunc.eager(), size, int(entity.eImage.layer)))\n ret.append(sublist)\n return ret\n\n\ndef staticShow(entity):\n print(\"Entity: \" + entity.text)\n if entity.eImage.image is None:\n print(\"Not Found\")\n else:\n print(entity.eImage)\n\ndef staticShowMultiple(entityList):\n for entity in entityList:\n staticShow(entity)\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1:\n textBody = sys.argv[1]\n else:\n textBody = (\"Jake was a policeman during his younger years.\")\n\n v = Visualizer()\n v.DrawStoryWithCallback(textBody, staticShowMultiple)\n # v.ServeFileTitleAndMotion(textBody)\n # textBody = \"The cat sat near the man.\"\n # v.DrawStoryWithCallback(textBody, staticShowMultiple)\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gladonias/neuronal-filters
[ "39eb6700725f91a374eafd50e1a814b6d9762d66" ]
[ "L4_DBC/run_RmpRiTau.py" ]
[ "#!/usr/bin/env python\n\n\"\"\"Python script to run cell model\"\"\"\n\n\n\"\"\"\n/* Copyright (c) 2015 EPFL-BBP, All rights reserved.\n\nTHIS SOFTWARE IS PROVIDED BY THE BLUE BRAIN PROJECT ``AS IS''\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\nTHE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\nPURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE BLUE BRAIN PROJECT\nBE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\nCONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\nSUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR\nBUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\nWHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE\nOR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN\nIF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nThis work is licensed under a\nCreative Commons Attribution-NonCommercial-ShareAlike 4.0 International License.\nTo view a copy of this license, visit\nhttp://creativecommons.org/licenses/by-nc-sa/4.0/legalcode or send a letter to\nCreative Commons, 171 Second Street, Suite 300,\nSan Francisco, California, 94105, USA.\n\"\"\"\n\n\"\"\"\n * @file run.py\n * @brief Run simulation using pyneuron\n * @author Werner Van Geit @ BBP\n * @date 2015\n\"\"\"\n\n# pylint: disable=C0325, W0212, F0401, W0612, F0401\n\nimport os\nimport neuron\nimport numpy\nimport sys\n\n\ndef create_cell():\n \"\"\"Create the cell model\"\"\"\n # Load morphology\n neuron.h.load_file(\"morphology.hoc\")\n # Load biophysics\n neuron.h.load_file(\"biophysics.hoc\")\n # Load main cell template\n neuron.h.load_file(\"template.hoc\")\n\n # Instantiate the cell from the template\n\n print(\"Loading cell bNAC219_L4_DBC_519f8feedc\")\n cell = neuron.h.bNAC219_L4_DBC_519f8feedc(0)\n return cell\n\n\ndef create_stimuli(cell, stim_start, stim_end, current_amplitude):\n \"\"\"Create the stimuli\"\"\"\n\n print('Attaching stimulus electrodes')\n\n stimuli = []\n\n iclamp = neuron.h.IClamp(0.5, sec=cell.soma[0])\n iclamp.delay = stim_start\n iclamp.dur = stim_end - stim_start\n iclamp.amp = current_amplitude\n print('Setting up step current clamp: '\n 'amp=%f nA, delay=%f ms, duration=%f ms' %\n (iclamp.amp, iclamp.delay, iclamp.dur))\n\n stimuli.append(iclamp)\n\n return stimuli\n\n\ndef create_recordings(cell):\n \"\"\"Create the recordings\"\"\"\n print('Attaching recording electrodes')\n\n recordings = {}\n\n recordings['time'] = neuron.h.Vector()\n recordings['soma(0.5)'] = neuron.h.Vector()\n\n recordings['time'].record(neuron.h._ref_t, 0.1)\n recordings['soma(0.5)'].record(cell.soma[0](0.5)._ref_v, 0.1)\n\n return recordings\n\n\ndef run_RmpRiTau_step(\n stim_start,\n stim_end,\n current_amplitude,\n plot_traces=None):\n \"\"\"Run \"\"\"\n\n cell = create_cell()\n stimuli = create_stimuli(cell, stim_start, stim_end, current_amplitude) # noqa\n recordings = create_recordings(cell)\n\n # Overriding default 30s simulation,\n neuron.h.tstop = stim_end + stim_start\n print(\n 'Setting simulation time to %.6g ms for the step current' %\n neuron.h.tstop)\n\n print('Setting initial voltage to -70 mV')\n neuron.h.v_init = -70\n\n neuron.h.stdinit()\n neuron.h.dt = 1000\n neuron.h.t = -1e9\n for _ in range(10):\n neuron.h.fadvance()\n\n neuron.h.t = 0\n neuron.h.dt = 0.025\n neuron.h.frecord_init()\n\n neuron.h.continuerun(3000)\n\n time = numpy.array(recordings['time'])\n soma_voltage = numpy.array(recordings['soma(0.5)'])\n\n recordings_dir = 'python_recordings'\n\n 
soma_voltage_filename = os.path.join(\n recordings_dir,\n 'soma_voltage_RmpRiTau_step.dat')\n numpy.savetxt(soma_voltage_filename, zip(time, soma_voltage))\n\n print('Soma voltage for RmpRiTau trace saved to: %s'\n % (soma_voltage_filename))\n\n if plot_traces:\n import pylab\n pylab.figure(facecolor='white')\n pylab.plot(recordings['time'], recordings['soma(0.5)'])\n pylab.xlabel('time (ms)')\n pylab.ylabel('Vm (mV)')\n pylab.gcf().canvas.set_window_title('RmpRiTau trace')\n\n return time, soma_voltage, stim_start, stim_end\n\n\ndef init_simulation():\n \"\"\"Initialise simulation environment\"\"\"\n\n neuron.h.load_file(\"stdrun.hoc\")\n neuron.h.load_file(\"import3d.hoc\")\n\n print('Loading constants')\n neuron.h.load_file('constants.hoc')\n\n\ndef analyse_RmpRiTau_trace(\n time,\n soma_voltage,\n stim_start,\n stim_end,\n current_amplitude):\n \"\"\"Analyse the output of the RmpRiTau protocol\"\"\"\n\n # Import the eFeature Extraction Library\n import efel\n\n # Prepare the trace data\n trace = {}\n trace['T'] = time\n trace['V'] = soma_voltage\n trace['stim_start'] = [stim_start]\n trace['stim_end'] = [stim_end]\n\n # Calculate the necessary eFeatures\n efel_results = efel.getFeatureValues(\n [trace],\n ['voltage_base', 'steady_state_voltage_stimend',\n 'decay_time_constant_after_stim'])\n\n voltage_base = efel_results[0]['voltage_base'][0]\n ss_voltage = efel_results[0]['steady_state_voltage_stimend'][0]\n dct = efel_results[0]['decay_time_constant_after_stim'][0]\n\n # Calculate input resistance\n input_resistance = float(ss_voltage - voltage_base) / current_amplitude\n\n rmpritau_dict = {}\n\n rmpritau_dict['Rmp'] = '%.6g' % voltage_base\n rmpritau_dict['Rmp_Units'] = 'mV'\n rmpritau_dict['Rin'] = '%.6g' % input_resistance\n rmpritau_dict['Rin_Units'] = 'MOhm'\n rmpritau_dict['Tau'] = '%.6g' % dct\n rmpritau_dict['Tau_Units'] = 'ms'\n\n print('Resting membrane potential is %s %s' %\n (rmpritau_dict['Rmp'], rmpritau_dict['Rmp_Units']))\n print('Input resistance is %s %s' %\n (rmpritau_dict['Rin'], rmpritau_dict['Rin_Units']))\n print('Time constant is %s %s' %\n (rmpritau_dict['Tau'], rmpritau_dict['Tau_Units']))\n\n import json\n\n with open('rmp_ri_tau.json', 'w') as rmpritau_json_file:\n json.dump(rmpritau_dict, rmpritau_json_file,\n sort_keys=True,\n indent=4,\n separators=(',', ': '))\n\n\ndef main(plot_traces=False):\n \"\"\"Main\"\"\"\n\n # Import matplotlib to plot the traces\n if plot_traces:\n import matplotlib\n matplotlib.rcParams['path.simplify'] = False\n\n init_simulation()\n\n current_amplitude = -0.01\n stim_start = 1000\n stim_end = 2000\n\n time, soma_voltage, stim_start, stim_end = run_RmpRiTau_step(\n stim_start, stim_end, current_amplitude, plot_traces=plot_traces)\n\n analyse_RmpRiTau_trace(\n time,\n soma_voltage,\n stim_start,\n stim_end,\n current_amplitude)\n\n if plot_traces:\n import pylab\n pylab.show()\n\nif __name__ == '__main__':\n if len(sys.argv) == 1:\n main(plot_traces=True)\n elif len(sys.argv) == 2 and sys.argv[1] == '--no-plots':\n main(plot_traces=False)\n else:\n raise Exception(\n \"Script only accepts one argument: --no-plots, not %s\" %\n str(sys.argv))\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gy20073/robosuite
[ "cb02dd64c02d7b3d76f6016c9d00dc9391776ef9" ]
[ "robosuite/demo.py" ]
[ "import numpy as np\nimport robosuite as suite\nimport time\n\nif __name__ == \"__main__\":\n\n # get the list of all environments\n envs = sorted(suite.environments.ALL_ENVS)\n\n # print info and select an environment\n print(\"Welcome to Surreal Robotics Suite v{}!\".format(suite.__version__))\n print(suite.__logo__)\n print(\"Here is a list of environments in the suite:\\n\")\n\n for k, env in enumerate(envs):\n print(\"[{}] {}\".format(k, env))\n print()\n try:\n s = input(\n \"Choose an environment to run \"\n + \"(enter a number from 0 to {}): \".format(len(envs) - 1)\n )\n # parse input into a number within range\n k = min(max(int(s), 0), len(envs))\n except:\n print(\"Input is not valid. Use 0 by default.\")\n k = 0\n\n # initialize the task\n env = suite.make(\n envs[k],\n has_renderer=True,\n ignore_done=True,\n use_camera_obs=False,\n control_freq=1,\n )\n env.reset()\n env.viewer.set_camera(camera_id=0)\n\n # do visualization\n start_all = time.time()\n NIter = 60\n for i in range(1):\n #env.reset()\n #env.viewer.set_camera(camera_id=0)\n\n for j in range(NIter):\n time1 = time.time()\n action = np.random.randn(env.dof)\n action = np.random.rand(2)* 2\n obs, reward, done, _ = env.step([0.6, 0.375])\n time2 = time.time()\n print(\"step time \", time2-time1, \" second\")\n\n for _ in range(200):\n #spass\n env.render()\n #env.render()\n\n print(\"render time \", time.time()-time2, \" second\")\n\n tot_time =time.time()-start_all\n tot_steps = NIter* (1.0/0.002)\n print(tot_time, \" is the total time for\", tot_steps,\" mujoco steps. FPS is \", tot_steps / tot_time)\n" ]
[ [ "numpy.random.randn", "numpy.random.rand" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JungUnYun/License-Plate-Recognition
[ "2db81526532a23c4cfe5f1824d09e19e2fa25911" ]
[ "LPR/models.py" ]
[ "from __future__ import division\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport numpy as np\n\nfrom PIL import Image\n\nfrom utils.parse_config import *\nfrom utils.utils import build_targets\nfrom collections import defaultdict\n\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\n\n\ndef create_modules(module_defs):\n \"\"\"\n Constructs module list of layer blocks from module configuration in module_defs\n \"\"\"\n hyperparams = module_defs.pop(0)\n output_filters = [int(hyperparams[\"channels\"])]\n module_list = nn.ModuleList()\n for i, module_def in enumerate(module_defs):\n modules = nn.Sequential()\n\n if module_def[\"type\"] == \"convolutional\":\n bn = int(module_def[\"batch_normalize\"])\n filters = int(module_def[\"filters\"])\n kernel_size = int(module_def[\"size\"])\n pad = (kernel_size - 1) // 2 if int(module_def[\"pad\"]) else 0\n modules.add_module(\n \"conv_%d\" % i,\n nn.Conv2d(\n in_channels=output_filters[-1],\n out_channels=filters,\n kernel_size=kernel_size,\n stride=int(module_def[\"stride\"]),\n padding=pad,\n bias=not bn,\n ),\n )\n if bn:\n modules.add_module(\"batch_norm_%d\" % i, nn.BatchNorm2d(filters))\n if module_def[\"activation\"] == \"leaky\":\n modules.add_module(\"leaky_%d\" % i, nn.LeakyReLU(0.1))\n\n elif module_def[\"type\"] == \"maxpool\":\n kernel_size = int(module_def[\"size\"])\n stride = int(module_def[\"stride\"])\n if kernel_size == 2 and stride == 1:\n padding = nn.ZeroPad2d((0, 1, 0, 1))\n modules.add_module(\"_debug_padding_%d\" % i, padding)\n maxpool = nn.MaxPool2d(\n kernel_size=int(module_def[\"size\"]),\n stride=int(module_def[\"stride\"]),\n padding=int((kernel_size - 1) // 2),\n )\n modules.add_module(\"maxpool_%d\" % i, maxpool)\n\n elif module_def[\"type\"] == \"upsample\":\n upsample = nn.Upsample(scale_factor=int(module_def[\"stride\"]), mode=\"nearest\")\n modules.add_module(\"upsample_%d\" % i, upsample)\n\n elif module_def[\"type\"] == \"route\":\n layers = [int(x) for x in module_def[\"layers\"].split(\",\")]\n filters = sum([output_filters[layer_i] for layer_i in layers])\n modules.add_module(\"route_%d\" % i, EmptyLayer())\n\n elif module_def[\"type\"] == \"shortcut\":\n filters = output_filters[int(module_def[\"from\"])]\n modules.add_module(\"shortcut_%d\" % i, EmptyLayer())\n\n elif module_def[\"type\"] == \"yolo\":\n anchor_idxs = [int(x) for x in module_def[\"mask\"].split(\",\")]\n # Extract anchors\n anchors = [int(x) for x in module_def[\"anchors\"].split(\",\")]\n anchors = [(anchors[i], anchors[i + 1]) for i in range(0, len(anchors), 2)]\n anchors = [anchors[i] for i in anchor_idxs]\n num_classes = int(module_def[\"classes\"])\n img_height = int(hyperparams[\"height\"])\n # Define detection layer\n yolo_layer = YOLOLayer(anchors, num_classes, img_height)\n modules.add_module(\"yolo_%d\" % i, yolo_layer)\n # Register module list and number of output filters\n module_list.append(modules)\n output_filters.append(filters)\n\n return hyperparams, module_list\n\n\nclass EmptyLayer(nn.Module):\n \"\"\"Placeholder for 'route' and 'shortcut' layers\"\"\"\n\n def __init__(self):\n super(EmptyLayer, self).__init__()\n\n\nclass YOLOLayer(nn.Module):\n \"\"\"Detection layer\"\"\"\n\n def __init__(self, anchors, num_classes, img_dim):\n super(YOLOLayer, self).__init__()\n self.anchors = anchors\n self.num_anchors = len(anchors)\n self.num_classes = num_classes\n self.bbox_attrs = 5 + num_classes\n self.image_dim = img_dim\n 
self.ignore_thres = 0.5\n self.lambda_coord = 1\n\n self.mse_loss = nn.MSELoss(size_average=True) # Coordinate loss\n self.bce_loss = nn.BCELoss(size_average=True) # Confidence loss\n self.ce_loss = nn.CrossEntropyLoss() # Class loss\n\n def forward(self, x, targets=None):\n nA = self.num_anchors\n nB = x.size(0)\n nG = x.size(2)\n stride = self.image_dim / nG\n\n # Tensors for cuda support\n FloatTensor = torch.cuda.FloatTensor if x.is_cuda else torch.FloatTensor\n LongTensor = torch.cuda.LongTensor if x.is_cuda else torch.LongTensor\n ByteTensor = torch.cuda.ByteTensor if x.is_cuda else torch.ByteTensor\n\n prediction = x.view(nB, nA, self.bbox_attrs, nG, nG).permute(0, 1, 3, 4, 2).contiguous()\n\n # Get outputs\n x = torch.sigmoid(prediction[..., 0]) # Center x\n y = torch.sigmoid(prediction[..., 1]) # Center y\n w = prediction[..., 2] # Width\n h = prediction[..., 3] # Height\n pred_conf = torch.sigmoid(prediction[..., 4]) # Conf\n pred_cls = torch.sigmoid(prediction[..., 5:]) # Cls pred.\n\n # Calculate offsets for each grid\n grid_x = torch.arange(nG).repeat(nG, 1).view([1, 1, nG, nG]).type(FloatTensor)\n grid_y = torch.arange(nG).repeat(nG, 1).t().view([1, 1, nG, nG]).type(FloatTensor)\n scaled_anchors = FloatTensor([(a_w / stride, a_h / stride) for a_w, a_h in self.anchors])\n anchor_w = scaled_anchors[:, 0:1].view((1, nA, 1, 1))\n anchor_h = scaled_anchors[:, 1:2].view((1, nA, 1, 1))\n\n # Add offset and scale with anchors\n pred_boxes = FloatTensor(prediction[..., :4].shape)\n pred_boxes[..., 0] = x.data + grid_x\n pred_boxes[..., 1] = y.data + grid_y\n pred_boxes[..., 2] = torch.exp(w.data) * anchor_w\n pred_boxes[..., 3] = torch.exp(h.data) * anchor_h\n\n # Training\n if targets is not None:\n\n if x.is_cuda:\n self.mse_loss = self.mse_loss.cuda()\n self.bce_loss = self.bce_loss.cuda()\n self.ce_loss = self.ce_loss.cuda()\n\n nGT, nCorrect, mask, conf_mask, tx, ty, tw, th, tconf, tcls = build_targets(\n pred_boxes=pred_boxes.cpu().data,\n pred_conf=pred_conf.cpu().data,\n pred_cls=pred_cls.cpu().data,\n target=targets.cpu().data,\n anchors=scaled_anchors.cpu().data,\n num_anchors=nA,\n num_classes=self.num_classes,\n grid_size=nG,\n ignore_thres=self.ignore_thres,\n img_dim=self.image_dim,\n )\n\n nProposals = int((pred_conf > 0.5).sum().item())\n recall = float(nCorrect / nGT) if nGT else 1\n precision = float(nCorrect / nProposals)\n\n # Handle masks\n mask = Variable(mask.type(ByteTensor))\n conf_mask = Variable(conf_mask.type(ByteTensor))\n\n # Handle target variables\n tx = Variable(tx.type(FloatTensor), requires_grad=False)\n ty = Variable(ty.type(FloatTensor), requires_grad=False)\n tw = Variable(tw.type(FloatTensor), requires_grad=False)\n th = Variable(th.type(FloatTensor), requires_grad=False)\n tconf = Variable(tconf.type(FloatTensor), requires_grad=False)\n tcls = Variable(tcls.type(LongTensor), requires_grad=False)\n\n # Get conf mask where gt and where there is no gt\n conf_mask_true = mask\n conf_mask_false = conf_mask - mask\n\n # Mask outputs to ignore non-existing objects\n loss_x = self.mse_loss(x[mask], tx[mask])\n loss_y = self.mse_loss(y[mask], ty[mask])\n loss_w = self.mse_loss(w[mask], tw[mask])\n loss_h = self.mse_loss(h[mask], th[mask])\n loss_conf = self.bce_loss(pred_conf[conf_mask_false], tconf[conf_mask_false]) + self.bce_loss(\n pred_conf[conf_mask_true], tconf[conf_mask_true]\n )\n loss_cls = (1 / nB) * self.ce_loss(pred_cls[mask], torch.argmax(tcls[mask], 1))\n loss = loss_x + loss_y + loss_w + loss_h + loss_conf + loss_cls\n\n return (\n 
loss,\n loss_x.item(),\n loss_y.item(),\n loss_w.item(),\n loss_h.item(),\n loss_conf.item(),\n loss_cls.item(),\n recall,\n precision,\n )\n\n else:\n # If not in training phase return predictions\n output = torch.cat(\n (\n pred_boxes.view(nB, -1, 4) * stride,\n pred_conf.view(nB, -1, 1),\n pred_cls.view(nB, -1, self.num_classes),\n ),\n -1,\n )\n return output\n\n\nclass Darknet(nn.Module):\n \"\"\"YOLOv3 object detection model\"\"\"\n\n def __init__(self, config_path, img_size=416):\n super(Darknet, self).__init__()\n self.module_defs = parse_model_config(config_path)\n self.hyperparams, self.module_list = create_modules(self.module_defs)\n self.img_size = img_size\n self.seen = 0\n self.header_info = np.array([0, 0, 0, self.seen, 0])\n self.loss_names = [\"x\", \"y\", \"w\", \"h\", \"conf\", \"cls\", \"recall\", \"precision\"]\n\n def forward(self, x, targets=None):\n is_training = targets is not None\n output = []\n self.losses = defaultdict(float)\n layer_outputs = []\n for i, (module_def, module) in enumerate(zip(self.module_defs, self.module_list)):\n if module_def[\"type\"] in [\"convolutional\", \"upsample\", \"maxpool\"]:\n x = module(x)\n elif module_def[\"type\"] == \"route\":\n layer_i = [int(x) for x in module_def[\"layers\"].split(\",\")]\n x = torch.cat([layer_outputs[i] for i in layer_i], 1)\n elif module_def[\"type\"] == \"shortcut\":\n layer_i = int(module_def[\"from\"])\n x = layer_outputs[-1] + layer_outputs[layer_i]\n elif module_def[\"type\"] == \"yolo\":\n # Train phase: get loss\n if is_training:\n x, *losses = module[0](x, targets)\n for name, loss in zip(self.loss_names, losses):\n self.losses[name] += loss\n # Test phase: Get detections\n else:\n x = module(x)\n output.append(x)\n layer_outputs.append(x)\n\n self.losses[\"recall\"] /= 3\n self.losses[\"precision\"] /= 3\n return sum(output) if is_training else torch.cat(output, 1)\n\n def load_weights(self, weights_path):\n \"\"\"Parses and loads the weights stored in 'weights_path'\"\"\"\n\n # Open the weights file\n fp = open(weights_path, \"rb\")\n header = np.fromfile(fp, dtype=np.int32, count=5) # First five are header values\n\n # Needed to write header when saving weights\n self.header_info = header\n\n self.seen = header[3]\n weights = np.fromfile(fp, dtype=np.float32) # The rest are weights\n fp.close()\n\n ptr = 0\n for i, (module_def, module) in enumerate(zip(self.module_defs, self.module_list)):\n if module_def[\"type\"] == \"convolutional\":\n conv_layer = module[0]\n if module_def[\"batch_normalize\"]:\n # Load BN bias, weights, running mean and running variance\n bn_layer = module[1]\n num_b = bn_layer.bias.numel() # Number of biases\n # Bias\n bn_b = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.bias)\n bn_layer.bias.data.copy_(bn_b)\n ptr += num_b\n # Weight\n bn_w = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.weight)\n bn_layer.weight.data.copy_(bn_w)\n ptr += num_b\n # Running Mean\n bn_rm = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.running_mean)\n bn_layer.running_mean.data.copy_(bn_rm)\n ptr += num_b\n # Running Var\n bn_rv = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.running_var)\n bn_layer.running_var.data.copy_(bn_rv)\n ptr += num_b\n else:\n # Load conv. bias\n num_b = conv_layer.bias.numel()\n conv_b = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(conv_layer.bias)\n conv_layer.bias.data.copy_(conv_b)\n ptr += num_b\n # Load conv. 
weights\n num_w = conv_layer.weight.numel()\n conv_w = torch.from_numpy(weights[ptr : ptr + num_w]).view_as(conv_layer.weight)\n conv_layer.weight.data.copy_(conv_w)\n ptr += num_w\n\n \"\"\"\n @:param path - path of the new weights file\n @:param cutoff - save layers between 0 and cutoff (cutoff = -1 -> all are saved)\n \"\"\"\n\n def save_weights(self, path, cutoff=-1):\n\n fp = open(path, \"wb\")\n self.header_info[3] = self.seen\n self.header_info.tofile(fp)\n\n # Iterate through layers\n for i, (module_def, module) in enumerate(zip(self.module_defs[:cutoff], self.module_list[:cutoff])):\n if module_def[\"type\"] == \"convolutional\":\n conv_layer = module[0]\n # If batch norm, load bn first\n if module_def[\"batch_normalize\"]:\n bn_layer = module[1]\n bn_layer.bias.data.cpu().numpy().tofile(fp)\n bn_layer.weight.data.cpu().numpy().tofile(fp)\n bn_layer.running_mean.data.cpu().numpy().tofile(fp)\n bn_layer.running_var.data.cpu().numpy().tofile(fp)\n # Load conv bias\n else:\n conv_layer.bias.data.cpu().numpy().tofile(fp)\n # Load conv weights\n conv_layer.weight.data.cpu().numpy().tofile(fp)\n\n fp.close()\n" ]
[ [ "torch.nn.Sequential", "torch.nn.CrossEntropyLoss", "torch.sigmoid", "numpy.fromfile", "torch.cat", "torch.nn.ModuleList", "torch.from_numpy", "torch.arange", "torch.nn.BCELoss", "torch.exp", "torch.nn.LeakyReLU", "torch.nn.BatchNorm2d", "torch.nn.ZeroPad2d", "numpy.array", "torch.nn.MSELoss", "torch.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lartpang/CMWNet.pytorch
[ "b0cabc8c083c89077842e729191a27c3f2904b74" ]
[ "utils.py" ]
[ "# -*- coding: utf-8 -*-\n# @Time : 2021/6/3\n# @Author : Lart Pang\n# @GitHub : https://github.com/lartpang\n\nimport os\nimport random\nfrom collections import abc, defaultdict\nfrom numbers import Number\nfrom typing import List\n\nimport cv2\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\n\n\[email protected]_grad()\ndef load_params_for_new_conv(conv_layer, new_conv_layer, in_dim):\n o, i, k_h, k_w = new_conv_layer.weight.shape\n ori_weight = conv_layer.weight\n if in_dim < 3:\n new_weight = ori_weight[:, :in_dim]\n else:\n new_weight = torch.repeat_interleave(\n ori_weight, repeats=in_dim // i + 1, dim=1\n )[:, :in_dim]\n new_conv_layer.weight = nn.Parameter(new_weight)\n new_conv_layer.bias = conv_layer.bias\n\n\ndef cus_sample(\n feat: torch.Tensor,\n mode=None,\n factors=None,\n *,\n interpolation=\"bilinear\",\n align_corners=False,\n) -> torch.Tensor:\n \"\"\"\n :param feat: 输入特征\n :param mode: size/scale\n :param factors: shape list for mode=size or scale list for mode=scale\n :param interpolation:\n :param align_corners: 具体差异可见https://www.yuque.com/lart/idh721/ugwn46\n :return: the resized tensor\n \"\"\"\n if mode is None:\n return feat\n else:\n if factors is None:\n raise ValueError(\n f\"factors should be valid data when mode is not None, but it is {factors} now.\"\n )\n\n interp_cfg = {}\n if mode == \"size\":\n if isinstance(factors, Number):\n factors = (factors, factors)\n assert isinstance(factors, (list, tuple)) and len(factors) == 2\n factors = [int(x) for x in factors]\n if factors == list(feat.shape[2:]):\n return feat\n interp_cfg[\"size\"] = factors\n elif mode == \"scale\":\n assert isinstance(factors, (int, float))\n if factors == 1:\n return feat\n recompute_scale_factor = None\n if isinstance(factors, float):\n recompute_scale_factor = False\n interp_cfg[\"scale_factor\"] = factors\n interp_cfg[\"recompute_scale_factor\"] = recompute_scale_factor\n else:\n raise NotImplementedError(f\"mode can not be {mode}\")\n\n if interpolation == \"nearest\":\n if align_corners is False:\n align_corners = None\n assert align_corners is None, (\n \"align_corners option can only be set with the interpolating modes: \"\n \"linear | bilinear | bicubic | trilinear, so we will set it to None\"\n )\n try:\n result = F.interpolate(\n feat, mode=interpolation, align_corners=align_corners, **interp_cfg\n )\n except NotImplementedError as e:\n print(\n f\"shape: {feat.shape}\\n\"\n f\"mode={mode}\\n\"\n f\"factors={factors}\\n\"\n f\"interpolation={interpolation}\\n\"\n f\"align_corners={align_corners}\"\n )\n raise e\n except Exception as e:\n raise e\n return result\n\n\ndef read_gray_array(\n path, div_255=False, to_normalize=False, thr=-1, dtype=np.float32\n) -> np.ndarray:\n \"\"\"\n 1. read the binary image with the suffix `.jpg` or `.png`\n into a grayscale ndarray\n 2. (to_normalize=True) rescale the ndarray to [0, 1]\n 3. (thr >= 0) binarize the ndarray with `thr`\n 4. 
return a gray ndarray (np.float32)\n \"\"\"\n assert path.endswith(\".jpg\") or path.endswith(\".png\")\n assert not div_255 or not to_normalize\n gray_array = cv2.imread(path, cv2.IMREAD_GRAYSCALE)\n\n if div_255:\n gray_array = gray_array / 255\n\n if to_normalize:\n gray_array = gray_array / 255\n gray_array_min = gray_array.min()\n gray_array_max = gray_array.max()\n if gray_array_max != gray_array_min:\n gray_array = (gray_array - gray_array_min) / (\n gray_array_max - gray_array_min\n )\n\n if thr >= 0:\n gray_array = gray_array > thr\n\n return gray_array.astype(dtype)\n\n\ndef read_color_array(path: str):\n assert path.endswith(\".jpg\") or path.endswith(\".png\")\n bgr_array = cv2.imread(path, cv2.IMREAD_COLOR)\n rgb_array = cv2.cvtColor(bgr_array, cv2.COLOR_BGR2RGB)\n return rgb_array\n\n\ndef to_device(data, device):\n \"\"\"\n :param data:\n :param device:\n :return:\n \"\"\"\n if isinstance(data, (tuple, list)):\n return to_device(data, device)\n elif isinstance(data, dict):\n return {name: to_device(item, device) for name, item in data.items()}\n elif isinstance(data, torch.Tensor):\n return data.to(device=device, non_blocking=True)\n else:\n raise TypeError(\n f\"Unsupported type {type(data)}. Only support Tensor or tuple/list/dict containing Tensors.\"\n )\n\n\ndef save_array_as_image(data_array: np.ndarray, save_name: str, save_dir: str):\n \"\"\"\n save the ndarray as a image\n\n Args:\n data_array: np.float32 the max value is less than or equal to 1\n save_name: with special suffix\n save_dir: the dirname of the image path\n \"\"\"\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n save_path = os.path.join(save_dir, save_name)\n if data_array.dtype != np.uint8:\n if data_array.max() > 1:\n raise Exception(\"the range of data_array has smoe errors\")\n data_array = (data_array * 255).astype(np.uint8)\n cv2.imwrite(save_path, data_array)\n\n\ndef imresize(image_array: np.ndarray, target_h, target_w, interp=\"linear\"):\n _interp_mapping = dict(\n linear=cv2.INTER_LINEAR,\n cubic=cv2.INTER_CUBIC,\n nearst=cv2.INTER_NEAREST,\n )\n assert interp in _interp_mapping, f\"Only support interp: {_interp_mapping.keys()}\"\n resized_image_array = cv2.resize(\n image_array, dsize=(target_w, target_h), interpolation=_interp_mapping[interp]\n )\n return resized_image_array\n\n\ndef get_data_from_txt(path: str) -> list:\n \"\"\"\n 读取文件中各行数据,存放到列表中\n \"\"\"\n lines = []\n with open(path, encoding=\"utf-8\", mode=\"r\") as f:\n line = f.readline().strip()\n while line:\n lines.append(line)\n line = f.readline().strip()\n return lines\n\n\ndef get_name_list_from_dir(path: str) -> list:\n \"\"\"直接从文件夹中读取所有文件不包含扩展名的名字\"\"\"\n return [os.path.splitext(x)[0] for x in os.listdir(path)]\n\n\ndef get_datasets_info_with_keys(dataset_infos: List[tuple], extra_keys: list) -> dict:\n \"\"\"\n 从给定的包含数据信息字典的列表中,依据给定的extra_kers和固定获取的key='image'来获取相应的路径\n Args:\n dataset_infos: 数据集列表\n extra_keys: 除了'image'之外的需要获取的信息名字\n\n Returns:\n 包含指定信息的绝对路径列表\n \"\"\"\n\n # total_keys = tuple(set(extra_keys + [\"image\"]))\n # e.g. 
('image', 'mask')\n def _get_intersection(list_a: list, list_b: list, to_sort: bool = True):\n \"\"\"返回两个列表的交集,并可以随之排序\"\"\"\n intersection_list = list(set(list_a).intersection(set(list_b)))\n if to_sort:\n return sorted(intersection_list)\n return intersection_list\n\n def _get_info(dataset_info: dict, extra_keys: list, path_collection: defaultdict):\n \"\"\"\n 配合get_datasets_info_with_keys使用,针对特定的数据集的信息进行路径获取\n\n Args:\n dataset_info: 数据集信息字典\n extra_keys: 除了'image'之外的需要获取的信息名字\n path_collection: 存放收集到的路径信息\n \"\"\"\n total_keys = tuple(set(extra_keys + [\"image\"]))\n # e.g. ('image', 'mask')\n\n infos = {}\n for k in total_keys:\n assert k in dataset_info, f\"{k} is not in {dataset_info}\"\n infos[k] = dict(dir=dataset_info[k][\"path\"], ext=dataset_info[k][\"suffix\"])\n\n if (index_file_path := dataset_info.get(\"index_file\", None)) is not None:\n image_names = get_data_from_txt(index_file_path)\n else:\n image_names = get_name_list_from_dir(infos[\"image\"][\"dir\"])\n\n if \"mask\" in total_keys:\n mask_names = get_name_list_from_dir(infos[\"mask\"][\"dir\"])\n image_names = _get_intersection(image_names, mask_names)\n\n for i, name in enumerate(image_names):\n for k in total_keys:\n path_collection[k].append(\n os.path.join(infos[k][\"dir\"], name + infos[k][\"ext\"])\n )\n\n path_collection = defaultdict(list)\n for dataset_name, dataset_info in dataset_infos:\n print(f\"Loading data from {dataset_name}: {dataset_info['root']}\")\n\n _get_info(\n dataset_info=dataset_info,\n extra_keys=extra_keys,\n path_collection=path_collection,\n )\n return path_collection\n\n\ndef customized_worker_init_fn(worker_id, base_seed):\n set_seed_for_lib(base_seed + worker_id)\n\n\ndef set_seed_for_lib(seed):\n random.seed(seed)\n np.random.seed(seed)\n # 为了禁止hash随机化,使得实验可复现。\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n torch.manual_seed(seed) # 为CPU设置随机种子\n torch.cuda.manual_seed(seed) # 为当前GPU设置随机种子\n torch.cuda.manual_seed_all(seed) # 为所有GPU设置随机种子\n\n\ndef initialize_seed_cudnn(seed, deterministic):\n assert isinstance(deterministic, bool) and isinstance(seed, int)\n set_seed_for_lib(seed)\n if not deterministic:\n print(\"We will use `torch.backends.cudnn.benchmark`\")\n else:\n print(\"We will not use `torch.backends.cudnn.benchmark`\")\n torch.backends.cudnn.enabled = True\n torch.backends.cudnn.benchmark = not deterministic\n torch.backends.cudnn.deterministic = deterministic\n\n\ndef mapping_to_str(\n mapping: abc.Mapping, *, prefix: str = \" \", lvl: int = 0, max_lvl: int = 1\n) -> str:\n \"\"\"\n Print the structural information of the dict.\n \"\"\"\n sub_lvl = lvl + 1\n cur_prefix = prefix * lvl\n sub_prefix = prefix * sub_lvl\n\n if lvl == max_lvl:\n sub_items = str(mapping)\n else:\n sub_items = [\"{\"]\n for k, v in mapping.items():\n sub_item = sub_prefix + k + \": \"\n if isinstance(v, abc.Mapping):\n sub_item += mapping_to_str(\n v, prefix=prefix, lvl=sub_lvl, max_lvl=max_lvl\n )\n else:\n sub_item += str(v)\n sub_items.append(sub_item)\n sub_items.append(cur_prefix + \"}\")\n sub_items = \"\\n\".join(sub_items)\n return sub_items\n\n\ndef change_lr(optimizer, curr_idx, total_num, lr_decay):\n ratio = pow((1 - float(curr_idx) / total_num), lr_decay)\n optimizer.param_groups[0][\"lr\"] = optimizer.param_groups[0][\"lr\"] * ratio\n optimizer.param_groups[1][\"lr\"] = optimizer.param_groups[0][\"lr\"]\n\n\ndef mkdir_if_not_exist(path_list: list):\n for path in path_list:\n if not os.path.exists(path):\n os.makedirs(path)\n" ]
[ [ "torch.nn.Parameter", "torch.cuda.manual_seed", "numpy.random.seed", "torch.manual_seed", "torch.repeat_interleave", "torch.no_grad", "torch.cuda.manual_seed_all", "torch.nn.functional.interpolate" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rychallener/TauREx3_public
[ "eb0eeeeca8f47e5e7d64d8d70b43a3af370b7677" ]
[ "taurex/plot/plotter.py" ]
[ "import h5py\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport taurex.plot.corner as corner\nimport matplotlib as mpl\nfrom taurex.util.util import decode_string_array\nimport os\n\nfrom matplotlib import rc\n\n# some global matplotlib vars\nmpl.rcParams['axes.linewidth'] = 1 #set the value globally\nmpl.rcParams['text.antialiased'] = True\nmpl.rcParams['errorbar.capsize'] = 2\n\n# rc('text', usetex=True) # use tex in plots\n#rc('font', **{ 'family' : 'serif','serif':['Palatino'], 'size' : 11})\n\nclass Plotter(object):\n phi = 1.618\n\n modelAxis = {\n 'TransmissionModel' : '$(R_p/R_*)^2$',\n 'EmissionModel' : '$F_p/F_*$',\n 'DirectImageModel' : '$F_p$'\n\n }\n\n def __init__(self,filename,title=None,prefix=None,cmap='Paired',out_folder='.'):\n self.fd = h5py.File(filename,'r')\n self.title = title\n self.cmap = mpl.cm.get_cmap(cmap)\n self.prefix=prefix\n if self.prefix is None:\n self.prefix = \"output\" \n self.out_folder=out_folder\n\n if not os.path.exists(self.out_folder):\n os.makedirs(self.out_folder)\n\n @property\n def num_solutions(self,fd_position='Output'):\n return len([(int(k[8:]),v) for k,v in self.fd[fd_position]['Solutions'].items() if 'solution' in k])\n\n def solution_iter(self,fd_position='Output'):\n for idx,solution in [(int(k[8:]),v) for k,v in self.fd[fd_position]['Solutions'].items() if 'solution' in k]:\n yield idx,solution\n\n def forward_output(self):\n return self.fd['Output']\n\n def compute_ranges(self):\n\n solution_ranges = []\n\n\n\n mu_derived = None\n for idx,sol in self.solution_iter():\n \n mu_derived = self.get_mu_parameters(sol)\n\n\n fitting_names = self.fittingNames\n\n\n fit_params = sol['fit_params']\n param_list = []\n for fit_names in self.fittingNames:\n param_values = fit_params[fit_names]\n sigma_m = param_values['sigma_m'][()]\n sigma_p = param_values['sigma_p'][()]\n val = param_values['value'][()]\n\n param_list.append([val,val- 5.0*sigma_m,val+5.0*sigma_p])\n \n if mu_derived is not None:\n sigma_m = mu_derived['sigma_m'][()]\n sigma_p = mu_derived['sigma_p'][()]\n val = mu_derived['value'][()] \n param_list.append([val, val- 5.0*sigma_m,val+5.0*sigma_p])\n \n solution_ranges.append(param_list)\n\n\n fitting_boundary_low = self.fittingBoundaryLow\n fitting_boundary_high = self.fittingBoundaryHigh\n\n if mu_derived is not None:\n fitting_boundary_low = np.concatenate((fitting_boundary_low, [-1e99]))\n fitting_boundary_high = np.concatenate((fitting_boundary_high, [1e99]))\n\n\n range_all = np.array(solution_ranges)\n\n range_min = np.min(range_all[:,:,1],axis=0)\n range_max = np.max(range_all[:,:,2],axis=0)\n\n range_min = np.where(range_min < fitting_boundary_low, fitting_boundary_low,range_min)\n range_max = np.where(range_max > fitting_boundary_high, fitting_boundary_high,range_max)\n\n return list(zip(range_min,range_max)) \n \n\n @property\n def activeGases(self):\n return decode_string_array(self.fd['ModelParameters']['Chemistry']['active_gases'])\n\n @property\n def inactiveGases(self):\n return decode_string_array(self.fd['ModelParameters']['Chemistry']['inactive_gases'])\n\n\n def plot_fit_xprofile(self):\n\n for solution_idx, solution_val in self.solution_iter():\n\n fig = plt.figure(figsize=(7,7/self.phi))\n ax = fig.add_subplot(111)\n num_moles = len(self.activeGases+self.inactiveGases)\n\n profiles = solution_val['Profiles']\n pressure_profile = profiles['pressure_profile'][:]/1e5\n active_profile = profiles['active_mix_profile'][...]\n active_profile_std = 
profiles['active_mix_profile_std'][...]\n\n inactive_profile = profiles['inactive_mix_profile'][...]\n inactive_profile_std = profiles['inactive_mix_profile_std'][...]\n\n cols_mol = {}\n for mol_idx,mol_name in enumerate(self.activeGases):\n cols_mol[mol_name] = self.cmap(mol_idx/num_moles)\n\n prof = active_profile[mol_idx]\n prof_std = active_profile_std[mol_idx]\n\n plt.plot(prof,pressure_profile,color=cols_mol[mol_name], label=mol_name)\n\n plt.fill_betweenx(pressure_profile, prof + prof_std, prof,\n color=self.cmap(mol_idx / num_moles), alpha=0.5)\n plt.fill_betweenx(pressure_profile, prof,\n np.power(10, (np.log10(prof) - (\n np.log10(prof + prof_std) - np.log10(prof)))),\n color=self.cmap(mol_idx / num_moles), alpha=0.5)\n\n for mol_idx,mol_name in enumerate(self.inactiveGases):\n inactive_idx = len(self.activeGases) + mol_idx\n cols_mol[mol_name] = self.cmap(inactive_idx/num_moles)\n\n \n prof = inactive_profile[mol_idx]\n prof_std = inactive_profile_std[mol_idx]\n\n plt.plot(prof,pressure_profile,color=cols_mol[mol_name], label=mol_name)\n\n plt.fill_betweenx(pressure_profile, prof + prof_std, prof,\n color=self.cmap(inactive_idx / num_moles), alpha=0.5)\n plt.fill_betweenx(pressure_profile, prof,\n np.power(10, (np.log10(prof) - (\n np.log10(prof + prof_std) - np.log10(prof)))),\n color=self.cmap(inactive_idx / num_moles), alpha=0.5)\n\n plt.yscale('log')\n plt.gca().invert_yaxis()\n plt.xscale('log')\n plt.xlim(1e-12, 3)\n plt.xlabel('Mixing ratio')\n plt.ylabel('Pressure (bar)')\n plt.tight_layout()\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), ncol=1, prop={'size':11}, frameon=False)\n if self.title:\n plt.title(self.title, fontsize=14)\n plt.savefig(os.path.join(self.out_folder, '%s_fit_mixratio.pdf' % (self.prefix)))\n plt.close('all')\n\n def plot_forward_xprofile(self):\n fig = plt.figure(figsize=(7,7/self.phi))\n ax = fig.add_subplot(111)\n num_moles = len(self.activeGases+self.inactiveGases)\n\n solution_val = self.forward_output()\n\n profiles = solution_val['Profiles']\n pressure_profile = profiles['pressure_profile'][:]/1e5\n active_profile = profiles['active_mix_profile'][...]\n\n inactive_profile = profiles['inactive_mix_profile'][...]\n\n cols_mol = {}\n for mol_idx,mol_name in enumerate(self.activeGases):\n cols_mol[mol_name] = self.cmap(mol_idx/num_moles)\n\n prof = active_profile[mol_idx]\n\n plt.plot(prof,pressure_profile,color=cols_mol[mol_name], label=mol_name)\n\n for mol_idx,mol_name in enumerate(self.inactiveGases):\n inactive_idx = len(self.activeGases) + mol_idx\n cols_mol[mol_name] = self.cmap(inactive_idx/num_moles)\n\n \n prof = inactive_profile[mol_idx]\n\n plt.plot(prof,pressure_profile,color=cols_mol[mol_name], label=mol_name)\n\n plt.yscale('log')\n plt.gca().invert_yaxis()\n plt.xscale('log')\n plt.xlim(1e-12, 3)\n plt.xlabel('Mixing ratio')\n plt.ylabel('Pressure (bar)')\n plt.tight_layout()\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), ncol=1, prop={'size':11}, frameon=False)\n if self.title:\n plt.title(self.title, fontsize=14)\n plt.savefig(os.path.join(self.out_folder, '%s_fit_mixratio.pdf' % (self.prefix)))\n plt.close('all')\n\n def plot_fitted_tp(self):\n\n # fitted model\n fig = plt.figure(figsize=(5,3.5))\n ax = fig.add_subplot(111)\n \n for solution_idx, solution_val in self.solution_iter():\n if self.num_solutions > 1:\n label = 
'Fitted profile (%i)' % (solution_idx)\n else:\n label = 'Fitted profile'\n temp_prof = solution_val['Profiles']['temp_profile'][:]\n temp_prof_std = solution_val['Profiles']['temp_profile_std'][:]\n pres_prof = solution_val['Profiles']['pressure_profile'][:]/1e5\n plt.plot(temp_prof, pres_prof, color=self.cmap(float(solution_idx)/self.num_solutions), label=label)\n plt.fill_betweenx(pres_prof, temp_prof-temp_prof_std, temp_prof+temp_prof_std, color=self.cmap(float(solution_idx)/self.num_solutions), alpha=0.5)\n\n plt.yscale('log')\n plt.gca().invert_yaxis()\n plt.xlabel('Temperature (K)')\n plt.ylabel('Pressure (bar)')\n plt.tight_layout()\n legend = plt.legend(loc='upper left', ncol=1, prop={'size':11})\n legend.get_frame().set_facecolor('white')\n legend.get_frame().set_edgecolor('white')\n\n legend.get_frame().set_alpha(0.8)\n if self.title:\n plt.title(self.title, fontsize=14)\n plt.savefig(os.path.join(self.out_folder, '%s_tp_profile.pdf' % (self.prefix)))\n plt.close()\n\n def plot_forward_tp(self):\n\n fig = plt.figure(figsize=(5,3.5))\n ax = fig.add_subplot(111)\n \n solution_val = self.forward_output()\n\n temp_prof = solution_val['Profiles']['temp_profile'][:]\n pres_prof = solution_val['Profiles']['pressure_profile'][:]/1e5\n plt.plot(temp_prof, pres_prof)\n\n plt.yscale('log')\n plt.gca().invert_yaxis()\n plt.xlabel('Temperature (K)')\n plt.ylabel('Pressure (bar)')\n plt.tight_layout()\n legend = plt.legend(loc='upper left', ncol=1, prop={'size':11})\n legend.get_frame().set_facecolor('white')\n legend.get_frame().set_edgecolor('white')\n\n legend.get_frame().set_alpha(0.8)\n if self.title:\n plt.title(self.title, fontsize=14)\n plt.savefig(os.path.join(self.out_folder, '%s_tp_profile.pdf' % (self.prefix)))\n plt.close()\n\n\n def get_mu_parameters(self, solution):\n if 'mu_derived' not in solution['fit_params'].keys():\n return None\n else:\n return solution['fit_params']['mu_derived']\n\n\n\n def plot_posteriors(self):\n if not self.is_retrieval:\n raise Exception('HDF5 was not generated from retrieval, no posteriors found')\n \n ranges = self.compute_ranges()\n\n figs = []\n\n for solution_idx, solution_val in self.solution_iter():\n\n # print(solution_idx)\n\n mu_derived = self.get_mu_parameters(solution_val)\n\n tracedata = solution_val['tracedata']\n weights = solution_val['weights']\n\n figure_past = None\n\n if solution_idx > 0:\n figure_past = figs[solution_idx - 1]\n\n latex_names = self.fittingLatex\n\n if mu_derived is not None:\n latex_names.append('$\\mu$ (derived)')\n tracedata = np.column_stack((tracedata, mu_derived['trace']))\n\n\n color_idx = np.float(solution_idx)/self.num_solutions\n\n # print('color: {}'.format(color_idx))\n ### https://matplotlib.org/users/customizing.html\n plt.rc('xtick', labelsize=10) #size of individual labels\n plt.rc('ytick', labelsize=10)\n plt.rc('axes.formatter', limits=( -4, 5 )) #scientific notation..\n\n\n fig = corner.corner(tracedata,\n weights=weights,\n labels=latex_names,\n label_kwargs=dict(fontsize=20),\n smooth=True,\n scale_hist=True,\n quantiles=[0.16, 0.5, 0.84],\n show_titles=True,\n title_kwargs=dict(fontsize=12),\n range=ranges,\n #quantiles=[0.16, 0.5],\n ret=True,\n fill_contours=True,\n color=self.cmap(float(color_idx)),\n top_ticks=False,\n bins=30,\n fig = figure_past)\n if self.title:\n fig.gca().annotate(self.title, xy=(0.5, 1.0), xycoords=\"figure fraction\",\n xytext=(0, -5), textcoords=\"offset points\",\n ha=\"center\", va=\"top\", fontsize=14)\n\n figs.append(fig)\n\n 
plt.savefig(os.path.join(self.out_folder, '%s_posteriors.pdf' % (self.prefix)))\n self.posterior_figure_handles = figs\n self.posterior_figure_ranges = ranges\n plt.close()\n\n @property\n def modelType(self):\n return self.fd['ModelParameters']['model_type'][()]\n\n def count_contributions(self,spectra):\n pass\n\n\n \n\n def plot_fitted_spectrum(self, resolution=None):\n\n # fitted model\n fig = plt.figure(figsize=(10.6, 7.0))\n #ax = fig.add_subplot(111)\n\n \n\n obs_spectrum = self.fd['Observed']['spectrum'][...]\n error = self.fd['Observed']['errorbars'][...]\n wlgrid = self.fd['Observed']['wlgrid'][...]\n bin_widths = self.fd['Observed']['binwidths'][...] \n \n plt.errorbar(wlgrid,obs_spectrum, error, lw=1, color='black', alpha=0.4, ls='none', zorder=0, label='Observed')\n\n N = self.num_solutions\n for solution_idx, solution_val in self.solution_iter():\n if N > 1:\n label = 'Fitted model (%i)' % (solution_idx)\n else:\n label = 'Fitted model'\n\n try:\n binned_grid = solution_val['Spectra']['binned_wlgrid'][...]\n except KeyError:\n binned_grid = solution_val['Spectra']['bin_wlgrid'][...]\n \n native_grid = solution_val['Spectra']['native_wngrid'][...]\n\n\n plt.scatter(wlgrid, obs_spectrum, marker='d',zorder=1,**{'s': 10, 'edgecolors': 'grey','c' : self.cmap(float(solution_idx)/N) })\n\n self._generic_plot(binned_grid,native_grid,solution_val['Spectra'],resolution=resolution,color=self.cmap(float(solution_idx)/N),label=label)\n\n\n plt.xlim(np.min(wlgrid)-0.05*np.min(wlgrid), np.max(wlgrid)+0.05*np.max(wlgrid))\n # plt.ylim(0.0,0.006)\n plt.xlabel(r'Wavelength ($\\mu$m)')\n plt.ylabel(self.modelAxis[self.modelType])\n\n if np.max(wlgrid) - np.min(wlgrid) > 5:\n plt.xscale('log')\n plt.tick_params(axis='x', which='minor')\n #ax.xaxis.set_minor_formatter(mpl.ticker.FormatStrFormatter(\"%i\"))\n #ax.xaxis.set_major_formatter(mpl.ticker.FormatStrFormatter(\"%i\"))\n plt.legend(loc='best', ncol=2, frameon=False, prop={'size':11})\n if self.title:\n plt.title(self.title, fontsize=14)\n plt.tight_layout()\n plt.savefig(os.path.join(self.out_folder, '%s_spectrum.pdf' % (self.prefix)))\n plt.close()\n\n\n\n def plot_forward_spectrum(self,resolution=None):\n fig = plt.figure(figsize=(5.3, 3.5))\n\n spectra_out = self.forward_output()['Spectra']\n\n native_grid = spectra_out['native_wngrid'][...]\n\n try:\n wlgrid = spectra_out['binned_wlgrid'][...]\n except KeyError:\n wlgrid = spectra_out['native_wlgrid'][...]\n \n \n self._generic_plot(wlgrid,native_grid,spectra_out,resolution=resolution,alpha=1)\n plt.xlim(np.min(wlgrid)-0.05*np.min(wlgrid), np.max(wlgrid)+0.05*np.max(wlgrid))\n # plt.ylim(0.0,0.006)\n plt.xlabel(r'Wavelength ($\\mu$m)')\n plt.ylabel(self.modelAxis[self.modelType])\n\n if np.max(wlgrid) - np.min(wlgrid) > 5:\n plt.xscale('log')\n plt.tick_params(axis='x', which='minor')\n #ax.xaxis.set_minor_formatter(mpl.ticker.FormatStrFormatter(\"%i\"))\n #ax.xaxis.set_major_formatter(mpl.ticker.FormatStrFormatter(\"%i\"))\n plt.legend(loc='best', ncol=2, frameon=False, prop={'size':11})\n if self.title:\n plt.title(self.title, fontsize=14)\n plt.tight_layout()\n plt.savefig(os.path.join(self.out_folder, '%s_forward_spectrum.pdf' % (self.prefix)))\n plt.close()\n\n def plot_fitted_contrib(self,full=False,resolution=None):\n # fitted model\n\n N = self.num_solutions\n for solution_idx, solution_val in self.solution_iter():\n\n fig=plt.figure(figsize=(5.3*2, 3.5*2))\n ax = fig.add_subplot(111)\n\n \n\n obs_spectrum = self.fd['Observed']['spectrum'][:]\n error = 
self.fd['Observed']['errorbars'][...]\n wlgrid = self.fd['Observed']['wlgrid'][...]\n plot_wlgrid = wlgrid\n bin_widths = self.fd['Observed']['binwidths'][...] \n \n plt.errorbar(wlgrid,obs_spectrum, error, lw=1, color='black', alpha=0.4, ls='none', zorder=0, label='Observed')\n self._plot_contrib(solution_val,wlgrid,ax,full=full,resolution=resolution)\n\n\n #plt.tight_layout()\n plt.savefig(os.path.join(self.out_folder, '%s_spectrum_contrib_sol%i.pdf' % (self.prefix,solution_idx)))\n plt.close()\n\n plt.close('all')\n\n def plot_forward_contrib(self,full=False,resolution=None):\n fig=plt.figure(figsize=(5.3*2, 3.5*2))\n ax = fig.add_subplot(111)\n\n\n spectra_out = self.forward_output()['Spectra']\n\n native_grid = spectra_out['native_wngrid'][...]\n\n try:\n wlgrid = spectra_out['binned_wlgrid'][...]\n except KeyError:\n wlgrid = spectra_out['native_wlgrid'][...]\n\n self._generic_plot(wlgrid,native_grid,spectra_out,resolution=resolution,alpha=0.5)\n self._plot_contrib(self.forward_output(),wlgrid,ax,full=full,resolution=resolution)\n\n\n #plt.tight_layout()\n plt.savefig(os.path.join(self.out_folder, '%s_spectrum_contrib_forward.pdf' % (self.prefix)))\n plt.close()\n\n\n def _plot_contrib(self,output,wlgrid,ax,full=False,resolution=None):\n\n\n if full:\n wlgrid = self.full_contrib_plot(output['Spectra'],wlgrid,resolution=resolution)\n else:\n wlgrid = self.simple_contrib_plot(output['Spectra'],wlgrid,resolution=resolution)\n\n plt.xlim(np.min(wlgrid)-0.05*np.min(wlgrid), np.max(wlgrid)+0.05*np.max(wlgrid))\n # plt.ylim(0.0,0.006)\n plt.xlabel('Wavelength ($\\mu$m)')\n plt.ylabel(self.modelAxis[self.modelType])\n\n if np.max(wlgrid) - np.min(wlgrid) > 5:\n plt.xscale('log')\n plt.tick_params(axis='x', which='minor')\n #ax.xaxis.set_minor_formatter(mpl.ticker.FormatStrFormatter(\"%i\"))\n #ax.xaxis.set_major_formatter(mpl.ticker.FormatStrFormatter(\"%i\"))\n #plt.legend(loc='best', ncol=2, frameon=False, prop={'size':11})\n box = ax.get_position()\n ax.set_position([box.x0, box.y0 + box.height * 0.1,\n box.width, box.height * 0.9])\n # Put a legend below current axis\n ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.08),\n fancybox=True, shadow=True, ncol=5)\n if self.title:\n plt.title(self.title, fontsize=14)\n\n def full_contrib_plot(self,spectra,wlgrid,resolution=None):\n native_grid = spectra['native_wngrid'][...]\n for contrib_name,contrib_dict in spectra['Contributions'].items():\n\n first_name = contrib_name\n\n for component_name,component_value in contrib_dict.items():\n if isinstance(component_value,h5py.Dataset):\n continue\n total_label = '{}-{}'.format(contrib_name,component_name)\n self._generic_plot(wlgrid,native_grid,component_value,resolution,label=total_label)\n return wlgrid\n def simple_contrib_plot(self,spectra,wlgrid,resolution=None):\n\n\n binner = None\n native_grid = spectra['native_wngrid'][...]\n\n\n for contrib_name,contrib_dict in spectra['Contributions'].items():\n first_name = contrib_name\n if first_name == 'Absorption':\n for component_name,component_value in contrib_dict.items():\n if isinstance(component_value,h5py.Dataset):\n continue\n total_label = '{}-{}'.format(contrib_name,component_name)\n self._generic_plot(wlgrid,native_grid,component_value,resolution,label=total_label)\n else:\n self._generic_plot(wlgrid,native_grid,contrib_dict,resolution)\n \n return wlgrid\n\n\n def _generic_plot(self,wlgrid,native_grid,spectra,resolution,color=None,error=False,alpha=1.0,label=None):\n\n \n binned_error = None\n if resolution is not None:\n from 
taurex.binning import FluxBinner\n from taurex.util.util import create_grid_res,wnwidth_to_wlwidth\n _grid = create_grid_res(resolution,wlgrid.min()*0.9,wlgrid.max()*1.1)\n bin_wlgrid = _grid[:,0]\n\n bin_wngrid = 10000/_grid[:,0]\n\n bin_sort = bin_wngrid.argsort()\n\n bin_wlgrid = bin_wlgrid[bin_sort]\n bin_wngrid = bin_wngrid[bin_sort]\n\n bin_wnwidth = wnwidth_to_wlwidth(bin_wlgrid,_grid[bin_sort,1])\n wlgrid = _grid[bin_sort,0]\n binner = FluxBinner(bin_wngrid,bin_wnwidth)\n native_spectra = spectra['native_spectrum'][...]\n binned_spectrum = binner.bindown(native_grid,native_spectra)[1]\n try:\n native_error = spectra['native_std']\n except KeyError:\n native_error = None\n if native_error is not None:\n binned_error = binner.bindown(native_grid,native_error)[1]\n\n else:\n try:\n binned_spectrum = spectra['binned_spectrum'][...]\n except KeyError:\n try:\n binned_spectrum = spectra['bin_spectrum'][...]\n except KeyError:\n binned_spectrum = spectra['native_spectrum'][...]\n try:\n binned_error = spectra['binned_std'][...]\n except KeyError:\n binned_error = None\n plt.plot(wlgrid, binned_spectrum, label=label,alpha=alpha) \n if binned_error is not None:\n plt.fill_between(wlgrid, binned_spectrum-binned_error,\n binned_spectrum+binned_error,\n alpha=0.5, zorder=-2, color=color, edgecolor='none')\n\n # 2 sigma\n plt.fill_between(wlgrid, binned_spectrum-2*binned_error,\n binned_spectrum+2*binned_error,\n alpha=0.2, zorder=-3, color=color, edgecolor='none')\n \n\n def plot_forward_tau(self):\n\n forward_output =self.forward_output()\n\n contribution = forward_output['Spectra']['native_tau'][...]\n #contribution = self.pickle_file['solutions'][solution_idx]['contrib_func']\n\n pressure = forward_output['Profiles']['pressure_profile'][:]\n wavelength = forward_output['Spectra']['native_wlgrid'][:]\n\n self._plot_tau(contribution,pressure,wavelength)\n\n plt.savefig(os.path.join(self.out_folder, '%s_tau_forward.pdf' % (self.prefix)))\n\n plt.close()\n\n\n def plot_fitted_tau(self):\n N = self.num_solutions\n for solution_idx, solution_val in self.solution_iter():\n\n contribution = solution_val['Spectra']['native_tau'][...]\n #contribution = self.pickle_file['solutions'][solution_idx]['contrib_func']\n\n pressure = solution_val['Profiles']['pressure_profile'][:]\n wavelength = solution_val['Spectra']['native_wlgrid'][:]\n\n self._plot_tau(contribution,pressure,wavelength)\n\n plt.savefig(os.path.join(self.out_folder, '%s_tau_sol%i.pdf' % (self.prefix,solution_idx)))\n\n plt.close()\n\n def _plot_tau(self,contribution,pressure,wavelength):\n grid = plt.GridSpec(1, 4, wspace=0.4, hspace=0.3)\n fig = plt.figure('Contribution function')\n ax1 = plt.subplot(grid[0, :3])\n plt.imshow(contribution, aspect='auto')\n\n ### mapping of the pressure array onto the ticks:\n y_labels = np.array([pow(10, 6), pow(10, 4), pow(10, 2), pow(10, 0), pow(10, -2), pow(10, -4)])\n y_ticks = np.zeros(len(y_labels))\n for i in range(len(y_ticks)):\n y_ticks[i] = (np.abs(pressure - y_labels[i])).argmin() ## To find the corresponding index\n plt.yticks(y_ticks, ['$10^{%.f}$' % y for y in np.log10(y_labels) - 5])\n\n ### mapping of the wavelength array onto the ticks:\n x_label0 = np.ceil(np.min(wavelength) * 10) / 10.\n x_label5 = np.round(np.max(wavelength) * 10) / 10.\n x_label1 = np.round(\n pow(10, (np.log10(x_label5) - np.log10(x_label0)) * 1 / 5. + np.log10(x_label0)) * 10) / 10.0\n x_label2 = np.round(\n pow(10, (np.log10(x_label5) - np.log10(x_label0)) * 2 / 5. 
+ np.log10(x_label0)) * 10) / 10.0\n x_label3 = np.round(\n pow(10, (np.log10(x_label5) - np.log10(x_label0)) * 3 / 5. + np.log10(x_label0)) * 10) / 10.\n x_label4 = np.round(\n pow(10, (np.log10(x_label5) - np.log10(x_label0)) * 4 / 5. + np.log10(x_label0)) * 10) / 10.\n\n x_labels = np.array([x_label0, x_label1, x_label2, x_label3, x_label4, x_label5])\n x_ticks = np.zeros(len(x_labels))\n for i in range(len(x_ticks)):\n x_ticks[i] = (np.abs(wavelength - x_labels[i])).argmin() ## To find the corresponding index\n plt.xticks(x_ticks, x_labels)\n plt.gca().invert_yaxis()\n plt.gca().invert_xaxis()\n plt.xlabel(\"Wavelength ($\\mu m$)\")\n plt.ylabel(\"Pressure (Bar)\")\n\n ax2 = plt.subplot(grid[0, 3])\n\n contribution_collapsed = np.average(contribution, axis=1)\n # contribution_collapsed = np.amax(contribution_hr, axis=1) ## good for emission\n contribution_sum = np.zeros(len(contribution_collapsed))\n for i in range(len(contribution_collapsed) - 1):\n contribution_sum[i + 1] = contribution_sum[i] + contribution_collapsed[i + 1]\n plt.plot(contribution_collapsed, pressure * pow(10, -5))\n\n plt.yscale('log')\n plt.gca().invert_yaxis()\n plt.gca().yaxis.tick_right()\n plt.xlabel(\"Contribution\")\n\n\n @property\n def fittingNames(self):\n from taurex.util.util import decode_string_array\n if not self.is_retrieval:\n raise Exception('HDF5 was not generated from retrieval, no fitting names found')\n return decode_string_array(self.fd['Optimizer']['fit_parameter_names'])\n \n @property\n def fittingLatex(self):\n from taurex.util.util import decode_string_array\n if not self.is_retrieval:\n raise Exception('HDF5 was not generated from retrieval, no fitting latex found')\n return decode_string_array(self.fd['Optimizer']['fit_parameter_latex'])\n\n @property\n def fittingBoundaryLow(self):\n if not self.is_retrieval:\n raise Exception('HDF5 was not generated from retrieval, no fitting boundary found')\n return self.fd['Optimizer']['fit_boundary_low'][:]\n\n @property\n def fittingBoundaryHigh(self):\n if not self.is_retrieval:\n raise Exception('HDF5 was not generated from retrieval, no fitting boundary found')\n return self.fd['Optimizer']['fit_boundary_high'][:]\n\n\n @property\n def is_retrieval(self):\n try:\n self.fd['Output']\n self.fd['Optimizer']\n self.fd['Output']['Solutions']\n return True\n except KeyError:\n return False \n\n @property\n def is_lightcurve(self):\n try:\n self.fd['Lightcurve']\n return True\n except KeyError:\n return False\n\n\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser(description='Taurex-Plotter')\n parser.add_argument(\"-i\", \"--input\",dest='input_file',type=str,required=True,help=\"Input hdf5 file from taurex\")\n parser.add_argument(\"-P\",\"--plot-posteriors\",dest=\"posterior\",default=False,help=\"Plot fitting posteriors\",action='store_true')\n parser.add_argument(\"-x\",\"--plot-xprofile\",dest=\"xprofile\",default=False,help=\"Plot molecular profiles\",action='store_true')\n parser.add_argument(\"-t\",\"--plot-tpprofile\",dest=\"tpprofile\",default=False,help=\"Plot Temperature profiles\",action='store_true')\n parser.add_argument(\"-d\",\"--plot-tau\",dest=\"tau\",default=False,help=\"Plot optical depth contribution\",action=\"store_true\")\n parser.add_argument(\"-s\",\"--plot-spectrum\",dest=\"spectrum\",default=False,help=\"Plot spectrum\",action='store_true')\n parser.add_argument(\"-c\",\"--plot-contrib\",dest=\"contrib\",default=False,help=\"Plot contrib\",action='store_true')\n 
parser.add_argument(\"-C\",\"--full-contrib\",dest=\"full_contrib\",default=False,help=\"Plot detailed contribs\",action=\"store_true\")\n parser.add_argument(\"-a\",\"--all\",dest=\"all\",default=False,help=\"Plot everythiong\",action='store_true')\n parser.add_argument(\"-T\",\"--title\",dest=\"title\",type=str,help=\"Title of plots\")\n parser.add_argument(\"-o\",\"--output-dir\",dest=\"output_dir\",type=str,required=True,help=\"output directory to store plots\")\n parser.add_argument(\"-p\",\"--prefix\",dest=\"prefix\",type=str,help=\"File prefix for outputs\")\n parser.add_argument(\"-m\",\"--color-map\",dest=\"cmap\",type=str,default=\"Paired\",help=\"Matplotlib colormap to use\")\n parser.add_argument(\"-R\",\"--resolution\",dest=\"resolution\",type=float,default=None,help=\"Resolution to bin spectra to\")\n args=parser.parse_args()\n\n plot_xprofile = args.xprofile or args.all\n plot_tp_profile = args.tpprofile or args.all\n plot_spectrum = args.spectrum or args.all\n plot_contrib = args.contrib or args.all\n plot_fullcontrib = args.full_contrib or args.all\n plot_posteriors = args.posterior or args.all\n plot_tau = args.tau or args.all\n\n plot=Plotter(args.input_file,cmap=args.cmap,\n title=args.title,prefix=args.prefix,out_folder=args.output_dir)\n \n if plot_posteriors:\n if plot.is_retrieval:\n plot.plot_posteriors()\n\n if plot_xprofile:\n if plot.is_retrieval:\n plot.plot_fit_xprofile()\n else:\n plot.plot_forward_xprofile()\n if plot_spectrum:\n if plot.is_retrieval:\n plot.plot_fitted_spectrum(resolution=args.resolution)\n else:\n plot.plot_forward_spectrum(resolution=args.resolution)\n if plot_tp_profile:\n if plot.is_retrieval:\n plot.plot_fitted_tp()\n else:\n plot.plot_forward_tp()\n\n if plot_contrib:\n if plot.is_retrieval:\n plot.plot_fitted_contrib(full=plot_fullcontrib,resolution=args.resolution)\n else:\n plot.plot_forward_contrib(full=plot_fullcontrib,resolution=args.resolution)\n\n if plot_tau:\n if plot.is_retrieval:\n plot.plot_fitted_tau()\n else:\n plot.plot_forward_tau()\n \nif __name__ == \"__main__\":\n main()\n\n\n\n\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.imshow", "matplotlib.pyplot.rc", "matplotlib.pyplot.plot", "numpy.max", "numpy.concatenate", "numpy.where", "matplotlib.pyplot.gca", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.subplot", "matplotlib.pyplot.errorbar", "matplotlib.pyplot.close", "numpy.column_stack", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "numpy.min", "matplotlib.pyplot.xscale", "numpy.log10", "matplotlib.pyplot.fill_between", "matplotlib.pyplot.GridSpec", "numpy.array", "matplotlib.pyplot.ylabel", "numpy.abs", "matplotlib.use", "matplotlib.pyplot.yscale", "matplotlib.pyplot.xlim", "matplotlib.cm.get_cmap", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xticks", "numpy.average", "matplotlib.pyplot.tick_params", "numpy.float" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
aeturrell/example-reproducible-research
[ "4de882d1af05ade52e3991c1c4e1f939c9714f8f" ]
[ "src/analysis.py" ]
[ "\"\"\"\nThis script performs a regression\n\"\"\"\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport statsmodels.formula.api as smf\nfrom pathlib import Path\n\n\ndef regression():\n # Load data and format datetime\n df = pd.read_csv(\n Path(\"raw/raw_data.csv\"),\n index_col=0,\n )\n reg = smf.ols(formula='total ~ alcohol + no_previous + not_distracted', data=df).fit()\n latex_results = reg.summary().as_latex()\n open(Path('output/regression.tex'), 'w').write(latex_results)\n\n\ndef chart():\n # Load data and format datetime\n df = pd.read_csv(\n Path(\"raw/raw_data.csv\"),\n index_col=0,\n )\n fig, ax = plt.subplots()\n ax.scatter(df[\"total\"], df[\"alcohol\"])\n plt.savefig(Path(\"output/scatter.pdf\"))\n\n\nif __name__ == \"__main__\":\n regression()\n chart()\n" ]
[ [ "matplotlib.pyplot.subplots" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nicholasz2510/zipf-from-text
[ "2ab0ce5fc248969de3019c6dd44e15d7937b4189" ]
[ "just_list.py" ]
[ "import matplotlib.pyplot as plt\n\nf = open(\"biggest_cities.txt\", \"r\")\n\nplt.plot([int(x.split(\",\")[3]) for x in f.readlines()][:15])\nplt.ylabel(\"City size\")\nplt.xlabel(\"City rank\")\nplt.show()\n\nf.close()\n" ]
[ [ "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Kau5h1K/ds5500-userprivacy-deploy
[ "38c91d20596163427b4f169c9ee221057ac438ce" ]
[ "haystack/modeling/data_handler/dataset.py" ]
[ "import logging\nimport numbers\nfrom typing import Iterable, List\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset, ConcatDataset, TensorDataset\nfrom transformers import BatchEncoding\n\nfrom haystack.modeling.utils import flatten_list\n\nlogger = logging.getLogger(__name__)\n\n\ndef flatten_rename(encoded_batch: BatchEncoding, keys: List[str] = None, renamed_keys: List[str] = None):\n if encoded_batch is None:\n return []\n if not keys:\n keys = list(encoded_batch.keys())\n if not renamed_keys:\n renamed_keys = keys\n assert len(keys) == len(renamed_keys), f\"keys and renamed_keys have different size {len(keys)} != {len(renamed_keys)}\"\n assert any([key in encoded_batch for key in keys]), f\"one of the keys {keys} is not in batch {encoded_batch.keys()}\"\n features_flat = []\n for item in range(len(encoded_batch[keys[0]])):\n feat_dict = {k: v for k, v in zip(renamed_keys, [encoded_batch[k][item] for k in keys])}\n features_flat.append(feat_dict)\n return features_flat\n\n\ndef convert_features_to_dataset(features):\n \"\"\"\n Converts a list of feature dictionaries (one for each sample) into a PyTorch Dataset.\n\n :param features: A list of dictionaries. Each dictionary corresponds to one sample. Its keys are the\n names of the type of feature and the keys are the features themselves.\n :Return: a Pytorch dataset and a list of tensor names.\n \"\"\"\n # features can be an empty list in cases where down sampling occurs\n if len(features) == 0:\n return None, None\n tensor_names = list(features[0].keys())\n all_tensors = []\n for t_name in tensor_names:\n try:\n # Checking whether a non-integer will be silently converted to torch.long\n check = features[0][t_name]\n if isinstance(check, numbers.Number):\n base = check\n # extract a base variable from a nested lists or tuples\n elif isinstance(check, list):\n base = list(flatten_list(check))[0]\n # extract a base variable from numpy arrays\n else:\n base = check.ravel()[0]\n if not np.issubdtype(type(base), np.integer):\n logger.warning(f\"Problem during conversion to torch tensors:\\n\"\n f\"A non-integer value for feature '{t_name}' with a value of: \"\n f\"'{base}' will be converted to a torch tensor of dtype long.\")\n except:\n logger.debug(f\"Could not determine type for feature '{t_name}'. \"\n \"Converting now to a tensor of default type long.\")\n\n # Convert all remaining python objects to torch long tensors\n cur_tensor = torch.tensor([sample[t_name] for sample in features], dtype=torch.long)\n\n all_tensors.append(cur_tensor)\n\n dataset = TensorDataset(*all_tensors)\n return dataset, tensor_names\n\n\nclass ConcatTensorDataset(ConcatDataset):\n r\"\"\"ConcatDataset of only TensorDatasets which supports getting slices.\n\n This dataset allows the use of slices, e.g. ds[2:4] if all concatenated\n datasets are either TensorDatasets or Subset or other ConcatTensorDataset instances\n which eventually contain only TensorDataset instances. 
If no slicing is needed,\n this class works exactly like torch.utils.data.ConcatDataset and can concatenate arbitrary\n (not just TensorDataset) datasets.\n\n Args:\n datasets (sequence): List of datasets to be concatenated\n \"\"\"\n def __init__(self, datasets: Iterable[Dataset]) -> None:\n super(ConcatTensorDataset, self).__init__(datasets)\n\n def __getitem__(self, idx):\n if isinstance(idx, slice):\n rows = [super(ConcatTensorDataset, self).__getitem__(i) for i in range(self.__len__())[idx]]\n return tuple(map(torch.stack, zip(*rows)))\n elif isinstance(idx, (list, np.ndarray)):\n rows = [super(ConcatTensorDataset, self).__getitem__(i) for i in idx]\n return tuple(map(torch.stack, zip(*rows)))\n else:\n return super(ConcatTensorDataset, self).__getitem__(idx)\n" ]
[ [ "torch.utils.data.TensorDataset", "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
eruffaldi/pyml-tools
[ "ec2501245c494a0816208bbc8a90b21286a15c02" ]
[ "src/multiclassCM.py" ]
[ "import numpy as np\n\ndef confusionMatrix(predicted,actual,classes):\n\tmat = np.zeros([classes,classes])\n\n\tfor i in range(len(actual)):\n\t\tmat[int(round(predicted[i])),actual[i]] += 1\n\treturn mat\n\ndef getAccuracy(matrix):\n\t#sum(diag(mat))/(sum(mat))\n\tsumd = np.sum(np.diagonal(matrix))\n\tsumall = np.sum(matrix)\n\tsumall = np.add(sumall,0.00000001)\n\treturn sumd/sumall\n\ndef getPrecision(matrix):\n\t#diag(mat) / rowSum(mat)\n\tsumrow = np.sum(matrix,axis=1)\n\tsumrow = np.add(sumrow,0.00000001)\n\tprecision = np.divide(np.diagonal(matrix),sumrow)\n\treturn np.sum(precision)/precision.shape[0]\n\ndef getRecall(matrix):\n\t#diag(mat) / colsum(mat)\n\tsumcol = np.sum(matrix,axis=0)\n\tsumcol = np.add(sumcol,0.00000001)\n\trecall = np.divide(np.diagonal(matrix),sumcol)\n\treturn np.sum(recall)/recall.shape[0]\n\ndef get2f(matrix):\n\t#2*precision*recall/(precision+recall)\n\tprecision = getPrecision(matrix)\n\trecall = getRecall(matrix)\n\treturn (2*precision*recall)/(precision+recall)\n\n\nif __name__ == '__main__':\n\tmain()" ]
[ [ "numpy.diagonal", "numpy.add", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
RyutaroHashimoto/aws_sagemaker
[ "fabe4727498c1f2807cda29df8d35c71cc1b27bd" ]
[ "2_training/Original_Container/tabnet/container/model/predictor.py" ]
[ "from __future__ import print_function\n\nimport json\nimport os\nfrom io import StringIO\n\nimport flask\nimport pandas as pd\nimport torch\nfrom model import TabNet_Regressor as model\nfrom pytorch_tabnet.tab_model import TabNetRegressor as original\n\nprefix = '/opt/ml/'\nmodel_path = os.path.join(prefix, 'model')\n\n\n# A singleton for holding the model. This simply loads the model and holds it.\n# It has a predict function that does a prediction based on the model and the input data.\n\nclass ScoringService(object):\n model = None # Where we keep the model when it's loaded\n\n @classmethod\n def get_model(cls):\n \"\"\"Get the model object for this instance, loading it if it's not already loaded.\"\"\"\n if cls.model == None:\n cls.model = model()\n cls.model.model = original()\n cls.model.load_model(os.path.join(model_path, 'model.pt'))\n if torch.cuda.is_available():\n cls.model.model.device = 'cuda'\n else:\n cls.model.model.device = 'cpu'\n\n return cls.model\n\n @classmethod\n def predict(cls, input):\n \"\"\"For the input, do the predictions and return them.\"\"\"\n\n clf = cls.get_model()\n\n return clf.predict(input)\n\n# The flask app for serving predictions\napp = flask.Flask(__name__)\n\[email protected]('/ping', methods=['GET'])\ndef ping():\n \"\"\"Determine if the container is working and healthy. In this sample container, we declare\n it healthy if we can load the model successfully.\"\"\"\n health = ScoringService.get_model() is not None # You can insert a health check here\n\n status = 200 if health else 404\n return flask.Response(response='\\n', status=status, mimetype='application/json')\n\[email protected]('/invocations', methods=['POST'])\ndef transformation():\n \"\"\"Do an inference on a single batch of data. In this sample server, we take data as CSV, convert\n it to a pandas data frame for internal use and then convert the predictions back to CSV (which really\n just means one prediction per line, since there's a single column.\n \"\"\"\n data = None\n\n # Convert from CSV to pandas\n if flask.request.content_type == 'text/csv':\n data = flask.request.data.decode('utf-8')\n s = StringIO(data)\n data = pd.read_csv(s, header=None)\n elif flask.request.content_type == 'application/json':\n res = flask.request.data.decode('utf-8')\n data = pd.read_json(json.loads(res), orient='split')\n # return flask.Response(response=res, status=415, mimetype='application/json')\n else:\n res = {\n \"Error\":\n 'This predictor only supports CSV data or json'\n }\n return flask.Response(response=json.dumps(res), status=415, mimetype='application/json')\n\n print('Invoked with {} records'.format(data.shape[0]))\n\n # Do the prediction\n predictions = ScoringService.predict(data)\n\n if flask.request.content_type == 'text/csv':\n # Convert from numpy to CSV\n out = StringIO()\n pd.DataFrame(predictions).to_csv(out, header=False, index=False)\n response = out.getvalue()\n return flask.Response(response=response, status=200, mimetype='text/csv')\n elif flask.request.content_type == 'application/json':\n # Convert from numpy to JSON\n response = json.dumps({\n \"predictions\": predictions.tolist()\n })\n return flask.Response(response=response, status=200, mimetype='application/json')\n" ]
[ [ "pandas.read_csv", "torch.cuda.is_available", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
dom-s/cnn-text-classification-tf
[ "8cc93f06a76ddf6201f56938f3bff21b8e45c497" ]
[ "train.py" ]
[ "#! /usr/bin/env python\n\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport time\nimport datetime\nimport data_helpers\nfrom text_cnn import TextCNN\nfrom tensorflow.contrib import learn\n\n# Parameters\n# ==================================================\n\n# Data loading params\ntf.flags.DEFINE_float(\"dev_sample_percentage\", .1, \"Percentage of the training data to use for validation\")\ntf.flags.DEFINE_string(\"positive_data_file\", \"./data/rt-polaritydata/rt-polarity.pos\", \"Data source for the positive data.\")\ntf.flags.DEFINE_string(\"negative_data_file\", \"./data/rt-polaritydata/rt-polarity.neg\", \"Data source for the negative data.\")\ntf.flags.DEFINE_integer(\"min_freq\", 0, \"Minimum frequency when creating dictionary\")\n\n# Model Hyperparameters\ntf.flags.DEFINE_integer(\"embedding_dim\", 128, \"Dimensionality of character embedding (default: 128)\")\ntf.flags.DEFINE_string(\"filter_sizes\", \"3,4,5\", \"Comma-separated filter sizes (default: '3,4,5')\")\ntf.flags.DEFINE_integer(\"num_filters\", 128, \"Number of filters per filter size (default: 128)\")\ntf.flags.DEFINE_float(\"dropout_keep_prob\", 0.5, \"Dropout keep probability (default: 0.5)\")\ntf.flags.DEFINE_float(\"l2_reg_lambda\", 0.0, \"L2 regularization lambda (default: 0.0)\")\n\n# Training parameters\ntf.flags.DEFINE_integer(\"batch_size\", 64, \"Batch Size (default: 64)\")\ntf.flags.DEFINE_integer(\"num_epochs\", 200, \"Number of training epochs (default: 200)\")\ntf.flags.DEFINE_integer(\"evaluate_every\", 100, \"Evaluate model on dev set after this many steps (default: 100)\")\n# tf.flags.DEFINE_integer(\"checkpoint_every\", 100, \"Save model after this many steps (default: 100)\")\ntf.flags.DEFINE_integer(\"num_checkpoints\", 5, \"Number of checkpoints to store (default: 5)\")\n# Misc Parameters\ntf.flags.DEFINE_boolean(\"allow_soft_placement\", True, \"Allow device soft device placement\")\ntf.flags.DEFINE_boolean(\"log_device_placement\", False, \"Log placement of ops on devices\")\n\nFLAGS = tf.flags.FLAGS\n# FLAGS._parse_flags()\n# print(\"\\nParameters:\")\n# for attr, value in sorted(FLAGS.__flags.items()):\n# print(\"{}={}\".format(attr.upper(), value))\n# print(\"\")\n\ndef preprocess():\n # Data Preparation\n # ==================================================\n\n # Load data\n print(\"Loading data...\")\n x_text, y = data_helpers.load_data_and_labels(FLAGS.positive_data_file, FLAGS.negative_data_file)\n\n # Build vocabulary\n max_document_length = max([len(x.split(\" \")) for x in x_text])\n vocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length, min_frequency=FLAGS.min_freq)\n x = np.array(list(vocab_processor.fit_transform(x_text)))\n\n # Randomly shuffle data\n np.random.seed(10)\n shuffle_indices = np.random.permutation(np.arange(len(y)))\n x_shuffled = x[shuffle_indices]\n y_shuffled = y[shuffle_indices]\n\n # Split train/test set\n # TODO: This is very crude, should use cross-validation\n dev_sample_index = -1 * int(FLAGS.dev_sample_percentage * float(len(y)))\n x_train, x_dev = x_shuffled[:dev_sample_index], x_shuffled[dev_sample_index:]\n y_train, y_dev = y_shuffled[:dev_sample_index], y_shuffled[dev_sample_index:]\n\n del x, y, x_shuffled, y_shuffled\n\n print(\"Vocabulary Size: {:d}\".format(len(vocab_processor.vocabulary_)))\n print(\"Train/Dev split: {:d}/{:d}\".format(len(y_train), len(y_dev)))\n return x_train, y_train, vocab_processor, x_dev, y_dev\n\ndef train(x_train, y_train, vocab_processor, x_dev, y_dev):\n # Training\n # 
==================================================\n\n with tf.Graph().as_default():\n session_conf = tf.ConfigProto(\n allow_soft_placement=FLAGS.allow_soft_placement,\n log_device_placement=FLAGS.log_device_placement)\n sess = tf.Session(config=session_conf)\n with sess.as_default():\n cnn = TextCNN(\n sequence_length=x_train.shape[1],\n num_classes=y_train.shape[1],\n vocab_size=len(vocab_processor.vocabulary_),\n embedding_size=FLAGS.embedding_dim,\n filter_sizes=list(map(int, FLAGS.filter_sizes.split(\",\"))),\n num_filters=FLAGS.num_filters,\n l2_reg_lambda=FLAGS.l2_reg_lambda)\n\n # Define Training procedure\n global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n optimizer = tf.train.AdamOptimizer(1e-3)\n grads_and_vars = optimizer.compute_gradients(cnn.loss)\n train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)\n\n # Keep track of gradient values and sparsity (optional)\n grad_summaries = []\n for g, v in grads_and_vars:\n if g is not None:\n grad_hist_summary = tf.summary.histogram(\"{}/grad/hist\".format(v.name), g)\n sparsity_summary = tf.summary.scalar(\"{}/grad/sparsity\".format(v.name), tf.nn.zero_fraction(g))\n grad_summaries.append(grad_hist_summary)\n grad_summaries.append(sparsity_summary)\n grad_summaries_merged = tf.summary.merge(grad_summaries)\n\n # Output directory for models and summaries\n timestamp = str(int(time.time()))\n out_dir = os.path.abspath(os.path.join(os.path.curdir, \"runs\", timestamp))\n print(\"Writing to {}\\n\".format(out_dir))\n\n # Summaries for loss and accuracy\n # loss_summary = tf.summary.scalar(\"loss\", cnn.loss)\n # acc_summary = tf.summary.scalar(\"accuracy\", cnn.accuracy)\n\n # Train Summaries\n # train_summary_op = tf.summary.merge([loss_summary, acc_summary, grad_summaries_merged])\n summary_test = tf.Summary()\n train_summary_dir = os.path.join(out_dir, \"summaries\", \"train\")\n train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)\n\n # Dev summaries\n # dev_summary_op = tf.summary.merge([loss_summary, acc_summary])\n batch_summary_dev = tf.Summary()\n dev_summary_dir = os.path.join(out_dir, \"summaries\", \"dev\")\n dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, sess.graph)\n\n # Checkpoint directory. 
Tensorflow assumes this directory already exists so we need to create it\n checkpoint_dir = os.path.abspath(os.path.join(out_dir, \"checkpoints\"))\n checkpoint_prefix = os.path.join(checkpoint_dir, \"model\")\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n saver = tf.train.Saver(tf.global_variables(), max_to_keep=FLAGS.num_checkpoints)\n\n # Write vocabulary\n vocab_processor.save(os.path.join(out_dir, \"vocab\"))\n\n # Initialize all variables\n sess.run(tf.global_variables_initializer())\n\n def train_step(x_batch, y_batch):\n \"\"\"\n A single training step\n \"\"\"\n feed_dict = {\n cnn.input_x: x_batch,\n cnn.input_y: y_batch,\n cnn.dropout_keep_prob: FLAGS.dropout_keep_prob\n }\n _, step, loss, accuracy = sess.run(\n [train_op, global_step, cnn.loss, cnn.accuracy],\n feed_dict)\n time_str = datetime.datetime.now().isoformat()\n print(\"{}: step {}, loss {:g}, acc {:g}\".format(time_str, step, loss, accuracy))\n # train_summary_writer.add_summary(summaries, step)\n if step % 5 == 0:\n summary_test.value.add(tag=\"accuracy\", simple_value=accuracy)\n summary_test.value.add(tag=\"loss\", simple_value=loss)\n train_summary_writer.add_summary(summary_test, step)\n\n def dev_step(x_batch, y_batch, current_step, writer=None):\n \"\"\"\n Evaluates model on a dev set\n \"\"\"\n dev_batches = data_helpers.batch_iter(\n list(zip(x_batch, y_batch)), FLAGS.batch_size, 1)\n total_losses = np.array([], dtype=\"float32\")\n total_predictions = np.array([], dtype=int)\n counter = 0\n for dev_batch in dev_batches:\n counter += 1\n dev_x_batch, dev_y_batch = zip(*dev_batch)\n feed_dict = {\n cnn.input_x: dev_x_batch,\n cnn.input_y: dev_y_batch,\n cnn.dropout_keep_prob: 1.0\n }\n loss_vals, dev_predictions = sess.run(\n [cnn.loss, cnn.predictions], feed_dict)\n total_losses = np.hstack((total_losses, loss_vals))\n total_predictions = np.concatenate((total_predictions, dev_predictions), axis=0)\n loss_tensor = tf.reduce_mean(total_losses)\n correct_predictions = tf.equal(total_predictions, tf.argmax(y_batch, 1))\n accuracy_tensor = tf.reduce_mean(tf.cast(correct_predictions, \"float\"))\n time_str = datetime.datetime.now().isoformat()\n loss, accuracy = sess.run(\n [loss_tensor, accuracy_tensor])\n\n batch_summary_dev.value.add(tag=\"accuracy\", simple_value=accuracy)\n batch_summary_dev.value.add(tag=\"loss\", simple_value=loss)\n\n if writer:\n writer.add_summary(batch_summary_dev, current_step)\n\n print(\"{}: step {}, loss: {:g}, acc: {:g}\".format(time_str, current_step, loss, accuracy))\n\n return accuracy\n\n max_accuracy = 0.0\n\n # Generate batches\n batches = data_helpers.batch_iter(\n list(zip(x_train, y_train)), FLAGS.batch_size, FLAGS.num_epochs)\n # Training loop. For each batch...\n for batch in batches:\n x_batch, y_batch = zip(*batch)\n train_step(x_batch, y_batch)\n current_step = tf.train.global_step(sess, global_step)\n if current_step % FLAGS.evaluate_every == 0:\n print(\"\\nEvaluation:\")\n accuracy = dev_step(x_dev, y_dev, current_step, writer=dev_summary_writer)\n print(\"\")\n if accuracy > max_accuracy:\n max_accuracy = accuracy\n path = saver.save(sess, checkpoint_prefix, global_step=current_step)\n print(\"Saved model checkpoint to {}\\n\".format(path))\n\ndef main(argv=None):\n x_train, y_train, vocab_processor, x_dev, y_dev = preprocess()\n train(x_train, y_train, vocab_processor, x_dev, y_dev)\n\nif __name__ == '__main__':\n tf.app.run()" ]
[ [ "tensorflow.train.global_step", "tensorflow.global_variables", "tensorflow.cast", "numpy.concatenate", "tensorflow.train.AdamOptimizer", "tensorflow.flags.DEFINE_float", "numpy.hstack", "tensorflow.Graph", "tensorflow.Variable", "tensorflow.ConfigProto", "tensorflow.Session", "tensorflow.Summary", "tensorflow.argmax", "tensorflow.app.run", "tensorflow.flags.DEFINE_boolean", "tensorflow.global_variables_initializer", "numpy.array", "tensorflow.summary.merge", "tensorflow.flags.DEFINE_integer", "tensorflow.summary.FileWriter", "numpy.random.seed", "tensorflow.reduce_mean", "tensorflow.flags.DEFINE_string", "tensorflow.nn.zero_fraction", "tensorflow.contrib.learn.preprocessing.VocabularyProcessor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
psorus/oan
[ "7d1b05e58e3ae470aab04bc806764f52d20b9b5b" ]
[ "oneoff_test/oneoff/auc.py" ]
[ "import numpy as np\n\n\nf=np.load(\"roc.npz\")\nfpr,tpr=f[\"fpr\"],f[\"tpr\"]\n\n\ndef iterdual(q):\n las=None\n for zw in q:\n if not las is None:\n yield las,zw\n las=zw\ndef iterdelta(q):\n for a,b in iterdual(q):\n yield a-b\ndef itermean(q):\n for a,b in iterdual(q):\n yield (a+b)/2\n\ndef integrate(x,y):\n ret=0.0\n for xx,yy in zip(iterdelta(x),itermean(y)):\n ret+=xx*yy\n return ret\n \n\n\n#print(fpr.shape,tpr.shape)\n\n\nauc=integrate(fpr,tpr)\n\nprint(\"AUC: \",auc)\n\nprint(\"1/(1-AUC):\",1/(1-auc))\n\n\n\n\n\n\n\n" ]
[ [ "numpy.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
no-brainer/asr-homework
[ "5fafd51ea2ca90da54dad335dd8fd3c44f6f97b5" ]
[ "hw_asr/model/crnn.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\n\nfrom hw_asr.base import BaseModel\nfrom hw_asr.model.utils import get_same_padding\n\n\nclass ResidualBlock(nn.Module):\n # https://arxiv.org/pdf/1603.05027.pdf\n\n def __init__(self, in_channels, out_channels, kernel, stride, dropout, n_feats):\n super(ResidualBlock, self).__init__()\n\n padding = get_same_padding(kernel)\n\n curr_channels = in_channels\n layers = []\n for _ in range(2):\n layers.extend([\n nn.LayerNorm(n_feats),\n nn.GELU(),\n nn.Conv2d(curr_channels, out_channels, kernel, stride, padding=padding),\n nn.Dropout(dropout),\n ])\n curr_channels = out_channels\n\n self.net = nn.Sequential(*layers)\n\n def forward(self, x):\n return x + self.net(x)\n\n\nclass CRNN(BaseModel):\n\n def __init__(\n self,\n n_feats,\n n_class,\n n_cnn_layers,\n n_rnn_layers,\n rnn_dim,\n stride=2,\n dropout=0.1,\n *args,\n **kwargs\n ):\n super(CRNN, self).__init__(n_feats, n_class, *args, **kwargs)\n n_feats = n_feats // 2\n # cnn for extracting hierarchic features\n self.cnn = nn.Conv2d(1, 32, kernel_size=3, stride=stride, padding=get_same_padding(3))\n\n # n residual cnn layers with filter size of 32\n self.rescnn_layers = nn.Sequential(*[\n ResidualBlock(32, 32, kernel=3, stride=1, dropout=dropout, n_feats=n_feats)\n for _ in range(n_cnn_layers)\n ])\n self.fully_connected = nn.Sequential(\n nn.Linear(n_feats * 32, rnn_dim),\n nn.GELU(),\n )\n self.birnn_layers = nn.LSTM(rnn_dim, hidden_size=rnn_dim, bidirectional=True,\n num_layers=n_rnn_layers, batch_first=True)\n self.classifier = nn.Sequential(\n nn.Linear(2 * rnn_dim, rnn_dim),\n nn.GELU(),\n nn.Dropout(dropout),\n nn.Linear(rnn_dim, n_class)\n )\n\n def forward(self, spectrogram, *args, **kwargs):\n # initially spectrogram is (batch, time, feature)\n spectrogram = spectrogram.unsqueeze(1)\n out = self.cnn(spectrogram)\n out = self.rescnn_layers(out)\n\n out = out.transpose(1, 2).contiguous() # (batch, channels, time, feats) -> (batch, time, channels, feats)\n sizes = out.size()\n out = out.view(sizes[0], sizes[1], sizes[2] * sizes[3]) # (batch, time, feats)\n\n out = self.fully_connected(out)\n out, _ = self.birnn_layers(out)\n\n return self.classifier(out)\n\n def transform_input_lengths(self, input_lengths):\n return input_lengths // 2\n" ]
[ [ "torch.nn.Sequential", "torch.nn.GELU", "torch.nn.Dropout", "torch.nn.LSTM", "torch.nn.Conv2d", "torch.nn.LayerNorm", "torch.nn.Linear" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Yuu94/bert-ja-maruchi-classification
[ "2ce88be548dc796c73835140b3c214f851f17e0b" ]
[ "run_classifier_livedoor.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"BERT finetuning runner.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport csv\nimport os\nimport modeling\nimport optimization\nimport tokenization\nimport tensorflow as tf\n\nflags = tf.flags\n\nFLAGS = flags.FLAGS\n\n## Required parameters\nflags.DEFINE_string(\n \"data_dir\", None,\n \"The input data dir. Should contain the .tsv files (or other data files) \"\n \"for the task.\")\n\nflags.DEFINE_string(\n \"bert_config_file\", None,\n \"The config json file corresponding to the pre-trained BERT model. \"\n \"This specifies the model architecture.\")\n\nflags.DEFINE_string(\"task_name\", None, \"The name of the task to train.\")\n\nflags.DEFINE_string(\"vocab_file\", None,\n \"The vocabulary file that the BERT model was trained on.\")\n\nflags.DEFINE_string(\n \"output_dir\", None,\n \"The output directory where the model checkpoints will be written.\")\n\n## Other parameters\n\nflags.DEFINE_string(\n \"init_checkpoint\", None,\n \"Initial checkpoint (usually from a pre-trained BERT model).\")\n\nflags.DEFINE_bool(\n \"do_lower_case\", True,\n \"Whether to lower case the input text. Should be True for uncased \"\n \"models and False for cased models.\")\n\nflags.DEFINE_integer(\n \"max_seq_length\", 128,\n \"The maximum total input sequence length after WordPiece tokenization. \"\n \"Sequences longer than this will be truncated, and sequences shorter \"\n \"than this will be padded.\")\n\nflags.DEFINE_bool(\"do_train\", False, \"Whether to run training.\")\n\nflags.DEFINE_bool(\"do_eval\", False, \"Whether to run eval on the dev set.\")\n\nflags.DEFINE_bool(\n \"do_predict\", False,\n \"Whether to run the model in inference mode on the test set.\")\n\nflags.DEFINE_integer(\"train_batch_size\", 32, \"Total batch size for training.\")\n\nflags.DEFINE_integer(\"eval_batch_size\", 8, \"Total batch size for eval.\")\n\nflags.DEFINE_integer(\"predict_batch_size\", 8, \"Total batch size for predict.\")\n\nflags.DEFINE_float(\"learning_rate\", 5e-5, \"The initial learning rate for Adam.\")\n\nflags.DEFINE_float(\"num_train_epochs\", 3.0,\n \"Total number of training epochs to perform.\")\n\nflags.DEFINE_float(\n \"warmup_proportion\", 0.1,\n \"Proportion of training to perform linear learning rate warmup for. \"\n \"E.g., 0.1 = 10% of training.\")\n\nflags.DEFINE_integer(\"save_checkpoints_steps\", 1000,\n \"How often to save the model checkpoint.\")\n\nflags.DEFINE_integer(\"iterations_per_loop\", 1000,\n \"How many steps to make in each estimator call.\")\n\nflags.DEFINE_bool(\"use_tpu\", False, \"Whether to use TPU or GPU/CPU.\")\n\ntf.flags.DEFINE_string(\n \"tpu_name\", None,\n \"The Cloud TPU to use for training. 
This should be either the name \"\n \"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 \"\n \"url.\")\n\ntf.flags.DEFINE_string(\n \"tpu_zone\", None,\n \"[Optional] GCE zone where the Cloud TPU is located in. If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\")\n\ntf.flags.DEFINE_string(\n \"gcp_project\", None,\n \"[Optional] Project name for the Cloud TPU-enabled project. If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\")\n\ntf.flags.DEFINE_string(\"master\", None, \"[Optional] TensorFlow master URL.\")\n\nflags.DEFINE_integer(\n \"num_tpu_cores\", 8,\n \"Only used if `use_tpu` is True. Total number of TPU cores to use.\")\n\n\nclass InputExample(object):\n \"\"\"A single training/test example for simple sequence classification.\"\"\"\n\n def __init__(self, guid, text_a, text_b=None, label=None):\n \"\"\"Constructs a InputExample.\n\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n text_b: (Optional) string. The untokenized text of the second sequence.\n Only must be specified for sequence pair tasks.\n label: (Optional) string. The label of the example. This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n self.guid = guid\n self.text_a = text_a\n self.text_b = text_b\n self.label = label\n\n\nclass PaddingInputExample(object):\n \"\"\"Fake example so the num input examples is a multiple of the batch size.\n\n When running eval/predict on the TPU, we need to pad the number of examples\n to be a multiple of the batch size, because the TPU requires a fixed batch\n size. 
The alternative is to drop the last batch, which is bad because it means\n the entire output data won't be generated.\n\n We use this class instead of `None` because treating `None` as padding\n battches could cause silent errors.\n \"\"\"\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self,\n input_ids,\n input_mask,\n segment_ids,\n label_id,\n is_real_example=True):\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.label_id = label_id\n self.is_real_example = is_real_example\n\n\nclass DataProcessor(object):\n \"\"\"Base class for data converters for sequence classification data sets.\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n raise NotImplementedError()\n\n def get_dev_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n raise NotImplementedError()\n\n def get_test_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for prediction.\"\"\"\n raise NotImplementedError()\n\n def get_labels(self):\n \"\"\"Gets the list of labels for this data set.\"\"\"\n raise NotImplementedError()\n\n @classmethod\n def _read_tsv(cls, input_file, quotechar=None):\n \"\"\"Reads a tab separated value file.\"\"\"\n with tf.gfile.Open(input_file, \"r\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n lines.append(line)\n return lines\n\n\nclass XnliProcessor(DataProcessor):\n \"\"\"Processor for the XNLI data set.\"\"\"\n\n def __init__(self):\n self.language = \"zh\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n lines = self._read_tsv(\n os.path.join(data_dir, \"multinli\",\n \"multinli.train.%s.tsv\" % self.language))\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"train-%d\" % (i)\n text_a = tokenization.convert_to_unicode(line[0])\n text_b = tokenization.convert_to_unicode(line[1])\n label = tokenization.convert_to_unicode(line[2])\n if label == tokenization.convert_to_unicode(\"contradictory\"):\n label = tokenization.convert_to_unicode(\"contradiction\")\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n lines = self._read_tsv(os.path.join(data_dir, \"xnli.dev.tsv\"))\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"dev-%d\" % (i)\n language = tokenization.convert_to_unicode(line[0])\n if language != tokenization.convert_to_unicode(self.language):\n continue\n text_a = tokenization.convert_to_unicode(line[6])\n text_b = tokenization.convert_to_unicode(line[7])\n label = tokenization.convert_to_unicode(line[1])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"contradiction\", \"entailment\", \"neutral\"]\n\n\nclass MnliProcessor(DataProcessor):\n \"\"\"Processor for the MultiNLI data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev_matched.tsv\")),\n 
\"dev_matched\")\n\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"test_matched.tsv\")), \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"contradiction\", \"entailment\", \"neutral\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, tokenization.convert_to_unicode(line[0]))\n text_a = tokenization.convert_to_unicode(line[8])\n text_b = tokenization.convert_to_unicode(line[9])\n if set_type == \"test\":\n label = \"contradiction\"\n else:\n label = tokenization.convert_to_unicode(line[-1])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n\nclass MrpcProcessor(DataProcessor):\n \"\"\"Processor for the MRPC data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n text_a = tokenization.convert_to_unicode(line[3])\n text_b = tokenization.convert_to_unicode(line[4])\n if set_type == \"test\":\n label = \"0\"\n else:\n label = tokenization.convert_to_unicode(line[0])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n\nclass ColaProcessor(DataProcessor):\n \"\"\"Processor for the CoLA data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n # Only the test set has a header\n if set_type == \"test\" and i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n if set_type == \"test\":\n text_a = tokenization.convert_to_unicode(line[1])\n label = \"0\"\n else:\n text_a = tokenization.convert_to_unicode(line[3])\n label = tokenization.convert_to_unicode(line[1])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples\n\n\nclass LivedoorProcessor(DataProcessor):\n \"\"\"Processor for the MRPC data set (GLUE version).\"\"\"\n\n def 
get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n text_a = tokenization.convert_to_unicode(line[3])\n if set_type == \"test\":\n label = \"0\"\n else:\n label = tokenization.convert_to_unicode(line[1])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples\n\n\ndef convert_single_example(ex_index, example, label_list, max_seq_length,\n tokenizer):\n \"\"\"Converts a single `InputExample` into a single `InputFeatures`.\"\"\"\n\n if isinstance(example, PaddingInputExample):\n return InputFeatures(\n input_ids=[0] * max_seq_length,\n input_mask=[0] * max_seq_length,\n segment_ids=[0] * max_seq_length,\n label_id=0,\n is_real_example=False)\n\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n tokens_a = tokenizer.tokenize(example.text_a)\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[0:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n label_id = label_map[example.label]\n if ex_index < 5:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"guid: %s\" % (example.guid))\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in tokens]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n tf.logging.info(\"label: %s (id = %d)\" % (example.label, label_id))\n\n feature = InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id,\n is_real_example=True)\n return feature\n\n\ndef file_based_convert_examples_to_features(\n examples, label_list, max_seq_length, tokenizer, output_file):\n \"\"\"Convert a set of `InputExample`s to a TFRecord file.\"\"\"\n\n writer = tf.python_io.TFRecordWriter(output_file)\n\n for (ex_index, example) in enumerate(examples):\n if ex_index % 10000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n feature = convert_single_example(ex_index, example, label_list,\n max_seq_length, tokenizer)\n\n def create_int_feature(values):\n f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n return f\n\n features = collections.OrderedDict()\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n features[\"label_ids\"] = create_int_feature([feature.label_id])\n features[\"is_real_example\"] = create_int_feature(\n [int(feature.is_real_example)])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n writer.write(tf_example.SerializeToString())\n writer.close()\n\n\ndef file_based_input_fn_builder(input_file, seq_length, is_training,\n drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n name_to_features = {\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_length], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"label_ids\": tf.FixedLenFeature([], tf.int64),\n \"is_real_example\": tf.FixedLenFeature([], tf.int64),\n }\n\n def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n 
lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d\n\n return input_fn\n\n\ndef _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()\n\n\ndef create_model(bert_config, is_training, input_ids, input_mask, segment_ids,\n labels, num_labels, use_one_hot_embeddings):\n \"\"\"Creates a classification model.\"\"\"\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n # In the demo, we are doing a simple classification task on the entire\n # segment.\n #\n # If you want to use the token-level output, use model.get_sequence_output()\n # instead.\n output_layer = model.get_pooled_output()\n\n hidden_size = output_layer.shape[-1].value\n\n output_weights = tf.get_variable(\n \"output_weights\", [num_labels, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"output_bias\", [num_labels], initializer=tf.zeros_initializer())\n\n with tf.variable_scope(\"loss\"):\n if is_training:\n # I.e., 0.1 dropout\n output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n\n logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n probabilities = tf.nn.softmax(logits, axis=-1)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n loss = tf.reduce_mean(per_example_loss)\n\n return (loss, per_example_loss, logits, probabilities)\n\n\ndef model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n is_real_example = None\n if \"is_real_example\" in features:\n is_real_example = tf.cast(features[\"is_real_example\"], dtype=tf.float32)\n else:\n is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (total_loss, per_example_loss, logits, probabilities) = create_model(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, 
initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n def metric_fn(per_example_loss, label_ids, logits, is_real_example):\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n accuracy = tf.metrics.accuracy(\n labels=label_ids, predictions=predictions, weights=is_real_example)\n loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)\n return {\n \"eval_accuracy\": accuracy,\n \"eval_loss\": loss,\n }\n\n eval_metrics = (metric_fn,\n [per_example_loss, label_ids, logits, is_real_example])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n else:\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n predictions={\"probabilities\": probabilities},\n scaffold_fn=scaffold_fn)\n return output_spec\n\n return model_fn\n\n\n# This function is not used by this file but is still used by the Colab and\n# people who depend on it.\ndef input_fn_builder(features, seq_length, is_training, drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n all_input_ids = []\n all_input_mask = []\n all_segment_ids = []\n all_label_ids = []\n\n for feature in features:\n all_input_ids.append(feature.input_ids)\n all_input_mask.append(feature.input_mask)\n all_segment_ids.append(feature.segment_ids)\n all_label_ids.append(feature.label_id)\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. 
The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices({\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"segment_ids\":\n tf.constant(\n all_segment_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"label_ids\":\n tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),\n })\n\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)\n return d\n\n return input_fn\n\n\n# This function is not used by this file but is still used by the Colab and\n# people who depend on it.\ndef convert_examples_to_features(examples, label_list, max_seq_length,\n tokenizer):\n \"\"\"Convert a set of `InputExample`s to a list of `InputFeatures`.\"\"\"\n\n features = []\n for (ex_index, example) in enumerate(examples):\n if ex_index % 10000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n feature = convert_single_example(ex_index, example, label_list,\n max_seq_length, tokenizer)\n\n features.append(feature)\n return features\n\n\ndef main(_):\n tf.logging.set_verbosity(tf.logging.INFO)\n\n processors = {\n \"cola\": ColaProcessor,\n \"mnli\": MnliProcessor,\n \"mrpc\": MrpcProcessor,\n \"xnli\": XnliProcessor,\n \"livedoor\": LivedoorProcessor, # 実行時に呼び出すtask_name : クラス名\n }\n\n tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,\n FLAGS.init_checkpoint)\n\n if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:\n raise ValueError(\n \"At least one of `do_train`, `do_eval` or `do_predict' must be True.\")\n\n bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)\n\n if FLAGS.max_seq_length > bert_config.max_position_embeddings:\n raise ValueError(\n \"Cannot use sequence length %d because the BERT model \"\n \"was only trained up to sequence length %d\" %\n (FLAGS.max_seq_length, bert_config.max_position_embeddings))\n\n tf.gfile.MakeDirs(FLAGS.output_dir)\n\n task_name = FLAGS.task_name.lower()\n\n if task_name not in processors:\n raise ValueError(\"Task not found: %s\" % (task_name))\n\n processor = processors[task_name]()\n\n label_list = processor.get_labels()\n\n tokenizer = tokenization.FullTokenizer(\n vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)\n\n tpu_cluster_resolver = None\n if FLAGS.use_tpu and FLAGS.tpu_name:\n tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(\n FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)\n\n is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2\n run_config = tf.contrib.tpu.RunConfig(\n cluster=tpu_cluster_resolver,\n master=FLAGS.master,\n model_dir=FLAGS.output_dir,\n save_checkpoints_steps=FLAGS.save_checkpoints_steps,\n tpu_config=tf.contrib.tpu.TPUConfig(\n iterations_per_loop=FLAGS.iterations_per_loop,\n num_shards=FLAGS.num_tpu_cores,\n per_host_input_for_training=is_per_host))\n\n train_examples = None\n num_train_steps = None\n num_warmup_steps = None\n if FLAGS.do_train:\n train_examples = processor.get_train_examples(FLAGS.data_dir)\n num_train_steps = int(\n len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)\n num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)\n\n model_fn = model_fn_builder(\n bert_config=bert_config,\n num_labels=len(label_list),\n 
init_checkpoint=FLAGS.init_checkpoint,\n learning_rate=FLAGS.learning_rate,\n num_train_steps=num_train_steps,\n num_warmup_steps=num_warmup_steps,\n use_tpu=FLAGS.use_tpu,\n use_one_hot_embeddings=FLAGS.use_tpu)\n\n # If TPU is not available, this will fall back to normal Estimator on CPU\n # or GPU.\n estimator = tf.contrib.tpu.TPUEstimator(\n use_tpu=FLAGS.use_tpu,\n model_fn=model_fn,\n config=run_config,\n train_batch_size=FLAGS.train_batch_size,\n eval_batch_size=FLAGS.eval_batch_size,\n predict_batch_size=FLAGS.predict_batch_size)\n\n if FLAGS.do_train:\n train_file = os.path.join(FLAGS.output_dir, \"train.tf_record\")\n file_based_convert_examples_to_features(\n train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)\n tf.logging.info(\"***** Running training *****\")\n tf.logging.info(\" Num examples = %d\", len(train_examples))\n tf.logging.info(\" Batch size = %d\", FLAGS.train_batch_size)\n tf.logging.info(\" Num steps = %d\", num_train_steps)\n train_input_fn = file_based_input_fn_builder(\n input_file=train_file,\n seq_length=FLAGS.max_seq_length,\n is_training=True,\n drop_remainder=True)\n estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)\n\n if FLAGS.do_eval:\n eval_examples = processor.get_dev_examples(FLAGS.data_dir)\n num_actual_eval_examples = len(eval_examples)\n if FLAGS.use_tpu:\n # TPU requires a fixed batch size for all batches, therefore the number\n # of examples must be a multiple of the batch size, or else examples\n # will get dropped. So we pad with fake examples which are ignored\n # later on. These do NOT count towards the metric (all tf.metrics\n # support a per-instance weight, and these get a weight of 0.0).\n while len(eval_examples) % FLAGS.eval_batch_size != 0:\n eval_examples.append(PaddingInputExample())\n\n eval_file = os.path.join(FLAGS.output_dir, \"eval.tf_record\")\n file_based_convert_examples_to_features(\n eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)\n\n tf.logging.info(\"***** Running evaluation *****\")\n tf.logging.info(\" Num examples = %d (%d actual, %d padding)\",\n len(eval_examples), num_actual_eval_examples,\n len(eval_examples) - num_actual_eval_examples)\n tf.logging.info(\" Batch size = %d\", FLAGS.eval_batch_size)\n\n # This tells the estimator to run through the entire set.\n eval_steps = None\n # However, if running eval on the TPU, you will need to specify the\n # number of steps.\n if FLAGS.use_tpu:\n assert len(eval_examples) % FLAGS.eval_batch_size == 0\n eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)\n\n eval_drop_remainder = True if FLAGS.use_tpu else False\n eval_input_fn = file_based_input_fn_builder(\n input_file=eval_file,\n seq_length=FLAGS.max_seq_length,\n is_training=False,\n drop_remainder=eval_drop_remainder)\n\n result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)\n\n output_eval_file = os.path.join(FLAGS.output_dir, \"eval_results.txt\")\n with tf.gfile.GFile(output_eval_file, \"w\") as writer:\n tf.logging.info(\"***** Eval results *****\")\n for key in sorted(result.keys()):\n tf.logging.info(\" %s = %s\", key, str(result[key]))\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n\n if FLAGS.do_predict:\n predict_examples = processor.get_test_examples(FLAGS.data_dir)\n num_actual_predict_examples = len(predict_examples)\n if FLAGS.use_tpu:\n # TPU requires a fixed batch size for all batches, therefore the number\n # of examples must be a multiple of the batch size, or else examples\n # will get dropped. 
So we pad with fake examples which are ignored\n # later on.\n while len(predict_examples) % FLAGS.predict_batch_size != 0:\n predict_examples.append(PaddingInputExample())\n\n predict_file = os.path.join(FLAGS.output_dir, \"predict.tf_record\")\n file_based_convert_examples_to_features(predict_examples, label_list,\n FLAGS.max_seq_length, tokenizer,\n predict_file)\n\n tf.logging.info(\"***** Running prediction*****\")\n tf.logging.info(\" Num examples = %d (%d actual, %d padding)\",\n len(predict_examples), num_actual_predict_examples,\n len(predict_examples) - num_actual_predict_examples)\n tf.logging.info(\" Batch size = %d\", FLAGS.predict_batch_size)\n\n predict_drop_remainder = True if FLAGS.use_tpu else False\n predict_input_fn = file_based_input_fn_builder(\n input_file=predict_file,\n seq_length=FLAGS.max_seq_length,\n is_training=False,\n drop_remainder=predict_drop_remainder)\n\n result = estimator.predict(input_fn=predict_input_fn)\n\n output_predict_file = os.path.join(FLAGS.output_dir, \"test_results.tsv\")\n with tf.gfile.GFile(output_predict_file, \"w\") as writer:\n num_written_lines = 0\n tf.logging.info(\"***** Predict results *****\")\n for (i, prediction) in enumerate(result):\n probabilities = prediction[\"probabilities\"]\n if i >= num_actual_predict_examples:\n break\n output_line = \"\\t\".join(\n str(class_probability)\n for class_probability in probabilities) + \"\\n\"\n writer.write(output_line)\n num_written_lines += 1\n assert num_written_lines == num_actual_predict_examples\n\n\nif __name__ == \"__main__\":\n flags.mark_flag_as_required(\"data_dir\")\n flags.mark_flag_as_required(\"task_name\")\n flags.mark_flag_as_required(\"vocab_file\")\n flags.mark_flag_as_required(\"bert_config_file\")\n flags.mark_flag_as_required(\"output_dir\")\n tf.app.run()\n" ]
[ [ "tensorflow.contrib.cluster_resolver.TPUClusterResolver", "tensorflow.metrics.accuracy", "tensorflow.FixedLenFeature", "tensorflow.nn.log_softmax", "tensorflow.reduce_sum", "tensorflow.gfile.GFile", "tensorflow.cast", "tensorflow.train.init_from_checkpoint", "tensorflow.gfile.MakeDirs", "tensorflow.to_int32", "tensorflow.contrib.tpu.TPUEstimatorSpec", "tensorflow.contrib.tpu.TPUEstimator", "tensorflow.data.TFRecordDataset", "tensorflow.truncated_normal_initializer", "tensorflow.python_io.TFRecordWriter", "tensorflow.logging.set_verbosity", "tensorflow.trainable_variables", "tensorflow.parse_single_example", "tensorflow.argmax", "tensorflow.app.run", "tensorflow.nn.dropout", "tensorflow.metrics.mean", "tensorflow.matmul", "tensorflow.gfile.Open", "tensorflow.shape", "tensorflow.zeros_initializer", "tensorflow.logging.info", "tensorflow.one_hot", "tensorflow.contrib.tpu.TPUConfig", "tensorflow.train.Features", "tensorflow.nn.bias_add", "tensorflow.nn.softmax", "tensorflow.constant", "tensorflow.train.Scaffold", "tensorflow.reduce_mean", "tensorflow.flags.DEFINE_string", "tensorflow.variable_scope" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
hcrlab/stretch_ros
[ "237e9e56d759aa4de71dc09b02ec28ff6bcb1559" ]
[ "stretch_funmap/src/stretch_funmap/numba_create_plane_image.py" ]
[ "import numpy as np\nfrom numba import njit\n\n\n@njit(fastmath=True)\ndef numba_create_plane_image(plane_parameters, image):\n image_height, image_width = image.shape\n alpha, beta, gamma = plane_parameters\n\n for y in range(image_height):\n for x in range(image_width):\n image[y, x] = (alpha * x) + (beta * y) + gamma\n\n\n@njit(fastmath=True)\ndef transform_original_to_corrected(plane_parameters, new_plane_height):\n # This does not account for clipping\n alpha, beta, gamma = plane_parameters\n\n transform = np.array([[1.0, 0.0, 0.0, 0.0],\n [0.0, 1.0, 0.0, 0.0],\n [-alpha, -beta, 1.0, new_plane_height - gamma],\n [0.0, 0.0, 0.0, 1.0]])\n return transform\n\n\n@njit(fastmath=True)\ndef transform_corrected_to_original(plane_parameters, new_plane_height):\n transform = transform_original_to_corrected(plane_parameters, new_plane_height)\n return np.linalg.inv(transform)\n\n\n@njit(fastmath=True)\ndef numba_correct_height_image(plane_parameters, height_image, new_plane_height):\n # Sets the plane described by the plane parameters to have a\n # constant height equal to plane_height. This does not perform a\n # true rotation and instead subtracts the plane from the current\n # heights. This is a good approximation for small angles, which\n # corresponds with planes that are close to horizontal (i.e.,\n # close to constant height).\n\n # Currently, this assumes that the height image has type uint8\n\n new_height_image = np.zeros_like(height_image)\n\n image_height, image_width = height_image.shape\n alpha, beta, gamma = plane_parameters\n\n for y in range(image_height):\n for x in range(image_width):\n height = height_image[y, x]\n if height != 0:\n plane_z = (alpha * x) + (beta * y) + gamma\n new_height = (height - plane_z) + new_plane_height\n new_height = np.round(new_height)\n\n # clip the height\n if new_height < 1:\n # If the max height is too low, set the pixel to\n # being unobserved, since the point would not have\n # been observed.\n new_height = 0\n if new_height > 255:\n # If the max height is too high, set the pixel to\n # the maximum height possible. This assumes that\n # it's likely that the maximum height is not an\n # isolated point at the given height. It can also\n # be considered a conservative choice in terms of\n # possible obstacles.\n new_height = 255\n\n # This should cast the value to a uint8\n new_height_image[y, x] = new_height\n\n transform = transform_original_to_corrected(plane_parameters, new_plane_height)\n\n return new_height_image, transform\n" ]
[ [ "numpy.linalg.inv", "numpy.round", "numpy.array", "numpy.zeros_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bcgov/wps
[ "71df0de72de9cd656dc9ebf8461ffe47cfb155f6" ]
[ "api/app/c_haines/severity_index.py" ]
[ "\"\"\" Logic pertaining to the generation of c_haines severity index from GDAL datasets.\n\"\"\"\nimport os\nimport io\nimport asyncio\nfrom datetime import datetime, timezone, timedelta\nfrom typing import Final, Tuple, Generator, Optional, List\nfrom contextlib import contextmanager\nimport tempfile\nimport logging\nimport json\nfrom osgeo import gdal, ogr\nimport numpy\nfrom pyproj import Transformer, Proj\nfrom shapely.ops import transform\nfrom shapely.geometry import shape, mapping\nfrom aiobotocore.client import AioBaseClient\nfrom app.utils.s3 import object_exists, object_exists_v2\nimport app.utils.time as time_utils\nfrom app.weather_models import ModelEnum, ProjectionEnum\nfrom app.geospatial import WGS84\nfrom app.weather_models.env_canada import (get_model_run_hours,\n get_file_date_part, adjust_model_day, download,\n UnhandledPredictionModelType)\nfrom app.utils.s3 import get_client\nfrom app.c_haines import get_severity_string\nfrom app.c_haines.c_haines_index import CHainesGenerator\nfrom app.c_haines import GDALData\nfrom app.c_haines.object_store import (ObjectTypeEnum, generate_full_object_store_path)\nfrom app.c_haines.kml import save_as_kml_to_s3\nfrom app import config\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_severity(c_haines_index) -> int:\n \"\"\" Return the \"severity\" of the continuous haines index.\n\n Fire behaviour analysts are typically only concerned if there's a high\n or extreme index - so the c-haines values are lumped together by severity.\n\n The severity used here is fairly arbitrary - there's no standard in place.\n \"\"\"\n # 0 - 4 : low\n if c_haines_index < 4:\n return 0\n # 4 - 8 : moderate\n if c_haines_index < 8:\n return 1\n # 8 - 11 : high\n if c_haines_index < 11:\n return 2\n # 11 + Extreme\n return 3\n\n\n@contextmanager\ndef open_gdal(filename_tmp_700: str,\n filename_tmp_850: str,\n filename_dew_850: str) -> Generator[GDALData, None, None]:\n \"\"\" Open gdal, and yield handy object containing all the data \"\"\"\n try:\n # Open the datasets.\n grib_tmp_700 = gdal.Open(filename_tmp_700, gdal.GA_ReadOnly)\n grib_tmp_850 = gdal.Open(filename_tmp_850, gdal.GA_ReadOnly)\n grib_dew_850 = gdal.Open(filename_dew_850, gdal.GA_ReadOnly)\n # Yield handy object.\n yield GDALData(grib_tmp_700, grib_tmp_850, grib_dew_850)\n finally:\n # Clean up memory.\n del grib_tmp_700, grib_tmp_850, grib_dew_850\n\n\ndef get_prediction_date(model_run_timestamp, hour) -> datetime:\n \"\"\" Construct the part of the filename that contains the model run date\n \"\"\"\n return model_run_timestamp + timedelta(hours=hour)\n\n\ndef model_prediction_hour_iterator(model: ModelEnum):\n \"\"\" Return a prediction hour iterator.\n Each model has a slightly different set of prediction hours. \"\"\"\n if model == ModelEnum.GDPS:\n # GDPS goes out real far, but in 3 hour intervals.\n for hour in range(0, 241, 3):\n yield hour\n elif model == ModelEnum.RDPS:\n # RDPS goes out 3 1/2 days.\n for hour in range(0, 85):\n yield hour\n elif model == ModelEnum.HRDPS:\n # HRDPS goes out 2 days.\n for hour in range(0, 49):\n yield hour\n else:\n raise UnhandledPredictionModelType()\n\n\ndef make_model_run_base_url(model: ModelEnum, model_run_start: str, forecast_hour: str):\n \"\"\" Return the base url for the grib file.\n The location of the files differs slightly for each model. 
\"\"\"\n if model == ModelEnum.GDPS:\n return \"https://dd.weather.gc.ca/model_gem_global/15km/grib2/lat_lon/\"\\\n f\"{model_run_start}/{forecast_hour}/\"\n if model == ModelEnum.RDPS:\n return f'https://dd.weather.gc.ca/model_gem_regional/10km/grib2/{model_run_start}/{forecast_hour}/'\n if model == ModelEnum.HRDPS:\n return f'https://dd.weather.gc.ca/model_hrdps/continental/grib2/{model_run_start}/{forecast_hour}/'\n raise UnhandledPredictionModelType()\n\n\ndef make_model_run_filename(\n model: ModelEnum, level: str, date: str, model_run_start: str, forecast_hour: str):\n \"\"\" Return the filename of the grib file.\n The filename for each model differs slightly. \"\"\"\n if model == ModelEnum.GDPS:\n return f'CMC_glb_{level}_latlon.15x.15_{date}{model_run_start}_P{forecast_hour}.grib2'\n if model == ModelEnum.RDPS:\n return f'CMC_reg_{level}_ps10km_{date}{model_run_start}_P{forecast_hour}.grib2'\n if model == ModelEnum.HRDPS:\n return f'CMC_hrdps_continental_{level}_ps2.5km_{date}{model_run_start}_P{forecast_hour}-00.grib2'\n raise UnhandledPredictionModelType()\n\n\ndef make_model_levels(model: ModelEnum):\n \"\"\" Return list of layers. (The layers are named slightly differently for HRDPS)\n TMP_ISBL_0700 : Temperature at 700mb.\n TMP_ISBL_0850 : Temperature at 850mb.\n DEPR_ISBL_0850 : Dew point depression at 850mb.\n \"\"\"\n if model == ModelEnum.HRDPS:\n return ['TMP_ISBL_0700', 'TMP_ISBL_0850', 'DEPR_ISBL_0850']\n return ['TMP_ISBL_700', 'TMP_ISBL_850', 'DEPR_ISBL_850']\n\n\ndef make_model_run_download_urls(model: ModelEnum,\n now: datetime,\n model_run_hour: int,\n prediction_hour: int) -> Tuple[dict, datetime, datetime]:\n \"\"\" Return urls to download model runs \"\"\"\n\n # hh: model run start, in UTC [00, 12]\n # hhh: prediction hour [000, 003, 006, ..., 240]\n levels: Final = make_model_levels(model)\n # pylint: disable=invalid-name\n hh = f'{model_run_hour:02d}'\n # For the global model, we have prediction at 3 hour intervals up to 240 hours.\n hhh = format(prediction_hour, '03d')\n\n base_url = make_model_run_base_url(model, hh, hhh)\n date = get_file_date_part(now, model_run_hour)\n\n adjusted_model_time = adjust_model_day(now, model_run_hour)\n model_run_timestamp = datetime(year=adjusted_model_time.year,\n month=adjusted_model_time.month,\n day=adjusted_model_time.day,\n hour=model_run_hour,\n tzinfo=timezone.utc)\n\n prediction_timestamp = get_prediction_date(model_run_timestamp, prediction_hour)\n urls = {}\n for level in levels:\n filename = make_model_run_filename(model, level, date, hh, hhh)\n urls[level] = base_url + filename\n\n return urls, model_run_timestamp, prediction_timestamp\n\n\nclass SourceInfo():\n \"\"\" Handy class to store source information in . 
\"\"\"\n\n def __init__(self, projection, geotransform, rows: int, cols: int):\n self.projection: str = projection\n self.geotransform = geotransform\n self.rows: int = rows\n self.cols: int = cols\n\n\ndef create_in_memory_band(data: numpy.ndarray, source_info: SourceInfo):\n \"\"\" Create an in memory data band to represent a single raster layer.\n See https://gdal.org/user/raster_data_model.html#raster-band for a complete\n description of what a raster band is.\n \"\"\"\n mem_driver = gdal.GetDriverByName('MEM')\n\n dataset = mem_driver.Create('memory', source_info.cols, source_info.rows, 1, gdal.GDT_Byte)\n dataset.SetProjection(source_info.projection)\n dataset.SetGeoTransform(source_info.geotransform)\n band = dataset.GetRasterBand(1)\n band.WriteArray(data)\n\n return dataset, band\n\n\ndef save_data_as_geojson(\n ch_data: numpy.ndarray,\n mask_data: numpy.ndarray,\n source_info: SourceInfo,\n target_filename: str):\n \"\"\" Save data as geojson polygon \"\"\"\n logger.info('Saving output as geojson %s...', target_filename)\n\n # Create data band.\n data_ds, data_band = create_in_memory_band(\n ch_data, source_info)\n\n # Create mask band.\n mask_ds, mask_band = create_in_memory_band(\n mask_data, source_info)\n\n # Create a GeoJSON layer.\n geojson_driver = ogr.GetDriverByName('GeoJSON')\n dst_ds = geojson_driver.CreateDataSource(target_filename)\n dst_layer = dst_ds.CreateLayer('C-Haines')\n field_name = ogr.FieldDefn(\"severity\", ogr.OFTInteger)\n field_name.SetWidth(24)\n dst_layer.CreateField(field_name)\n\n # Turn the rasters into polygons.\n gdal.Polygonize(data_band, mask_band, dst_layer, 0, [], callback=None)\n\n # Ensure that all data in the target dataset is written to disk.\n dst_ds.FlushCache()\n # Explicitly clean up (is this needed?)\n del dst_ds, data_ds, mask_ds\n\n\ndef generate_severity_data(c_haines_data):\n \"\"\" Generate severity index data, iterating over c-haines data.\n NOTE: Iterating to generate c-haines, and then iterating again to generate severity is a bit slower,\n but results in much cleaner code.\n \"\"\"\n logger.info('Generating c-haines severity index data.')\n severity_data = []\n mask_data = []\n for row in c_haines_data:\n severity_row = []\n mask_row = []\n for cell in row:\n severity = get_severity(cell)\n severity_row.append(severity)\n # We ignore severity 0.\n if severity == 0:\n mask_row.append(0)\n else:\n mask_row.append(1)\n severity_data.append(severity_row)\n mask_data.append(mask_row)\n return numpy.array(severity_data), numpy.array(mask_data)\n\n\nclass EnvCanadaPayload():\n \"\"\" Handy class to store payload information in . \"\"\"\n\n def __init__(self):\n self.filename_tmp_700: Optional[str] = None\n self.filename_tmp_850: Optional[str] = None\n self.filename_dew_850: Optional[str] = None\n self.model: Optional[ModelEnum] = None\n self.model_run_timestamp: Optional[datetime] = None\n self.prediction_timestamp: Optional[datetime] = None\n\n\ndef _save_data_as_geotiff(payload: EnvCanadaPayload, ch_data: numpy.ndarray, source_info: SourceInfo):\n \"\"\" This method exists for debug purposes only. 
It can be real useful to output raw GeoTIFF files.\n \"\"\"\n filename = f'./geotiff/{payload.model}_{payload.model_run_timestamp}_{payload.prediction_timestamp}.tiff'\n logger.info('creating %s', filename)\n target_ds = gdal.GetDriverByName('GTiff')\n out_raster = target_ds.Create(filename, source_info.cols, source_info.rows, 1, gdal.GDT_Byte)\n out_raster.SetGeoTransform(source_info.geotransform)\n outband = out_raster.GetRasterBand(1)\n outband.WriteArray(ch_data)\n\n out_raster.SetProjection(source_info.projection)\n outband.FlushCache()\n\n\ndef re_project_and_classify_geojson(source_json_filename: str,\n source_projection: str) -> dict:\n \"\"\" Given a geojson file in a specified projection\n - order by severity.\n - re-project to wgs84.\n - re-classify the \"severity index\" as a c_haines_index string.\n - return as a dictionary\n \"\"\"\n proj_from = Proj(projparams=source_projection)\n proj_to = Proj(WGS84)\n project = Transformer.from_proj(proj_from, proj_to, always_xy=True)\n with open(source_json_filename, encoding=\"utf-8\") as source_file:\n geojson_data = json.load(source_file)\n # We need to sort the geojson by severity.\n geojson_data['features'].sort(key=lambda feature: feature['properties']['severity'])\n # Iterate through features.\n for feature in geojson_data['features']:\n # Replace \"severity\" with c-haines.\n feature['properties'] = {\"c_haines_index\": get_severity_string(feature['properties']['severity'])}\n # Re-project to WGS84\n source_geometry = shape(feature['geometry'])\n geometry = transform(project.transform, source_geometry)\n geojson_geometry = mapping(geometry)\n feature['geometry']['coordinates'] = geojson_geometry['coordinates']\n return geojson_data\n\n\nasync def save_as_geojson_to_s3(source_json_filename: str,\n source_projection: str,\n prediction_model: ModelEnum,\n model_run_timestamp: datetime,\n prediction_timestamp: datetime):\n \"\"\" Given a geojson file, ensure it's in the correct projection and then store to S3 \"\"\"\n target_path = generate_full_object_store_path(\n prediction_model, model_run_timestamp, prediction_timestamp, ObjectTypeEnum.GEOJSON)\n # let's save some time, and check if the file doesn't already exists.\n # it's super important we do this, since there are many c-haines cronjobs running in dev, all\n # pointing to the same s3 bucket.\n async with get_client() as (client, bucket):\n if await object_exists(client, bucket, target_path):\n logger.info('json (%s) already exists - skipping', target_path)\n return\n\n # re-project the geojson file from whatever it was, to WGS84.\n re_projected_data = re_project_and_classify_geojson(source_json_filename, source_projection)\n\n with io.StringIO() as sio:\n json.dump(re_projected_data, sio)\n # smash it into binary\n sio.seek(0)\n bio = io.BytesIO(sio.read().encode('utf8'))\n # go back to start\n bio.seek(0)\n # smash it into the object store.\n logger.info('uploading %s', target_path)\n await client.put_object(Bucket=bucket, Key=target_path, Body=bio)\n\n\nclass CHainesSeverityGenerator():\n \"\"\" Class responsible for orchestrating the generation of Continous Haines severity\n index polygons.\n\n Steps for generation of severity level as follows:\n 1) Download grib files.\n 2) Iterate through raster rows, generating an in memory raster containing c-haines severity indices.\n 3) Turn raster data into polygons, storing in intermediary GeoJSON file.\n 4) Write polygons to database.\n \"\"\"\n\n def __init__(self, model: ModelEnum, projection: ProjectionEnum, client: 
AioBaseClient, bucket: str):\n self.model = model\n self.projection = projection\n self.c_haines_generator = CHainesGenerator()\n self.client: AioBaseClient = client\n self.bucket: str = bucket\n\n def _collect_payload(self,\n urls: dict,\n model: ModelEnum,\n model_run_timestamp: datetime,\n prediction_timestamp: datetime,\n temporary_path: str) -> Optional[EnvCanadaPayload]:\n \"\"\" Collect all the different things that make up our payload: our downloaded files,\n model run, and prediction timestamp. \"\"\"\n\n def _download_files(urls: dict,\n model: ModelEnum,\n temporary_path: str) -> Optional[List[str]]:\n \"\"\" Try to download all the files \"\"\"\n filenames = []\n for key in make_model_levels(model):\n # Try to download this file.\n # TODO: would be nice to make the file download async\n filename = download(urls[key], temporary_path)\n if not filename:\n # If we fail to download one of files, quit, don't try the others.\n logger.warning('failed to download %s', urls[key])\n return None\n filenames.append(filename)\n return filenames\n\n filenames = _download_files(urls, self.model, temporary_path)\n if filenames:\n filename_tmp_700, filename_tmp_850, filename_dew_850 = filenames\n payload = EnvCanadaPayload()\n payload.filename_tmp_700 = filename_tmp_700\n payload.filename_tmp_850 = filename_tmp_850\n payload.filename_dew_850 = filename_dew_850\n payload.model = model\n payload.model_run_timestamp = model_run_timestamp\n payload.prediction_timestamp = prediction_timestamp\n return payload\n return None\n\n # pylint: disable=no-self-use\n async def _assets_exist(self, model: ModelEnum,\n model_run_timestamp: datetime,\n prediction_timestamp: datetime) -> bool:\n \"\"\" Return True if kml and geojson assets already exist, otherwise False \"\"\"\n tasks = []\n kml_path = generate_full_object_store_path(\n model,\n model_run_timestamp,\n prediction_timestamp,\n ObjectTypeEnum.KML)\n tasks.append(asyncio.create_task(object_exists_v2(kml_path)))\n\n json_path = generate_full_object_store_path(\n model,\n model_run_timestamp,\n prediction_timestamp,\n ObjectTypeEnum.GEOJSON)\n tasks.append(asyncio.create_task(object_exists_v2(json_path)))\n\n if False in await asyncio.gather(*tasks):\n return False\n return True\n\n async def _get_payloads(self, temporary_path) -> Generator[EnvCanadaPayload, None, None]:\n \"\"\" Iterator that yields the next to process. 
\"\"\"\n utc_now = time_utils.get_utc_now()\n for model_hour in get_model_run_hours(self.model):\n for prediction_hour in model_prediction_hour_iterator(self.model):\n\n urls, model_run_timestamp, prediction_timestamp = make_model_run_download_urls(\n self.model, utc_now, model_hour, prediction_hour)\n\n # If the GeoJSON and the KML already exist, then we can skip this one.\n if await self._assets_exist(self.model,\n model_run_timestamp,\n prediction_timestamp):\n logger.info('%s: already processed %s-%s',\n self.model,\n model_run_timestamp, prediction_timestamp)\n continue\n\n payload = self._collect_payload(urls,\n self.model,\n model_run_timestamp,\n prediction_timestamp,\n temporary_path)\n if payload:\n yield payload\n else:\n # If you didn't get one of them - you probably won't get the rest either!\n logger.info('Failed to download one of the model files - skipping the rest')\n return\n\n def _generate_c_haines_data(\n self,\n payload: EnvCanadaPayload):\n\n # Open the grib files.\n with open_gdal(payload.filename_tmp_700,\n payload.filename_tmp_850,\n payload.filename_dew_850) as source_data:\n # Generate c_haines data\n c_haines_data = self.c_haines_generator.generate_c_haines(source_data)\n # Store the projection and geotransform for later.\n projection = source_data.grib_tmp_700.GetProjection()\n geotransform = source_data.grib_tmp_700.GetGeoTransform()\n # Store the dimensions for later.\n band = source_data.grib_tmp_700.GetRasterBand(1)\n rows = band.YSize\n cols = band.XSize\n # Package source info nicely.\n source_info = SourceInfo(projection=projection,\n geotransform=geotransform, rows=rows, cols=cols)\n\n return c_haines_data, source_info\n\n # pylint: disable=no-self-use\n async def _persist_severity_data(self,\n payload: EnvCanadaPayload,\n c_haines_severity_data: numpy.ndarray,\n c_haines_mask_data: numpy.ndarray,\n source_info: SourceInfo):\n with tempfile.TemporaryDirectory() as temporary_path:\n json_filename = os.path.join(os.getcwd(), temporary_path, 'c-haines.geojson')\n save_data_as_geojson(\n c_haines_severity_data,\n c_haines_mask_data,\n source_info,\n json_filename)\n\n tasks = []\n tasks.append(asyncio.create_task(\n save_as_kml_to_s3(json_filename, source_info.projection,\n payload.model,\n payload.model_run_timestamp, payload.prediction_timestamp)\n ))\n tasks.append(asyncio.create_task(\n save_as_geojson_to_s3(json_filename, source_info.projection,\n payload.model,\n payload.model_run_timestamp, payload.prediction_timestamp)\n ))\n await asyncio.gather(*tasks)\n\n async def generate(self):\n \"\"\" Entry point for generating and storing c-haines severity index. \"\"\"\n # Iterate through payloads that need processing.\n with tempfile.TemporaryDirectory() as temporary_path:\n async for payload in self._get_payloads(temporary_path):\n # Generate the c_haines data.\n c_haines_data, source_info = self._generate_c_haines_data(payload)\n if config.get('C_HAINES_OUTPUT_TIFF') == 'True':\n # Save as geotiff\n _save_data_as_geotiff(payload, numpy.array(c_haines_data), source_info)\n # Generate the severity index and mask data.\n c_haines_severity_data, c_haines_mask_data = generate_severity_data(c_haines_data)\n # We're done with the c_haines data, so we can clean up some memory.\n del c_haines_data\n # Save to s3.\n await self._persist_severity_data(payload,\n c_haines_severity_data,\n c_haines_mask_data,\n source_info)\n # Delete temporary files\n os.remove(payload.filename_dew_850)\n os.remove(payload.filename_tmp_700)\n os.remove(payload.filename_tmp_850)\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MarkRivers/tomopy
[ "77e1b30d7a4cb86b6a779c50184c9483c527a534" ]
[ "source/tomopy/prep/alignment.py" ]
[ "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\n# #########################################################################\r\n# Copyright (c) 2016-2019, UChicago Argonne, LLC. All rights reserved. #\r\n# #\r\n# Copyright 2016-2019. UChicago Argonne, LLC. This software was produced #\r\n# under U.S. Government contract DE-AC02-06CH11357 for Argonne National #\r\n# Laboratory (ANL), which is operated by UChicago Argonne, LLC for the #\r\n# U.S. Department of Energy. The U.S. Government has rights to use, #\r\n# reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR #\r\n# UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR #\r\n# ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is #\r\n# modified to produce derivative works, such modified software should #\r\n# be clearly marked, so as not to confuse it with the version available #\r\n# from ANL. #\r\n# #\r\n# Additionally, redistribution and use in source and binary forms, with #\r\n# or without modification, are permitted provided that the following #\r\n# conditions are met: #\r\n# #\r\n# * Redistributions of source code must retain the above copyright #\r\n# notice, this list of conditions and the following disclaimer. #\r\n# #\r\n# * Redistributions in binary form must reproduce the above copyright #\r\n# notice, this list of conditions and the following disclaimer in #\r\n# the documentation and/or other materials provided with the #\r\n# distribution. #\r\n# #\r\n# * Neither the name of UChicago Argonne, LLC, Argonne National #\r\n# Laboratory, ANL, the U.S. Government, nor the names of its #\r\n# contributors may be used to endorse or promote products derived #\r\n# from this software without specific prior written permission. #\r\n# #\r\n# THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS #\r\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #\r\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #\r\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago #\r\n# Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #\r\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #\r\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #\r\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #\r\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #\r\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #\r\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #\r\n# POSSIBILITY OF SUCH DAMAGE. 
#\r\n# #########################################################################\r\n\r\nimport numpy as np\r\nimport concurrent.futures as cf\r\nimport tomopy.util.mproc as mproc\r\nimport logging\r\n\r\nfrom skimage import transform as tf\r\nfrom skimage.feature import register_translation\r\nfrom tomopy.recon.algorithm import recon\r\nfrom tomopy.sim.project import project\r\nfrom tomopy.misc.npmath import gauss1d, calc_affine_transform\r\nfrom tomopy.util.misc import write_tiff\r\nfrom scipy.signal import medfilt, medfilt2d\r\nfrom scipy.optimize import curve_fit\r\nfrom scipy.ndimage import affine_transform\r\nfrom scipy.ndimage import map_coordinates\r\nfrom collections import namedtuple\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\n__author__ = \"Doga Gursoy, Chen Zhang, Nghia Vo\"\r\n__copyright__ = \"Copyright (c) 2016-19, UChicago Argonne, LLC.\"\r\n__docformat__ = 'restructuredtext en'\r\n__all__ = ['align_seq',\r\n 'align_joint',\r\n 'scale',\r\n 'tilt',\r\n 'add_jitter',\r\n 'add_noise',\r\n 'blur_edges',\r\n 'shift_images',\r\n 'find_slits_corners_aps_1id',\r\n 'calc_slit_box_aps_1id',\r\n 'remove_slits_aps_1id',\r\n 'distortion_correction_proj',\r\n 'distortion_correction_sino',\r\n 'load_distortion_coefs', \r\n ]\r\n\r\n\r\ndef align_seq(\r\n prj, ang, fdir='.', iters=10, pad=(0, 0),\r\n blur=True, center=None, algorithm='sirt',\r\n upsample_factor=10, rin=0.5, rout=0.8,\r\n save=False, debug=True):\r\n \"\"\"\r\n Aligns the projection image stack using the sequential\r\n re-projection algorithm :cite:`Gursoy:17`.\r\n\r\n Parameters\r\n ----------\r\n prj : ndarray\r\n 3D stack of projection images. The first dimension\r\n is projection axis, second and third dimensions are\r\n the x- and y-axes of the projection image, respectively.\r\n ang : ndarray\r\n Projection angles in radians as an array.\r\n iters : scalar, optional\r\n Number of iterations of the algorithm.\r\n pad : list-like, optional\r\n Padding for projection images in x and y-axes.\r\n blur : bool, optional\r\n Blurs the edge of the image before registration.\r\n center: array, optional\r\n Location of rotation axis.\r\n algorithm : {str, function}\r\n One of the following string values.\r\n\r\n 'art'\r\n Algebraic reconstruction technique :cite:`Kak:98`.\r\n 'gridrec'\r\n Fourier grid reconstruction algorithm :cite:`Dowd:99`,\r\n :cite:`Rivers:06`.\r\n 'mlem'\r\n Maximum-likelihood expectation maximization algorithm\r\n :cite:`Dempster:77`.\r\n 'sirt'\r\n Simultaneous algebraic reconstruction technique.\r\n 'tv'\r\n Total Variation reconstruction technique\r\n :cite:`Chambolle:11`.\r\n 'grad'\r\n Gradient descent method with a constant step size\r\n\r\n upsample_factor : integer, optional\r\n The upsampling factor. Registration accuracy is\r\n inversely propotional to upsample_factor.\r\n rin : scalar, optional\r\n The inner radius of blur function. Pixels inside\r\n rin is set to one.\r\n rout : scalar, optional\r\n The outer radius of blur function. 
Pixels outside\r\n rout is set to zero.\r\n save : bool, optional\r\n Saves projections and corresponding reconstruction\r\n for each algorithm iteration.\r\n debug : book, optional\r\n Provides debugging info such as iterations and error.\r\n\r\n Returns\r\n -------\r\n ndarray\r\n 3D stack of projection images with jitter.\r\n ndarray\r\n Error array for each iteration.\r\n \"\"\"\r\n\r\n # Needs scaling for skimage float operations.\r\n prj, scl = scale(prj)\r\n\r\n # Shift arrays\r\n sx = np.zeros((prj.shape[0]))\r\n sy = np.zeros((prj.shape[0]))\r\n\r\n conv = np.zeros((iters))\r\n\r\n # Pad images.\r\n npad = ((0, 0), (pad[1], pad[1]), (pad[0], pad[0]))\r\n prj = np.pad(prj, npad, mode='constant', constant_values=0)\r\n\r\n # Register each image frame-by-frame.\r\n for n in range(iters):\r\n # Reconstruct image.\r\n rec = recon(prj, ang, center=center, algorithm=algorithm)\r\n\r\n # Re-project data and obtain simulated data.\r\n sim = project(rec, ang, center=center, pad=False)\r\n\r\n # Blur edges.\r\n if blur:\r\n _prj = blur_edges(prj, rin, rout)\r\n _sim = blur_edges(sim, rin, rout)\r\n else:\r\n _prj = prj\r\n _sim = sim\r\n\r\n # Initialize error matrix per iteration.\r\n err = np.zeros((prj.shape[0]))\r\n\r\n # For each projection\r\n for m in range(prj.shape[0]):\r\n\r\n # Register current projection in sub-pixel precision\r\n shift, error, diffphase = register_translation(\r\n _prj[m], _sim[m], upsample_factor)\r\n err[m] = np.sqrt(shift[0]*shift[0] + shift[1]*shift[1])\r\n sx[m] += shift[0]\r\n sy[m] += shift[1]\r\n\r\n # Register current image with the simulated one\r\n tform = tf.SimilarityTransform(translation=(shift[1], shift[0]))\r\n prj[m] = tf.warp(prj[m], tform, order=5)\r\n\r\n if debug:\r\n print('iter=' + str(n) + ', err=' + str(np.linalg.norm(err)))\r\n conv[n] = np.linalg.norm(err)\r\n\r\n if save:\r\n write_tiff(prj, fdir + '/tmp/iters/prj', n)\r\n write_tiff(sim, fdir + '/tmp/iters/sim', n)\r\n write_tiff(rec, fdir + '/tmp/iters/rec', n)\r\n\r\n # Re-normalize data\r\n prj *= scl\r\n return prj, sx, sy, conv\r\n\r\n\r\ndef align_joint(\r\n prj, ang, fdir='.', iters=10, pad=(0, 0),\r\n blur=True, center=None, algorithm='sirt',\r\n upsample_factor=10, rin=0.5, rout=0.8,\r\n save=False, debug=True):\r\n \"\"\"\r\n Aligns the projection image stack using the joint\r\n re-projection algorithm :cite:`Gursoy:17`.\r\n\r\n Parameters\r\n ----------\r\n prj : ndarray\r\n 3D stack of projection images. 
The first dimension\r\n is projection axis, second and third dimensions are\r\n the x- and y-axes of the projection image, respectively.\r\n ang : ndarray\r\n Projection angles in radians as an array.\r\n iters : scalar, optional\r\n Number of iterations of the algorithm.\r\n pad : list-like, optional\r\n Padding for projection images in x and y-axes.\r\n blur : bool, optional\r\n Blurs the edge of the image before registration.\r\n center: array, optional\r\n Location of rotation axis.\r\n algorithm : {str, function}\r\n One of the following string values.\r\n\r\n 'art'\r\n Algebraic reconstruction technique :cite:`Kak:98`.\r\n 'gridrec'\r\n Fourier grid reconstruction algorithm :cite:`Dowd:99`,\r\n :cite:`Rivers:06`.\r\n 'mlem'\r\n Maximum-likelihood expectation maximization algorithm\r\n :cite:`Dempster:77`.\r\n 'sirt'\r\n Simultaneous algebraic reconstruction technique.\r\n 'tv'\r\n Total Variation reconstruction technique\r\n :cite:`Chambolle:11`.\r\n 'grad'\r\n Gradient descent method with a constant step size\r\n\r\n upsample_factor : integer, optional\r\n The upsampling factor. Registration accuracy is\r\n inversely propotional to upsample_factor.\r\n rin : scalar, optional\r\n The inner radius of blur function. Pixels inside\r\n rin is set to one.\r\n rout : scalar, optional\r\n The outer radius of blur function. Pixels outside\r\n rout is set to zero.\r\n save : bool, optional\r\n Saves projections and corresponding reconstruction\r\n for each algorithm iteration.\r\n debug : book, optional\r\n Provides debugging info such as iterations and error.\r\n\r\n Returns\r\n -------\r\n ndarray\r\n 3D stack of projection images with jitter.\r\n ndarray\r\n Error array for each iteration.\r\n \"\"\"\r\n\r\n # Needs scaling for skimage float operations.\r\n prj, scl = scale(prj)\r\n\r\n # Shift arrays\r\n sx = np.zeros((prj.shape[0]))\r\n sy = np.zeros((prj.shape[0]))\r\n\r\n conv = np.zeros((iters))\r\n\r\n # Pad images.\r\n npad = ((0, 0), (pad[1], pad[1]), (pad[0], pad[0]))\r\n prj = np.pad(prj, npad, mode='constant', constant_values=0)\r\n\r\n # Initialization of reconstruction.\r\n rec = 1e-12 * np.ones((prj.shape[1], prj.shape[2], prj.shape[2]))\r\n\r\n # Register each image frame-by-frame.\r\n for n in range(iters):\r\n\r\n if np.mod(n, 1) == 0:\r\n _rec = rec\r\n\r\n # Reconstruct image.\r\n rec = recon(prj, ang, center=center, algorithm=algorithm,\r\n num_iter=1, init_recon=_rec)\r\n\r\n # Re-project data and obtain simulated data.\r\n sim = project(rec, ang, center=center, pad=False)\r\n\r\n # Blur edges.\r\n if blur:\r\n _prj = blur_edges(prj, rin, rout)\r\n _sim = blur_edges(sim, rin, rout)\r\n else:\r\n _prj = prj\r\n _sim = sim\r\n\r\n # Initialize error matrix per iteration.\r\n err = np.zeros((prj.shape[0]))\r\n\r\n # For each projection\r\n for m in range(prj.shape[0]):\r\n\r\n # Register current projection in sub-pixel precision\r\n shift, error, diffphase = register_translation(\r\n _prj[m], _sim[m], upsample_factor)\r\n err[m] = np.sqrt(shift[0]*shift[0] + shift[1]*shift[1])\r\n sx[m] += shift[0]\r\n sy[m] += shift[1]\r\n\r\n # Register current image with the simulated one\r\n tform = tf.SimilarityTransform(translation=(shift[1], shift[0]))\r\n prj[m] = tf.warp(prj[m], tform, order=5)\r\n\r\n if debug:\r\n print('iter=' + str(n) + ', err=' + str(np.linalg.norm(err)))\r\n conv[n] = np.linalg.norm(err)\r\n\r\n if save:\r\n write_tiff(prj, 'tmp/iters/prj', n)\r\n write_tiff(sim, 'tmp/iters/sim', n)\r\n write_tiff(rec, 'tmp/iters/rec', n)\r\n\r\n # Re-normalize data\r\n prj 
*= scl\r\n return prj, sx, sy, conv\r\n\r\n\r\ndef tilt(obj, rad=0, phi=0):\r\n \"\"\"\r\n Tilt object at a given angle from the rotation axis.\r\n\r\n Warning\r\n -------\r\n Not implemented yet.\r\n\r\n Parameters\r\n ----------\r\n obj : ndarray\r\n 3D discrete object.\r\n rad : scalar, optional\r\n Radius in polar cordinates to define tilt angle.\r\n The value is between 0 and 1, where 0 means no tilt\r\n and 1 means a tilt of 90 degrees. The tilt angle\r\n can be obtained by arcsin(rad).\r\n phi : scalar, optional\r\n Angle in degrees to define tilt direction from the\r\n rotation axis. 0 degree means rotation in sagittal\r\n plane and 90 degree means rotation in coronal plane.\r\n\r\n Returns\r\n -------\r\n ndarray\r\n Tilted 3D object.\r\n \"\"\"\r\n pass\r\n\r\n\r\ndef add_jitter(prj, low=0, high=1):\r\n \"\"\"\r\n Simulates jitter in projection images. The jitter\r\n is simulated by drawing random samples from a uniform\r\n distribution over the half-open interval [low, high).\r\n\r\n Parameters\r\n ----------\r\n prj : ndarray\r\n 3D stack of projection images. The first dimension\r\n is projection axis, second and third dimensions are\r\n the x- and y-axes of the projection image, respectively.\r\n low : float, optional\r\n Lower boundary of the output interval. All values\r\n generated will be greater than or equal to low. The\r\n default value is 0.\r\n high : float\r\n Upper boundary of the output interval. All values\r\n generated will be less than high. The default value\r\n is 1.0.\r\n\r\n Returns\r\n -------\r\n ndarray\r\n 3D stack of projection images with jitter.\r\n \"\"\"\r\n from skimage import transform as tf\r\n\r\n # Needs scaling for skimage float operations.\r\n prj, scl = scale(prj)\r\n\r\n # Random jitter parameters are drawn from uniform distribution.\r\n jitter = np.random.uniform(low, high, size=(prj.shape[0], 2))\r\n\r\n for m in range(prj.shape[0]):\r\n tform = tf.SimilarityTransform(translation=jitter[m])\r\n prj[m] = tf.warp(prj[m], tform, order=0)\r\n\r\n # Re-scale back to original values.\r\n prj *= scl\r\n return prj, jitter[:, 0], jitter[:, 1]\r\n\r\n\r\ndef add_noise(prj, ratio=0.05):\r\n \"\"\"\r\n Adds Gaussian noise with zero mean and a given standard\r\n deviation as a ratio of the maximum value in data.\r\n\r\n Parameters\r\n ----------\r\n prj : ndarray\r\n 3D stack of projection images. The first dimension\r\n is projection axis, second and third dimensions are\r\n the x- and y-axes of the projection image, respectively.\r\n ratio : float, optional\r\n Ratio of the standard deviation of the Gaussian noise\r\n distribution to the maximum value in data.\r\n\r\n Returns\r\n -------\r\n ndarray\r\n 3D stack of projection images with added Gaussian noise.\r\n \"\"\"\r\n std = prj.max() * ratio\r\n noise = np.random.normal(0, std, size=prj.shape)\r\n return prj + noise.astype('float32')\r\n\r\n\r\ndef scale(prj):\r\n \"\"\"\r\n Linearly scales the projection images in the range\r\n between -1 and 1.\r\n\r\n Parameters\r\n ----------\r\n prj : ndarray\r\n 3D stack of projection images. 
The first dimension\r\n is projection axis, second and third dimensions are\r\n the x- and y-axes of the projection image, respectively.\r\n\r\n Returns\r\n -------\r\n ndarray\r\n Scaled 3D stack of projection images.\r\n \"\"\"\r\n scl = max(abs(prj.max()), abs(prj.min()))\r\n prj /= scl\r\n return prj, scl\r\n\r\n\r\ndef blur_edges(prj, low=0, high=0.8):\r\n \"\"\"\r\n Blurs the edge of the projection images.\r\n\r\n Parameters\r\n ----------\r\n prj : ndarray\r\n 3D stack of projection images. The first dimension\r\n is projection axis, second and third dimensions are\r\n the x- and y-axes of the projection image, respectively.\r\n low : scalar, optional\r\n Min ratio of the blurring frame to the image size.\r\n high : scalar, optional\r\n Max ratio of the blurring frame to the image size.\r\n\r\n Returns\r\n -------\r\n ndarray\r\n Edge-blurred 3D stack of projection images.\r\n \"\"\"\r\n _prj = prj.copy()\r\n dx, dy, dz = _prj.shape\r\n rows, cols = np.mgrid[:dy, :dz]\r\n rad = np.sqrt((rows - dy / 2)**2 + (cols - dz / 2)**2)\r\n mask = np.zeros((dy, dz))\r\n rmin, rmax = low * rad.max(), high * rad.max()\r\n mask[rad < rmin] = 1\r\n mask[rad > rmax] = 0\r\n zone = np.logical_and(rad >= rmin, rad <= rmax)\r\n mask[zone] = (rmax - rad[zone]) / (rmax - rmin)\r\n feathered = np.empty((dy, dz), dtype=np.uint8)\r\n _prj *= mask\r\n return _prj\r\n\r\n\r\ndef shift_images(prj, sx, sy):\r\n \"\"\"\r\n Shift projections images for a given set of shift\r\n values in horizontal and vertical directions.\r\n \"\"\"\r\n\r\n from skimage import transform as tf\r\n\r\n # Needs scaling for skimage float operations.\r\n prj, scl = scale(prj)\r\n\r\n # For each projection\r\n for m in range(prj.shape[0]):\r\n tform = tf.SimilarityTransform(translation=(sy[m], sx[m]))\r\n prj[m] = tf.warp(prj[m], tform, order=5)\r\n\r\n # Re-normalize data\r\n prj *= scl\r\n\r\n return prj\r\n\r\n\r\ndef find_slits_corners_aps_1id(img,\r\n method='quadrant+',\r\n medfilt2_kernel_size=3,\r\n medfilt_kernel_size=23,\r\n ):\r\n \"\"\"\r\n Automatically locate the slit box location by its four corners.\r\n\r\n NOTE:\r\n The four slits that form a binding box is the current setup at aps_1id,\r\n which reduce the illuminated region on the detector. Since the slits are\r\n stationary, they can serve as a reference to check detector drifting\r\n during the scan. Technically, the four slits should be used to find\r\n the transformation matrix (not necessarily affine) to correct the image.\r\n However, since we are dealing with 2D images with very little distortion,\r\n affine transformation matrices were used for approximation. 
Therefore\r\n the \"four corners\" are used instead of all four slits.\r\n\r\n Parameters\r\n ----------\r\n img : np.ndarray\r\n 2D images\r\n method : str, ['simple', 'quadrant', 'quadrant+'], optional\r\n method for auto detecting slit corners\r\n - simple :: assume a rectange slit box, fast but less accurate\r\n (1 pixel precision)\r\n - quadrant :: subdivide the image into four quandrant, then use\r\n an explicit method to find the corner\r\n (1 pixel precision)\r\n - quadrant+ :: similar to quadrant, but use curve_fit (gauss1d) to\r\n find the corner\r\n (0.1 pixel precision)\r\n medfilt2_kernel_size : int, optional\r\n 2D median filter kernel size for noise reduction\r\n medfilt_kernel_size : int, optional\r\n 1D median filter kernel size for noise reduction\r\n\r\n Returns\r\n -------\r\n tuple\r\n autodetected slit corners (counter-clockwise order)\r\n (upperLeft, lowerLeft, lowerRight, upperRight)\r\n \"\"\"\r\n img = medfilt2d(np.log(img.astype(np.float64)),\r\n kernel_size=medfilt2_kernel_size,\r\n )\r\n rows, cols = img.shape\r\n\r\n # simple method is simple, therefore it stands out\r\n if method.lower() == 'simple':\r\n # assuming a rectangle type slit box\r\n col_std = medfilt(np.std(img, axis=0), kernel_size=medfilt_kernel_size)\r\n row_std = medfilt(np.std(img, axis=1), kernel_size=medfilt_kernel_size)\r\n # NOTE: in the tiff img\r\n # x is col index, y is the row index ==> key point here !!!\r\n # img slicing is doen with img[row_idx, col_idx]\r\n # ==> so the image idx and corner position are FLIPPED!\r\n _left = np.argmax(np.gradient(col_std))\r\n _right = np.argmin(np.gradient(col_std))\r\n _top = np.argmax(np.gradient(row_std))\r\n _bottom = np.argmin(np.gradient(row_std))\r\n\r\n cnrs = np.array([[_left, _top],\r\n [_left, _bottom],\r\n [_right, _bottom],\r\n [_right, _top],\r\n ])\r\n else:\r\n # predefine all quadrants\r\n # Here let's assume that the four corners of the slit box are in the\r\n # four quadrant defined by the center of the image\r\n # i.e.\r\n # uppper left quadrant: img[0 :cnt[1], 0 :cnt[0]] => quadarnt origin = (0, 0)\r\n # lower left quadrant: img[cnt[1]: , 0 :cnt[0]] => quadarnt origin = (cnt[0], 0)\r\n # lower right quadrant: img[cnt[1]: , cnt[0]: ] => quadarnt origin = (cnt[0], cnt[1])\r\n # upper right quadrant: img[0 :cnt[1], cnt[0]: ] => quadarnt\r\n # origin = (0, cnt[1])\r\n # center of image that defines FOUR quadrants\r\n cnt = [int(cols / 2), int(rows / 2)]\r\n Quadrant = namedtuple('Quadrant', 'img col_func, row_func')\r\n quadrants = [Quadrant(img=img[0:cnt[1], 0:cnt[0]], col_func=np.argmax, row_func=np.argmax), # upper left, 1st quadrant\r\n # lower left, 2nd quadrant\r\n Quadrant(img=img[cnt[1]:, 0:cnt[0]],\r\n col_func=np.argmax, row_func=np.argmin),\r\n # lower right, 3rd quadrant\r\n Quadrant(img=img[cnt[1]:, cnt[0]:],\r\n col_func=np.argmin, row_func=np.argmin),\r\n # upper right, 4th quadrant\r\n Quadrant(img=img[0:cnt[0], cnt[1]:],\r\n col_func=np.argmin, row_func=np.argmax),\r\n ]\r\n # the origin in each quadrants ==> easier to set it here\r\n quadrantorigins = np.array([[0, 0], # upper left, 1st quadrant\r\n [0, cnt[1]], # lower left, 2nd quadrant\r\n # lower right, 3rd quadrant\r\n [cnt[0], cnt[1]],\r\n [cnt[1], 0], # upper right, 4th quadrant\r\n ])\r\n # init four corners\r\n cnrs = np.zeros((4, 2))\r\n if method.lower() == 'quadrant':\r\n # the standard quadrant method\r\n for i, q in enumerate(quadrants):\r\n cnrs[i, :] = np.array([q.col_func(np.gradient(medfilt(np.std(q.img, axis=0), 
kernel_size=medfilt_kernel_size))), # x is col_idx\r\n q.row_func(\r\n np.gradient(\r\n medfilt(\r\n np.std(\r\n q.img,\r\n axis=1),\r\n kernel_size=medfilt_kernel_size))),\r\n # y is row_idx\r\n ])\r\n # add the origin offset back\r\n cnrs = cnrs + quadrantorigins\r\n elif method.lower() == 'quadrant+':\r\n # use Gaussian curve fitting to achive subpixel precision\r\n # TODO:\r\n # improve the curve fitting with Lorentz and Voigt fitting function\r\n for i, q in enumerate(quadrants):\r\n # -- find x subpixel position\r\n cnr_x_guess = q.col_func(\r\n np.gradient(\r\n medfilt(\r\n np.std(\r\n q.img,\r\n axis=0),\r\n kernel_size=medfilt_kernel_size)))\r\n # isolate the strongest peak to fit\r\n tmpx = np.arange(cnr_x_guess - 10, cnr_x_guess + 11)\r\n tmpy = np.gradient(np.std(q.img, axis=0))[tmpx]\r\n # tmpy[0] is the value from the highest/lowest pixle\r\n # tmpx[0] is basically cnr_x_guess\r\n # 5.0 is the guessted std,\r\n coeff, _ = curve_fit(gauss1d, tmpx, tmpy,\r\n p0=[tmpy[0], tmpx[0], 5.0],\r\n maxfev=int(1e6),\r\n )\r\n cnrs[i, 0] = coeff[1] # x position\r\n # -- find y subpixel positoin\r\n cnr_y_guess = q.row_func(\r\n np.gradient(\r\n medfilt(\r\n np.std(\r\n q.img,\r\n axis=1),\r\n kernel_size=medfilt_kernel_size)))\r\n # isolate the peak (x, y here is only associated with the peak)\r\n tmpx = np.arange(cnr_y_guess - 10, cnr_y_guess + 11)\r\n tmpy = np.gradient(np.std(q.img, axis=1))[tmpx]\r\n coeff, _ = curve_fit(gauss1d, tmpx, tmpy,\r\n p0=[tmpy[0], tmpx[0], 5.0],\r\n maxfev=int(1e6),\r\n )\r\n cnrs[i, 1] = coeff[1] # y posiiton\r\n # add the quadrant shift back\r\n cnrs = cnrs + quadrantorigins\r\n\r\n else:\r\n raise NotImplementedError(\r\n \"Available methods are: simple, quadrant, quadrant+\")\r\n\r\n # return the slit corner detected\r\n return cnrs\r\n\r\n\r\ndef calc_slit_box_aps_1id(slit_box_corners, inclip=(1, 10, 1, 10)):\r\n \"\"\"\r\n Calculate the clip box based on given slip corners.\r\n\r\n Parameters\r\n ----------\r\n slit_box_corners : np.ndarray\r\n Four corners of the slit box as a 4x2 matrix\r\n inclip : tuple, optional\r\n Extra inclipping to avoid clipping artifacts\r\n\r\n Returns\r\n -------\r\n Tuple:\r\n Cliping indices as a tuple of four\r\n (clipFromTop, clipToBottom, clipFromLeft, clipToRight)\r\n\r\n \"\"\"\r\n return (\r\n np.floor(slit_box_corners[:, 0].min()).astype(\r\n int) + inclip[0], # clip top row\r\n np.ceil(slit_box_corners[:, 0].max()).astype(\r\n int) - inclip[1], # clip bottom row\r\n np.floor(slit_box_corners[:, 1].min()).astype(\r\n int) + inclip[2], # clip left col\r\n np.ceil(slit_box_corners[:, 1].max()).astype(\r\n int) - inclip[3], # clip right col\r\n )\r\n\r\n\r\ndef remove_slits_aps_1id(imgstacks, slit_box_corners, inclip=(1, 10, 1, 10)):\r\n \"\"\"\r\n Remove the slits from still images\r\n\r\n Parameters\r\n ----------\r\n imgstacks : np.ndarray\r\n tomopy images stacks (axis_0 is the oemga direction)\r\n slit_box_corners : np.ndarray\r\n four corners of the slit box\r\n inclip : tuple, optional\r\n Extra inclipping to avoid clipping artifacts\r\n\r\n Returns\r\n -------\r\n np.ndarray\r\n tomopy images stacks without regions outside slits\r\n \"\"\"\r\n xl, xu, yl, yu = calc_slit_box_aps_1id(slit_box_corners, inclip=inclip)\r\n return imgstacks[:, yl:yu, xl:xu]\r\n\r\n\r\ndef detector_drift_adjust_aps_1id(imgstacks,\r\n slit_cnr_ref,\r\n medfilt2_kernel_size=3,\r\n medfilt_kernel_size=3,\r\n ncore=None,\r\n ):\r\n \"\"\"\r\n Adjust each still image based on the slit corners and generate report fig\r\n\r\n 
Parameters\r\n ----------\r\n imgstacks : np.ndarray\r\n tomopy images stacks (axis_0 is the oemga direction)\r\n slit_cnr_ref : np.ndarray\r\n reference slit corners from white field images\r\n medfilt2_kernel_size : int, optional\r\n 2D median filter kernel size for slit conner detection\r\n medfilt_kernel_size : int, optional\r\n 1D median filter kernel size for slit conner detection\r\n ncore : int, optional\r\n number of cores used for speed up\r\n\r\n Returns\r\n -------\r\n np.ndarray\r\n adjusted imgstacks\r\n np.ndarray\r\n detected corners on each still image\r\n np.ndarray\r\n transformation matrices used to adjust each image\r\n \"\"\"\r\n ncore = mproc.mp.cpu_count() - 1 if ncore is None else ncore\r\n\r\n def quick_diff(x): return np.amax(np.absolute(x))\r\n\r\n # -- find all projection corners (slow)\r\n # NOTE:\r\n # Here we are using an iterative approach to find stable slit corners\r\n # from each image\r\n # 1. calculate all slit corners with the given kernel size, preferably\r\n # a small one for speed.\r\n # 2. double the kernel size and calculate again, but this time we are\r\n # checking whether the slit corners are stable.\r\n # 3. find the ids (n_imgs) for those that are difficult, continue\r\n # increasing the kernel size until all slit corners are found, or max\r\n # number of iterations.\r\n # 4. move on to next step.\r\n nlist = range(imgstacks.shape[0])\r\n proj_cnrs = _calc_proj_cnrs(imgstacks, ncore, nlist,\r\n 'quadrant+',\r\n medfilt2_kernel_size,\r\n medfilt_kernel_size,\r\n )\r\n cnrs_found = np.array([quick_diff(proj_cnrs[n, :, :] - slit_cnr_ref) < 15\r\n for n in nlist])\r\n kernels = [(medfilt2_kernel_size+2*i, medfilt_kernel_size+2*j)\r\n for i in range(15)\r\n for j in range(15)]\r\n counter = 0\r\n\r\n while not cnrs_found.all():\r\n nlist = [idx for idx, cnr_found in enumerate(cnrs_found)\r\n if not cnr_found]\r\n # NOTE:\r\n # Check to see if we run out of candidate kernels:\r\n if counter > len(kernels):\r\n # we are giving up here...\r\n for idx, n_img in enumerate(nlist):\r\n proj_cnrs[n_img, :, :] = slit_cnr_ref\r\n break\r\n else:\r\n # test with differnt 2D and 1D kernels\r\n ks2d, ks1d = kernels[counter]\r\n\r\n _cnrs = _calc_proj_cnrs(imgstacks, ncore, nlist,\r\n 'quadrant+', ks2d, ks1d)\r\n for idx, _cnr in enumerate(_cnrs):\r\n n_img = nlist[idx]\r\n cnr = proj_cnrs[n_img, :, :] # previous results\r\n # NOTE:\r\n # The detector corner should not be far away from reference\r\n # -> adiff < 15\r\n # The detected corner should be stable\r\n # -> rdiff < 0.1 (pixel)s\r\n adiff = quick_diff(_cnr - slit_cnr_ref)\r\n rdiff = quick_diff(_cnr - cnr)\r\n if rdiff < 0.1 and adiff < 15:\r\n cnrs_found[n_img] = True\r\n else:\r\n # update results\r\n proj_cnrs[n_img, :, :] = _cnr # update results for next iter\r\n\r\n # next\r\n counter += 1\r\n\r\n # -- calculate affine transformation (fast)\r\n img_correct_F = np.ones((imgstacks.shape[0], 3, 3))\r\n for n_img in range(imgstacks.shape[0]):\r\n img_correct_F[n_img, :, :] = calc_affine_transform(\r\n proj_cnrs[n_img, :, :], slit_cnr_ref)\r\n\r\n # -- apply affine transformation (slow)\r\n tmp = []\r\n with cf.ProcessPoolExecutor(ncore) as e:\r\n for n_img in range(imgstacks.shape[0]):\r\n tmp.append(e.submit(affine_transform,\r\n # input image\r\n imgstacks[n_img, :, :],\r\n # rotation matrix\r\n img_correct_F[n_img, 0:2, 0:2],\r\n # offset vector\r\n offset=img_correct_F[n_img, 0:2, 2],\r\n )\r\n )\r\n imgstacks = np.stack([me.result() for me in tmp], axis=0)\r\n\r\n return imgstacks, proj_cnrs, 
img_correct_F\r\n\r\n\r\ndef _calc_proj_cnrs(imgs,\r\n ncore,\r\n nlist,\r\n method,\r\n medfilt2_kernel_size,\r\n medfilt_kernel_size,\r\n ):\r\n \"\"\"\r\n Private function calculate slit corners concurrently\r\n\r\n Parameters\r\n ----------\r\n imgs : ndarray\r\n tomopy images stacks (axis_0 is the oemga direction)\r\n ncore : int\r\n number of cores to use\r\n nlist : list of int\r\n index of images to be processed\r\n method : str\r\n slit corner detection method name\r\n medfilt2_kernel_size : int\r\n 2D median filter kernel size, must be odd\r\n medfilt_kernel_size : int\r\n 1D median filter kernel size, must be odd\r\n\r\n Returns\r\n -------\r\n np.3darray\r\n detected corners on each still image\r\n \"\"\"\r\n tmp = []\r\n with cf.ProcessPoolExecutor(ncore) as e:\r\n for n_img in nlist:\r\n tmp.append(e.submit(find_slits_corners_aps_1id,\r\n imgs[n_img, :, :],\r\n method=method,\r\n medfilt2_kernel_size=medfilt2_kernel_size,\r\n medfilt_kernel_size=medfilt_kernel_size,\r\n )\r\n )\r\n return np.stack([me.result() for me in tmp], axis=0)\r\n\r\n\r\ndef distortion_correction_proj(tomo, xcenter, ycenter, list_fact,\r\n ncore=None, nchunk=None):\r\n \"\"\"\r\n Apply distortion correction to projections using the polynomial model.\r\n Coefficients are calculated using Vounwarp package.:cite:`Vo:15`\r\n\r\n Parameters\r\n ----------\r\n tomo : ndarray\r\n 3D tomographic data.\r\n xcenter : float\r\n Center of distortion in x-direction. From the left of the image.\r\n ycenter : float\r\n Center of distortion in y-direction. From the top of the image. \r\n list_fact : list of floats\r\n Polynomial coefficients of the backward model.\r\n ncore : int, optional\r\n Number of cores that will be assigned to jobs.\r\n nchunk : int, optional\r\n Chunk size for each core.\r\n\r\n Returns\r\n -------\r\n ndarray\r\n Corrected 3D tomographic data.\r\n \"\"\"\r\n arr = mproc.distribute_jobs(\r\n tomo,\r\n func=_distortion_correction_proj,\r\n args=(xcenter, ycenter, list_fact),\r\n axis=0,\r\n ncore=ncore,\r\n nchunk=nchunk)\r\n return arr\r\n\r\n\r\ndef _unwarp_image_backward(mat, xcenter, ycenter, list_fact):\r\n \"\"\"\r\n Unwarp an image using the polynomial model.\r\n \r\n Parameters\r\n ----------\r\n mat : 2D array.\r\n xcenter : float \r\n Center of distortion in x-direction. From the left of the image.\r\n ycenter : float\r\n Center of distortion in y-direction. 
From the top of the image.\r\n list_fact : list of floats \r\n Polynomial coefficients of the backward model.\r\n \r\n Returns\r\n -------\r\n 2D array\r\n Corrected image.\r\n \"\"\"\r\n (height, width) = mat.shape\r\n xu_list = np.arange(width) - xcenter\r\n yu_list = np.arange(height) - ycenter\r\n xu_mat, yu_mat = np.meshgrid(xu_list, yu_list)\r\n ru_mat = np.sqrt(xu_mat**2 + yu_mat**2)\r\n fact_mat = np.sum(\r\n np.asarray([factor * ru_mat**i for i,\r\n factor in enumerate(list_fact)]), axis=0)\r\n xd_mat = np.float32(np.clip(xcenter + fact_mat * xu_mat, 0, width - 1))\r\n yd_mat = np.float32(np.clip(ycenter + fact_mat * yu_mat, 0, height - 1))\r\n indices = np.reshape(yd_mat, (-1, 1)), np.reshape(xd_mat, (-1, 1))\r\n mat = map_coordinates(mat, indices, order=1, mode='reflect')\r\n return mat.reshape((height, width))\r\n\r\n\r\ndef _distortion_correction_proj(tomo, xcenter, ycenter, list_fact):\r\n for m in np.arange(tomo.shape[0]):\r\n proj = tomo[m, :, :]\r\n proj = _unwarp_image_backward(proj, xcenter, ycenter, list_fact) \r\n tomo[m, :, :] = proj\r\n\r\n\r\ndef distortion_correction_sino(tomo, ind, xcenter, ycenter, list_fact):\r\n \"\"\"\r\n Generate an unwarped sinogram of a 3D tomographic data using\r\n the polynomial model. Coefficients are calculated using Vounwarp\r\n package :cite:`Vo:15`\r\n\r\n Parameters\r\n ----------\r\n tomo : ndarray\r\n 3D tomographic data.\r\n ind : int\r\n Index of the unwarped sinogram.\r\n xcenter : float\r\n Center of distortion in x-direction. From the left of the image.\r\n ycenter : float\r\n Center of distortion in y-direction. From the top of the image. \r\n list_fact : list of floats\r\n Polynomial coefficients of the backward model.\r\n\r\n Returns\r\n -------\r\n 2D array\r\n Corrected sinogram.\r\n \"\"\"\r\n (depth, height, width) = tomo.shape\r\n xu_list = np.arange(0, width) - xcenter\r\n yu = ind - ycenter\r\n ru_list = np.sqrt(xu_list**2 + yu**2)\r\n flist = np.sum(\r\n np.asarray([factor * ru_list**i for i,\r\n factor in enumerate(list_fact)]), axis=0)\r\n xd_list = np.clip(xcenter + flist * xu_list, 0, width - 1)\r\n yd_list = np.clip(ycenter + flist * yu, 0, height - 1)\r\n yd_min = np.int16(np.floor(np.amin(yd_list)))\r\n yd_max = np.int16(np.ceil(np.amax(yd_list))) + 1\r\n yd_list = yd_list - yd_min \r\n sino = np.zeros((depth, width), dtype=np.float32)\r\n indices = yd_list, xd_list\r\n for i in np.arange(depth):\r\n sino[i] = map_coordinates(\r\n tomo[i, yd_min:yd_max, :], indices, order=1, mode='reflect')\r\n return sino\r\n\r\n\r\ndef load_distortion_coefs(file_path):\r\n \"\"\"\r\n Load distortion coefficients from a text file.\r\n Order of the infor in the text file:\r\n xcenter\r\n ycenter\r\n factor_0\r\n factor_1\r\n factor_2\r\n ..\r\n\r\n Parameters\r\n ----------\r\n file_path: Path to the file.\r\n\r\n Returns\r\n -------\r\n Tuple of (xcenter, ycenter, list_fact).\r\n \"\"\"\r\n with open(file_path, 'r') as f:\r\n x = f.read().splitlines()\r\n list_data = []\r\n for i in x:\r\n list_data.append(float(i.split()[-1]))\r\n xcenter = list_data[0]\r\n ycenter = list_data[1]\r\n list_fact = list_data[2:]\r\n return xcenter, ycenter, list_fact" ]
[ [ "numpy.amax", "numpy.sqrt", "numpy.pad", "numpy.clip", "numpy.reshape", "numpy.arange", "numpy.std", "numpy.zeros", "numpy.amin", "scipy.ndimage.map_coordinates", "numpy.meshgrid", "numpy.array", "numpy.logical_and", "numpy.absolute", "numpy.gradient", "numpy.linalg.norm", "numpy.ones", "numpy.random.normal", "numpy.mod", "numpy.random.uniform", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
willyrv/optimal_lexic
[ "9f13c3cd65430168f1b7565aaf513dcf835b01a8" ]
[ "ETM/main.py" ]
[ "#/usr/bin/python\n\nfrom __future__ import print_function\n\nimport argparse\nimport torch\nimport pickle \nimport numpy as np \nimport os \nimport math \nimport random \nimport sys\nimport matplotlib.pyplot as plt \nimport data\nimport scipy.io\n\nfrom torch import nn, optim\nfrom torch.nn import functional as F\n\nfrom etm import ETM\nfrom utils import nearest_neighbors, get_topic_coherence, get_topic_diversity\n\nparser = argparse.ArgumentParser(description='The Embedded Topic Model')\n\n### data and file related arguments\nparser.add_argument('--dataset', type=str, default='asrs', help='name of corpus')\nparser.add_argument('--data_path', type=str, default='data/asrs', help='directory containing data')\nparser.add_argument('--emb_path', type=str, default='data/20ng_embeddings.txt', help='directory containing word embeddings')\nparser.add_argument('--save_path', type=str, default='./results', help='path to save results')\nparser.add_argument('--batch_size', type=int, default=1000, help='input batch size for training')\n\n### model-related arguments\nparser.add_argument('--num_topics', type=int, default=50, help='number of topics')\nparser.add_argument('--rho_size', type=int, default=300, help='dimension of rho')\nparser.add_argument('--emb_size', type=int, default=300, help='dimension of embeddings')\nparser.add_argument('--t_hidden_size', type=int, default=800, help='dimension of hidden space of q(theta)')\nparser.add_argument('--theta_act', type=str, default='relu', help='tanh, softplus, relu, rrelu, leakyrelu, elu, selu, glu)')\nparser.add_argument('--train_embeddings', type=int, default=0, help='whether to fix rho or train it')\n\n### optimization-related arguments\nparser.add_argument('--lr', type=float, default=0.005, help='learning rate')\nparser.add_argument('--lr_factor', type=float, default=4.0, help='divide learning rate by this...')\nparser.add_argument('--epochs', type=int, default=20, help='number of epochs to train...150 for 20ng 100 for others')\nparser.add_argument('--mode', type=str, default='train', help='train or eval model')\nparser.add_argument('--optimizer', type=str, default='adam', help='choice of optimizer')\nparser.add_argument('--seed', type=int, default=2019, help='random seed (default: 1)')\nparser.add_argument('--enc_drop', type=float, default=0.0, help='dropout rate on encoder')\nparser.add_argument('--clip', type=float, default=0.0, help='gradient clipping')\nparser.add_argument('--nonmono', type=int, default=10, help='number of bad hits allowed')\nparser.add_argument('--wdecay', type=float, default=1.2e-6, help='some l2 regularization')\nparser.add_argument('--anneal_lr', type=int, default=0, help='whether to anneal the learning rate or not')\nparser.add_argument('--bow_norm', type=int, default=1, help='normalize the bows or not')\n\n### evaluation, visualization, and logging-related arguments\nparser.add_argument('--num_words', type=int, default=10, help='number of words for topic viz')\nparser.add_argument('--log_interval', type=int, default=2, help='when to log training')\nparser.add_argument('--visualize_every', type=int, default=10, help='when to visualize results')\nparser.add_argument('--eval_batch_size', type=int, default=1000, help='input batch size for evaluation')\nparser.add_argument('--load_from', type=str, default='', help='the name of the ckpt to eval from')\nparser.add_argument('--tc', type=int, default=0, help='whether to compute topic coherence or not')\nparser.add_argument('--td', type=int, default=0, help='whether to compute topic 
diversity or not')\n\nargs = parser.parse_args()\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nprint('\\n')\nnp.random.seed(args.seed)\ntorch.manual_seed(args.seed)\nif torch.cuda.is_available():\n torch.cuda.manual_seed(args.seed)\n\n## get data\n# 1. vocabulary\nvocab, train, valid, test = data.get_data(os.path.join(args.data_path))\nvocab_size = len(vocab)\nargs.vocab_size = vocab_size\n\n# 1. training data\ntrain_tokens = train['tokens']\ntrain_counts = train['counts']\nargs.num_docs_train = len(train_tokens)\n\n# 2. dev set\nvalid_tokens = valid['tokens']\nvalid_counts = valid['counts']\nargs.num_docs_valid = len(valid_tokens)\n\n# 3. test data\ntest_tokens = test['tokens']\ntest_counts = test['counts']\nargs.num_docs_test = len(test_tokens)\ntest_1_tokens = test['tokens_1']\ntest_1_counts = test['counts_1']\nargs.num_docs_test_1 = len(test_1_tokens)\ntest_2_tokens = test['tokens_2']\ntest_2_counts = test['counts_2']\nargs.num_docs_test_2 = len(test_2_tokens)\n\nembeddings = None\n\n#for using pre fitted models\nif not args.train_embeddings:\n emb_path = args.emb_path\n vect_path = os.path.join(args.data_path.split('/')[0], 'embeddings.pkl') \n vectors = {}\n with open(emb_path, 'rb') as f:\n for l in f:\n line = l.decode().split()\n word = line[0]\n if word in vocab:\n vect = np.array(line[1:]).astype(np.float)\n vectors[word] = vect\n embeddings = np.zeros((vocab_size, args.emb_size))\n words_found = 0\n for i, word in enumerate(vocab):\n try: \n embeddings[i] = vectors[word]\n words_found += 1\n except KeyError:\n embeddings[i] = np.random.normal(scale=0.6, size=(args.emb_size, ))\n embeddings = torch.from_numpy(embeddings).to(device)\n args.embeddings_dim = embeddings.size()\n\nprint('=*'*100)\nprint('Training an Embedded Topic Model on {} with the following settings: {}'.format(args.dataset.upper(), args))\nprint('=*'*100)\n\n## define checkpoint\nif not os.path.exists(args.save_path):\n os.makedirs(args.save_path)\n\nif args.mode == 'eval':\n ckpt = args.load_from\nelse: #we use this one\n ckpt = os.path.join(args.save_path, \n 'etm_{}_K_{}_Htheta_{}_Optim_{}_Clip_{}_ThetaAct_{}_Lr_{}_Bsz_{}_RhoSize_{}_trainEmbeddings_{}'.format(\n args.dataset, args.num_topics, args.t_hidden_size, args.optimizer, args.clip, args.theta_act, \n args.lr, args.batch_size, args.rho_size, args.train_embeddings))\n\n## define model and optimizer\nmodel = ETM(args.num_topics, vocab_size, args.t_hidden_size, args.rho_size, args.emb_size, \n args.theta_act, embeddings, args.train_embeddings, args.enc_drop).to(device)\n\nprint('model: {}'.format(model))\n\nif args.optimizer == 'adam':\n optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wdecay)\nelif args.optimizer == 'adagrad':\n optimizer = optim.Adagrad(model.parameters(), lr=args.lr, weight_decay=args.wdecay)\nelif args.optimizer == 'adadelta':\n optimizer = optim.Adadelta(model.parameters(), lr=args.lr, weight_decay=args.wdecay)\nelif args.optimizer == 'rmsprop':\n optimizer = optim.RMSprop(model.parameters(), lr=args.lr, weight_decay=args.wdecay)\nelif args.optimizer == 'asgd':\n optimizer = optim.ASGD(model.parameters(), lr=args.lr, t0=0, lambd=0., weight_decay=args.wdecay)\nelse:\n print('Defaulting to vanilla SGD')\n optimizer = optim.SGD(model.parameters(), lr=args.lr)\n\ndef train(epoch):\n model.train()\n acc_loss = 0\n acc_kl_theta_loss = 0\n cnt = 0\n indices = torch.randperm(args.num_docs_train)\n indices = torch.split(indices, args.batch_size)\n for idx, ind in enumerate(indices):\n 
optimizer.zero_grad()\n model.zero_grad()\n data_batch = data.get_batch(train_tokens, train_counts, ind, args.vocab_size, device)\n sums = data_batch.sum(1).unsqueeze(1)\n if args.bow_norm:\n normalized_data_batch = data_batch / sums\n else:\n normalized_data_batch = data_batch\n recon_loss, kld_theta = model(data_batch, normalized_data_batch)\n total_loss = recon_loss + kld_theta\n total_loss.backward()\n\n if args.clip > 0:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)\n optimizer.step()\n\n acc_loss += torch.sum(recon_loss).item()\n acc_kl_theta_loss += torch.sum(kld_theta).item()\n cnt += 1\n\n if idx % args.log_interval == 0 and idx > 0:\n cur_loss = round(acc_loss / cnt, 2) \n cur_kl_theta = round(acc_kl_theta_loss / cnt, 2) \n cur_real_loss = round(cur_loss + cur_kl_theta, 2)\n\n print('Epoch: {} .. batch: {}/{} .. LR: {} .. KL_theta: {} .. Rec_loss: {} .. NELBO: {}'.format(\n epoch, idx, len(indices), optimizer.param_groups[0]['lr'], cur_kl_theta, cur_loss, cur_real_loss))\n \n cur_loss = round(acc_loss / cnt, 2) \n cur_kl_theta = round(acc_kl_theta_loss / cnt, 2) \n cur_real_loss = round(cur_loss + cur_kl_theta, 2)\n print('*'*100)\n print('Epoch----->{} .. LR: {} .. KL_theta: {} .. Rec_loss: {} .. NELBO: {}'.format(\n epoch, optimizer.param_groups[0]['lr'], cur_kl_theta, cur_loss, cur_real_loss))\n print('*'*100)\n\ndef visualize(m, show_emb=True):\n if not os.path.exists('./results'):\n os.makedirs('./results')\n\n m.eval()\n\n queries = ['snow', 'flt', 'light', 'leak', 'gpws', 'turbulence', \n 'pressurization', 'collision', 'smoke',\n 'vfr', 'twr', 'approach','medical','failure','comply',\n 'door','tire']\n\n ## visualize topics using monte carlo\n with torch.no_grad():\n print('#'*100)\n print('Visualize topics...')\n topics_words = []\n gammas = m.get_beta()\n for k in range(args.num_topics):\n gamma = gammas[k]\n top_words = list(gamma.cpu().numpy().argsort()[-args.num_words+1:][::-1])\n topic_words = [vocab[a] for a in top_words]\n topics_words.append(' '.join(topic_words))\n print('Topic {}: {}'.format(k, topic_words))\n\n if show_emb:\n ## visualize word embeddings by using V to get nearest neighbors\n print('#'*100)\n print('Visualize word embeddings by using output embedding matrix')\n try:\n embeddings = m.rho.weight # Vocab_size x E\n except:\n embeddings = m.rho # Vocab_size x E\n neighbors = []\n for word in queries:\n print('word: {} .. 
neighbors: {}'.format(\n word, nearest_neighbors(word, embeddings, vocab)))\n print('#'*100)\n\ndef evaluate(m, source, tc=False, td=False):\n \"\"\"Compute perplexity on document completion.\n \"\"\"\n m.eval()\n with torch.no_grad():\n if source == 'val':\n indices = torch.split(torch.tensor(range(args.num_docs_valid)), args.eval_batch_size)\n tokens = valid_tokens\n counts = valid_counts\n else: \n indices = torch.split(torch.tensor(range(args.num_docs_test)), args.eval_batch_size)\n tokens = test_tokens\n counts = test_counts\n\n ## get \\beta here\n beta = m.get_beta()\n\n ### do dc and tc here\n acc_loss = 0\n cnt = 0\n indices_1 = torch.split(torch.tensor(range(args.num_docs_test_1)), args.eval_batch_size)\n for idx, ind in enumerate(indices_1):\n ## get theta from first half of docs\n data_batch_1 = data.get_batch(test_1_tokens, test_1_counts, ind, args.vocab_size, device)\n sums_1 = data_batch_1.sum(1).unsqueeze(1)\n if args.bow_norm:\n normalized_data_batch_1 = data_batch_1 / sums_1\n else:\n normalized_data_batch_1 = data_batch_1\n theta, _ = m.get_theta(normalized_data_batch_1)\n\n ## get prediction loss using second half\n data_batch_2 = data.get_batch(test_2_tokens, test_2_counts, ind, args.vocab_size, device)\n sums_2 = data_batch_2.sum(1).unsqueeze(1)\n res = torch.mm(theta, beta)\n preds = torch.log(res)\n recon_loss = -(preds * data_batch_2).sum(1)\n \n loss = recon_loss / sums_2.squeeze()\n loss = loss.mean().item()\n acc_loss += loss\n cnt += 1\n cur_loss = acc_loss / cnt\n ppl_dc = round(math.exp(cur_loss), 1)\n print('*'*100)\n print('{} Doc Completion PPL: {}'.format(source.upper(), ppl_dc))\n print('*'*100)\n if tc or td:\n beta = beta.data.cpu().numpy()\n if tc:\n print('Computing topic coherence...')\n get_topic_coherence(beta, train_tokens, vocab)\n if td:\n print('Computing topic diversity...')\n get_topic_diversity(beta, 25)\n return ppl_dc\n\nif args.mode == 'train':\n ## train model on data \n best_epoch = 0\n best_val_ppl = 1e9\n all_val_ppls = []\n print('\\n')\n print('Visualizing model quality before training...')\n visualize(model)\n print('\\n')\n for epoch in range(1, args.epochs):\n train(epoch)\n val_ppl = evaluate(model, 'val')\n if val_ppl < best_val_ppl:\n with open(ckpt, 'wb') as f:\n torch.save(model, f)\n best_epoch = epoch\n best_val_ppl = val_ppl\n else:\n ## check whether to anneal lr\n lr = optimizer.param_groups[0]['lr']\n if args.anneal_lr and (len(all_val_ppls) > args.nonmono and val_ppl > min(all_val_ppls[:-args.nonmono]) and lr > 1e-5):\n optimizer.param_groups[0]['lr'] /= args.lr_factor\n if epoch % args.visualize_every == 0:\n visualize(model)\n all_val_ppls.append(val_ppl)\n with open(ckpt, 'rb') as f:\n model = torch.load(f)\n model = model.to(device)\n val_ppl = evaluate(model, 'val')\nelse: \n with open(ckpt, 'rb') as f:\n model = torch.load(f)\n model = model.to(device)\n model.eval()\n\n with torch.no_grad():\n ## get document completion perplexities\n test_ppl = evaluate(model, 'test', tc=args.tc, td=args.td)\n\n ## get most used topics\n indices = torch.tensor(range(args.num_docs_train))\n indices = torch.split(indices, args.batch_size)\n thetaAvg = torch.zeros(1, args.num_topics).to(device)\n thetaWeightedAvg = torch.zeros(1, args.num_topics).to(device)\n cnt = 0\n for idx, ind in enumerate(indices):\n data_batch = data.get_batch(train_tokens, train_counts, ind, args.vocab_size, device)\n sums = data_batch.sum(1).unsqueeze(1)\n cnt += sums.sum(0).squeeze().cpu().numpy()\n if args.bow_norm:\n normalized_data_batch = 
data_batch / sums\n else:\n normalized_data_batch = data_batch\n theta, _ = model.get_theta(normalized_data_batch)\n thetaAvg += theta.sum(0).unsqueeze(0) / args.num_docs_train\n weighed_theta = sums * theta\n thetaWeightedAvg += weighed_theta.sum(0).unsqueeze(0)\n if idx % 100 == 0 and idx > 0:\n print('batch: {}/{}'.format(idx, len(indices)))\n thetaWeightedAvg = thetaWeightedAvg.squeeze().cpu().numpy() / cnt\n print('\\nThe 10 most used topics are {}'.format(thetaWeightedAvg.argsort()[::-1][:10]))\n\n ## show topics\n beta = model.get_beta()\n topic_indices = list(np.random.choice(args.num_topics, 10)) # 10 random topics\n print('\\n')\n for k in range(args.num_topics):#topic_indices:\n gamma = beta[k]\n top_words = list(gamma.cpu().numpy().argsort()[-args.num_words+1:][::-1])\n topic_words = [vocab[a] for a in top_words]\n print('Topic {}: {}'.format(k, topic_words))\n\n if args.train_embeddings:\n ## show etm embeddings \n try:\n rho_etm = model.rho.weight.cpu()\n except:\n rho_etm = model.rho.cpu()\n queries = ['andrew', 'woman', 'computer', 'sports', 'religion', 'man', 'love', \n 'intelligence', 'money', 'politics', 'health', 'people', 'family']\n print('\\n')\n print('ETM embeddings...')\n for word in queries:\n print('word: {} .. etm neighbors: {}'.format(word, nearest_neighbors(word, rho_etm, vocab)))\n print('\\n')\n" ]
[ [ "torch.mm", "torch.cuda.manual_seed", "numpy.random.seed", "torch.randperm", "torch.manual_seed", "torch.load", "numpy.random.choice", "torch.sum", "torch.from_numpy", "torch.zeros", "numpy.random.normal", "torch.no_grad", "torch.log", "torch.cuda.is_available", "torch.split", "numpy.array", "numpy.zeros", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
meyer-lab/tfac-ccle
[ "949a17f345f9a6d13d42c5284195b4e0362919ae" ]
[ "tfac/dataHelpers.py" ]
[ "\"\"\"Contains function for importing and handling OHSU data\"\"\"\nfrom os.path import join, dirname\nimport numpy as np\nimport pandas as pd\n\npath_here = dirname(dirname(__file__))\n\n\ndef importLINCSprotein():\n \"\"\" Import protein characterization from LINCS. \"\"\"\n dataA = pd.read_csv(join(path_here, \"tfac/data/01_Laura_Heiser__Sean_Gross_A.csv\"))\n dataB = pd.read_csv(join(path_here, \"tfac/data/01_Laura_Heiser__Sean_Gross_B.csv\"))\n dataC = pd.read_csv(join(path_here, \"tfac/data/01_Laura_Heiser__Sean_Gross_C.csv\"))\n\n dataA[\"File\"] = \"A\"\n dataB[\"File\"] = \"B\"\n dataC[\"File\"] = \"C\"\n\n return pd.concat([dataA, dataB, dataC])\n\n\ndef ohsu_data():\n \"\"\" Import OHSU data for PARAFAC2\"\"\"\n return pd.read_csv(join(path_here, \"tfac/data/ohsu/MDD_RNAseq_Level4.txt\"), delimiter=\"\\t\")\n\n\ndef proteinNames():\n \"\"\"Return protein names (data columns)\"\"\"\n data = importLINCSprotein()\n data = data.drop(columns=[\"Treatment\", \"Sample description\", \"File\", \"Time\"], axis=1)\n return data.columns.values.tolist()\n\n\ndef form_tensor():\n \"\"\" Creates tensor in numpy array form and returns tensor, treatments, and time.\n Returns both the protein and RNAseq tensors in aligned format. \"\"\"\n df = importLINCSprotein()\n df.drop(columns=[\"Sample description\", \"File\"], inplace=True)\n times = pd.unique(df[\"Time\"])\n\n # Group replicates and average\n df = df.groupby([\"Treatment\", \"Time\"]).mean()\n\n for treatment in df.index.unique(level=0):\n df.loc[(treatment, 0), :] = df.loc[('Control', 0)].values\n\n df.drop('Control', inplace=True, level=0)\n df = df.sort_index()\n\n dfArray = df.to_numpy()\n tensor = np.reshape(dfArray, (-1, len(times), dfArray.shape[1]))\n\n # Subtract off control\n tensor -= tensor[0, 0, :]\n\n RNAseq = ohsu_data()\n\n # Copy over control\n for treatment in df.index.unique(level=0):\n RNAseq[treatment + \"_0\"] = RNAseq[\"ctrl_0\"]\n\n RNAseq = RNAseq.set_index(\"ensembl_gene_id\").T\n RNAseq.index = RNAseq.index.str.split('_', expand=True)\n RNAseq.index = RNAseq.index.set_levels(RNAseq.index.levels[1].astype(int), level=1)\n\n RNAseq.drop('ctrl', inplace=True, level=0)\n RNAseq = RNAseq.reindex(index=df.index)\n\n rArray = RNAseq.to_numpy()\n rTensor = np.reshape(rArray, (-1, len(times), rArray.shape[1]))\n\n # Normalize the data\n tensor -= np.mean(tensor, axis=(0, 1), keepdims=True) # proteins\n rTensor -= np.nanmean(rTensor, axis=(0, 1), keepdims=True) # genes\n\n # Match variance of both datasets\n tensor /= np.nansum(np.square(tensor))\n rTensor /= np.nansum(np.square(rTensor))\n\n assert rTensor.shape[0] == tensor.shape[0]\n assert rTensor.shape[1] == tensor.shape[1]\n\n return np.append(tensor, rTensor, axis=2), df.index.unique(level=0), times\n\n\"Will give a tensor of shape (7, 6, 57662)\"\n\"7 treatments, in this order: 'BMP2', 'EGF', 'HGF', 'IFNg', 'OSM', 'PBS', 'TGFb'\"\n\"6 time points (in hours), in this order: 0.0, 1.0, 4.0, 8.0, 24.0, 48.0\"\n\"295 protein data points + 57367 gene data points = 57662 total data points\"\n" ]
[ [ "numpy.square", "pandas.concat", "numpy.append", "numpy.mean", "pandas.unique", "numpy.nanmean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
Theocrat/Iris
[ "5aaba5dc915f53d148106c0c6bca57e09c548d9c" ]
[ "Iris_recog/locate.py" ]
[ "from pupil import *\nfrom iris import *\nfrom numpy import zeros\nfrom skimage import draw\nfrom imworks import *\n\ndef locate(fname):\n\tpupil_img = pupil_detect(fname)\n\trows = pupil_img.shape[0]\n\tcols = pupil_img.shape[1]\n\n\tfor col in range(cols):\n\t\tcol = cols - 1 - col\n\t\tif sum(pupil_img[:,col]) > 0:\n\t\t\teast_mark = col\n\t\t\tbreak\n\n\tfor col in range(east_mark):\n\t\tcol = east_mark - 1 - col\n\t\tif sum(pupil_img[:,col]) == 0:\n\t\t\twest_mark = col\n\t\t\tbreak\n\n\tfor row in range(rows):\n\t\trow = rows - 1 - row\n\t\tif sum(pupil_img[row,:]) > 0:\n\t\t\tsouth_mark = row\n\t\t\tbreak\n\n\tfor row in range(south_mark):\n\t\trow = south_mark - 1 - row\n\t\tif sum(pupil_img[row,:]) == 0:\n\t\t\tnorth_mark = row\n\t\t\tbreak\n\n\tcenter_x = (west_mark + east_mark) / 2\n\tcenter_y = (north_mark + south_mark) / 2\n\n\tlines = zeros([rows,cols])\n\trr, cc = draw.line(south_mark,east_mark,north_mark,east_mark)\n\tlines[rr,cc] = 1\t\n\trr, cc = draw.line(south_mark,west_mark,north_mark,west_mark)\n\tlines[rr,cc] = 1\n\trr, cc = draw.line(south_mark,west_mark,south_mark,east_mark)\n\tlines[rr,cc] = 1\t\n\trr, cc = draw.line(north_mark,west_mark,north_mark,east_mark)\n\tlines[rr,cc] = 1\t\n\trr, cc = draw.circle(center_y,center_x,3)\n\tlines[rr,cc] = 1\n\n\t#Locating Iris bounding box\n\tiris_img = iris_detect(fname)\n\n\tx = east_mark\n\twhile(iris_img[center_y,x]) == 1: x += 1\n\tiris_east = x\n\n\tx = west_mark\n\twhile(iris_img[center_y,x]) == 1: x -= 1\n\tiris_west = x\n\n\trr, cc = draw.line(0,iris_east,rows-1,iris_east)\n\tlines[rr,cc] = 1\t\n\trr, cc = draw.line(0,iris_west,rows-1,iris_west)\n\tlines[rr,cc] = 1\n\n\t# Displaying bounding boxes with lines\n\tfull_color = zeros([rows,cols,3])\n\tfor i in range(rows):\n\t\tfor j in range(cols):\n\t\t\tfull_color[i,j,0] = pupil_img[i,j]\n\t\t\tfull_color[i,j,1] = lines[i,j]\n\n\tfor i in range(rows):\n\t\tfor j in range(cols):\n\t\t\tfull_color[i,j,2] = iris_img[i,j]\n\n\t#print('Eastern distance: ' + str(iris_east - center_x))\n\t#print('Western distance: ' + str(center_x - iris_west))\n\t#disp(full_color)\n\n\t# Generating mask:\n\tradius = max([(iris_east - center_x),(center_x - iris_west)])\n\tmask = zeros([rows,cols])\n\n\trr, cc = draw.circle(center_y, center_x,radius)\n\tfor i in range(len(rr)):\n\t\tif rr[i] < 0: rr[i] = 0\n\t\tif rr[i] >= rows: rr[i] = rows - 1\n\tfor i in range(len(cc)):\n\t\tif cc[i] < 0: cc[i] = 0\n\t\tif cc[i] >= cols: cc[i] = cols - 1\n\tmask[rr,cc] = 1\n\n\trr, cc = draw.circle(center_y, center_x,(0.5*(east_mark-west_mark)))\n\tfor i in range(len(rr)):\n\t\tif rr[i] < 0: rr[i] = 0\n\t\tif rr[i] >= rows: rr[i] = rows - 1\n\tfor i in range(len(cc)):\n\t\tif cc[i] < 0: cc[i] = 0\n\t\tif cc[i] >= cols: cc[i] = cols - 1\n\tmask[rr,cc] = 0\n\n\timg = bnw(fname)\n\tpad = 6\n\tmasked_eye = zeros([img.shape[0]-2*pad,img.shape[1]-2*pad])\n\tfor i in range(rows):\n\t\t\tfor j in range(cols):\n\t\t\t\tmasked_eye[i,j] = min([mask[i,j],img[pad+i,pad+j]])\n\n\tcheck_mask = zeros([rows,cols,3])\n\tfor i in range(rows):\n\t\tfor j in range(cols):\n\t\t\tcheck_mask[i,j,0] = img[i,j] * 0.8\n\t\t\tcheck_mask[i,j,1] = img[i,j] * (0.8 + 0.2*mask[i-2*pad,j-2*pad])\n\t\t\tcheck_mask[i,j,2] = img[i,j] * (0.8 + 0.2*mask[i-2*pad,j-2*pad])\n\t\n\tinner_radius = 0.5 * (east_mark - west_mark)\n\touter_radius = 0.5 * (iris_east - iris_west)\n\tcenter_r, center_c = center_y,center_x\n\n\treturn [inner_radius, outer_radius, (center_r, center_c)]\n\n" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
GabrielGustavoMS/processamentoDeImagens
[ "df4d553a60b4299b1ae161ca8f067105c46f9155" ]
[ "opencv2/BlackHat.py" ]
[ "import cv2\r\nimport numpy as np\r\n\r\nimg = cv2.imread('j.png',0)\r\nkernel = np.ones((5,5),np.uint8)\r\n\r\nblackhat = cv2.morphologyEx(img, cv2.MORPH_BLACKHAT, kernel)\r\n\r\nres1 = np.hstack((img, blackhat))\r\n\r\ncv2.imshow('Imagem Comum J, Com blackhat', res1)\r\ncv2.waitKey(0) \r\ncv2.destroyAllWindows()\r\n" ]
[ [ "numpy.hstack", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Anysomeday/SpecPatConv3D-Network
[ "2839268171bc17f58f38c1815368e248f1f7ad34" ]
[ "train.py" ]
[ "from sklearn.metrics import confusion_matrix, classification_report, accuracy_score\nfrom argparse import ArgumentParser\nfrom model import *\nimport tensorflow as tf\nimport numpy as np\nfrom helper import showClassTable, maybeExtract\nimport os\n\nfrom tqdm import tqdm\n\nnumber_of_band = {'Indian_pines': 2, 'Salinas': 2, 'KSC': 2, 'Botswana': 1}\n\n# get_available_gpus()\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = '0,1'\n\n# GPU_DEVICE_IDX = '1'\nmodel_directory = os.path.join(os.getcwd(), 'Trained_model/')\n\nparser = ArgumentParser()\nparser.add_argument('--data', type=str, default='Indian_pines', help='Indian_pines or Salinas or KSC')\nparser.add_argument('--epoch', type=int, default=650, help='Epochs')\nparser.add_argument('--batch_size', type=int, default=50, help='Mini batch at training')\nparser.add_argument('--patch_size', type=int, default=5)\nparser.add_argument('--device', type=str, default='CPU')\n\ndef main(opt):\n\n # Load MATLAB data that contains data and labels\n TRAIN, VALIDATION, TEST = maybeExtract(opt.data, opt.patch_size)\n\n # Extract data and label from MATLAB file\n training_data, training_label = TRAIN[0], TRAIN[1]\n validation_data, validation_label = VALIDATION[0], VALIDATION[1]\n test_data, test_label = TEST[0], TEST[1]\n\n print('\\nData shapes')\n print('training_data shape' + str(training_data.shape))\n print('training_label shape' + str(training_label.shape) + '\\n')\n print('validation_data shape' + str(validation_data.shape))\n print('validation_label shape' + str(validation_label.shape) + '\\n')\n print('test_data shape' + str(test_data.shape))\n print('test_label shape' + str(test_label.shape) + '\\n')\n\n SIZE = training_data.shape[0]\n HEIGHT = training_data.shape[1]\n WIDTH = training_data.shape[2]\n CHANNELS = training_data.shape[3]\n N_PARALLEL_BAND = number_of_band[opt.data]\n NUM_CLASS = training_label.shape[1]\n\n EPOCHS = opt.epoch\n BATCH = opt.batch_size\n\n graph = tf.Graph()\n with graph.as_default():\n # Define Model entry placeholder\n img_entry = tf.placeholder(tf.float32, shape=[None, WIDTH, HEIGHT, CHANNELS])\n img_label = tf.placeholder(tf.uint8, shape=[None, NUM_CLASS])\n\n # Get true class from one-hot encoded format\n image_true_class = tf.argmax(img_label, axis=1)\n\n # Dropout probability for the model\n prob = tf.placeholder(tf.float32)\n\n # Network model definition\n model = net(img_entry, prob, HEIGHT, WIDTH, CHANNELS, N_PARALLEL_BAND, NUM_CLASS)\n\n # Cost Function\n final_layer = model['dense3']\n\n with tf.name_scope('loss'):\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=final_layer,\n labels=img_label)\n cost = tf.reduce_mean(cross_entropy)\n\n # Optimisation function\n with tf.name_scope('adam_optimizer'):\n optimizer = tf.train.AdamOptimizer(learning_rate=0.0005).minimize(cost)\n\n # Model Performance Measure\n with tf.name_scope('accuracy'):\n predict_class = model['predict_class_number']\n correction = tf.equal(predict_class, image_true_class)\n\n accuracy = tf.reduce_mean(tf.cast(correction, tf.float32))\n\n # Checkpoint Saver\n saver = tf.train.Saver()\n with tf.Session(graph=graph) as session:\n\n session.run(tf.global_variables_initializer())\n\n def test(t_data, t_label, test_iterations=1, evalate=False):\n\n assert test_data.shape[0] == test_label.shape[0]\n\n y_predict_class = model['predict_class_number']\n\n # OverallAccuracy, averageAccuracy and accuracyPerClass\n overAllAcc, avgAcc, averageAccClass = [], [], []\n for _ in range(test_iterations):\n\n pred_class = []\n for t in 
tqdm(t_data):\n t = np.expand_dims(t, axis=0)\n feed_dict_test = {img_entry: t, prob: 1.0}\n prediction = session.run(y_predict_class, feed_dict=feed_dict_test)\n pred_class.append(prediction)\n\n true_class = np.argmax(t_label, axis=1)\n conMatrix = confusion_matrix(true_class, pred_class)\n\n # Calculate recall score across each class\n classArray = []\n for c in range(len(conMatrix)):\n recallScore = conMatrix[c][c] / sum(conMatrix[c])\n classArray += [recallScore]\n averageAccClass.append(classArray)\n avgAcc.append(sum(classArray) / len(classArray))\n overAllAcc.append(accuracy_score(true_class, pred_class))\n\n averageAccClass = np.transpose(averageAccClass)\n meanPerClass = np.mean(averageAccClass, axis=1)\n\n showClassTable(meanPerClass, title='Class accuracy')\n print('Average Accuracy: ' + str(np.mean(avgAcc)))\n print('Overall Accuracy: ' + str(np.mean(overAllAcc)))\n\n def train(num_iterations, train_batch_size=50):\n\n maxValidRate = 0\n for i in range(num_iterations + 1):\n\n print('Optimization Iteration: ' + str(i))\n\n for x in range(int(SIZE / train_batch_size) + 1):\n train_batch = training_data[x * train_batch_size: (x + 1) * train_batch_size]\n train_batch_label = training_label[x * train_batch_size: (x + 1) * train_batch_size]\n feed_dict_train = {img_entry: train_batch, img_label: train_batch_label, prob: 0.5}\n _, loss_val = session.run([optimizer, cross_entropy], feed_dict=feed_dict_train)\n\n if i % 15 == 0:\n acc = session.run(accuracy, feed_dict={img_entry: validation_data,\n img_label: validation_label,\n prob: 1.0})\n print('Model Performance, Validation accuracy: ', acc * 100)\n if maxValidRate < acc:\n location = i\n maxValidRate = acc\n saver.save(session, './Trained_model/' + str(opt.data) +'/the3dnetwork-'+opt.data)\n print('Maximum validation accuracy: ', acc, ' at epoch ', location)\n test(validation_data, validation_label, 1)\n\n def count_param():\n total_parameters = 0\n for variable in tf.trainable_variables():\n shape = variable.get_shape()\n variable_parameters = 1\n for dim in shape:\n variable_parameters *= dim.value\n total_parameters += variable_parameters\n print('Trainable parameters: ' + '\\033[92m' + str(total_parameters) + '\\033[0m')\n\n count_param()\n # Train model\n train(num_iterations=EPOCHS, train_batch_size=BATCH)\n #saver.save(session, model_directory)\n\n # Test model\n test(test_data, test_label, test_iterations=1)\n print('End session ' + str(opt.data))\n\nif __name__ == '__main__':\n option = parser.parse_args()\n main(option)\n" ]
[ [ "numpy.expand_dims", "tensorflow.equal", "tensorflow.cast", "sklearn.metrics.confusion_matrix", "numpy.mean", "tensorflow.train.AdamOptimizer", "tensorflow.Graph", "numpy.argmax", "tensorflow.name_scope", "tensorflow.Session", "tensorflow.trainable_variables", "tensorflow.train.Saver", "tensorflow.argmax", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "numpy.transpose", "tensorflow.reduce_mean", "tensorflow.nn.softmax_cross_entropy_with_logits_v2", "sklearn.metrics.accuracy_score" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
Sina-Mehdiz/Bioptim
[ "49c13c089db8200f503d0209f7f8685607d9ccaa" ]
[ "examples/getting_started/custom_initial_guess.py" ]
[ "\"\"\"\nThis example is a trivial box that must superimpose one of its corner to a marker at the beginning of the movement\nand superimpose the same corner to a different marker at the end.\nIt is designed to investigate the different way to define the initial guesses at each node sent to the solver\n\nAll the types of interpolation are shown:\nInterpolationType.CONSTANT: All the values are the same at each node\nInterpolationType.CONSTANT_WITH_FIRST_AND_LAST_DIFFERENT: Same as constant, but have the first\n and last nodes different. This is particularly useful when you want to fix the initial and\n final position and leave the rest of the movement free.\nInterpolationType.LINEAR: The values are linearly interpolated between the first and last nodes.\nInterpolationType.EACH_FRAME: Each node values are specified\nInterpolationType.SPLINE: The values are interpolated from the first to last node using a cubic spline\nInterpolationType.CUSTOM: Provide a user-defined interpolation function\n\"\"\"\n\nimport numpy as np\nimport biorbd\nfrom bioptim import (\n Node,\n OptimalControlProgram,\n Dynamics,\n DynamicsFcn,\n Objective,\n ObjectiveFcn,\n ConstraintList,\n ConstraintFcn,\n Bounds,\n QAndQDotBounds,\n InitialGuess,\n InterpolationType,\n OdeSolver,\n)\n\n\ndef custom_init_func(current_shooting_point: int, my_values: np.ndarray, n_shooting: int) -> np.ndarray:\n \"\"\"\n The custom function for the x and u initial guesses (this particular one mimics linear interpolation)\n\n Parameters\n ----------\n current_shooting_point: int\n The current point to return the value, it is defined between [0; n_shooting] for the states\n and [0; n_shooting[ for the controls\n my_values: np.ndarray\n The values provided by the user\n n_shooting: int\n The number of shooting point\n\n Returns\n -------\n The vector value of the initial guess at current_shooting_point\n \"\"\"\n\n # Linear interpolation created with custom function\n return my_values[:, 0] + (my_values[:, -1] - my_values[:, 0]) * current_shooting_point / n_shooting\n\n\ndef prepare_ocp(\n biorbd_model_path: str,\n n_shooting: int,\n final_time: float,\n initial_guess: InterpolationType = InterpolationType.CONSTANT,\n ode_solver=OdeSolver.RK4(),\n) -> OptimalControlProgram:\n \"\"\"\n Prepare the program\n\n Parameters\n ----------\n biorbd_model_path: str\n The path of the biorbd model\n n_shooting: int\n The number of shooting points\n final_time: float\n The time at the final node\n initial_guess: InterpolationType\n The type of interpolation to use for the initial guesses\n ode_solver: OdeSolver\n The type of ode solver used\n\n Returns\n -------\n The ocp ready to be solved\n \"\"\"\n\n # --- Options --- #\n # Model path\n biorbd_model = biorbd.Model(biorbd_model_path)\n nq = biorbd_model.nbQ()\n nqdot = biorbd_model.nbQdot()\n ntau = biorbd_model.nbGeneralizedTorque()\n tau_min, tau_max, tau_init = -100, 100, 0\n\n # Add objective functions\n objective_functions = Objective(ObjectiveFcn.Lagrange.MINIMIZE_TORQUE, weight=100)\n\n # Dynamics\n dynamics = Dynamics(DynamicsFcn.TORQUE_DRIVEN)\n\n # Constraints\n constraints = ConstraintList()\n constraints.add(ConstraintFcn.SUPERIMPOSE_MARKERS, node=Node.START, first_marker=\"m0\", second_marker=\"m1\")\n constraints.add(ConstraintFcn.SUPERIMPOSE_MARKERS, node=Node.END, first_marker=\"m0\", second_marker=\"m2\")\n\n # Path constraint and control path constraints\n x_bounds = QAndQDotBounds(biorbd_model)\n x_bounds[1:6, [0, -1]] = 0\n x_bounds[2, -1] = 1.57\n u_bounds = Bounds([tau_min] * 
ntau, [tau_max] * ntau)\n\n # Initial guesses\n t = None\n extra_params_x = {}\n extra_params_u = {}\n if initial_guess == InterpolationType.CONSTANT:\n x = [0] * (nq + nqdot)\n u = [tau_init] * ntau\n elif initial_guess == InterpolationType.CONSTANT_WITH_FIRST_AND_LAST_DIFFERENT:\n x = np.array([[1.0, 0.0, 0.0, 0, 0, 0], [1.5, 0.0, 0.785, 0, 0, 0], [2.0, 0.0, 1.57, 0, 0, 0]]).T\n u = np.array([[1.45, 9.81, 2.28], [0, 9.81, 0], [-1.45, 9.81, -2.28]]).T\n elif initial_guess == InterpolationType.LINEAR:\n x = np.array([[1.0, 0.0, 0.0, 0, 0, 0], [2.0, 0.0, 1.57, 0, 0, 0]]).T\n u = np.array([[1.45, 9.81, 2.28], [-1.45, 9.81, -2.28]]).T\n elif initial_guess == InterpolationType.EACH_FRAME:\n x = np.random.random((nq + nqdot, n_shooting + 1))\n u = np.random.random((ntau, n_shooting))\n elif initial_guess == InterpolationType.SPLINE:\n # Bound spline assume the first and last point are 0 and final respectively\n t = np.hstack((0, np.sort(np.random.random((3,)) * final_time), final_time))\n x = np.random.random((nq + nqdot, 5))\n u = np.random.random((ntau, 5))\n elif initial_guess == InterpolationType.CUSTOM:\n # The custom function refers to the one at the beginning of the file. It emulates a Linear interpolation\n x = custom_init_func\n u = custom_init_func\n extra_params_x = {\"my_values\": np.random.random((nq + nqdot, 2)), \"n_shooting\": n_shooting}\n extra_params_u = {\"my_values\": np.random.random((ntau, 2)), \"n_shooting\": n_shooting}\n else:\n raise RuntimeError(\"Initial guess not implemented yet\")\n x_init = InitialGuess(x, t=t, interpolation=initial_guess, **extra_params_x)\n\n u_init = InitialGuess(u, t=t, interpolation=initial_guess, **extra_params_u)\n # ------------- #\n\n return OptimalControlProgram(\n biorbd_model,\n dynamics,\n n_shooting,\n final_time,\n x_init,\n u_init,\n x_bounds,\n u_bounds,\n objective_functions,\n constraints,\n ode_solver=ode_solver,\n )\n\n\ndef main():\n \"\"\"\n Solve the program for all the InterpolationType available\n \"\"\"\n\n sol = None\n for initial_guess in InterpolationType:\n print(f\"Solving problem using {initial_guess} initial guess\")\n ocp = prepare_ocp(\"cube.bioMod\", n_shooting=30, final_time=2, initial_guess=initial_guess)\n sol = ocp.solve()\n print(\"\\n\")\n\n # Print the last solution\n sol.animate()\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.array", "numpy.random.random" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Manifold-Computing/MMAML-Classification
[ "bdf1a93e798ab81619563038b95a3c5aa18717e0" ]
[ "maml/models/conv_embedding_model.py" ]
[ "from collections import OrderedDict\nimport torch\nimport torch.nn.functional as F\nimport numpy as np\n\n\nclass ConvEmbeddingModel(torch.nn.Module):\n def __init__(self, input_size, output_size, embedding_dims,\n hidden_size=128, num_layers=1,\n convolutional=False, num_conv=4, num_channels=32, num_channels_max=256,\n rnn_aggregation=False, linear_before_rnn=False, \n embedding_pooling='max', batch_norm=True, avgpool_after_conv=True,\n num_sample_embedding=0, sample_embedding_file='embedding.hdf5',\n img_size=(1, 28, 28), verbose=False):\n\n super(ConvEmbeddingModel, self).__init__()\n self._input_size = input_size\n self._output_size = output_size\n self._hidden_size = hidden_size\n self._num_layers = num_layers\n self._embedding_dims = embedding_dims\n self._bidirectional = True\n self._device = 'cpu'\n self._convolutional = convolutional\n self._num_conv = num_conv\n self._num_channels = num_channels\n self._num_channels_max = num_channels_max\n self._batch_norm = batch_norm\n self._img_size = img_size\n self._rnn_aggregation = rnn_aggregation\n self._embedding_pooling = embedding_pooling\n self._linear_before_rnn = linear_before_rnn\n self._embeddings_array = []\n self._num_sample_embedding = num_sample_embedding\n self._sample_embedding_file = sample_embedding_file\n self._avgpool_after_conv = avgpool_after_conv\n self._reuse = False\n self._verbose = verbose\n\n if self._convolutional:\n conv_list = OrderedDict([])\n num_ch = [self._img_size[0]] + [self._num_channels*2**i for i in range(self._num_conv)]\n num_ch = [min(num_channels_max, ch) for ch in num_ch]\n for i in range(self._num_conv):\n conv_list.update({\n 'conv{}'.format(i+1): \n torch.nn.Conv2d(num_ch[i], num_ch[i+1], \n (3, 3), stride=2, padding=1)})\n if self._batch_norm:\n conv_list.update({\n 'bn{}'.format(i+1): \n torch.nn.BatchNorm2d(num_ch[i+1], momentum=0.001)})\n conv_list.update({'relu{}'.format(i+1): torch.nn.ReLU(inplace=True)})\n self.conv = torch.nn.Sequential(conv_list)\n self._num_layer_per_conv = len(conv_list) // self._num_conv\n\n if self._linear_before_rnn:\n linear_input_size = self.compute_input_size(\n 1, 3, 2, self.conv[self._num_layer_per_conv*(self._num_conv-1)].out_channels)\n rnn_input_size = 128\n else:\n if self._avgpool_after_conv:\n rnn_input_size = self.conv[self._num_layer_per_conv*(self._num_conv-1)].out_channels\n else:\n rnn_input_size = self.compute_input_size(\n 1, 3, 2, self.conv[self._num_layer_per_conv*(self._num_conv-1)].out_channels)\n else:\n rnn_input_size = int(input_size)\n\n if self._rnn_aggregation:\n if self._linear_before_rnn:\n self.linear = torch.nn.Linear(linear_input_size, rnn_input_size)\n self.relu_after_linear = torch.nn.ReLU(inplace=True)\n self.rnn = torch.nn.GRU(rnn_input_size, hidden_size,\n num_layers, bidirectional=self._bidirectional)\n embedding_input_size = hidden_size*(2 if self._bidirectional else 1)\n else:\n self.rnn = None\n embedding_input_size = hidden_size\n self.linear = torch.nn.Linear(rnn_input_size, embedding_input_size)\n self.relu_after_linear = torch.nn.ReLU(inplace=True)\n\n self._embeddings = torch.nn.ModuleList()\n for dim in embedding_dims:\n self._embeddings.append(torch.nn.Linear(embedding_input_size, dim))\n\n def compute_input_size(self, p, k, s, ch):\n current_img_size = self._img_size[1]\n for _ in range(self._num_conv):\n current_img_size = (current_img_size+2*p-k)//s+1\n return ch * int(current_img_size) ** 2\n\n def forward(self, task, params=None):\n if not self._reuse and self._verbose: print('='*8 + ' Emb Model ' + 
'='*8)\n if params is None:\n params = OrderedDict(self.named_parameters())\n\n if self._convolutional:\n x = task.x\n if not self._reuse and self._verbose: print('input size: {}'.format(x.size()))\n for layer_name, layer in self.conv.named_children():\n weight = params.get('conv.' + layer_name + '.weight', None)\n bias = params.get('conv.' + layer_name + '.bias', None)\n if 'conv' in layer_name:\n x = F.conv2d(x, weight=weight, bias=bias, stride=2, padding=1)\n elif 'relu' in layer_name:\n x = F.relu(x)\n elif 'bn' in layer_name:\n x = F.batch_norm(x, weight=weight, bias=bias,\n running_mean=layer.running_mean,\n running_var=layer.running_var,\n training=True)\n if not self._reuse and self._verbose: print('{}: {}'.format(layer_name, x.size()))\n if self._avgpool_after_conv:\n x = x.view(x.size(0), x.size(1), -1)\n if not self._reuse and self._verbose: print('reshape to: {}'.format(x.size()))\n x = torch.mean(x, dim=2)\n if not self._reuse and self._verbose: print('reduce mean: {}'.format(x.size()))\n\n else:\n x = task.x.view(task.x.size(0), -1)\n if not self._reuse and self._verbose: print('flatten: {}'.format(x.size()))\n else:\n x = task.x.view(task.x.size(0), -1)\n if not self._reuse and self._verbose: print('flatten: {}'.format(x.size()))\n\n if self._rnn_aggregation:\n # LSTM input dimensions are seq_len, batch, input_size\n batch_size = 1\n h0 = torch.zeros(self._num_layers*(2 if self._bidirectional else 1),\n batch_size, self._hidden_size, device=self._device)\n if self._linear_before_rnn: \n x = F.relu(self.linear(x))\n inputs = x.view(x.size(0), 1, -1)\n output, hn = self.rnn(inputs, h0)\n if self._bidirectional:\n N, B, H = output.shape\n output = output.view(N, B, 2, H // 2)\n embedding_input = torch.cat([output[-1, :, 0], output[0, :, 1]], dim=1)\n\n else:\n inputs = F.relu(self.linear(x).view(1, x.size(0), -1).transpose(1, 2))\n if not self._reuse and self._verbose: print('fc: {}'.format(inputs.size()))\n if self._embedding_pooling == 'max':\n embedding_input = F.max_pool1d(inputs, x.size(0)).view(1, -1)\n elif self._embedding_pooling == 'avg':\n embedding_input = F.avg_pool1d(inputs, x.size(0)).view(1, -1)\n else:\n raise NotImplementedError\n if not self._reuse and self._verbose: print('reshape after {}pool: {}'.format(\n self._embedding_pooling, embedding_input.size()))\n\n # randomly sample embedding vectors\n if not self._num_sample_embedding == 0:\n self._embeddings_array.append(embedding_input.cpu().clone().detach().numpy())\n if len(self._embeddings_array) >= self._num_sample_embedding:\n if self._sample_embedding_file.split('.')[-1] == 'hdf5':\n import h5py\n f = h5py.File(self._sample_embedding_file, 'w')\n f['embedding'] = np.squeeze(np.stack(self._embeddings_array))\n f.close()\n elif self._sample_embedding_file.split('.')[-1] == 'pt':\n torch.save(np.squeeze(np.stack(self._embeddings_array)),\n self._sample_embedding_file)\n else:\n raise NotImplementedError\n\n out_embeddings = []\n for i, embedding in enumerate(self._embeddings):\n embedding_vec = embedding(embedding_input)\n out_embeddings.append(embedding_vec)\n if not self._reuse and self._verbose: print('emb vec {} size: {}'.format(\n i+1, embedding_vec.size()))\n if not self._reuse and self._verbose: print('='*27)\n self._reuse = True\n return out_embeddings\n\n def to(self, device, **kwargs):\n self._device = device\n super(ConvEmbeddingModel, self).to(device, **kwargs)\n" ]
[ [ "torch.nn.Sequential", "torch.mean", "torch.nn.functional.batch_norm", "torch.zeros", "torch.cat", "torch.nn.ModuleList", "torch.nn.GRU", "torch.nn.functional.conv2d", "torch.nn.Conv2d", "numpy.stack", "torch.nn.Linear", "torch.nn.functional.relu", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
janakhpon/Datavisualization-plot
[ "1144cbc9cd16cd6ecaf996f6554ab039920e31ce" ]
[ "ex0001.py" ]
[ "import matplotlib.pyplot as plt\n\nyears = [1950, 1955, 1960, 1965, 1970, 1975, 1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2016, 2017, 2018, 2019]\npops = [0.00, 1.92, 2.14, 2.22, 2.37, 2.34, 2.26, 2.10, 1.71, 1.21, 1.25, 0.94, 0.67, 0.81, 0.69, 0.64, 0.61, 0.63]\ndeaths = [0.00, 0.77, 0.68, 0.54, 0.38, 0.23, 0.11, 0.02, 0.05, 0.12, 0.18, 0.25, 0.31, 0.35, 0.37, 0.38]\n\nlines= plt.plot(years, pops, deaths)\nplt.grid(True)\nplt.setp(lines, color=(.8,.6,1), marker=\"o\")\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "matplotlib.pyplot.plot", "matplotlib.pyplot.show", "matplotlib.pyplot.setp", "matplotlib.pyplot.grid" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ZhaozhiQIAN/SyncTwin-NeurIPS-2021
[ "78eff91d0287c7f1f66c76ca24834c7d1029ad3b" ]
[ "sim/pkpd.py" ]
[ "import numpy as np\nimport numpy.random\nimport scipy.integrate\nimport torch\n\nfrom config import DEVICE\n\n\ndef f(t, y, Kin, K, O, H, D50): # noqa: E741\n P = y[0]\n R = y[1]\n D = y[2]\n\n dP = Kin[int(t)] - K * P\n dR = K * P - (D / (D + D50)) * K * R\n dD = O[int(t)] - H * D\n\n return [dP, dR, dD]\n\n\ndef solve(init, Kin, K, Os, H, D50, step=30):\n ode = scipy.integrate.ode(f).set_integrator(\"dopri5\")\n\n Ot = np.zeros(step + 1)\n if Os >= 0:\n Ot[Os:] = 1.0\n\n try:\n len(Kin)\n except Exception: # pylint: disable=broad-except\n Kin = np.ones(step + 1) * Kin\n\n ode.set_initial_value(init, 0).set_f_params(Kin, K, Ot, H, D50)\n t1 = step\n dt = 1\n\n res_list = []\n\n while ode.successful() and ode.t < t1:\n res = ode.integrate(ode.t + dt, ode.t + dt)\n res_list.append(res)\n\n res = np.stack(res_list, axis=-1)\n return res\n\n\ndef get_Kin(step=30, n_basis=12):\n # define Kin\n Kin_b_list = list()\n Kin_b_list.append(np.ones(step + 1))\n x = np.arange(step + 1) / step\n Kin_b_list.append(x)\n\n for i in range(n_basis - 2):\n bn = 2 * x * Kin_b_list[-1] - Kin_b_list[-2]\n Kin_b_list.append(bn)\n\n Kin_b = np.stack(Kin_b_list, axis=-1)\n\n Kin_list = [Kin_b[:, i] for i in range(n_basis)]\n return Kin_list, Kin_b\n\n\ndef get_clustered_Kin(Kin_b, n_cluster, n_sample_total):\n n_basis = Kin_b.shape[1]\n\n n_sample_cluster = n_sample_total // n_cluster\n if n_sample_total % n_cluster != 0:\n print(\"Warning: sample size not divisible by number of clusters\")\n\n # generate cluster masks\n mask_list = []\n for i in range(n_cluster):\n mask = np.zeros(n_basis)\n mask[i:-1:4] = 1.0\n mask_list.append(mask)\n\n Kin_list = []\n for mask in mask_list:\n for j in range(n_sample_cluster):\n Kin = np.matmul(Kin_b, numpy.random.randn(n_basis) * mask)\n Kin_list.append(Kin)\n\n Kin_b = np.stack(Kin_list, axis=-1)\n return Kin_list, Kin_b\n\n\ndef generate_data(Kin_list, K_list, P0_list, R0_list, train_step, H=0.1, D50=3, step=30):\n\n # K_list = [0.18, 0.28, 0.38]\n # K_list = [0.18]\n\n # P0_list = [0., 0.5, 1.]\n # P0_list = [0.5]\n\n # R0_list = [0.5, 1., 1.5]\n # R0_list = [0.5]\n\n control_res_list = []\n\n for Kin in Kin_list:\n for K in K_list:\n for P0 in P0_list:\n for R0 in R0_list:\n control_res = solve([P0, R0, 0.0], Kin, K, train_step, H, D50, step)\n control_res_list.append(control_res)\n\n control_res_arr = np.stack(control_res_list, axis=-1)\n # Dim, T, N\n # Dim = 3: precursor, Cholesterol, Statins concentration\n # slice on dim=1 to get the outcome of interest\n return control_res_arr\n\n\ndef get_covariate(\n control_Kin_b,\n treat_Kin_b,\n control_res_arr,\n treat_res_arr,\n step=30,\n train_step=25,\n device=DEVICE,\n noise=None,\n double_up=False,\n hidden_confounder=0,\n):\n n_units = control_res_arr.shape[-1] * 2 if double_up else control_res_arr.shape[-1]\n n_treated = treat_res_arr.shape[-1]\n\n covariates_control = np.concatenate([control_Kin_b[:step, :][None, :, :], control_res_arr], axis=0)\n covariates_treated = np.concatenate([treat_Kin_b[:step, :][None, :, :], treat_res_arr], axis=0)\n covariates = np.concatenate([covariates_control, covariates_treated], axis=2)\n\n covariates = torch.tensor(covariates, dtype=torch.float32)\n\n covariates = covariates.permute((1, 2, 0)).to(device)\n # remove the last covariate\n covariates = covariates[:, :, :3]\n\n # standardize\n m = covariates.mean(dim=(0, 1))\n sd = covariates.std(dim=(0, 1))\n covariates = (covariates - m) / sd\n\n if double_up:\n covariates_control = covariates[:, : (covariates.shape[1] // 2), :]\n 
covariates_twin = covariates_control + torch.randn_like(covariates_control) * 0.1\n covariates = torch.cat([covariates_twin, covariates], dim=1)\n\n if noise is not None:\n covariates = covariates + torch.randn_like(covariates) * noise\n\n n_units_total = n_units + n_treated\n\n pretreatment_time = train_step\n\n x_full = covariates[:pretreatment_time, :, :]\n if hidden_confounder == 1:\n x_full[:, :, 0] = 0\n if hidden_confounder == 2:\n x_full[:, :, 0] = 0\n x_full[:, :, 1] = 0\n y_full = covariates[pretreatment_time:, :, 2:3].detach().clone()\n y_full_all = covariates[pretreatment_time:, :, :]\n y_control = covariates[pretreatment_time:, :n_units, 2:3]\n\n t_full = torch.ones_like(x_full)\n mask_full = torch.ones_like(x_full)\n batch_ind_full = torch.arange(n_units_total).to(DEVICE)\n y_mask_full = (batch_ind_full < n_units) * 1.0\n return (\n (n_units, n_treated, n_units_total),\n x_full,\n t_full,\n mask_full,\n batch_ind_full,\n y_full,\n y_control,\n y_mask_full,\n y_full_all,\n m,\n sd,\n )\n\n\ndef get_treatment_effect(treat_res_arr, treat_counterfactual_arr, train_step, m, sd, device=DEVICE):\n m = m[2:3].item()\n sd = sd[2:3].item()\n\n treat_res_arr = (treat_res_arr - m) / sd\n treat_counterfactual_arr = (treat_counterfactual_arr - m) / sd\n\n return torch.tensor(treat_res_arr - treat_counterfactual_arr, device=device).permute((1, 2, 0))[train_step:, :, 1:2]\n" ]
[ [ "torch.randn_like", "torch.cat", "numpy.arange", "numpy.stack", "torch.tensor", "numpy.concatenate", "numpy.ones", "torch.arange", "numpy.zeros", "torch.ones_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pdamiano-11/Team-4-Code
[ "39736f258ca14b96410d74e30e1f57d5e0fe18ba", "39736f258ca14b96410d74e30e1f57d5e0fe18ba" ]
[ "GMiguel/Project02.py", "src/UserStories/us30.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n\nimport pandas as pd\nimport datetime\nfrom tabulate import tabulate\n\ndef collectInputFile(gedcom_name):\n file = open(gedcom_name, \"r\")\n lines = []\n for line in file:\n lines.append(str(line))\n\n for idx, line in enumerate(lines):\n lines[idx] = line.replace('\\n', '').replace('@', '').replace('_MARNM', 'MARR')\n input_lines = ['0 NOTE Team-4-Project'] + lines\n\n return input_lines\n\ndef organizeInput(gedcom_name):\n input_lines = collectInputFile(gedcom_name)\n\n indexes = []\n for idx, line in enumerate(input_lines):\n lst = line.strip().split()\n if lst[0] == '0':\n indexes.append(idx)\n\n fam_split = []\n for n in range(len(indexes)):\n try:\n fam_split.append(' '.join(input_lines[indexes[n]:indexes[n+1]]))\n except:\n continue\n\n del fam_split[0:2]\n\n return fam_split\n\n\ndef createIndiList(gedcom_name):\n fam_split = organizeInput(gedcom_name)\n indi_list = []\n for text in fam_split:\n sub_text = text.strip().split()\n char = list(sub_text[1])\n if char[0] == 'I':\n indi_list.append(text)\n \n return indi_list\n\ndef createFamList(gedcom_name):\n fam_split = organizeInput(gedcom_name)\n fam_list = []\n for text in fam_split:\n sub_text = text.strip().split()\n char = list(sub_text[1])\n if char[0] == 'F':\n fam_list.append(text)\n\n return fam_list\n\n\ndef createIndividualsDataFrame(gedcom_name):\n indi_list = createIndiList(gedcom_name)\n\n individuals = pd.DataFrame(index = range(len(indi_list)), columns = \n ['ID', 'Name', 'Gender', 'Birthday', 'Age', \n 'Alive', 'Dead', 'Child', 'Spouse'])\n\n now = pd.to_datetime('now')\n\n for idx, indi in enumerate(indi_list):\n lst = indi.strip().split()\n individuals.ID[idx] = lst[1]\n \n if \"NAME\" in lst:\n i = lst.index(\"NAME\")\n individuals.Name[idx] = ' '.join(lst[i+1:i+3]).replace('/', '')\n \n if \"SEX\" in lst:\n i = lst.index(\"SEX\")\n individuals.Gender[idx] = lst[i+1]\n \n if \"BIRT\" in lst:\n i = lst.index(\"BIRT\")\n date_b = pd.to_datetime('-'.join(lst[i+3:i+6]))\n individuals.Birthday[idx] = date_b.strftime(\"%b-%d-%Y\")\n \n if \"DEAT\" in lst:\n i = lst.index(\"DEAT\")\n date_d = pd.to_datetime('-'.join(lst[i+4:i+7]))\n individuals.Dead[idx] = date_d.strftime(\"%b-%d-%Y\")\n individuals.Age[idx] = int((date_d - date_b).days/365)\n individuals.Alive[idx] = 'False'\n else:\n individuals.Alive[idx] = 'True'\n individuals.Age[idx] = int((now - date_b).days/365)\n \n if \"FAMC\" in lst:\n i = lst.index(\"FAMC\")\n individuals.Child[idx] = lst[i+1]\n \n if \"FAMS\" in lst:\n i = lst.index(\"FAMS\")\n individuals.Spouse[idx] = lst[i+1]\n\n return individuals\n\n\ndef createFamiliesDataFrame(gedcom_name):\n fam_list = createFamList(gedcom_name)\n individuals = createIndividualsDataFrame(gedcom_name)\n\n families = pd.DataFrame(index = range(len(fam_list)), \n columns = ['ID', 'Married', 'Divorced', 'Husband ID', \n 'Husband Name', 'Wife ID', 'Wife Name', 'Children'])\n\n for idx, fam in enumerate(fam_list):\n lst = fam.strip().split()\n families.ID[idx] = lst[1]\n \n if \"MARR\" in lst:\n i = lst.index(\"MARR\")\n date_d= pd.to_datetime('-'.join(lst[i+3:i+6]))\n families.Married[idx] = date_d.strftime(\"%b-%d-%Y\")\n \n div_case = lst.index(\"_CURRENT\")\n if lst[div_case+1] == \"N\":\n families.Divorced[idx] = \"True\"\n else:\n families.Divorced[idx] = \"False\"\n\n if 'HUSB' in lst:\n i = lst.index('HUSB')\n families['Husband ID'][idx] = lst[i+1]\n families['Husband Name'][idx] = list(individuals.Name[individuals.ID == lst[i+1]])[0]\n \n if 'WIFE' in lst:\n i = 
lst.index('WIFE')\n families['Wife ID'][idx] = lst[i+1]\n families['Wife Name'][idx] = list(individuals.Name[individuals.ID == lst[i+1]])[0]\n \n chil_ids = [idx for idx, val in enumerate(lst) if val in lst[:idx] and val == \"CHIL\"]\n chil_ids = [lst.index(\"CHIL\")] + chil_ids\n for n in range(len(chil_ids)):\n chil_ids[n] += 1\n chil_ids[n] = lst[chil_ids[n]]\n families.Children[idx] = chil_ids\n\n return families\n\n\ndef displayTable(gedcom_name):\n individuals = createIndividualsDataFrame(gedcom_name)\n families = createFamiliesDataFrame(gedcom_name)\n\n otp = open(\"Project03_Output.txt\", \"w\")\n otp.truncate(0)\n otp.write(\"Individuals: \\n\")\n otp.write(tabulate(individuals, headers='keys', tablefmt='psql'))\n otp.write(\"\\n\")\n otp.write(\"Families: \\n\")\n otp.write(tabulate(families, headers='keys', tablefmt='psql'))\n otp.close()\n\ndef displayOutput(gedcom_name):\n supported = ['INDI', '0 NOTE', '0 HEAD', '0 TRLR', 'FAM', '1 NAME', '1 SEX', '1 BIRT', '1 DEAT', '1 FAMC', \n '1 FAMS', '1 MARR', '1 HUSB', '1 WIFE', '1 CHIL', '1 DIV', '2 DATE']\n\n output_lines = []\n input_lines = collectInputFile(gedcom_name)\n for line in input_lines:\n if any(s in line for s in supported):\n t = line.strip().split()\n if 'INDI' in t:\n idx = t.index('INDI')\n output_lines.append('|'.join([t[0]] + [t[idx]] + ['Y'] + [' '.join(t[1:idx])]))\n elif 'FAM' in t:\n idx = t.index('FAM')\n output_lines.append('|'.join([t[0]] + [t[idx]] + ['Y'] + [' '.join(t[1:idx])]))\n else:\n output_lines.append('|'.join(t[0:2] + ['Y'] + [' '.join(t[2:])]))\n else:\n t = line.strip().split()\n output_lines.append('|'.join(t[0:2] + ['N'] + [' '.join(t[2:])]))\n\n\n otp = open(\"outputProject02.txt\", \"w\")\n for n in range(len(input_lines)):\n otp.write(\"\\n\" + \"--> \" + input_lines[n])\n otp.write(\"\\n\" + \"<-- \" + output_lines[n] + \"\\n\")\n otp.close()\n\nif __name__ == '__main__':\n file_name = input(\"Enter the name of the GEDCOM file: \")\n displayTable(file_name)\n\n\n\n\n\n", "''' Author: Samantha Inneo\r\n Sprint: Sprint 1\r\n User Story: List all living married people in a GEDCOM file\r\n '''\r\nimport datetime\r\nimport unittest\r\n\r\nimport sys\r\nsys.path.append(\"c:\\\\Users\\\\Stevens User\\\\555_Team_4\\\\Team-4-Code\\\\src\")\r\nimport Project02\r\nimport pandas as pd\r\n\r\n\r\ndef listLivingMarried(gedcom_name):\r\n # iterate through dataframe and check if current individual is married\r\n # if not, continute to next individual\r\n # if they are, check if they are alive\r\n #if they are, add to list\r\n # if not, continue\r\n #return list\r\n living_married = []\r\n individuals = Project02.createIndividualsDataFrame(gedcom_name)\r\n for ind, row in individuals.iterrows():\r\n if(type(row['Spouse']) == float and pd.isna(row['Spouse'])):\r\n pass\r\n elif (individuals['Alive'][ind] == 'True'):\r\n living_married.append(individuals['Name'][ind])\r\n # print(individuals['Name'][ind])\r\n return living_married\r\n \r\n# # listLivingMarried(\"testing.ged\")\r\n# '''testing'''\r\n# class Inneo_Tests_HW4(unittest.TestCase):\r\n# def test1(self):\r\n# filename = \"testing.ged\" \r\n# ret = ['John Smith', 'Susan Jones', 'Frank Jones', 'Emily Michaels', 'Bernard Smith', 'Theresa Kelly', 'Kevin Brown', 'Diane Brown']\r\n# self.assertEqual(listLivingMarried(filename),ret)\r\n\r\n\r\n# if __name__ == '__main__':\r\n# unittest.main() " ]
[ [ "pandas.to_datetime" ], [ "pandas.isna" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.1", "1.5", "1.2", "0.24", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
lazar505/transformer-xl
[ "76668624cbf5233fd02883701ffad0446f62de7b" ]
[ "run_web_transformer.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl import flags\nimport bottle\nimport io\nimport math\nimport numpy as np\nimport os\nimport pickle\nimport random\nfrom scipy.special import softmax\nimport spacy\nimport sys\nimport tensorflow as tf\n\nimport data_utils\nimport model\n\n\nos.environ[\"TF_FORCE_GPU_ALLOW_GROWTH\"] = \"true\"\n\n# GPU config\nflags.DEFINE_integer(\"num_core_per_host\", default=8,\n help=\"Number of cores per host\")\n\n# Experiment (data/checkpoint/directory) config\nflags.DEFINE_string(\"data_dir\", default=\"\",\n help=\"Path to tf-records directory.\")\nflags.DEFINE_string(\"record_info_dir\", default=\"\",\n help=\"Path to local directory containing filenames.txt.\")\nflags.DEFINE_string(\"corpus_info_path\", default=\"\",\n help=\"Path to corpus-info.json file.\")\nflags.DEFINE_string(\"model_dir\", default=None,\n help=\"Estimator model_dir.\")\nflags.DEFINE_string(\"eval_ckpt_path\", None,\n help=\"Checkpoint path for do_test evaluation.\"\n \"If set, model_dir will be ignored.\"\n \"If unset, will use the latest ckpt in model_dir.\")\n\n# Training config\nflags.DEFINE_integer(\"eval_batch_size\", default=60,\n help=\"Size of valid batch.\")\n\n# Model config\nflags.DEFINE_integer(\"tgt_len\", default=70,\n help=\"Number of steps to predict\")\nflags.DEFINE_integer(\"mem_len\", default=70,\n help=\"Number of steps to cache\")\nflags.DEFINE_bool(\"same_length\", default=False,\n help=\"Same length attention\")\nflags.DEFINE_integer(\"clamp_len\", default=-1,\n help=\"Clamp length\")\n\nflags.DEFINE_integer(\"n_layer\", default=6,\n help=\"Number of layers.\")\nflags.DEFINE_integer(\"d_model\", default=500,\n help=\"Dimension of the model.\")\nflags.DEFINE_integer(\"d_embed\", default=500,\n help=\"Dimension of the embeddings.\")\nflags.DEFINE_integer(\"n_head\", default=10,\n help=\"Number of attention heads.\")\nflags.DEFINE_integer(\"d_head\", default=50,\n help=\"Dimension of each attention head.\")\nflags.DEFINE_integer(\"d_inner\", default=1000,\n help=\"Dimension of inner hidden size in positionwise feed-forward.\")\nflags.DEFINE_float(\"dropout\", default=0.1,\n help=\"Dropout rate.\")\nflags.DEFINE_float(\"dropatt\", default=0.1,\n help=\"Attention dropout rate.\")\nflags.DEFINE_bool(\"untie_r\", default=False,\n help=\"untie r_w_bias and r_r_bias\")\n\n# Adaptive Softmax / Embedding\nflags.DEFINE_integer(\"div_val\", default=1,\n help=\"Divide the embedding size by this val for each bin\")\nflags.DEFINE_bool(\"proj_share_all_but_first\", default=False,\n help=\"True to share all but first projs, False not to share.\")\nflags.DEFINE_bool(\"proj_same_dim\", default=True,\n help=\"Project the bin with the same dimension.\")\n\n# Parameter initialization\nflags.DEFINE_enum(\"init\", default=\"normal\",\n enum_values=[\"normal\", \"uniform\"],\n help=\"Initialization method.\")\nflags.DEFINE_float(\"init_std\", default=0.02,\n help=\"Initialization std when init is normal.\")\nflags.DEFINE_float(\"proj_init_std\", default=0.01,\n help=\"Initialization std for embedding projection.\")\nflags.DEFINE_float(\"init_range\", default=0.1,\n help=\"Initialization std when init is uniform.\")\n\n\nFLAGS = flags.FLAGS\nFLAGS(sys.argv)\n\n\ndef get_model_fn(n_token, cutoffs):\n def model_fn(inp, tgt, mems, is_training, generate_text, prev_text):\n if generate_text == False:\n inp = tf.transpose(inp, [1, 0])\n tgt = tf.transpose(tgt, [1, 0])\n\n if FLAGS.init == \"uniform\":\n 
initializer = tf.initializers.random_uniform(\n minval=-FLAGS.init_range,\n maxval=FLAGS.init_range,\n seed=None)\n elif FLAGS.init == \"normal\":\n initializer = tf.initializers.random_normal(\n stddev=FLAGS.init_std,\n seed=None)\n proj_initializer = tf.initializers.random_normal(\n stddev=FLAGS.proj_init_std,\n seed=None)\n\n tie_projs = [False for _ in range(len(cutoffs) + 1)]\n if FLAGS.proj_share_all_but_first:\n for i in range(1, len(tie_projs)):\n tie_projs[i] = True\n\n loss, new_mems, t_output = model.transformer(\n dec_inp=inp,\n target=tgt,\n mems=mems,\n n_token=n_token,\n n_layer=FLAGS.n_layer,\n d_model=FLAGS.d_model,\n d_embed=FLAGS.d_embed,\n n_head=FLAGS.n_head,\n d_head=FLAGS.d_head,\n d_inner=FLAGS.d_inner,\n dropout=FLAGS.dropout,\n dropatt=FLAGS.dropatt,\n initializer=initializer,\n proj_initializer=proj_initializer,\n is_training=is_training,\n generate_text=generate_text,\n prev_text=prev_text,\n mem_len=FLAGS.mem_len,\n cutoffs=cutoffs,\n div_val=FLAGS.div_val,\n tie_projs=tie_projs,\n input_perms=None,\n target_perms=None,\n head_target=None,\n same_length=FLAGS.same_length,\n clamp_len=FLAGS.clamp_len,\n untie_r=FLAGS.untie_r,\n proj_same_dim=FLAGS.proj_same_dim)\n\n # number of parameters\n num_params = sum([np.prod(v.shape) for v in tf.compat.v1.trainable_variables()])\n tf.compat.v1.logging.info('#params: {}'.format(num_params))\n\n # format_str = '{{:<{0}s}}\\t{{}}'.format(\n # max([len(v.name) for v in tf.compat.v1.trainable_variables()]))\n # for v in tf.compat.v1.trainable_variables():\n # tf.compat.v1.logging.info(format_str.format(v.name, v.get_shape()))\n\n if is_training:\n all_vars = tf.compat.v1.trainable_variables()\n grads = tf.gradients(loss, all_vars)\n grads_and_vars = list(zip(grads, all_vars))\n\n return loss, new_mems, grads_and_vars\n elif generate_text:\n return loss, new_mems, t_output\n else:\n return loss, new_mems, inp\n\n return model_fn\n\n\ndef single_core_graph(n_token, cutoffs, is_training, inp, tgt, mems, generate_text=False, prev_text=None):\n model_fn = get_model_fn(\n n_token=n_token,\n cutoffs=cutoffs)\n\n model_ret = model_fn(\n inp=inp,\n tgt=tgt,\n mems=mems,\n is_training=is_training,\n generate_text=generate_text,\n prev_text=prev_text)\n\n return model_ret\n\ndef generate_text(n_token, cutoffs, ps_device, vocab, text_length=100, word_range=3):\n per_core_bsz = FLAGS.eval_batch_size // FLAGS.num_core_per_host\n tower_mems, tower_losses, tower_new_mems = [], [], []\n\n for i in range(FLAGS.num_core_per_host):\n with tf.device(\"/device:CPU:0\"), \\\n tf.compat.v1.variable_scope(tf.compat.v1.get_variable_scope(), reuse=tf.AUTO_REUSE):\n mems_i = [tf.compat.v1.placeholder(tf.float32,\n [FLAGS.mem_len, per_core_bsz, FLAGS.d_model])\n for _ in range(FLAGS.n_layer)]\n\n prev_text_tf = tf.compat.v1.placeholder(tf.int64,\n [FLAGS.tgt_len, 1])\n\n loss_i, new_mems_i, t_output = single_core_graph(\n n_token=n_token,\n cutoffs=cutoffs,\n is_training=False,\n inp=None,\n tgt=None,\n mems=mems_i,\n generate_text=True,\n prev_text=prev_text_tf)\n\n tower_mems.append(mems_i)\n tower_losses.append(loss_i)\n tower_new_mems.append(new_mems_i)\n\n ## sum losses across towers\n if len(tower_losses) > 1:\n loss = tf.add_n(tower_losses) / len(tower_losses)\n else:\n loss = tower_losses[0]\n\n tower_mems_np = [\n [np.zeros([FLAGS.mem_len, per_core_bsz, FLAGS.d_model], dtype=np.float32)\n for layer in range(FLAGS.n_layer)]\n for core in range(FLAGS.num_core_per_host)\n ]\n\n start_text = vocab.encode_file(\"temp_input_text.txt\", 
ordered=True)\n\n prev_text_np = []\n start_en_dec = []\n\n for i in start_text[-FLAGS.tgt_len:]:\n prev_text_np.append([i])\n start_en_dec.append(vocab.get_sym(i))\n\n start_en_dec = ' '.join(start_en_dec)\n\n text = []\n\n saver = tf.compat.v1.train.Saver()\n\n with tf.compat.v1.Session(\n config=tf.compat.v1.ConfigProto(allow_soft_placement=True, device_count={'GPU': 0})) as sess:\n sess.run(tf.compat.v1.global_variables_initializer())\n\n if FLAGS.eval_ckpt_path is None:\n eval_ckpt_path = tf.compat.v1.train.latest_checkpoint(FLAGS.model_dir)\n else:\n eval_ckpt_path = FLAGS.eval_ckpt_path\n tf.compat.v1.logging.info(\"Model: {}\".format(eval_ckpt_path))\n saver.restore(sess, eval_ckpt_path)\n\n fetches = [loss, tower_new_mems, t_output]\n\n print(\"Generating text...\")\n\n step = 0\n while step < text_length:\n feed_dict = {}\n for i in range(FLAGS.num_core_per_host):\n for m, m_np in zip(tower_mems[i], tower_mems_np[i]):\n feed_dict[m] = m_np\n feed_dict[prev_text_tf] = prev_text_np\n\n fetched = sess.run(fetches, feed_dict=feed_dict)\n\n sm_prob = fetched[2]\n sm_prob = softmax(sm_prob[-1, 0, :])\n sorted_indices = np.argsort(sm_prob)\n\n # random word choice from the set of tokens with the highest probabilities\n # If we put the most probable word, repetition of the same phrases can occur\n # word_range = 3\n next_word_index = int(math.floor(random.uniform(1, word_range + 1)))\n next_word = sorted_indices[-next_word_index]\n text.append(vocab.get_sym(next_word))\n prev_text_np = np.append(prev_text_np[1:], [next_word]).reshape((FLAGS.tgt_len, 1))\n loss_np, tower_mems_np = fetched[:2]\n\n if vocab.get_sym(next_word) != \"<eos>\":\n step = step + 1\n\n stext = ' '.join(text)\n return start_en_dec, stext\n\n\ninput_page = \"\"\"\n<h3>\n <center>\n Legal Text Generation with Transformers\n</h3>\n<h4>\n <center>\n Lazar Peric, Stefan Mijic, Dominik Stammbach and Elliott Ash<br>\n ETH Zurich<br>\n</h4>\n\n<form action=\"/transformer\" method=\"post\">\n Text length: <input name=\"text_length\" type=\"text\" value=\"100\" />\n Randomness: <input name=\"randomness\" type=\"text\" value=\"3\" /><br><br>\n Input text:<br>\n <textarea rows=\"18\" style=\"width:90%;\" name=\"input_text\">Law is a bottomless pit.</textarea><br>\n <input value=\"Generate\" type=\"submit\" />\n</form>\n\"\"\"\n\noutput_page = \"\"\"\n<h3>\n <center>\n Legal Text Generation with Transformers\n</h3>\n<h4>\n <center>\n Lazar Peric, Stefan Mijic, Dominik Stammbach and Elliott Ash<br>\n ETH Zurich<br>\n</h4>\nInput text:<br>\n{{input_text}}<br><br>\nOutput text:<br>\n{{output_text}}\n\"\"\"\n\n\[email protected]('/transformer')\ndef get_transformer_text():\n return input_page\n\n\[email protected]('/transformer')\ndef generate_new_text():\n original_text = bottle.request.forms.get('input_text')\n\n def process_text(text, process=False):\n if process:\n text = text.replace(u\" , \", u\", \")\n text = text.replace(u\" . \", u\". 
\")\n text = text.replace(u\" ; \", u\"; \")\n text = text.replace(u\" : \", u\": \")\n text = text.replace(u\" ( \", u\" (\")\n text = text.replace(u\" ) \", u\") \")\n text = text.replace(u\"<eos>\", u\" \")\n text = text.replace(u\" 's \", u\"'s \")\n while text.count(u\" \") > 0:\n text = text.replace(u\" \", u\" \")\n return text\n\n text_length = abs(int(bottle.request.forms.get(\"text_length\")))\n word_range = abs(int(bottle.request.forms.get(\"randomness\")))\n\n if word_range == 0:\n word_range = 1\n\n print(\"Processing text...\")\n doc = nlp(original_text)\n text_out = []\n for sent in doc.sents:\n for token in sent:\n text_out.append(token.text)\n if len(text_out) < test_tgt_len:\n text_out = [\"<unk>\"] * (test_tgt_len - len(text_out)) + text_out\n text_out = u\" \" + ' '.join(text_out)\n while text_out.count(u\" \") > 0:\n text_out = text_out.replace(u\" \", u\" \")\n\n file_o = io.open(\"temp_input_text.txt\", \"w\", encoding='utf8')\n file_o.write(text_out)\n file_o.close()\n\n input_text, output_text = generate_text(n_token, cutoffs, \"/gpu:0\", vocab, text_length=text_length,\n word_range=word_range)\n\n os.remove(\"temp_input_text.txt\")\n\n input_text = process_text(original_text, process=True)\n output_text =process_text(output_text, process=True)\n return bottle.template(output_page, input_text=original_text, output_text=output_text)\n\ndef main(unused_argv):\n del unused_argv # Unused\n tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n # chech: http://localhost:8787/transformer\n bottle.run(host='0.0.0.0', port=8787, reloader=True)\n\n\nprint(\"Loading Spacy...\")\nnlp = spacy.load('en_core_web_lg')\nprint(\"Loading corpus info...\")\ncorpus_info = data_utils.get_corpus_info(FLAGS.corpus_info_path)\nn_token = corpus_info[\"vocab_size\"]\ncutoffs = corpus_info[\"cutoffs\"][1:-1]\ntest_tgt_len = FLAGS.tgt_len\nvocab_path = os.path.join(FLAGS.data_dir, \"cache_vocab.pkl\")\nprint(\"Loading cached vocabulary...\")\nwith open(vocab_path, \"rb\") as fp:\n vocab = pickle.load(fp)\n\ntf.compat.v1.app.run()\n" ]
[ [ "tensorflow.device", "tensorflow.compat.v1.train.Saver", "scipy.special.softmax", "tensorflow.add_n", "tensorflow.compat.v1.app.run", "tensorflow.gradients", "tensorflow.compat.v1.trainable_variables", "tensorflow.compat.v1.get_variable_scope", "numpy.zeros", "tensorflow.initializers.random_normal", "numpy.append", "tensorflow.initializers.random_uniform", "numpy.argsort", "tensorflow.compat.v1.train.latest_checkpoint", "tensorflow.compat.v1.ConfigProto", "tensorflow.transpose", "tensorflow.compat.v1.logging.set_verbosity", "tensorflow.compat.v1.global_variables_initializer", "tensorflow.compat.v1.placeholder", "numpy.prod" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.10", "1.4", "1.9", "1.5", "1.2", "1.7", "1.3", "1.8" ], "tensorflow": [] } ]
kim-jiyoon/aimd
[ "6616528cb4cde0a92c3336b2f109a1160e1eb271" ]
[ "aimd/diffusion.py" ]
[ "# coding: utf-8\n# Copyright (c) MoGroup at UMD.\n# Distributed under the terms of the MIT License.\n\nfrom __future__ import division, unicode_literals\nimport numpy as np\nfrom monty.json import MSONable\nfrom scipy import stats\nfrom scipy.optimize import curve_fit\nfrom pymatgen.io.vasp.outputs import Vasprun\nfrom pymatgen.util.coord import pbc_diff\nfrom pymatgen.core.structure import Structure\nfrom pymatgen.core.periodic_table import Specie\nimport csv\n\n__author__ = \"Xingfeng He\"\n__email__ = \"[email protected]\"\n__version__ = \"0.1\"\n__date__ = \"6/6/2017\"\n__credit__ = \"Pymatgen Development Team\"\n\n\nclass DiffusivityAnalyzer(MSONable):\n def __init__(self, structure, displacements, specie, temperature,\n time_step, step_skip, time_intervals_number=1000,\n spec_dict=None):\n \"\"\"\n Calculate MSD from pre-processed data, and implemented linear fitting to obtain diffusivity.\n\n :param structure (Structure): initial structure\n :param displacements (np.array): numpy array, shape is [n_ions, n_steps, axis]\n :param specie (str): species string, can be Li or Li+, make sure structure has oxidation\n state accordingly.\n :param temperature (float): temperature of MD\n :param time_step (float): time step in MD\n :param step_skip (int): Sampling frequency of the displacements (\n time_step is multiplied by this number to get the real time\n between measurements)\n :param time_intervals_number (int): number of time intervals. Default is 1000\n means there are ~1000 time intervals.\n :param spec_dict (dict): spec dict of linear fitting. Default is\n {'lower_bound': 4.5, 'upper_bound': 0.5, 'minimum_msd_diff': 4.5}\n lower_bound is in unit of Angstrom square\n upper_bound is in unit of total time. 0.5 means upper fitting bound is 0.5*t_total\n minimum_msd_diff is in unit of Angstrom square. 
msd[upper_bound] - msd[lower_bound] should larger\n than minimum_msd_diff to do linear fitting.\n \"\"\"\n spec_dict = spec_dict if spec_dict is not None else {'lower_bound': 4.5, 'upper_bound': 0.5,\n 'minimum_msd_diff': 4.5}\n if not {'lower_bound', 'upper_bound', 'minimum_msd_diff'} <= set(spec_dict.keys()):\n raise Exception(\"spec_dict does not have enough parameters.\")\n time_step_displacements = time_step * step_skip\n # prepare\n indices = []\n framework_indices = []\n for i, site in enumerate(structure):\n if site.specie.symbol == specie:\n indices.append(i)\n else:\n framework_indices.append(i)\n if len(indices) == 0:\n raise Exception(\"There is no specie {} in the structure\".format(specie))\n if len(framework_indices) == 0:\n dc = displacements\n else:\n framework_disp = displacements[framework_indices]\n drift = np.average(framework_disp, axis=0)[None, :, :]\n dc = displacements - drift\n df = structure.lattice.get_fractional_coords(dc)\n displacements_final_diffusion_ions = dc[indices]\n displacements_frac_final_diffusion_ions = df[indices]\n n_ions, n_steps, dim = displacements_final_diffusion_ions.shape\n\n # time intervals, dt\n dt_indices = np.arange(1, n_steps, max(int((n_steps - 1) / time_intervals_number), 1))\n dt = dt_indices * time_step_displacements\n\n # calculate msd\n # define functions, algorithm from\n # http://stackoverflow.com/questions/34222272/computing-mean-square-displacement-using-python-and-fft\n def autocorrelation_fft(x):\n N = x.shape[0]\n F = np.fft.fft(x, n=2 * N)\n PSD = F * F.conjugate()\n res = np.fft.ifft(PSD)\n res = (res[:N]).real\n n = N * np.ones(N) - np.arange(N)\n return res / n\n\n def one_ion_msd_fft(r, dt_indices):\n \"\"\"\n r (np.array, shape is typically [n_step,3], n_step is number of steps, 3 is 3 dimentions)\n \"\"\"\n # ------------ S1\n n_step, dim = r.shape\n r_square = np.square(r)\n r_square = np.append(r_square, np.zeros((1, dim)), axis=0) # (n_step+1, 3)\n S1_component = np.zeros((dim, n_step)) # (dim, n_step)\n r_square_sum = 2 * np.sum(r_square, axis=0) # (3)\n for i in range(n_step):\n r_square_sum = r_square_sum - r_square[i - 1, :] - r_square[n_step - i, :]\n S1_component[:, i] = r_square_sum / (n_step - i)\n S1 = np.sum(S1_component, axis=0)\n\n # ------------ S2\n S2_component = np.array([autocorrelation_fft(r[:, i]) for i in range(r.shape[1])]) # (dim, N)\n S2 = np.sum(S2_component, axis=0)\n\n # ------------ return\n return (S1 - 2 * S2)[dt_indices], (S1_component - 2 * S2_component)[:, dt_indices]\n\n n_dt = len(dt_indices)\n msd_by_ions = np.empty([0, n_dt]) # shape of n_ions * n_dt\n msd_component_by_ions = np.empty([3, 0, n_dt]) # shape of 3 * n_ions * n_dt\n for i in range(n_ions):\n msd_i, msd_component_i = one_ion_msd_fft(displacements_final_diffusion_ions[i, :, :], dt_indices)\n msd_by_ions = np.append(msd_by_ions,\n msd_i.reshape(1, n_dt),\n axis=0)\n msd_component_by_ions = np.append(msd_component_by_ions,\n msd_component_i.reshape(3, 1, n_dt),\n axis=1)\n msd = np.average(msd_by_ions, axis=0)\n msd_component = np.average(msd_component_by_ions, axis=1)\n\n # further things, 1. determine lower_index, upper_index 2. linear fitting, 3. error bar\n\n # one headache, how about error in different axis\n lower_bound_index = len(msd[msd < spec_dict['lower_bound']])\n upper_bound_index = int(len(msd) * spec_dict['upper_bound']) - 1\n \"\"\"\n if lower_bound_index >= upper_bound_index - 2:\n raise Exception(\"Maximum MSD is {:.2f}. \".format(max(msd)) + \\\n \"MSD array has shape of {}. 
\".format(msd.shape) + \\\n \"Lower bound index is {}, upper bound index is {}. \".format(lower_bound_index,\n upper_bound_index) + \\\n \"There is no enough data to fit. \" + \\\n \"Please consider extending your MD simulation or increasing the temperature.\")\n\n if msd[upper_bound_index] - msd[lower_bound_index] < spec_dict['minimum_msd_diff']:\n raise Exception(\n \"Maximum MSD is {:.2f}. \".format(max(msd)) + \\\n \"MSD at lower bound is {:.2f}, MSD at upper bound is {:.2f}. The MSD fitting range is too small. \" \\\n .format(msd[lower_bound_index], msd[upper_bound_index]) + \\\n \"Please consider extending your MD simulation or increasing the temperature.\")\n \"\"\"\n if lower_bound_index >= upper_bound_index - 2 or \\\n msd[upper_bound_index] - msd[lower_bound_index] < spec_dict['minimum_msd_diff']:\n slope = -1\n intercept = -1\n slope_components = np.zeros(dim)\n else:\n slope, intercept, _, _, _ = stats.linregress(dt[lower_bound_index:upper_bound_index + 1],\n msd[lower_bound_index:upper_bound_index + 1])\n slope_components = np.zeros(dim)\n for i in range(dim):\n s, _, _, _, _ = stats.linregress(dt[lower_bound_index:upper_bound_index + 1],\n msd_component[i, :][lower_bound_index:upper_bound_index + 1])\n slope_components[i] = s\n\n self.structure = structure\n self.indices = indices\n self.framework_indices = framework_indices\n self.drift = drift\n self.drift_maximum = np.max(np.abs(drift), axis=1)[0] # the maximum drift vector of the framework ions, shape is (3,)\n self.disp = displacements\n self.displacements_final_diffusion_ions = displacements_final_diffusion_ions\n self.specie = specie\n self.temperature = temperature\n self.time_step = time_step\n self.step_skip = step_skip\n self.time_step_displacements = time_step_displacements\n self.time_intervals_number = time_intervals_number\n self.spec_dict = spec_dict\n if len(framework_indices) == 0:\n self.max_framework_displacement = 0.0\n else:\n self.max_ion_displacements = np.max(np.sum(\n dc ** 2, axis=-1) ** 0.5, axis=1)\n self.max_framework_displacement = \\\n np.max(self.max_ion_displacements[framework_indices])\n\n self.dt = dt\n self.lower_bound = spec_dict['lower_bound']\n self.upper_bound = spec_dict['upper_bound']\n self.lower_bound_index = lower_bound_index\n self.upper_bound_index = upper_bound_index\n\n self.msd = msd\n self.msd_by_ions = msd_by_ions\n self.msd_component = msd_component\n self.diffusivity = slope / (20 * dim)\n self.diffusivity_components = slope_components / 20\n\n def get_summary_dict(self, oxidized_specie=None):\n \"\"\"\n A summary of information\n :param oxidized_specie (str): specie string with oxidation state. 
If provided or specie in initial\n function is oxidized, it will calculate conductivity based on nernst-einstein relationship.\n :return: dict of diffusion information\n keys: D, D_components, specie, step_skip, temperature, msd, msd_component, dt, time_intervals_number\n spec_dict\n \"\"\"\n d = {\"diffusivity\": self.diffusivity,\n \"diffusivity_components\": self.diffusivity_components,\n \"specie\": self.specie,\n \"step_skip\": self.step_skip,\n \"temperature\": self.temperature,\n \"msd\": self.msd,\n \"msd_component\": self.msd_component,\n \"dt\": self.dt,\n \"time_intervals_number\": self.time_intervals_number,\n \"spec_dict\": self.spec_dict,\n \"drift_maximum\": self.drift_maximum,\n \"max_framework_displacement\": self.max_framework_displacement\n }\n oxi = False\n if oxidized_specie:\n df_sp = Specie.from_string(oxidized_specie)\n oxi = True\n else:\n try:\n df_sp = Specie.from_string(self.specie)\n oxi = True\n except:\n pass\n if oxi:\n factor = get_conversion_factor(self.structure, df_sp, self.temperature)\n d['conductivity'] = factor * self.diffusivity\n d['conductivity_components'] = factor * self.diffusivity_components\n d['conversion_factor'] = factor\n d['oxidation_state'] = df_sp.oxi_state\n return d\n\n @classmethod\n def from_structures(cls, structures, specie, temperature,\n time_step, step_skip, time_intervals_number=1000,\n spec_dict=None):\n \"\"\"\n Convenient constructor that takes in a list of Structure objects to\n perform diffusion analysis.\n\n :param structures ([Structure]): list of Structure objects:\n :param specie (str): species string, like Li, Li+\n :param temperature (float): temperature of MD\n :param time_step (float): time step in MD\n :param step_skip (int): Sampling frequency of the displacements (\n time_step is multiplied by this number to get the real time\n between measurements)\n :param time_intervals_number (int): number of time intervals. Default is 1000\n means there are ~1000 time intervals.\n :param spec_dict (dict): spec dict of linear fitting. Default is\n {'lower_bound': 4.5, 'upper_bound': 0.5, 'minimum_msd_diff': 4.5}\n lower_bound is in unit of Angstrom square\n upper_bound is in unit of total time. 0.5 means upper fitting bound is 0.5*t_total\n minimum_msd_diff is in unit of Angstrom square. msd[upper_bound] - msd[lower_bound] should larger\n than minimum_msd_diff to do linear fitting.\n \"\"\"\n p = []\n for i, s in enumerate(structures):\n if i == 0:\n structure = s\n p.append(np.array(s.frac_coords)[:, None])\n\n p.insert(0, p[0])\n p = np.concatenate(p, axis=1)\n dp = p[:, 1:] - p[:, :-1]\n dp = dp - np.round(dp)\n f_disp = np.cumsum(dp, axis=1)\n disp = structure.lattice.get_cartesian_coords(f_disp)\n\n return cls(structure, disp, specie, temperature,\n time_step, step_skip=step_skip,\n time_intervals_number=time_intervals_number,\n spec_dict=spec_dict)\n\n @classmethod\n def from_vaspruns(cls, vaspruns, specie,\n time_intervals_number=1000,\n spec_dict=None):\n \"\"\"\n Convenient constructor that takes in a list of Vasprun objects to\n perform diffusion analysis.\n\n :param vaspruns ([Vasprun]): List of Vaspruns (ordered):\n :param specie (str): species string, like Li, Li+\n :param time_intervals_number (int): number of time intervals. Default is 1000\n means there are ~1000 time intervals.\n :param spec_dict (dict): spec dict of linear fitting. Default is\n {'lower_bound': 4.5, 'upper_bound': 0.5, 'minimum_msd_diff': 4.5}\n lower_bound is in unit of Angstrom square\n upper_bound is in unit of total time. 
0.5 means upper fitting bound is 0.5*t_total\n minimum_msd_diff is in unit of Angstrom square. msd[upper_bound] - msd[lower_bound] should larger\n than minimum_msd_diff to do linear fitting.\n \"\"\"\n\n def get_structures(vaspruns):\n for i, vr in enumerate(vaspruns):\n if i == 0:\n step_skip = vr.ionic_step_skip or 1\n final_structure = vr.initial_structure\n temperature = vr.parameters['TEEND']\n time_step = vr.parameters['POTIM']\n yield step_skip, temperature, time_step\n # check that the runs are continuous\n fdist = pbc_diff(vr.initial_structure.frac_coords,\n final_structure.frac_coords)\n if np.any(fdist > 0.001):\n raise ValueError('initial and final structures do not '\n 'match.')\n final_structure = vr.final_structure\n\n assert (vr.ionic_step_skip or 1) == step_skip\n for s in vr.ionic_steps:\n yield s['structure']\n\n s = get_structures(vaspruns)\n step_skip, temperature, time_step = next(s)\n\n return cls.from_structures(structures=s, specie=specie,\n temperature=temperature, time_step=time_step, step_skip=step_skip,\n time_intervals_number=time_intervals_number, spec_dict=spec_dict)\n\n @classmethod\n def from_files(cls, filepaths, specie, step_skip=10, ncores=None,\n time_intervals_number=1000,\n spec_dict=None):\n \"\"\"\n Convenient constructor that takes in a list of vasprun.xml paths to\n perform diffusion analysis.\n\n :param filepaths ([str]): List of paths to vasprun.xml files of runs, ordered.\n :param specie (str): species string, like Li, Li+\n :param step_skip (int): Sampling frequency of the displacements (\n time_step is multiplied by this number to get the real time\n between measurements)\n :param ncores (int): Numbers of cores to use for multiprocessing. Can\n speed up vasprun parsing considerably. Defaults to None,\n which means serial. It should be noted that if you want to\n use multiprocessing, the number of ionic steps in all vasprun\n .xml files should be a multiple of the ionic_step_skip.\n Otherwise, inconsistent results may arise. Serial mode has no\n such restrictions.\n :param time_intervals_number (int): number of time intervals. Default is 1000\n means there are ~1000 time intervals.\n :param spec_dict (dict): spec dict of linear fitting. Default is\n {'lower_bound': 4.5, 'upper_bound': 0.5, 'minimum_msd_diff': 4.5}\n lower_bound is in unit of Angstrom square\n upper_bound is in unit of total time. 0.5 means upper fitting bound is 0.5*t_total\n minimum_msd_diff is in unit of Angstrom square. 
msd[upper_bound] - msd[lower_bound] should larger\n than minimum_msd_diff to do linear fitting.\n \"\"\"\n if ncores is not None and len(filepaths) > 1:\n import multiprocessing\n p = multiprocessing.Pool(ncores)\n vaspruns = p.imap(_get_vasprun,\n [(fp, step_skip) for fp in filepaths])\n analyzer = cls.from_vaspruns(vaspruns, specie=specie,\n time_intervals_number=time_intervals_number,\n spec_dict=spec_dict)\n p.close()\n p.join()\n return analyzer\n else:\n def vr(filepaths):\n offset = 0\n for p in filepaths:\n v = Vasprun(p, ionic_step_offset=offset,\n ionic_step_skip=step_skip)\n yield v\n # Recompute offset.\n offset = (-(v.nionic_steps - offset)) % step_skip\n\n return cls.from_vaspruns(vr(filepaths), specie=specie,\n time_intervals_number=time_intervals_number,\n spec_dict=spec_dict)\n\n def as_dict(self):\n return {\n \"@module\": self.__class__.__module__,\n \"@class\": self.__class__.__name__,\n \"structure\": self.structure.as_dict(),\n \"displacements\": self.disp.tolist(),\n \"specie\": self.specie,\n \"temperature\": self.temperature,\n \"time_step\": self.time_step,\n \"step_skip\": self.step_skip,\n \"time_intervals_number\": self.time_intervals_number,\n \"spec_dict\": self.spec_dict}\n\n @classmethod\n def from_dict(cls, d):\n structure = Structure.from_dict(d[\"structure\"])\n return cls(structure, np.array(d[\"displacements\"]), specie=d[\"specie\"],\n temperature=d[\"temperature\"], time_step=d[\"time_step\"],\n step_skip=d[\"step_skip\"], time_intervals_number=d[\"time_intervals_number\"],\n spec_dict=d['spec_dict'])\n\n\nclass ErrorAnalysisFromDiffusivityAnalyzer(object):\n def __init__(self, diffusivity_analyzer, site_distance=3.0):\n \"\"\"\n Estimate the relative standard deviation (RSD) of D from the equation:\n RSD = 3.43/sqrt(N_jump) + 0.04\n\n :param diffusivity_analyzer (DiffusivityAnalyzer object):\n :param site_distance (float): the site distance between diffusion ions (averagely)\n \"\"\"\n n_jump = len(diffusivity_analyzer.indices) * \\\n np.max(diffusivity_analyzer.msd) / (site_distance * site_distance)\n n_jump_component = len(diffusivity_analyzer.indices) * \\\n np.max(diffusivity_analyzer.msd_component, axis=1) / (site_distance * site_distance)\n RSD_D = 3.43 / np.sqrt(n_jump) + 0.04\n RSD_D_component = [None, None, None]\n for i in range(3):\n RSD_D_component[i] = 3.43 / np.sqrt(n_jump_component[i]) + 0.04\n self.diffusivity_analyzer = diffusivity_analyzer\n self.n_jump = n_jump\n self.n_jump_component = n_jump_component\n self.RSD_D = RSD_D\n self.RSD_D_component = np.array(RSD_D_component)\n\n def get_summary_dict(self, oxidized_specie=None):\n \"\"\"\n A summary of information\n :param oxidized_specie (str): specie string with oxidation state. 
If provided or specie in initial\n function is oxidized, it will calculate conductivity based on nernst-einstein relationship.\n :return: dict of diffusion information\n \"\"\"\n d = self.diffusivity_analyzer.get_summary_dict(oxidized_specie=oxidized_specie)\n d['n_jump'] = self.n_jump\n d['n_jump_component'] = self.n_jump_component\n d['diffusivity_relative_standard_deviation'] = self.RSD_D\n d['diffusivity_standard_deviation'] = self.RSD_D * d['diffusivity']\n d['diffusivity_component_relative_standard_deviation'] = self.RSD_D_component\n d['diffusivity_component_relative_standard_deviation'] = self.RSD_D_component * d['diffusivity_components']\n return d\n\n\ndef _get_vasprun(args):\n \"\"\"\n Internal method to support multiprocessing.\n \"\"\"\n return Vasprun(args[0], ionic_step_skip=args[1],\n parse_dos=False, parse_eigen=False)\n\n\nclass ArreheniusAnalyzer(object):\n def __init__(self, temperatures, diffusivities, diffusivity_errors=None):\n \"\"\"\n Fitting arrehenius relationship from temperatures, diffusivities and diffusivity_error\n :param temperatures (List): list of temperatures\n :param diffusivities (List): list of diffusivities at different temperatures\n :param diffusivity_errors (List): optional, list of diffusivity error at different temperatures\n \"\"\"\n slope_to_eV = -8.617e-5 * 1000 * np.log(10)\n\n def linear(x, k, b):\n return k * x + b\n\n x = np.array([1000.0 / i for i in temperatures])\n y = np.array([np.log10(i) for i in diffusivities])\n if diffusivity_errors is None:\n [slope, intercept], cov = curve_fit(linear, x, y)\n slope_sigma = np.sqrt(np.diag(cov))[0]\n intercept_sigma = np.sqrt(np.diag(cov))[1]\n y_error = None\n else:\n y_error = [np.log10(np.e) * diffusivity_errors[i] / diffusivities[i] for i in range(len(diffusivities))]\n [slope, intercept], cov = curve_fit(linear, x, y, sigma=y_error, absolute_sigma=True)\n slope_sigma = np.sqrt(np.diag(cov))[0]\n intercept_sigma = np.sqrt(np.diag(cov))[1]\n\n self.temperatures = temperatures\n self.diffusivities = diffusivities\n self.x = x # 1000/T\n self.y = y # log10(D)\n self.diffusivity_errors = diffusivity_errors\n self.y_error = y_error\n self.Ea = slope_to_eV * slope\n self.Ea_error = -1 * slope_to_eV * slope_sigma\n self.intercept = intercept\n self.intercept_sigma = intercept_sigma\n self.slope = slope\n self.slope_sigma = slope_sigma\n\n def predict_diffusivity(self, temperature):\n \"\"\"\n\n :param temperature (float): target temperature\n :return: corresponding diffusivity, and diffusivity range based on error of linear fitting\n \"\"\"\n logD = self.slope * (1000.0 / temperature) + self.intercept\n logD_sigma = np.sqrt(np.power(self.slope_sigma * (1000.0 / temperature), 2) + \\\n np.power(self.intercept_sigma, 2))\n D_min = np.power(10, logD - logD_sigma)\n D_max = np.power(10, logD + logD_sigma)\n return np.power(10, logD), [D_min, D_max]\n\n def predict_conductivity(self, temperature, structure, specie):\n \"\"\"\n\n :param temperature (float): target temperature\n :param structure (Structure): one structure, used to calculate convertion factor from diffusivity\n to conductivity\n :param specie (str): string of diffusion specie, should contain oxidation state, such as Li+, O2-\n :return: corresponding conductivity, and conductivity range based on error of linear fitting\n \"\"\"\n D, [D_min, D_max] = self.predict_diffusivity(temperature)\n factor = get_conversion_factor(structure, specie, temperature)\n return factor * D, [factor * D_min, factor * D_max]\n\n def get_arrhenius_plot(self):\n 
from pymatgen.util.plotting import pretty_plot\n plt = pretty_plot(12, 8)\n arr = np.power(10, self.slope * self.x + self.intercept)\n plt.plot(self.x, self.diffusivities, 'ko', self.x, arr, 'k--', markersize=10)\n plt.errorbar(self.x, self.diffusivities, yerr=self.diffusivity_errors,\n fmt='ko', ecolor='k', capthick=2, linewidth=2)\n ax = plt.axes()\n ax.set_yscale('log')\n plt.text(0.6, 0.85, \"E$_a$ = {:.3f} eV\".format(self.Ea),\n fontsize=30, transform=plt.axes().transAxes)\n plt.ylabel(\"D (cm$^2$/s)\")\n plt.xlabel(\"1000/T (K$^{-1}$)\")\n plt.tight_layout()\n return plt\n\n @classmethod\n def from_csv(cls, csv_file):\n with open(csv_file, 'r') as csvf:\n a = csv.reader(csvf, delimiter=str(\",\"))\n data_keys = []\n data_list = []\n for row_index, row in enumerate(a):\n if row_index == 0:\n data_keys = row\n data_list = [[] for _ in range(len(row))]\n else:\n for col_index, col in enumerate(row):\n data_list[col_index].append(float(col))\n data_dict = {data_keys[i]: data_list[i] for i in range(len(data_keys))}\n if set(data_keys) >= set(['T', 'D', 'D_error']):\n return cls(data_dict['T'], data_dict['D'], data_dict['D_error'])\n elif set(data_keys) >= set(['T', 'D']):\n return cls(data_dict['T'], data_dict['D'])\n else:\n raise Exception(\"Please make sure the header name in csv file is T, D, D_error(optional)\")\n\n\ndef get_conversion_factor(structure, specie, temperature):\n \"\"\"\n Conversion factor to convert between cm^2/s diffusivity measurements and\n mS/cm conductivity measurements based on number of atoms of diffusing\n species.\n :param structure (Structure): Input structure.\n :param specie (string/specie): Diffusing species string, must contain oxidation state.\n :param temperature (float): Temperature of the diffusion run in Kelvin.\n :return: Conversion factor.\n Conductivity (in mS/cm) = Conversion Factor * Diffusivity (in cm^2/s)\n \"\"\"\n if type(specie) is Specie:\n df_sp = specie\n else:\n try:\n df_sp = Specie.from_string(specie)\n except:\n raise Exception(\"Please provide oxidation decorated specie, like Li+, O2-\")\n z = df_sp.oxi_state\n el, occu = structure.composition.items()[0]\n if isinstance(el, Specie): # oxidation decorated structure\n n = structure.composition[specie]\n else:\n n = structure.composition[str(df_sp.element)]\n if n == 0:\n raise Exception(\"No specie {} in the structure composition: {}\".format(specie, structure.composition))\n vol = structure.volume * 1e-24 # units cm^3\n N_A = 6.022140857e+23\n e = 1.6021766208e-19\n R = 8.3144598\n return 1000 * n / (vol * N_A) * z ** 2 * (N_A * e) ** 2 \\\n / (R * temperature)\n" ]
[ [ "numpy.diag", "numpy.sqrt", "numpy.cumsum", "numpy.concatenate", "numpy.max", "numpy.round", "numpy.any", "scipy.optimize.curve_fit", "numpy.square", "numpy.arange", "numpy.zeros", "numpy.log", "numpy.power", "numpy.fft.ifft", "scipy.stats.linregress", "numpy.log10", "numpy.array", "numpy.sum", "numpy.abs", "numpy.fft.fft", "numpy.ones", "numpy.average", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] } ]
parlamentikon/parlamentikon
[ "bae9b23b656e89bb922d4b321554964413541327" ]
[ "parlamentikon/TabulkySchuze.py" ]
[ "\nimport pandas as pd\n\nfrom parlamentikon.utility import pretypuj, mask_by_values, format_to_datetime_and_report_skips\nfrom parlamentikon.Helpers import MItem\n\n#from parlamentikon.Snemovna import *\n#from parlamentikon.PoslanciOsoby import *\n\nfrom parlamentikon.setup_logger import log\n\n# Informace k tabulkám, viz. https://www.psp.cz/sqw/hp.sqw?k=1308\n\nclass TabulkaSchuzeMixin(object):\n def nacti_schuze(self):\n # Obsahuje záznamy o schůzích.\n # Pro každou schůzi jsou v tabulce nejvýše dva záznamy, jeden vztahující se k návrhu pořadu, druhý ke schválenému pořadu.\n # I v případě neschválení pořadu schůze jsou dva záznamy, viz schuze:pozvanka a schuze_stav:stav.\n path = f\"{self.parameters['data_dir']}/schuze.unl\"\n self.paths['schuze'] = path\n header = {\n 'id_schuze': MItem('Int64', 'Identifikátor schůze, není to primární klíč, je nutno používat i položku schuze:pozvanka. Záznamy schůzí stejného orgánu a stejného čísla (tj. schuze:id_org a schuze:schuze), mají stejné schuze:id_schuze a liší se pouze v schuze:pozvanka.'),\n 'id_org': MItem('Int64', 'Identifikátor orgánu, viz Organy:id_org.'),\n 'schuze': MItem('Int64', 'Číslo schůze.'),\n 'od_schuze': MItem('string', 'Předpokládaný začátek schůze; viz též tabulka schuze_stav'),\n 'do_schuze': MItem('string', 'Konec schůze. V případě schuze:pozvanka == 1 se nevyplňuje.'),\n 'aktualizace': MItem('string', 'Datum a čas poslední aktualizace.'),\n 'pozvanka__ORIG': MItem('Int64', 'Druh záznamu: null - schválený pořad, 1 - navržený pořad.')\n }\n\n _df = pd.read_csv(path, sep=\"|\", names = header, index_col=False, encoding='cp1250')\n df = pretypuj(_df, header, 'schuze')\n self.rozsir_meta(header, tabulka='schuze', vlastni=False)\n\n # Oprava známých chybných hodnot (očividných překlepů)\n #df.at[768, 'od_schuze'] = \"2020-05-31 09:00\"\n\n # Přidej sloupec 'od_schuze' typu datetime\n df['od_schuze'] = format_to_datetime_and_report_skips(df, 'od_schuze', to_format='%Y-%m-%d %H:%M')\n df['od_schuze'] = df['od_schuze'].dt.tz_localize(self.tzn)\n\n\n # Přidej sloupec 'do_schuze' typu datetime\n df['do_schuze'] = format_to_datetime_and_report_skips(df, 'do_schuze', to_format='%Y-%m-%d %H:%M')\n df['do_schuze'] = df['do_schuze'].dt.tz_localize(self.tzn)\n\n mask = {None: 'schválený pořad', 1: 'navržený pořad'}\n df['pozvanka'] = mask_by_values(df.pozvanka__ORIG, mask).astype('string')\n self.meta.nastav_hodnotu('pozvanka', dict(popis='Druh záznamu: [schválený pořad; navržený pořad].', tabulka='schuze', vlastni=True))\n\n self.tbl['schuze'], self.tbl['_schuze'] = df, _df\n\nclass TabulkaSchuzeStavMixin(object):\n def nacti_schuze_stav(self):\n path = f\"{self.parameters['data_dir']}/schuze_stav.unl\"\n self.paths['schuze_stav'] = path\n header = {\n 'id_schuze': MItem('Int64', 'Identifikátor schůze, viz Schuze:id_schuze.'),\n 'stav__ORIG': MItem('Int64', 'Stav schůze: 1 - OK, 2 - pořad schůze nebyl schválen a schůze byla ukončena.'),\n 'typ__ORIG': MItem('Int64', 'Typ schůze: 1 - řádná, 2 - mimořádná (navržená skupinou poslanců). 
Dle jednacího řádu nelze měnit navržený pořad mimořádné schůze.'),\n 'text_dt': MItem('string', 'Zvláštní určení začátku schůze: pokud je vyplněno, použije se namísto Schuze:od_schuze.'),\n 'text_st': MItem('string', 'Text stavu schůze, obvykle informace o přerušení.'),\n 'tm_line': MItem('string', 'Podobné jako SchuzeStav:text_st, pouze psáno na začátku s velkým písmenem a ukončeno tečkou.')\n }\n\n _df = pd.read_csv(path, sep=\"|\", names = header, index_col=False, encoding='cp1250')\n df = pretypuj(_df, header, 'schuze_stav')\n self.rozsir_meta(header, tabulka='schuze_stav', vlastni=False)\n\n mask = {1:\"OK\", 2:\"pořad neschválen, schůze ukončena\"}\n df['stav'] = mask_by_values(df.stav__ORIG, mask).astype('string')\n self.meta.nastav_hodnotu('stav', dict(popis='Stav schůze: [OK; pořad schůze nebyl schválen a schůze byla ukončena].', tabulka='schuze_stav', vlastni=True))\n\n mask = {1: \"řádná\", 2: \"mimořádná\"}\n df['typ'] = mask_by_values(df.typ__ORIG, mask).astype('string')\n self.meta.nastav_hodnotu('typ', dict(popis='Typ schůze. Typ schůze: [řádná; mimořádná (navržená skupinou poslanců)]. Dle jednacího řádu nelze měnit navržený pořad mimořádné schůze.', tabulka='schuze_stav', vlastni=True))\n\n self.tbl['schuze_stav'], self.tbl['_schuze_stav'] = df, _df\n\n\nclass TabulkaBodStavMixin(object):\n def nacti_bod_stav(self):\n path = f\"{self.parameters['data_dir']}/bod_stav.unl\"\n self.paths['bod_stav'] = path\n header = {\n 'id_bod_stav': MItem('Int64', 'Typ stavu bodu schůze: typ 3 - neprojednatelný znamená vyřazen z pořadu či neprojednatelný z důvodu legislativního procesu.'),\n 'popis': MItem('string', 'Popis stavu bodu.')\n }\n\n _df = pd.read_csv(path, sep=\"|\", names = header, index_col=False, encoding='cp1250')\n df = pretypuj(_df, header, 'bod_stav')\n self.rozsir_meta(header, tabulka='bod_stav', vlastni=False)\n\n df['id_bod_stav__KAT'] = df.id_bod_stav.astype(str).mask(df.id_bod_stav == 3, 'neprojednatelný')\n self.meta.nastav_hodnotu('id_bod_stav__KAT', dict(popis='Typ stavu bodu schůze.', tabulka='bod_stav', vlastni=True))\n\n self.tbl['bod_stav'], self.tbl['_bod_stav'] = df, _df\n\n\n# Obsahuje záznamy o bodech pořadu schůze. Body typu odpověď na písemnou interpelaci (bod_schuze:id_typ == 6) se obvykle nezobrazují, viz dále.\n#Při zobrazení bodu se použijí položky bod_schuze:uplny_naz. Pokud je bod_schuze:id_tisk nebo bod_schuze:id_sd vyplněno, pak se dále použije bod_schuze:uplny_kon, případně text závislý na bod_schuze.id_typ. Poté následuje bod_schuze:poznamka.\nclass TabulkaBodSchuzeMixin(object):\n def nacti_bod_schuze(self):\n path = f\"{self.parameters['data_dir']}/bod_schuze.unl\"\n self.paths['bod_schuze'] = path\n header = {\n 'id_bod': MItem('Int64', 'Identifikátor bodu pořadu schůze, není to primární klíč, je nutno používat i položku bod_schuze:pozvanka. Záznamy se stejným id_bod odkazují na stejný bod, i když číslo bodu může být rozdílné (během schvalování pořadu schůze se pořadí bodů může změnit).'),\n 'id_schuze': MItem('Int64', 'Identifikátor schůze, viz Schuze:id_schuze a též schuze:pozvanka.'),\n 'id_tisk': MItem('Int64', 'Identifikátor tisku, pokud se bod k němu vztahuje. V tomto případě lze využít bod_schuze:uplny_kon.'),\n 'id_typ': MItem('Int64', 'Typ bodu, resp. typ projednávání. Kromě bod_schuze:id_typ == 6, se jedná o typ stavu, viz stavy:id_typ a tabulka níže. 
Je-li bod_schuze:id_typ == 6, jedná se o jednotlivou odpověď na písemnou interpelaci a tento záznam se obykle nezobrazuje (navíc má stejné id_bodu jako bod odpovědi na písemné interpelace a může mít různé číslo bodu).'),\n 'bod': MItem('Int64', 'Číslo bodu. Pokud je menší než jedna, pak se při výpisu číslo bodu nezobrazuje.'),\n 'uplny_naz': MItem('string', 'Úplný název bodu.'),\n 'uplny_kon': MItem('string', 'Koncovka názvu bodu s identifikací čísla tisku nebo čísla sněmovního dokumentu, pokud jsou používány, viz BodSchuze:id_tisk a BodSchuze:id_sd.'),\n 'poznamka': MItem('string', 'Poznámka k bodu - obvykle obsahuje informaci o pevném zařazení bodu.'),\n 'id_bod_stav': MItem('Int64', 'Stav bodu pořadu, viz BodStav:id_bod_stav. U bodů návrhu pořadu se nepoužije.'),\n 'pozvanka': MItem('Int64', 'Rozlišení záznamu, viz Schuze:pozvanka'),\n 'rj': MItem('Int64', 'Režim dle par. 90, odst. 2 jednacího řádu.'),\n 'pozn2': MItem('string', 'Poznámka k bodu, zkrácený zápis'),\n 'druh_bodu': MItem('Int64', 'Druh bodu: 0 nebo null: normální, 1: odpovědi na ústní interpelace, 2: odpovědi na písemné interpelace, 3: volební bod'),\n 'id_sd': MItem('Int64', 'Identifikátor sněmovního dokumentu, viz sd_dokument:id_dokument. Pokud není null, při výpisu se zobrazuje BodSchuze:uplny_kon.'),\n 'zkratka': MItem('string', 'Zkrácený název bodu, neoficiální.')\n }\n\n _df = pd.read_csv(path, sep=\"|\", names = header, index_col=False, encoding='cp1250')\n df = pretypuj(_df, header, 'bod_schuze')\n self.rozsir_meta(header, tabulka='bod_schuze', vlastni=False)\n\n self.tbl['bod_schuze'], self.tbl['_bod_schuze'] = df, _df\n\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
pedrohma/python-ai-study
[ "362a33aaaf3e334c3e669458dec34a4fcf15029c" ]
[ "neural network/neural.py" ]
[ "from numpy import exp, array, random, dot\n\nclass NeuralNetwork():\n def __init__(self):\n random.seed(1)\n\n self.synaptic_weights = 2 * random.random((3,1)) - 1\n\n def __sigmoid(self, y):\n return 1/(1 + exp(-y))\n\n def __sigmoid_derivative(self, y):\n return y * (1 - y)\n\n def train(self, training_set_inputs, training_set_outputs, number_of_training_iterations):\n for iteration in range(number_of_training_iterations):\n output = self.think(training_set_inputs)\n\n error = training_set_outputs - output\n\n adjustment = dot(training_set_inputs.T, error * self.__sigmoid_derivative(output))\n\n self.synaptic_weights += adjustment\n\n def think(self, inputs):\n return self.__sigmoid(dot(inputs, self.synaptic_weights))\n\n\nif __name__ == \"__main__\":\n\n neural_network = NeuralNetwork()\n\n print(\"Random starting synaptic weights: \")\n print(neural_network.synaptic_weights)\n\n training_set_inputs = array([[0, 0, 1], [1, 1, 1], [1, 0, 1], [0, 1, 1]])\n training_set_outputs = array([[0, 1, 1, 0]]).T\n\n neural_network.train(training_set_inputs, training_set_outputs, 10000)\n\n print(\"New synaptic weights after training: \")\n print (neural_network.synaptic_weights)\n\n print (\"Considering new situation [1, 0, 0] -> ?: \")\n print (neural_network.think(array([1, 0, 0])))\n" ]
[ [ "numpy.dot", "numpy.random.random", "numpy.random.seed", "numpy.array", "numpy.exp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Pragalbha-Patil/S-TASLC-Python
[ "d3d89f98cd7c2a5e8fbedccd0e40e6e15e0a8b43" ]
[ "main.py" ]
[ "from threading import Thread\n# import pygame\n# import speake3\nimport speech_recognition as sr\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\nfrom easygui import *\nimport os\nfrom PIL import Image, ImageTk\nfrom itertools import count\nimport tkinter as tk\nimport string\nimport platform\n\ndef play_music():\n os.system(\"gnome-terminal -e 'play bg.mp3'\")\n\n\n#import selecting for recorded voice\n\n# obtain audio from the microphone\ndef func():\n r = sr.Recognizer()\n taslc_gif=['all the best', 'any questions', 'are you angry', 'are you busy', 'are you hungry', 'are you sick', 'be careful',\n 'can we meet tomorrow', 'did you book tickets', 'did you finish homework', 'do you go to office', 'do you have money',\n 'do you want something to drink', 'do you want tea or coffee', 'do you watch TV', 'dont worry', 'flower is beautiful',\n 'good afternoon', 'good evening', 'good morning', 'good night', 'good question', 'had your lunch', 'happy journey',\n 'hello what is your name', 'how many people are there in your family', 'i am a clerk', 'i am bore doing nothing', \n 'i am fine', 'i am sorry', 'i am thinking', 'i am tired', 'i dont understand anything', 'i go to a theatre', 'i love to shop',\n 'i had to say something but i forgot', 'i have headache', 'i like pink colour', 'i live in nagpur', 'lets go for lunch', 'my mother is a homemaker',\n 'my name is john', 'nice to meet you', 'no smoking please', 'open the door', 'please call an ambulance', 'please call me later',\n 'please clean the room', 'please give me your pen', 'please use dustbin dont throw garbage', 'please wait for sometime', 'shall I help you',\n 'shall we go together tommorow', 'sign language interpreter', 'sit down', 'stand up', 'take care', 'there was traffic jam', 'wait I am thinking',\n 'what are you doing', 'what is the problem', 'what is todays date', 'what is your age', 'what is your father do', 'what is your job',\n 'what is your mobile number', 'what is your name', 'whats up', 'when is your interview', 'when we will go', 'where do you stay',\n 'where is the bathroom', 'where is the police station', 'you are wrong','address','agra','how are you','ahemdabad', 'all', 'april', 'assam', 'august', 'australia', 'badoda', 'banana', 'banaras', 'banglore',\n'bihar','bihar','bridge','cat', 'chandigarh', 'chennai', 'christmas', 'church', 'clinic', 'coconut', 'crocodile','dasara',\n'deaf', 'december', 'deer', 'delhi', 'dollar', 'duck', 'febuary', 'friday', 'fruits', 'glass', 'grapes', 'gujrat', 'hello',\n'hindu', 'hyderabad', 'india', 'january', 'jesus', 'job', 'july', 'july', 'karnataka', 'kerala', 'krishna', 'litre', 'mango',\n'may', 'mile', 'monday', 'mumbai', 'museum', 'muslim', 'nagpur', 'october', 'orange', 'pakistan', 'pass', 'police station',\n'post office', 'pune', 'punjab', 'rajasthan', 'ram', 'restaurant', 'saturday', 'september', 'shop', 'sleep', 'southafrica',\n'story', 'sunday', 'tamil nadu', 'temperature', 'temple', 'thursday', 'toilet', 'tomato', 'town', 'tuesday', 'usa', 'village',\n'voice', 'wednesday', 'weight','water','are you all right']\n \n \n arr=['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r',\n 's','t','u','v','w','x','y','z',' ']\n with sr.Microphone() as source:\n\n r.adjust_for_ambient_noise(source) \n i=0\n # thread = Thread(target = play_music)\n # thread.start()\n # thread.join()\n while True:\n print('Listening...')\n audio = r.listen(source)\n\n # recognize speech using Sphinx\n try:\n a=r.recognize_google(audio)\n # mixer.init()\n # 
mixer.music.load(\"bg.mp3\")\n # mixer.music.play()\n print(\"You said: \" + a.lower())\n \n for c in string.punctuation:\n a = a.replace(c,\"\")\n \n if(a.lower()=='goodbye'):\n print(\"Good bye!\")\n break\n \n elif(a.lower() in taslc_gif):\n \n class ImageLabel(tk.Label):\n \"\"\"a label that displays images, and plays them if they are gifs\"\"\"\n def load(self, im):\n if isinstance(im, str):\n im = Image.open(im)\n self.loc = 0\n self.frames = []\n\n try:\n for i in count(1):\n self.frames.append(ImageTk.PhotoImage(im.copy()))\n im.seek(i)\n except EOFError:\n pass\n\n try:\n self.delay = im.info['duration']\n except:\n self.delay = 100\n\n if len(self.frames) == 1:\n self.config(image=self.frames[0])\n else:\n self.next_frame()\n\n def unload(self):\n self.config(image=None)\n self.frames = None\n\n def next_frame(self):\n if self.frames:\n self.loc += 1\n self.loc %= len(self.frames)\n self.config(image=self.frames[self.loc])\n self.after(self.delay, self.next_frame)\n\n root = tk.Tk()\n lbl = ImageLabel(root)\n lbl.pack()\n lbl.load(r'/home/psp/Desktop/projects/S-TASLC-Python/TASLC_GIFS/{0}.gif'.format(a.lower()))\n root.mainloop()\n else:\n # os.system(\"espeak \"+a)\n # print(a)\n # print(len(a))\n # break\n for i in range(len(a)):\n #a[i]=a[i].lower()\n if(a[i] in arr):\n # print(a[i])\n if(a[i] == ' '): \n ImageAddress = 'letters_asl/ .jpg'\n ImageItself = Image.open(ImageAddress)\n ImageNumpyFormat = np.asarray(ImageItself)\n plt.imshow(ImageNumpyFormat)\n plt.draw()\n plt.pause(0.8) # pause how many seconds\n #plt.close()\n else:\n ImageAddress = 'letters_asl/'+a[i]+'.jpg'\n ImageItself = Image.open(ImageAddress)\n ImageNumpyFormat = np.asarray(ImageItself)\n plt.imshow(ImageNumpyFormat)\n plt.draw()\n plt.pause(0.8) # pause how many seconds\n #plt.close()\n else:\n continue\n\n except:\n print(\"Could not listen probably audio is too low to listen\")\n plt.close()\n#func()\nwhile 1:\n image = \"logo.jpg\"\n msg=\"S - TASLC - Speech to American Sign Language Converter\"\n choices = [\"Laptop Live Conversation mode\",\"Close\",\"Convert Recorded Voice\", \"RAS-PI Live Conversation mode\"]\n reply = buttonbox(msg,image=image,choices=choices)\n if reply ==choices[0]:\n func()\n if reply == choices[1]:\n quit()\n if reply==choices[2]:\n if(platform.system() == 'Windows'):\n os.system(\"python recorded.py\")\n else:\n os.system(\"python3 recorded.py\")\n break\n if reply==choices[3]:\n if(platform.system() == 'Windows'): \n os.system(\"python rasp_main.py\")\n else:\n os.system(\"python3 rasp_main.py\")\n quit()" ]
[ [ "matplotlib.pyplot.imshow", "numpy.asarray", "matplotlib.pyplot.draw", "matplotlib.pyplot.close", "matplotlib.pyplot.pause" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
alexisgroshenry/NPM3D_DSNet
[ "d1a2ec071728dcb3c733ecdee3a27f4534b67f33" ]
[ "utils/eval_np.py" ]
[ "#!/usr/bin/env python3\n\n# This file is covered by the LICENSE file in the root of this project.\n\nimport numpy as np\nimport time\n\n\nclass PanopticEval:\n \"\"\" Panoptic evaluation using numpy\n \n authors: Andres Milioto and Jens Behley\n\n \"\"\"\n\n def __init__(self, n_classes, device=None, ignore=None, offset=2**32, min_points=30):\n self.n_classes = n_classes\n assert (device == None)\n self.ignore = np.array(ignore, dtype=np.int64)\n self.include = np.array([n for n in range(self.n_classes) if n not in self.ignore], dtype=np.int64)\n\n # print(\"[PANOPTIC EVAL] IGNORE: \", self.ignore)\n # print(\"[PANOPTIC EVAL] INCLUDE: \", self.include)\n\n self.reset()\n self.offset = offset # largest number of instances in a given scan\n self.min_points = min_points # smallest number of points to consider instances in gt\n self.eps = 1e-15\n\n def num_classes(self):\n return self.n_classes\n\n def merge(self, evaluator):\n self.px_iou_conf_matrix += evaluator.px_iou_conf_matrix\n self.pan_tp += evaluator.pan_tp\n self.pan_iou += evaluator.pan_iou\n self.pan_fp += evaluator.pan_fp\n self.pan_fn += evaluator.pan_fn\n\n self.evaluated_fnames += evaluator.evaluated_fnames\n\n def reset(self):\n # general things\n # iou stuff\n self.px_iou_conf_matrix = np.zeros((self.n_classes, self.n_classes), dtype=np.int64)\n # panoptic stuff\n self.pan_tp = np.zeros(self.n_classes, dtype=np.int64)\n self.pan_iou = np.zeros(self.n_classes, dtype=np.double)\n self.pan_fp = np.zeros(self.n_classes, dtype=np.int64)\n self.pan_fn = np.zeros(self.n_classes, dtype=np.int64)\n\n self.evaluated_fnames = []\n\n ################################# IoU STUFF ##################################\n def addBatchSemIoU(self, x_sem, y_sem):\n # idxs are labels and predictions\n idxs = np.stack([x_sem, y_sem], axis=0)\n\n # make confusion matrix (cols = gt, rows = pred)\n np.add.at(self.px_iou_conf_matrix, tuple(idxs), 1)\n\n def getSemIoUStats(self):\n # clone to avoid modifying the real deal\n conf = self.px_iou_conf_matrix.copy().astype(np.double)\n # remove fp from confusion on the ignore classes predictions\n # points that were predicted of another class, but were ignore\n # (corresponds to zeroing the cols of those classes, since the predictions\n # go on the rows)\n conf[:, self.ignore] = 0\n\n # get the clean stats\n tp = conf.diagonal()\n fp = conf.sum(axis=1) - tp\n fn = conf.sum(axis=0) - tp\n return tp, fp, fn\n\n def getSemIoU(self):\n tp, fp, fn = self.getSemIoUStats()\n # print(f\"tp={tp}\")\n # print(f\"fp={fp}\")\n # print(f\"fn={fn}\")\n intersection = tp\n union = tp + fp + fn\n union = np.maximum(union, self.eps)\n iou = intersection.astype(np.double) / union.astype(np.double)\n iou_mean = (intersection[self.include].astype(np.double) / union[self.include].astype(np.double)).mean()\n\n return iou_mean, iou # returns \"iou mean\", \"iou per class\" ALL CLASSES\n\n def getSemAcc(self):\n tp, fp, fn = self.getSemIoUStats()\n total_tp = tp.sum()\n total = tp[self.include].sum() + fp[self.include].sum()\n total = np.maximum(total, self.eps)\n acc_mean = total_tp.astype(np.double) / total.astype(np.double)\n\n return acc_mean # returns \"acc mean\"\n\n ################################# IoU STUFF ##################################\n ##############################################################################\n\n ############################# Panoptic STUFF ################################\n def addBatchPanoptic(self, x_sem_row, x_inst_row, y_sem_row, y_inst_row):\n # make sure instances are not zeros (it 
messes with my approach)\n x_inst_row = x_inst_row + 1\n y_inst_row = y_inst_row + 1\n\n # only interested in points that are outside the void area (not in excluded classes)\n for cl in self.ignore:\n # make a mask for this class\n gt_not_in_excl_mask = y_sem_row != cl\n # remove all other points\n x_sem_row = x_sem_row[gt_not_in_excl_mask]\n y_sem_row = y_sem_row[gt_not_in_excl_mask]\n x_inst_row = x_inst_row[gt_not_in_excl_mask]\n y_inst_row = y_inst_row[gt_not_in_excl_mask]\n\n # first step is to count intersections > 0.5 IoU for each class (except the ignored ones)\n for cl in self.include:\n # print(\"*\"*80)\n # print(\"CLASS\", cl.item())\n # get a class mask\n x_inst_in_cl_mask = x_sem_row == cl\n y_inst_in_cl_mask = y_sem_row == cl\n\n # get instance points in class (makes outside stuff 0)\n x_inst_in_cl = x_inst_row * x_inst_in_cl_mask.astype(np.int64)\n y_inst_in_cl = y_inst_row * y_inst_in_cl_mask.astype(np.int64)\n\n # generate the areas for each unique instance prediction\n unique_pred, counts_pred = np.unique(x_inst_in_cl[x_inst_in_cl > 0], return_counts=True)\n id2idx_pred = {id: idx for idx, id in enumerate(unique_pred)}\n matched_pred = np.array([False] * unique_pred.shape[0])\n # print(\"Unique predictions:\", unique_pred)\n\n # generate the areas for each unique instance gt_np\n unique_gt, counts_gt = np.unique(y_inst_in_cl[y_inst_in_cl > 0], return_counts=True)\n id2idx_gt = {id: idx for idx, id in enumerate(unique_gt)}\n matched_gt = np.array([False] * unique_gt.shape[0])\n # print(\"Unique ground truth:\", unique_gt)\n\n # generate intersection using offset\n valid_combos = np.logical_and(x_inst_in_cl > 0, y_inst_in_cl > 0)\n offset_combo = x_inst_in_cl[valid_combos] + self.offset * y_inst_in_cl[valid_combos]\n unique_combo, counts_combo = np.unique(offset_combo, return_counts=True)\n\n # generate an intersection map\n # count the intersections with over 0.5 IoU as TP\n gt_labels = unique_combo // self.offset\n pred_labels = unique_combo % self.offset\n gt_areas = np.array([counts_gt[id2idx_gt[id]] for id in gt_labels])\n pred_areas = np.array([counts_pred[id2idx_pred[id]] for id in pred_labels])\n intersections = counts_combo\n unions = gt_areas + pred_areas - intersections\n ious = intersections.astype(np.float) / unions.astype(np.float)\n\n\n tp_indexes = ious > 0.5\n self.pan_tp[cl] += np.sum(tp_indexes)\n self.pan_iou[cl] += np.sum(ious[tp_indexes])\n\n matched_gt[[id2idx_gt[id] for id in gt_labels[tp_indexes]]] = True\n matched_pred[[id2idx_pred[id] for id in pred_labels[tp_indexes]]] = True\n\n # count the FN\n self.pan_fn[cl] += np.sum(np.logical_and(counts_gt >= self.min_points, matched_gt == False))\n\n # count the FP\n self.pan_fp[cl] += np.sum(np.logical_and(counts_pred >= self.min_points, matched_pred == False))\n\n def getPQ(self):\n # first calculate for all classes\n sq_all = self.pan_iou.astype(np.double) / np.maximum(self.pan_tp.astype(np.double), self.eps)\n rq_all = self.pan_tp.astype(np.double) / np.maximum(\n self.pan_tp.astype(np.double) + 0.5 * self.pan_fp.astype(np.double) + 0.5 * self.pan_fn.astype(np.double),\n self.eps)\n pq_all = sq_all * rq_all\n\n # then do the REAL mean (no ignored classes)\n SQ = sq_all[self.include].mean()\n RQ = rq_all[self.include].mean()\n PQ = pq_all[self.include].mean()\n\n return PQ, SQ, RQ, pq_all, sq_all, rq_all\n\n ############################# Panoptic STUFF ################################\n ##############################################################################\n\n def addBatch(self, x_sem, x_inst, 
y_sem, y_inst): # x=preds, y=targets\n ''' IMPORTANT: Inputs must be batched. Either [N,H,W], or [N, P]\n '''\n # add to IoU calculation (for checking purposes)\n self.addBatchSemIoU(x_sem, y_sem)\n\n # now do the panoptic stuff\n self.addBatchPanoptic(x_sem, x_inst, y_sem, y_inst)\n\n def addBatch_w_fname(self, x_sem, x_inst, y_sem, y_inst, fname): # x=preds, y=targets\n ''' IMPORTANT: Inputs must be batched. Either [N,H,W], or [N, P]\n '''\n # add to IoU calculation (for checking purposes)\n self.addBatchSemIoU(x_sem, y_sem)\n\n # now do the panoptic stuff\n self.addBatchPanoptic(x_sem, x_inst, y_sem, y_inst)\n\n self.evaluated_fnames.append(fname)\n\n\ndef gen_psuedo_labels(n=50):\n # generate ground truth and prediction\n sem_pred = []\n inst_pred = []\n sem_gt = []\n inst_gt = []\n\n # some ignore stuff\n N_ignore = n\n sem_pred.extend([0 for i in range(N_ignore)])\n inst_pred.extend([0 for i in range(N_ignore)])\n sem_gt.extend([0 for i in range(N_ignore)])\n inst_gt.extend([0 for i in range(N_ignore)])\n\n # grass segment\n N_grass = n+1\n N_grass_pred = np.random.randint(0, N_grass) # rest is sky\n sem_pred.extend([1 for i in range(N_grass_pred)]) # grass\n sem_pred.extend([2 for i in range(N_grass - N_grass_pred)]) # sky\n inst_pred.extend([0 for i in range(N_grass)])\n sem_gt.extend([1 for i in range(N_grass)]) # grass\n inst_gt.extend([0 for i in range(N_grass)])\n\n # sky segment\n N_sky = n+2\n N_sky_pred = np.random.randint(0, N_sky) # rest is grass\n sem_pred.extend([2 for i in range(N_sky_pred)]) # sky\n sem_pred.extend([1 for i in range(N_sky - N_sky_pred)]) # grass\n inst_pred.extend([0 for i in range(N_sky)]) # first instance\n sem_gt.extend([2 for i in range(N_sky)]) # sky\n inst_gt.extend([0 for i in range(N_sky)]) # first instance\n\n # wrong dog as person prediction\n N_dog = n+3\n N_person = N_dog\n sem_pred.extend([3 for i in range(N_person)])\n inst_pred.extend([35 for i in range(N_person)])\n sem_gt.extend([4 for i in range(N_dog)])\n inst_gt.extend([22 for i in range(N_dog)])\n\n # two persons in prediction, but three in gt\n N_person = n+4\n sem_pred.extend([3 for i in range(6 * N_person)])\n inst_pred.extend([8 for i in range(4 * N_person)])\n inst_pred.extend([95 for i in range(2 * N_person)])\n sem_gt.extend([3 for i in range(6 * N_person)])\n inst_gt.extend([33 for i in range(3 * N_person)])\n inst_gt.extend([42 for i in range(N_person)])\n inst_gt.extend([11 for i in range(2 * N_person)])\n\n # gt and pred to numpy\n sem_pred = np.array(sem_pred, dtype=np.int64).reshape(1, -1)\n inst_pred = np.array(inst_pred, dtype=np.int64).reshape(1, -1)\n sem_gt = np.array(sem_gt, dtype=np.int64).reshape(1, -1)\n inst_gt = np.array(inst_gt, dtype=np.int64).reshape(1, -1)\n\n return sem_pred, inst_pred, sem_gt, inst_gt\n\nif __name__ == \"__main__\":\n # generate problem from He paper (https://arxiv.org/pdf/1801.00868.pdf)\n classes = 5 # ignore, grass, sky, person, dog\n cl_strings = [\"ignore\", \"grass\", \"sky\", \"person\", \"dog\"]\n ignore = [0] # only ignore ignore class\n min_points = 1 # for this example we care about all points\n\n sem_pred_0, inst_pred_0, sem_gt_0, inst_gt_0 = gen_psuedo_labels(50)\n sem_pred_1, inst_pred_1, sem_gt_1, inst_gt_1 = gen_psuedo_labels(51)\n\n # evaluator\n evaluator = PanopticEval(classes, ignore=ignore, min_points=1)\n evaluator.addBatch(sem_pred_0, inst_pred_0, sem_gt_0, inst_gt_0)\n evaluator.addBatch(sem_pred_1, inst_pred_1, sem_gt_1, inst_gt_1)\n\n evaluator_0 = PanopticEval(classes, ignore=ignore, min_points=1)\n 
evaluator_0.addBatch(sem_pred_0, inst_pred_0, sem_gt_0, inst_gt_0)\n \n evaluator_1 = PanopticEval(classes, ignore=ignore, min_points=1)\n evaluator_1.addBatch(sem_pred_1, inst_pred_1, sem_gt_1, inst_gt_1)\n\n pq, sq, rq, all_pq, all_sq, all_rq = evaluator.getPQ()\n iou, all_iou = evaluator.getSemIoU()\n\n # [PANOPTIC EVAL] IGNORE: [0]\n # [PANOPTIC EVAL] INCLUDE: [1 2 3 4]\n # TOTALS\n # PQ: 0.47916666666666663\n # SQ: 0.5520833333333333\n # RQ: 0.6666666666666666\n # IoU: 0.5476190476190476\n # Class ignore \t PQ: 0.0 SQ: 0.0 RQ: 0.0 IoU: 0.0\n # Class grass \t PQ: 0.6666666666666666 SQ: 0.6666666666666666 RQ: 1.0 IoU: 0.6666666666666666\n # Class sky \t PQ: 0.6666666666666666 SQ: 0.6666666666666666 RQ: 1.0 IoU: 0.6666666666666666\n # Class person \t PQ: 0.5833333333333333 SQ: 0.875 RQ: 0.6666666666666666 IoU: 0.8571428571428571\n # Class dog \t PQ: 0.0 SQ: 0.0 RQ: 0.0 IoU: 0.0\n\n print(\"TOTALS\")\n print(\"PQ:\", pq.item(), pq.item() == 0.47916666666666663)\n print(\"SQ:\", sq.item(), sq.item() == 0.5520833333333333)\n print(\"RQ:\", rq.item(), rq.item() == 0.6666666666666666)\n print(\"IoU:\", iou.item(), iou.item() == 0.5476190476190476)\n for i, (pq, sq, rq, iou) in enumerate(zip(all_pq, all_sq, all_rq, all_iou)):\n print(\"Class\", cl_strings[i], \"\\t\", \"PQ:\", pq.item(), \"SQ:\", sq.item(), \"RQ:\", rq.item(), \"IoU:\", iou.item())\n\n\n pq, sq, rq, all_pq, all_sq, all_rq = evaluator_0.getPQ()\n iou, all_iou = evaluator_0.getSemIoU()\n\n # [PANOPTIC EVAL] IGNORE: [0]\n # [PANOPTIC EVAL] INCLUDE: [1 2 3 4]\n # TOTALS\n # PQ: 0.47916666666666663\n # SQ: 0.5520833333333333\n # RQ: 0.6666666666666666\n # IoU: 0.5476190476190476\n # Class ignore \t PQ: 0.0 SQ: 0.0 RQ: 0.0 IoU: 0.0\n # Class grass \t PQ: 0.6666666666666666 SQ: 0.6666666666666666 RQ: 1.0 IoU: 0.6666666666666666\n # Class sky \t PQ: 0.6666666666666666 SQ: 0.6666666666666666 RQ: 1.0 IoU: 0.6666666666666666\n # Class person \t PQ: 0.5833333333333333 SQ: 0.875 RQ: 0.6666666666666666 IoU: 0.8571428571428571\n # Class dog \t PQ: 0.0 SQ: 0.0 RQ: 0.0 IoU: 0.0\n\n print(\"TOTALS\")\n print(\"PQ:\", pq.item(), pq.item() == 0.47916666666666663)\n print(\"SQ:\", sq.item(), sq.item() == 0.5520833333333333)\n print(\"RQ:\", rq.item(), rq.item() == 0.6666666666666666)\n print(\"IoU:\", iou.item(), iou.item() == 0.5476190476190476)\n for i, (pq, sq, rq, iou) in enumerate(zip(all_pq, all_sq, all_rq, all_iou)):\n print(\"Class\", cl_strings[i], \"\\t\", \"PQ:\", pq.item(), \"SQ:\", sq.item(), \"RQ:\", rq.item(), \"IoU:\", iou.item())\n\n\n pq, sq, rq, all_pq, all_sq, all_rq = evaluator_1.getPQ()\n iou, all_iou = evaluator_1.getSemIoU()\n\n # [PANOPTIC EVAL] IGNORE: [0]\n # [PANOPTIC EVAL] INCLUDE: [1 2 3 4]\n # TOTALS\n # PQ: 0.47916666666666663\n # SQ: 0.5520833333333333\n # RQ: 0.6666666666666666\n # IoU: 0.5476190476190476\n # Class ignore \t PQ: 0.0 SQ: 0.0 RQ: 0.0 IoU: 0.0\n # Class grass \t PQ: 0.6666666666666666 SQ: 0.6666666666666666 RQ: 1.0 IoU: 0.6666666666666666\n # Class sky \t PQ: 0.6666666666666666 SQ: 0.6666666666666666 RQ: 1.0 IoU: 0.6666666666666666\n # Class person \t PQ: 0.5833333333333333 SQ: 0.875 RQ: 0.6666666666666666 IoU: 0.8571428571428571\n # Class dog \t PQ: 0.0 SQ: 0.0 RQ: 0.0 IoU: 0.0\n\n print(\"TOTALS\")\n print(\"PQ:\", pq.item(), pq.item() == 0.47916666666666663)\n print(\"SQ:\", sq.item(), sq.item() == 0.5520833333333333)\n print(\"RQ:\", rq.item(), rq.item() == 0.6666666666666666)\n print(\"IoU:\", iou.item(), iou.item() == 0.5476190476190476)\n for i, (pq, sq, rq, iou) in enumerate(zip(all_pq, 
all_sq, all_rq, all_iou)):\n print(\"Class\", cl_strings[i], \"\\t\", \"PQ:\", pq.item(), \"SQ:\", sq.item(), \"RQ:\", rq.item(), \"IoU:\", iou.item())\n\n import pickle\n with open('test.pkl', 'wb') as f:\n pickle.dump(evaluator_0, f)\n\n with open('test.pkl', 'rb') as f:\n evaluator_read = pickle.load(f) \n evaluator_1.merge(evaluator_read)\n\n pq, sq, rq, all_pq, all_sq, all_rq = evaluator_1.getPQ()\n iou, all_iou = evaluator_1.getSemIoU()\n\n # [PANOPTIC EVAL] IGNORE: [0]\n # [PANOPTIC EVAL] INCLUDE: [1 2 3 4]\n # TOTALS\n # PQ: 0.47916666666666663\n # SQ: 0.5520833333333333\n # RQ: 0.6666666666666666\n # IoU: 0.5476190476190476\n # Class ignore \t PQ: 0.0 SQ: 0.0 RQ: 0.0 IoU: 0.0\n # Class grass \t PQ: 0.6666666666666666 SQ: 0.6666666666666666 RQ: 1.0 IoU: 0.6666666666666666\n # Class sky \t PQ: 0.6666666666666666 SQ: 0.6666666666666666 RQ: 1.0 IoU: 0.6666666666666666\n # Class person \t PQ: 0.5833333333333333 SQ: 0.875 RQ: 0.6666666666666666 IoU: 0.8571428571428571\n # Class dog \t PQ: 0.0 SQ: 0.0 RQ: 0.0 IoU: 0.0\n\n print(\"TOTALS\")\n print(\"PQ:\", pq.item(), pq.item() == 0.47916666666666663)\n print(\"SQ:\", sq.item(), sq.item() == 0.5520833333333333)\n print(\"RQ:\", rq.item(), rq.item() == 0.6666666666666666)\n print(\"IoU:\", iou.item(), iou.item() == 0.5476190476190476)\n for i, (pq, sq, rq, iou) in enumerate(zip(all_pq, all_sq, all_rq, all_iou)):\n print(\"Class\", cl_strings[i], \"\\t\", \"PQ:\", pq.item(), \"SQ:\", sq.item(), \"RQ:\", rq.item(), \"IoU:\", iou.item())\n" ]
[ [ "numpy.maximum", "numpy.logical_and", "numpy.unique", "numpy.stack", "numpy.array", "numpy.zeros", "numpy.sum", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jusjusjus/phac-python
[ "107c1e3f2f80972ff675754af9b38e271f5005b9" ]
[ "phac/hilbert.py" ]
[ "import numpy as np\n\nfrom .util import trapezoid, _hilbert\n\n\nclass SegmentedSignal:\n\n def __init__(self, nsegment: int, noverlap: int, n: int = None,\n arr: np.ndarray = None, dtype=np.float64):\n self.arr = np.zeros(n, dtype=dtype) if arr is None else arr\n self.nsegment = nsegment\n self.noverlap = noverlap\n\n @property\n def size(self) -> int:\n return self.arr.size\n\n @property\n def segment_minus_overlap(self) -> int:\n return self.nsegment-self.noverlap\n\n @property\n def num_segments(self) -> int:\n return int(np.ceil(self.size/self.segment_minus_overlap))\n\n def segment(self, n: int) -> np.ndarray:\n m = n*self.segment_minus_overlap\n return self.arr[m:m+self.nsegment]\n\n def add_to_segment(self, n: int, arr: np.ndarray):\n m = n*self.segment_minus_overlap\n self.arr[m:m+self.nsegment] += arr\n\n\ndef hilbert(arr, nsegment: int = 8192, noverlap: int = 1024) -> np.ndarray:\n # If size is smaller then segmentation size ..\n if arr.size < nsegment:\n return _hilbert(arr)\n\n arr = SegmentedSignal(nsegment, noverlap, arr=arr)\n ans = SegmentedSignal(nsegment, noverlap, n=arr.size, dtype=np.complex)\n trapz = trapezoid(nsegment, noverlap)\n\n # first segment is not cut in the beginning\n segment = _hilbert(arr.segment(0))\n segment[noverlap:] *= trapz[noverlap:]\n ans.add_to_segment(0, segment)\n # rest of the segments are treated as regular (This is not 100% correct\n # for the last segment where special cases should be treated.)\n for i in range(1, arr.num_segments):\n transformed = _hilbert(arr.segment(i))\n ans.add_to_segment(i, trapz[:transformed.size]*transformed)\n\n return ans.arr\n" ]
[ [ "numpy.ceil", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jeremyjordan/flower-classifier
[ "812003a12cefcc9bc8d68aa8167d3dc95b562d5f" ]
[ "tests/models/test_baseline.py" ]
[ "import pytest\nimport torch\n\nfrom flower_classifier.models.baseline import BaselineResnet\n\nN_CLASSES = 102\n\n\[email protected](scope=\"module\")\ndef network():\n network = BaselineResnet(n_classes=N_CLASSES)\n return network\n\n\[email protected](\n \"batch_size, img_height, img_width\",\n [(1, 256, 256), (8, 100, 300), (1, 33, 33)],\n)\ndef test_expected_input_shape(network: torch.nn.Module, batch_size: int, img_height: int, img_width: int):\n example_input_array = torch.zeros(batch_size, 3, img_height, img_width)\n _ = network(example_input_array)\n\n\[email protected](\"img_height, img_width\", [(4, 4), (32, 32)])\ndef test_input_too_small(network: torch.nn.Module, img_height: int, img_width: int):\n example_input_array = torch.zeros(1, 3, img_height, img_width)\n with pytest.raises(ValueError):\n _ = network(example_input_array)\n\n\[email protected](\n \"img_height, img_width\",\n [(256, 256), (32, 48)],\n)\ndef test_input_no_batch_dim(network: torch.nn.Module, img_height: int, img_width: int):\n example_input_array = torch.zeros(3, img_height, img_width)\n with pytest.raises(RuntimeError):\n _ = network(example_input_array)\n\n\[email protected](\"batch_size, img_height, img_width\", [(1, 256, 256), (8, 100, 300), (1, 33, 33)])\ndef test_expected_output_shape(network: torch.nn.Module, batch_size: int, img_height: int, img_width: int):\n example_input_array = torch.zeros(batch_size, 3, img_height, img_width)\n outputs = network(example_input_array)\n assert outputs.shape[0] == batch_size\n assert outputs.shape[1] == N_CLASSES\n" ]
[ [ "torch.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
uoguelph-mlrg/Theano-MPI
[ "4bf0ebc167967dc3cb0969d4b12e304ef11d724a" ]
[ "theanompi/models/alex_net.py" ]
[ "# This version of alex_net.py is modified based on the theano_alexnet project. See the original project here:\n# https://github.com/uoguelph-mlrg/theano_alexnet, and its copy right:\n# Copyright (c) 2014, Weiguang Ding, Ruoyan Wang, Fei Mao and Graham Taylor\n# All rights reserved.\n\nimport numpy as np\n\nimport hickle as hkl\n\n# model hyperparams\nn_epochs = 70\nmomentum = 0.90\nweight_decay = 0.0005\nbatch_size = 128\nfile_batch_size = 128\nlearning_rate = 0.01\n# size=1 converge at 1320 6.897861; if avg and not scale lr by size, then size=2 will converge at: 2360 6.898975\nlr_policy = 'step'\nlr_step = [20, 40, 60]\n\nuse_momentum = True\nuse_nesterov_momentum = False\n\n#cropping hyperparams\ninput_width = 227\ninput_height = 227\n\n# apparently, training converges better with batch_crop_mirror=False. \n# 1200 6.898191 vs 1320 6.892865\nbatch_crop_mirror = False \nrand_crop = True\n\nimage_mean = 'img_mean'\ndataname = 'imagenet'\n\n# conv\nlib_conv='cudnn' # cudnn or corrmm\n\nmonitor_grad = False\n\nseed_weight_on_pid = False\n\nclass AlexNet(object):\n\n def __init__(self, config):\n\n self.verbose = config['verbose']\n self.rank = config['rank'] # will be used in sharding and distinguish rng\n self.size = config['size']\n self.no_paraload=False\n try: \n self.no_paraload = config['no_paraload']\n except:\n pass\n \n import theano\n theano.config.on_unused_input = 'warn'\n self.name = 'AlexNet'\n \n # data\n from theanompi.models.data import ImageNet_data\n self.data = ImageNet_data(verbose=False)\n self.channels = self.data.channels # 'c' mean(R,G,B) = (103.939, 116.779, 123.68)\n self.input_width = input_width # '0' single scale training 224\n self.input_height = input_height # '1' single scale training 224\n # if self.size>1: # only use avg\n# self.batch_size = batch_size/self.size\n# else: # TODO find out if this works better\n self.batch_size = batch_size # 'b\n self.file_batch_size = file_batch_size\n self.n_softmax_out = self.data.n_class\n \n # mini batching and other data parallel common routine\n self.data.batch_data(file_batch_size)\n self.data.extend_data(rank=self.rank, size=self.size)\n self.data.shuffle_data(mode='train', common_seed=1234)\n self.data.shuffle_data(mode='val')\n self.data.shard_data(mode='train', rank=self.rank, size=self.size) # to update data.n_batch_train\n self.data.shard_data(mode='val', rank=self.rank, size=self.size) # to update data.n_batch_val\n #self.data.shuffle_data()\n \n # training related\n self.n_epochs = n_epochs\n self.epoch = 0\n self.step_idx = 0\n self.mu = momentum # def: 0.9 # momentum\n self.use_momentum = use_momentum\n self.use_nesterov_momentum = use_nesterov_momentum\n self.eta = weight_decay #0.0002 # weight decay\n self.monitor_grad = monitor_grad\n \n self.base_lr = np.float32(learning_rate)\n self.shared_lr = theano.shared(self.base_lr)\n self.shared_x = theano.shared(np.zeros((\n 3,\n self.input_width,#self.data.width, \n self.input_height,#self.data.height,\n file_batch_size\n ), \n dtype=theano.config.floatX), \n borrow=True) \n self.shared_y = theano.shared(np.zeros((file_batch_size,), \n dtype=int), borrow=True) \n # slice batch if needed\n import theano.tensor as T \n subb_ind = T.iscalar('subb') # sub batch index\n self.subb_ind = subb_ind\n self.shared_x_slice = self.shared_x[:,:,:,subb_ind*self.batch_size:(subb_ind+1)*self.batch_size]\n self.shared_y_slice = self.shared_y[subb_ind*self.batch_size:(subb_ind+1)*self.batch_size]\n \n # ##################### BUILD NETWORK ##########################\n # allocate 
symbolic variables for the data\n # 'rand' is a random array used for random cropping/mirroring of data\n \n self.build_model()\n self.output = self.output_layer.output\n from theanompi.models.layers2 import get_params, get_layers, count_params\n self.layers = get_layers(lastlayer = self.output_layer)\n self.params,self.weight_types = get_params(self.layers)\n count_params(self.params, verbose=self.verbose)\n self.grads = T.grad(self.cost,self.params)\n\n # To be compiled\n self.compiled_train_fn_list = []\n self.train_iter_fn = None\n self.val_iter_fn = None\n \n # iter related\n self.n_subb = file_batch_size//batch_size\n self.current_t = 0 # current filename pointer in the filename list\n self.last_one_t = False # if pointer is pointing to the last filename in the list\n self.subb_t = 0 # sub-batch index\n \n self.current_v=0\n self.last_one_v=False\n self.subb_v=0\n \n # preprocessing\n self.batch_crop_mirror = batch_crop_mirror\n self.input_width = input_width\n \n if self.data.para_load and not self.no_paraload:\n \n self.data.spawn_load()\n self.data.para_load_init(self.shared_x, input_width, input_height, \n rand_crop, batch_crop_mirror)\n \n \n def build_model(self):\n \n if self.verbose: print(self.name)\n\n # start graph construction from scratch\n import theano.tensor as T\n if seed_weight_on_pid:\n import theanompi.models.layers2 as layers\n import os\n layers.rng = np.random.RandomState(os.getpid())\n from theanompi.models.layers2 import (ConvPoolLRN,Dropout,FC, \n Dimshuffle, Crop, Subtract,\n Softmax,Flatten,LRN, Constant, Normal)\n \n \n self.x = T.ftensor4('x')\n self.y = T.lvector('y')\n self.lr = T.scalar('lr')\n \n # subtract_layer = Subtract(input=self.x,\n # input_shape=(self.channels,\n # self.data.width,\n # self.data.height,\n # self.batch_size),\n # subtract_arr = self.data.rawdata[4],\n # printinfo = self.verbose\n # )\n #\n # crop_layer = Crop(input=subtract_layer,\n # output_shape=(self.channels,\n # self.input_width,\n # self.input_height,\n # self.batch_size),\n # flag_batch=batch_crop_mirror,\n # printinfo = self.verbose\n # )\n \n convpool_layer1 = ConvPoolLRN(input=self.x, #crop_layer,\n input_shape=(self.channels,\n self.input_width,\n self.input_height,\n self.batch_size),\n \n filter_shape=(3, 11, 11, 96),\n convstride=4, padsize=0, group=1,\n poolsize=3, poolstride=2,\n b=0.0, lrn=True,\n lib_conv=lib_conv,\n printinfo = self.verbose\n #output_shape = (96, 27, 27, batch_size)\n )\n\n convpool_layer2 = ConvPoolLRN(input=convpool_layer1,\n #input_shape=(96, 27, 27, batch_size),\n filter_shape=(96, 5, 5, 256),\n convstride=1, padsize=2, group=2,\n poolsize=3, poolstride=2,\n b=0.1, lrn=True,\n lib_conv=lib_conv,\n printinfo = self.verbose\n #output_shape=(256, 13, 13, batch_size),\n )\n\n\n convpool_layer3 = ConvPoolLRN(input=convpool_layer2,\n #input_shape=(256, 13, 13, batch_size),\n filter_shape=(256, 3, 3, 384),\n convstride=1, padsize=1, group=1,\n poolsize=1, poolstride=0,\n b=0.0, lrn=False,\n lib_conv=lib_conv,\n printinfo = self.verbose\n #output_shape=(384, 13, 13, batch_size),\n )\n\n convpool_layer4 = ConvPoolLRN(input=convpool_layer3,\n #input_shape=(384, 13, 13, batch_size),\n filter_shape=(384, 3, 3, 384),\n convstride=1, padsize=1, group=2,\n poolsize=1, poolstride=0,\n b=0.1, lrn=False,\n lib_conv=lib_conv,\n printinfo = self.verbose\n #output_shape=(384, 13, 13, batch_size),\n )\n\n convpool_layer5 = ConvPoolLRN(input=convpool_layer4,\n #input_shape=(384, 13, 13, batch_size),\n filter_shape=(384, 3, 3, 256),\n convstride=1, padsize=1, 
group=2,\n poolsize=3, poolstride=2,\n b=0.0, lrn=False,\n lib_conv=lib_conv,\n printinfo = self.verbose\n #output_shape=(256, 6, 6, batch_size),\n )\n shuffle = Dimshuffle(input=convpool_layer5,\n new_axis_order=(3,0,1,2),\n printinfo=self.verbose\n )\n \n fc_layer6_input = Flatten(input=shuffle,\n #input_shape=(batch_size, 256, 6, 6),\n axis = 2,\n printinfo=self.verbose\n )\n \n fc_layer6 = FC(input=fc_layer6_input, \n # n_in=9216,\n n_out=4096,\n W=Normal((fc_layer6_input.output_shape[1], 4096), std=0.005),\n b=Constant((4096,), val=0.1),\n printinfo = self.verbose\n )\n\n dropout_layer6 = Dropout(input=fc_layer6, \n # n_in=4096,\n n_out=fc_layer6.output_shape[1], \n prob_drop=0.5,\n printinfo = self.verbose)\n\n fc_layer7 = FC(input=dropout_layer6, \n # n_in=4096,\n n_out=4096,\n W = Normal((dropout_layer6.output_shape[1], 4096), std=0.005),\n b = Constant((4096,), val=0.1),\n printinfo = self.verbose\n )\n\n dropout_layer7 = Dropout(input=fc_layer7, \n #n_in=4096, \n n_out=fc_layer7.output_shape[1],\n prob_drop=0.5,\n printinfo = self.verbose)\n\n softmax_layer8 = Softmax(input=dropout_layer7, \n #n_in=4096, \n n_out=self.n_softmax_out,\n W = Normal((dropout_layer7.output_shape[1], \n self.n_softmax_out), mean=0, std=0.01),\n b = Constant((self.n_softmax_out,),val=0),\n printinfo = self.verbose)\n \n self.output_layer = softmax_layer8\n \n self.cost = softmax_layer8.negative_log_likelihood(self.y) \n self.error = softmax_layer8.errors(self.y)\n self.error_top_5 = softmax_layer8.errors_top_x(self.y)\n \n \n \n def compile_train(self, *args):\n \n # args is a list of dictionaries\n \n if self.verbose: print('compiling training function...')\n \n import theano\n \n for arg_list in args:\n self.compiled_train_fn_list.append(theano.function(**arg_list))\n \n if self.monitor_grad:\n \n norms = [grad.norm(L=2) for grad in self.grads]\n import theano.tensor as T\n norms = T.log10(norms)\n \n self.get_norm = theano.function([self.subb_ind], [T.sum(norms), T.max(norms)],\n givens=[(self.x, self.shared_x_slice), \n (self.y, self.shared_y_slice)]\n )\n def compile_inference(self):\n\n if self.verbose: print('compiling inference function...')\n \n import theano\n \n self.inf_fn = theano.function([self.x],self.output)\n \n def compile_val(self):\n\n if self.verbose: print('compiling validation function...')\n \n import theano\n \n self.val_fn = theano.function([self.subb_ind], [self.cost,self.error,self.error_top_5], updates=[], \n givens=[(self.x, self.shared_x_slice),\n (self.y, self.shared_y_slice)]\n )\n \n def compile_iter_fns(self):\n \n import time\n \n start = time.time()\n \n from theanompi.lib.opt import pre_model_iter_fn\n\n pre_model_iter_fn(self,self.size)\n \n if self.verbose: print('Compile time: %.3f s' % (time.time()-start))\n \n def reset_iter(self, mode):\n \n '''used at the begininig of another mode'''\n \n if mode=='train':\n\n self.current_t = 0\n self.subb_t=0\n self.last_one_t = False\n else:\n\n self.current_v = 0\n self.subb_v=0\n self.last_one_v = False\n \n if self.data.para_load:\n \n self.data.icomm.isend(mode,dest=0,tag=40)\n \n def train_iter(self, count,recorder):\n \n '''use the train_iter_fn compiled'''\n '''use parallel loading for large or remote data'''\n\n \n if self.current_t==0 and self.subb_t == 0: \n self.data.shuffle_data(mode='train',common_seed=self.epoch)\n self.data.shard_data(mode='train',rank=self.rank, size=self.size)\n \n img= self.data.train_img_shard\n labels = self.data.train_labels_shard\n\n mode = 'train'\n function = self.train_iter_fn\n \n 
# print len(img), 'current_t: %d, subb_t: %d' % (self.current_t,self.subb_t)\n \n if self.subb_t == 0: # load the whole file into shared_x when loading sub-batch 0 of each file.\n \n recorder.start()\n \n # parallel loading of shared_x\n if self.data.para_load:\n \n icomm = self.data.icomm\n \n if self.current_t == 0:\n \n # 3.0 give mode signal to adjust loading mode between train and val\n icomm.isend('train',dest=0,tag=40)\n # 3.1 give load signal to load the very first file\n icomm.isend(img[self.current_t],dest=0,tag=40)\n \n \n if self.current_t == self.data.n_batch_train - 1:\n self.last_one_t = True\n # Only to get the last copy_finished signal from load\n icomm.isend(img[self.current_t],dest=0,tag=40) \n else:\n self.last_one_t = False\n # 4. give preload signal to load next file\n icomm.isend(img[self.current_t+1],dest=0,tag=40)\n \n # 5. wait for the batch to be loaded into shared_x\n msg = icomm.recv(source=0,tag=55) #\n assert msg == 'copy_finished'\n \n \n else:\n \n img_mean = self.data.rawdata[4]\n img_std = self.data.rawdata[5]\n import hickle as hkl\n arr = (hkl.load(img[self.current_t]) - img_mean)/255./img_std\n\n from theanompi.models.data.utils import crop_and_mirror\n\n arr = crop_and_mirror(arr, mode, \n rand_crop, \n batch_crop_mirror, \n input_width)\n \n self.shared_x.set_value(arr)\n \n if self.current_t == self.data.n_batch_train - 1:\n self.last_one_t = True\n else:\n self.last_one_t = False\n \n \n # direct loading of shared_y\n self.shared_y.set_value(labels[self.current_t])\n \n \n recorder.end('wait')\n \n recorder.start()\n \n if self.verbose: \n if self.monitor_grad: \n #print np.array(self.get_norm(self.subb_t))\n print(np.array(self.get_norm(self.subb_t)).tolist()[:2])\n \n cost,error= function(self.subb_t)\n \n for p in self.params: p.container.value.sync()\n \n recorder.train_error(count, cost, error)\n recorder.end('calc')\n\n \n if (self.subb_t+1)//self.n_subb == 1: # test if next sub-batch is in another file\n \n if self.last_one_t == False:\n self.current_t+=1\n else:\n self.current_t=0\n \n self.subb_t=0\n else:\n self.subb_t+=1\n \n def val_iter(self, count,recorder):\n \n '''use the val_iter_fn compiled'''\n \n if self.current_v==0 and self.subb_v == 0:\n self.data.shuffle_data(mode='val')\n self.data.shard_data(mode='val',rank=self.rank, size=self.size)\n \n img= self.data.val_img_shard\n labels = self.data.val_labels_shard\n \n mode='val'\n function=self.val_iter_fn\n \n if self.subb_v == 0: # load the whole file into shared_x when loading sub-batch 0 of each file.\n \n # parallel loading of shared_x\n if self.data.para_load:\n \n icomm = self.data.icomm\n \n if self.current_v == 0:\n \n # 3.0 give mode signal to adjust loading mode between train and val\n icomm.isend('val',dest=0,tag=40)\n # 3.1 give load signal to load the very first file\n icomm.isend(img[self.current_v],dest=0,tag=40)\n \n \n if self.current_v == self.data.n_batch_val - 1:\n \n self.last_one_v = True\n # Only to get the last copy_finished signal from load\n icomm.isend(img[self.current_v],dest=0,tag=40) \n \n else:\n \n self.last_one_v = False\n # 4. give preload signal to load next file\n icomm.isend(img[self.current_v+1],dest=0,tag=40)\n \n \n # 5. 
wait for the batch to be loaded into shared_x\n msg = icomm.recv(source=0,tag=55) #\n assert msg == 'copy_finished'\n \n \n else:\n \n img_mean = self.data.rawdata[4]\n img_std = self.data.rawdata[5]\n import hickle as hkl\n arr = (hkl.load(img[self.current_v]) - img_mean)/255./img_std\n\n from theanompi.models.data.utils import crop_and_mirror\n\n arr = crop_and_mirror(arr, mode, \n rand_crop, \n batch_crop_mirror, \n input_width)\n \n # arr = np.rollaxis(arr,0,4)\n \n self.shared_x.set_value(arr)\n \n \n # direct loading of shared_y \n self.shared_y.set_value(labels[self.current_v])\n \n \n if self.current_v == self.data.n_batch_val - 1:\n self.last_one_v = True\n else:\n self.last_one_v = False\n \n from theanompi.models.layers2 import Dropout, Crop \n Dropout.SetDropoutOff()\n Crop.SetRandCropOff()\n cost,error,error_top5 = function(self.subb_v)\n Dropout.SetDropoutOn()\n Crop.SetRandCropOn()\n \n recorder.val_error(count, cost, error, error_top5)\n \n if (self.subb_v+1)//self.n_subb == 1: # test if next sub-batch is in another file\n \n if self.last_one_v == False:\n self.current_v+=1\n else:\n self.current_v=0\n \n self.subb_v=0\n else:\n self.subb_v+=1\n \n def adjust_hyperp(self, epoch):\n \n 'to be called once per epoch'\n \n if lr_policy == 'step':\n \n if epoch in lr_step: \n \n tuned_base_lr = self.shared_lr.get_value() /10.\n \n self.shared_lr.set_value(np.float32(tuned_base_lr))\n \n def cleanup(self):\n \n if self.data.para_load:\n \n self.data.para_load_close()\n \nif __name__ == '__main__':\n \n raise RuntimeError('to be tested using test_model.py:\\n$ python test_model.py alex_net AlexNet')\n\n" ]
[ [ "numpy.zeros", "numpy.float32" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
skyInGitHub/audio_adversarial_examples
[ "b5f63fc30cc2c59b4812d00fa974b92afd6e2a0b" ]
[ "classify.py" ]
[ "## classify.py -- actually classify a sequence with DeepSpeech\n##\n## Copyright (C) 2017, Nicholas Carlini <[email protected]>.\n##\n## This program is licenced under the BSD 2-Clause licence,\n## contained in the LICENCE file in this directory.\n\nimport numpy as np\nimport tensorflow as tf\nimport argparse\n\nimport scipy.io.wavfile as wav\n\nimport time\nimport os\nos.environ['CUDA_VISIBLE_DEVICES'] = ''\nimport sys\nfrom collections import namedtuple\nsys.path.append(\"DeepSpeech\")\nimport DeepSpeech\n\ntry:\n import pydub\n import struct\nexcept:\n print(\"pydub was not loaded, MP3 compression will not work\")\n\nfrom tf_logits import get_logits\n\n\n# These are the tokens that we're allowed to use.\n# The - token is special and corresponds to the epsilon\n# value in CTC decoding, and can not occur in the phrase.\ntoks = \" abcdefghijklmnopqrstuvwxyz'-\"\n\ndef main_classify(input, restore_path):\n # parser = argparse.ArgumentParser(description=None)\n # parser.add_argument('--in', type=str, dest=\"input\",\n # required=True,\n # help=\"Input audio .wav file(s), at 16KHz (separated by spaces)\")\n # parser.add_argument('--restore_path', type=str,\n # required=True,\n # help=\"Path to the DeepSpeech checkpoint (ending in model0.4.1)\")\n # args = parser.parse_args()\n # while len(sys.argv) > 1:\n # sys.argv.pop()\n with tf.Session() as sess:\n if input.split(\".\")[-1] == 'mp3':\n raw = pydub.AudioSegment.from_mp3(input)\n audio = np.array([struct.unpack(\"<h\", raw.raw_data[i:i+2])[0] for i in range(0,len(raw.raw_data),2)])\n elif input.split(\".\")[-1] == 'wav' or input.split(\".\")[-1] == 'WAV':\n _, audio = wav.read(input)\n else:\n raise Exception(\"Unknown file format\")\n N = len(audio)\n new_input = tf.placeholder(tf.float32, [1, N])\n lengths = tf.placeholder(tf.int32, [1])\n\n with tf.variable_scope(\"\", reuse=tf.AUTO_REUSE):\n logits = get_logits(new_input, lengths)\n\n saver = tf.train.Saver()\n saver.restore(sess, restore_path)\n\n decoded, _ = tf.nn.ctc_beam_search_decoder(logits, lengths, merge_repeated=False, beam_width=500)\n\n # print('logits shape', logits.shape)\n length = (len(audio)-1)//320\n l = len(audio)\n r = sess.run(decoded, {new_input: [audio],\n lengths: [length]})\n\n print(\"-\"*80)\n print(\"-\"*80)\n\n wav_name = input.split('/')[-1]\n print(\"Classification of \", wav_name)\n print(\"\".join([toks[x] for x in r[0].values]))\n # print(\"-\"*80)\n # print(\"-\"*80)\n\n output_text = str(\"\".join([toks[x] for x in r[0].values]))\n return wav_name, output_text\n\n\ndef main():\n parser = argparse.ArgumentParser(description=None)\n parser.add_argument('--in', type=str, dest=\"input\",\n required=True,\n help=\"Input audio .wav file(s), at 16KHz (separated by spaces)\")\n parser.add_argument('--restore_path', type=str,\n required=True,\n help=\"Path to the DeepSpeech checkpoint (ending in model0.4.1)\")\n args = parser.parse_args()\n while len(sys.argv) > 1:\n sys.argv.pop()\n with tf.Session() as sess:\n if args.input.split(\".\")[-1] == 'mp3':\n raw = pydub.AudioSegment.from_mp3(args.input)\n audio = np.array([struct.unpack(\"<h\", raw.raw_data[i:i+2])[0] for i in range(0,len(raw.raw_data),2)])\n elif args.input.split(\".\")[-1] == 'wav' or args.input.split(\".\")[-1] == 'WAV':\n _, audio = wav.read(args.input)\n else:\n raise Exception(\"Unknown file format\")\n N = len(audio)\n new_input = tf.placeholder(tf.float32, [1, N])\n lengths = tf.placeholder(tf.int32, [1])\n\n with tf.variable_scope(\"\", reuse=tf.AUTO_REUSE):\n logits = 
get_logits(new_input, lengths)\n\n saver = tf.train.Saver()\n saver.restore(sess, args.restore_path)\n\n decoded, _ = tf.nn.ctc_beam_search_decoder(logits, lengths, merge_repeated=False, beam_width=500)\n\n print('logits shape', logits.shape)\n length = (len(audio)-1)//320\n l = len(audio)\n r = sess.run(decoded, {new_input: [audio],\n lengths: [length]})\n\n print(\"-\"*80)\n print(\"-\"*80)\n\n print(\"Classification:\")\n print(\"\".join([toks[x] for x in r[0].values]))\n print(\"-\"*80)\n print(\"-\"*80)\n\n output_text = \"\".join([toks[x] for x in r[0].values])\n return output_text\n\n\nif __name__ == \"__main__\":\n\n# parser = argparse.ArgumentParser(description=None)\n# parser.add_argument('--in', type=str, dest=\"input\",\n# required=True,\n# help=\"Input audio .wav file(s), at 16KHz (separated by spaces)\")\n# parser.add_argument('--restore_path', type=str,\n# required=True,\n# help=\"Path to the DeepSpeech checkpoint (ending in model0.4.1)\")\n# args = parser.parse_args()\n \n# output_text = main_classify(args.input, args.restore_path)\n# print(output_text)\n\n main()\n" ]
[ [ "tensorflow.nn.ctc_beam_search_decoder", "tensorflow.placeholder", "tensorflow.variable_scope", "tensorflow.Session", "tensorflow.train.Saver", "scipy.io.wavfile.read" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [ "1.10" ] } ]
senysenyseny16/pytorch
[ "e65a1edabb2b8e2bec752cba69cc3f434626a482" ]
[ "torch/fx/graph_module.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.overrides\nfrom torch.nn.modules.module import _addindent\nfrom torch.package import PackageImporter, PackageExporter\nimport linecache\nfrom typing import Type, Dict, List, Any, Union, Optional, Set\nfrom .graph import Graph, _is_from_torch, _custom_builtins, PythonCode\nfrom torch.package import Importer, sys_importer\nimport copy\nimport itertools\nimport sys\nimport traceback\nfrom pathlib import Path\nimport os\nimport warnings\n\n# normal exec loses the source code, however we can patch\n# the linecache module to still recover it.\n# using exec_with_source will add it to our local cache\n# and then tools like TorchScript will be able to get source info.\n_next_id = 0\ndef exec_with_source(src: str, globals: Dict[str, Any]):\n global _next_id\n key = f'<eval_with_key_{_next_id}>'\n _next_id += 1\n _eval_cache[key] = [line + '\\n' for line in src.splitlines()]\n exec(compile(src, key, 'exec'), globals)\n\n# patch linecache so that any code we exec using exec_with_source\n# works with inspect\n_eval_cache : Dict[str, List[str]] = {}\n_orig_getlines = linecache.getlines\ndef patched_getline(*args, **kwargs):\n if args[0] in _eval_cache:\n return _eval_cache[args[0]]\n return _orig_getlines(*args, **kwargs)\nlinecache.getlines = patched_getline\n\n\ndef _forward_from_src(src: str, globals: Dict[str, Any]):\n # avoid mutating the passed in dict\n globals_copy = globals.copy()\n exec_with_source(src, globals_copy)\n forward_fn = globals_copy['forward']\n del globals_copy['forward']\n return forward_fn\n\n\ndef _format_import_statement(name: str, obj: Any, importer: Importer) -> str:\n if name in _custom_builtins:\n return _custom_builtins[name].import_str\n if _is_from_torch(name):\n return 'import torch'\n module_name, attr_name = importer.get_name(obj)\n return f'from {module_name} import {attr_name} as {name}'\n\n\ndef _format_import_block(globals: Dict[str, Any], importer: Importer):\n import_strs: Set[str] = set()\n for name, obj in globals.items():\n import_strs.add(_format_import_statement(name, obj, importer))\n return '\\n'.join(import_strs)\n\n\ndef reduce_graph_module(body: Dict[Any, Any], import_block: str) -> torch.nn.Module:\n # BC: attribute name was changed from `code` to `_code` to facilitate\n # making `code` into a property and adding a docstring to it\n fn_src = body.get('_code') or body['code']\n forward = _forward_from_src(import_block + fn_src, {})\n return _deserialize_graph_module(forward, body, None)\n\n\ndef reduce_package_graph_module(importer: PackageImporter,\n body: Dict[Any, Any],\n generated_module_name: str) -> torch.nn.Module:\n forward = importer.import_module(generated_module_name).forward\n return _deserialize_graph_module(forward, body, importer)\n\n\ndef _deserialize_graph_module(forward, body: Dict[Any, Any], importer: Optional[PackageImporter]) -> torch.nn.Module:\n \"\"\"\n Deserialize a GraphModule given the dictionary of the original module,\n using the code to reconstruct the graph. 
We delete the actual graph before\n saving the dictionary so that changes to the in-memory graph format do not\n get serialized.\n \"\"\"\n # We create a dummy class here because symbolic_trace pulls the forward()\n # function off of the class, rather than the instance\n class CodeOnlyModule(torch.nn.Module):\n def __init__(self, body):\n super().__init__()\n self.__dict__ = body\n\n # Try to retrieve the forward source in a backward-compatible way\n CodeOnlyModule.forward = forward\n\n from ._symbolic_trace import Tracer\n\n # we shouldn't trace into any of the submodules, they were not\n # because they were not traced in the original GraphModule\n class KeepModules(Tracer):\n def is_leaf_module(self, _: torch.nn.Module, __: str) -> bool:\n return True\n\n com = CodeOnlyModule(body)\n return GraphModule(com, KeepModules().trace(com))\n\n# copy an attribute value with qualified name 'target' from 'from_module' to 'to_module'\n# This installs empty Modules where none exist yet if they are subpaths of target\ndef _copy_attr(from_module: torch.nn.Module, to_module: torch.nn.Module, target: str):\n *prefix, field = target.split('.')\n for item in prefix:\n f = getattr(from_module, item)\n t = getattr(to_module, item, None)\n if f is t:\n # we have already installed one of its parents\n # (e.g. target = root.linear.weight, but we have already installed root.linear)\n # once we install a parent, we no longer need to copy the children\n # since all the needed properties will already be present\n return\n\n if t is None:\n t = torch.nn.Module()\n setattr(to_module, item, t)\n from_module, to_module = f, t\n\n orig = getattr(from_module, field)\n # If it is a tensor and not a parameter attribute of a module, it should be a named buffer.\n # So, we register it as a named buffer in the target module.\n if isinstance(orig, torch.Tensor) and not isinstance(orig, torch.nn.Parameter):\n to_module.register_buffer(field, orig)\n else:\n setattr(to_module, field, orig)\n\n# Assign attribute 'from_obj' to the qualified name 'target' on 'to_module\n# This installs empty Modules where none exist yet if they are subpaths of target\ndef _assign_attr(from_obj: Any, to_module: torch.nn.Module, target: str):\n *prefix, field = target.split('.')\n for item in prefix:\n t = getattr(to_module, item, None)\n\n if t is None:\n t = torch.nn.Module()\n setattr(to_module, item, t)\n to_module = t\n\n # If it is a tensor and not a parameter attribute of a module, it should be a named buffer.\n # So, we register it as a named buffer in the target module.\n if isinstance(from_obj, torch.Tensor) and not isinstance(from_obj, torch.nn.Parameter):\n to_module.register_buffer(field, from_obj)\n else:\n setattr(to_module, field, from_obj)\n\nclass GraphModule(torch.nn.Module):\n \"\"\"\n GraphModule is an nn.Module generated from an fx.Graph. Graphmodule has a\n ``graph`` attribute, as well as ``code`` and ``forward`` attributes generated\n from that ``graph``.\n\n .. warning::\n\n When ``graph`` is reassigned, ``code`` and ``forward`` will be automatically\n regenerated. 
However, if you edit the contents of the ``graph`` without reassigning\n the ``graph`` attribute itself, you must call ``recompile()`` to update the generated\n code.\n\n \"\"\"\n def __new__(cls: 'Type[GraphModule]', *args, **kwargs):\n # each instance of a graph module needs its own forward method\n # so create a new singleton class for each instance.\n # it is a subclass of the user-defined class, the only difference\n # is an extra layer to install the forward method\n\n class GraphModuleImpl(cls): # type: ignore[misc, valid-type]\n pass\n return super().__new__(GraphModuleImpl)\n\n def __init__(self,\n root: Union[torch.nn.Module, Dict[str, Any]],\n graph: Graph,\n class_name: str = 'GraphModule'):\n \"\"\"\n Construct a GraphModule.\n\n Args:\n\n root (Union[torch.nn.Module, Dict[str, Any]):\n ``root`` can either be an nn.Module instance or a Dict mapping strings to any attribute type.\n In the case that ``root`` is a Module, any references to Module-based objects (via qualified\n name) in the Graph's Nodes' ``target`` field will be copied over from the respective place\n within ``root``'s Module hierarchy into the GraphModule's module hierarchy.\n In the case that ``root`` is a dict, the qualified name found in a Node's ``target`` will be\n looked up directly in the dict's keys. The object mapped to by the Dict will be copied\n over into the appropriate place within the GraphModule's module hierarchy.\n\n graph (Graph): ``graph`` contains the nodes this GraphModule should use for code generation\n\n class_name (str): ``name`` denotes the name of this GraphModule for debugging purposes. If it's unset, all\n error messages will report as originating from ``GraphModule``. It may be helpful to set this\n to ``root``'s original name or a name that makes sense within the context of your transform.\n\n \"\"\"\n super().__init__()\n self.__class__.__name__ = class_name\n if isinstance(root, torch.nn.Module):\n if hasattr(root, 'training'):\n self.training = root.training\n for node in graph.nodes:\n if node.op in ['get_attr', 'call_module']:\n assert isinstance(node.target, str)\n _copy_attr(root, self, node.target)\n elif isinstance(root, dict):\n targets_to_copy = []\n for node in graph.nodes:\n if node.op in ['get_attr', 'call_module']:\n assert isinstance(node.target, str)\n if node.target not in root:\n raise RuntimeError('Node ' + str(node) + ' referenced target ' + node.target +\n ' but that target was not provided in ``root``!')\n targets_to_copy.append(node.target)\n # Sort targets in ascending order of the # of atoms.\n # This will ensure that less deeply nested attributes are assigned\n # before more deeply nested attributes. For example, foo.bar\n # will be assigned before foo.bar.baz. Otherwise, we might assign\n # the user-provided ``foo.bar`` and wipe out the previously-assigned\n # ``foo.bar.baz``\n targets_to_copy.sort(key=lambda t: t.count('.'))\n for target_to_copy in targets_to_copy:\n _assign_attr(root[target_to_copy], self, target_to_copy)\n else:\n raise RuntimeError('Unsupported type ' + str(root) + ' passed for root!')\n\n self.graph = graph\n\n # TorchScript breaks trying to compile the graph setter because of the\n # continued string literal. 
Issue here: https://github.com/pytorch/pytorch/issues/44842\n #\n # Shouldn't be an issue since these methods shouldn't be used in TorchScript anyway\n __jit_unused_properties__ = ['graph']\n\n @property\n def graph(self) -> Graph:\n \"\"\"\n Return the ``Graph`` underlying this ``GraphModule``\n \"\"\"\n return self._graph\n\n @graph.setter\n def graph(self, g : Graph) -> None:\n \"\"\"\n Set the underlying ``Graph`` for this ``GraphModule``. This will internally\n recompile the ``GraphModule`` so that the generated ``forward()`` function\n corresponds to ``g``\n \"\"\"\n assert isinstance(g, Graph), f'Expected a Graph instance, but got {type(g)}'\n self._graph = g\n g.owning_module = self\n self.recompile()\n\n def to_folder(self, folder: Union[str, os.PathLike], module_name : str = \"FxModule\"):\n \"\"\"Dumps out module to ``folder`` with ``module_name`` so that it can be\n imported with ``from <folder> import <module_name>``\n\n Args:\n\n folder (Union[str, os.PathLike]): The folder to write the code out to\n\n module_name (str): Top-level name to use for the ``Module`` while\n writing out the code\n \"\"\"\n folder = Path(folder)\n Path(folder).mkdir(exist_ok=True)\n torch.save(self.state_dict(), folder / 'state_dict.pt')\n tab = \" \" * 4\n model_str = f\"\"\"\nimport torch\nfrom torch.nn import *\nclass {module_name}(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\"\"\"\n\n def _gen_model_repr(module_name: str, module: torch.nn.Module) -> Optional[str]:\n safe_reprs = [nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d, nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d]\n if type(module) in safe_reprs:\n return f\"{module.__repr__()}\"\n else:\n return None\n\n blobified_modules = []\n for module_name, module in self.named_children():\n module_str = _gen_model_repr(module_name, module)\n if module_str is None:\n module_file = folder / f'{module_name}.pt'\n torch.save(module, module_file)\n blobified_modules.append(module_name)\n module_repr = module.__repr__().replace('\\r', ' ').replace('\\n', ' ')\n module_str = f\"torch.load(r'{module_file}') # {module_repr}\"\n model_str += f\"{tab*2}self.{module_name} = {module_str}\\n\"\n\n for buffer_name, buffer in self._buffers.items():\n model_str += f\"{tab*2}self.register_buffer('{buffer_name}', torch.empty({list(buffer.shape)}))\\n\"\n\n for param_name, param in self._parameters.items():\n model_str += f\"{tab*2}self.{param_name} = torch.nn.Parameter(torch.empty({list(buffer.shape)}))\\n\"\n\n model_str += f\"{tab*2}self.load_state_dict(torch.load(r'{folder}/state_dict.pt'))\\n\"\n model_str += f\"{_addindent(self.code, 4)}\\n\"\n\n module_file = folder / 'module.py'\n module_file.write_text(model_str)\n\n init_file = folder / '__init__.py'\n init_file.write_text('from .module import *')\n\n if len(blobified_modules) > 0:\n warnings.warn(\"Was not able to save the following children modules as reprs -\"\n f\"saved as pickled files instead: {blobified_modules}\")\n\n def add_submodule(self, target: str, m: torch.nn.Module) -> bool:\n \"\"\"\n Adds the given submodule to ``self``.\n\n This installs empty Modules where none exist yet if they are\n subpaths of ``target``.\n\n Args:\n target: The fully-qualified string name of the new submodule\n (See example in ``nn.Module.get_submodule`` for how to\n specify a fully-qualified string.)\n m: The submodule itself; the actual object we want to\n install in the current Module\n\n Return:\n bool: Whether or not the submodule could be inserted. 
For\n this method to return True, each object in the chain\n denoted by ``target`` must either a) not exist yet,\n or b) reference an ``nn.Module`` (not a parameter or\n other attribute)\n\n \"\"\"\n *prefix, field = target.split('.')\n mod: torch.nn.Module = self\n\n for item in prefix:\n\n submod = getattr(mod, item, None)\n\n if submod is None:\n submod = torch.nn.Module()\n setattr(mod, item, submod)\n\n if not isinstance(submod, torch.nn.Module):\n return False\n\n mod = submod\n\n mod.add_module(field, m)\n return True\n\n def delete_submodule(self, target: str) -> bool:\n \"\"\"\n Deletes the given submodule from ``self``.\n\n The module will not be deleted if ``target`` is not a valid\n target.\n\n Args:\n target: The fully-qualified string name of the new submodule\n (See example in ``nn.Module.get_submodule`` for how to\n specify a fully-qualified string.)\n\n Returns:\n bool: Whether or not the target string referenced a\n submodule we want to delete. A return value of ``False``\n means that the ``target`` was not a valid reference to\n a submodule.\n \"\"\"\n atoms = target.split(\".\")\n path, target_submod = atoms[:-1], atoms[-1]\n mod: torch.nn.Module = self\n\n # Get the parent module\n for item in path:\n\n if not hasattr(mod, item):\n return False\n\n mod = getattr(mod, item)\n\n if not isinstance(mod, torch.nn.Module):\n return False\n\n if not hasattr(mod, target_submod):\n return False\n\n if not isinstance(getattr(mod, target_submod), torch.nn.Module):\n return False\n\n delattr(mod, target_submod)\n return True\n\n def delete_all_unused_submodules(self) -> None:\n \"\"\"\n Deletes all unused submodules from ``self``.\n\n A Module is considered \"used\" if any one of the following is\n true:\n 1. It has children that are used\n 2. Its forward is called directly via a ``call_module`` node\n 3. It has a non-Module attribute that is used from a\n ``get_attr`` node\n\n This method can be called to clean up an ``nn.Module`` without\n manually calling ``delete_submodule`` on each unused submodule.\n \"\"\"\n used: List[str] = []\n\n for node in self.graph.nodes:\n\n if node.op == \"call_module\" or node.op == \"get_attr\":\n\n # A list of strings representing the different parts\n # of the path. For exmaple, `foo.bar.baz` gives us\n # [\"foo\", \"bar\", \"baz\"]\n fullpath = node.target.split(\".\")\n\n # If we're looking at multiple parts of a path, join\n # join them with a dot. Otherwise, return that single\n # element without doing anything to it.\n def join_fn(x: str, y: str) -> str:\n return '.'.join([x, y] if y else [x])\n\n # Progressively collect all the names of intermediate\n # modules. For example, if we have the target\n # `foo.bar.baz`, we'll add `foo`, `foo.bar`, and\n # `foo.bar.baz` to the list.\n for path in itertools.accumulate(fullpath, join_fn):\n used.append(path)\n\n to_delete = [name for name, _ in self.named_modules()\n if name not in used]\n\n for name in to_delete:\n self.delete_submodule(name)\n\n @property\n def code(self) -> str:\n \"\"\"\n Return the Python code generated from the ``Graph`` underlying this\n ``GraphModule``.\n \"\"\"\n if not hasattr(self, '_code'):\n raise RuntimeError('Code has not been generated! Please report a bug to PyTorch')\n return self._code\n\n def recompile(self) -> PythonCode:\n \"\"\"\n Recompile this GraphModule from its ``graph`` attribute. 
This should be\n called after editing the contained ``graph``, otherwise the generated\n code of this ``GraphModule`` will be out of date.\n \"\"\"\n if self._graph._pytree_info is not None:\n self._in_spec = self._graph._pytree_info.in_spec\n self._out_spec = self._graph._pytree_info.out_spec\n python_code = self._graph.python_code(root_module='self')\n self._code = python_code.src\n\n cls = type(self)\n cls.forward = _forward_from_src(self._code, python_code.globals)\n\n cls_call = cls.__call__\n\n # Previously, if an error occurred when valid\n # symbolically-traced code was run with an invalid input, the\n # user would see the source of the error as coming from\n # `File \"<eval_with_key_N\">`, where N is some number. We use\n # this function to generate a more informative error message. We\n # return the traceback itself, a message explaining that the\n # error occurred in a traced Module's generated forward\n # function, and five lines of context surrounding the faulty\n # line\n def generate_error_message(frame_summary: traceback.FrameSummary) -> str:\n # auxiliary variables (for readability)\n err_lineno = frame_summary.lineno\n err_line_len = len(frame_summary.line)\n all_src_lines = _eval_cache[frame_summary.filename]\n\n # constituent substrings of the error message\n tb_repr = traceback.format_exc()\n custom_msg = (\"Call using an FX-traced Module, \"\n f\"line {err_lineno} of the traced Module's \"\n \"generated forward function:\")\n before_err = \"\".join(all_src_lines[err_lineno - 2 : err_lineno])\n marker = \"~\" * err_line_len + \"~~~ <--- HERE\"\n err_and_after_err = \"\\n\".join(all_src_lines[err_lineno : err_lineno + 2])\n\n # joined message\n return \"\\n\".join([tb_repr, custom_msg, before_err, marker, err_and_after_err])\n\n def wrapped_call(self, *args, **kwargs):\n try:\n return cls_call(self, *args, **kwargs)\n except Exception as e:\n assert e.__traceback__\n topmost_framesummary: traceback.FrameSummary = \\\n traceback.StackSummary.extract(traceback.walk_tb(e.__traceback__))[-1] # type: ignore[arg-type]\n if \"eval_with_key\" in topmost_framesummary.filename:\n print(generate_error_message(topmost_framesummary),\n file=sys.stderr)\n raise e.with_traceback(None)\n\n cls.__call__ = wrapped_call\n\n return python_code\n\n def __reduce_package__(self, exporter: PackageExporter):\n generated_module_name = f'fx-generated._{exporter.get_unique_id()}'\n python_code = self.recompile()\n import_block = _format_import_block(python_code.globals, exporter.importer)\n module_code = import_block + self.code\n exporter.save_source_string(generated_module_name, module_code)\n\n dict_without_graph = self.__dict__.copy()\n del dict_without_graph['_graph']\n return (reduce_package_graph_module, (dict_without_graph, generated_module_name))\n\n def __reduce__(self):\n \"\"\"\n Serialization of GraphModule. We serialize only the generated code, not\n the underlying ``Graph``. 
This is because ``Graph`` does not have on-disk\n backward-compatibility guarantees, whereas Python source code does.\n On the deserialization side, we symbolically trace through the generated\n code to regenerate the underlying ``Graph``\n \"\"\"\n dict_without_graph = self.__dict__.copy()\n python_code = self.recompile()\n import_block = _format_import_block(python_code.globals, sys_importer)\n del dict_without_graph['_graph']\n return (reduce_graph_module, (dict_without_graph, import_block))\n\n # because __reduce__ is defined for serialization,\n # we need to define deepcopy otherwise it will call __reduce__\n # and cause symbolic tracing to occur every time we try to copy the object\n def __deepcopy__(self, memo):\n fake_mod = torch.nn.Module()\n fake_mod.__dict__ = copy.deepcopy(self.__dict__)\n graph_copy = copy.deepcopy(self.graph)\n return GraphModule(fake_mod, graph_copy)\n\n def __copy__(self):\n return GraphModule(self, self.graph)\n\n def __str__(self) -> str:\n orig_str = super().__str__()\n return '\\n'.join([orig_str, self._code])\n\n# workarounds for issues in __torch_function__\n\n# WAR for __torch_function__ not handling tensor lists,\n# fix is in https://github.com/pytorch/pytorch/pull/34725\n# orig_cat = torch.cat\n# def patched_cat(*args, **kwargs):\n# tensors = args[0]\n# for t in tensors:\n# if isinstance(t, Proxy):\n# return t.__torch_function__(patched_cat, (), args, kwargs)\n# return orig_cat(*args, **kwargs)\n# patched_cat.__module__ = 'torch'\n# patched_cat.__name__ = 'cat'\n# torch.cat = patched_cat\n" ]
[ [ "torch.nn.Module", "torch.nn.modules.module._addindent", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
youngerous/BERT-sentiment-classifier
[ "8c6a466e0a23e3b60212be15437b877b7e17bcb6" ]
[ "main.py" ]
[ "import multiprocessing\nfrom argparse import ArgumentParser, Namespace\nfrom typing import Tuple\n\nimport nlp\nimport pytorch_lightning as pl\nimport torch\nimport transformers\nfrom pytorch_lightning.loggers import TensorBoardLogger\n\n\nclass IMDBSentimentClassifier(pl.LightningModule):\n def __init__(self, args: Namespace) -> None:\n super(IMDBSentimentClassifier, self).__init__()\n self.lr = args.lr\n self.momentum = args.momentum\n self.batch_size = args.batch_size\n self.loss = torch.nn.CrossEntropyLoss(reduction=\"none\")\n\n self.pretrain = args.pretrain\n self.model = transformers.BertForSequenceClassification.from_pretrained(\n self.pretrain\n )\n self.seq_length = args.seq_length\n\n self.split = args.split\n self.debug = True if args.debug else False\n self.save_hyperparameters()\n\n def prepare_data(self) -> None:\n tokenizer = transformers.BertTokenizer.from_pretrained(self.pretrain)\n\n def _prepare_ds(split: str) -> torch.utils.data.Dataset:\n dset = nlp.load_dataset(\n \"imdb\", split=f\"{split}[:{self.batch_size if self.debug else '5%' }]\"\n )\n\n class TokenizedDataset(torch.utils.data.Dataset):\n \"\"\" This is a slow data processing method.\n \n huggingface nlp library does not handle pickling data shards\n when using tokenizer.map() function.\n \"\"\"\n\n def __init__(self, seq_length: int) -> None:\n super(TokenizedDataset, self).__init__()\n self.dset = dset\n self.max_length = seq_length\n\n def __len__(self) -> int:\n return len(self.dset)\n\n def _tokenize(self, x: dict) -> dict:\n x[\"label\"] = torch.tensor(x[\"label\"])\n x[\"input_ids\"] = torch.tensor(\n tokenizer.encode(\n x[\"text\"],\n max_length=self.max_length,\n pad_to_max_length=True,\n truncation=True,\n )\n )\n return x\n\n def __getitem__(self, idx: int) -> dict:\n return self._tokenize(self.dset[idx])\n\n return TokenizedDataset(self.seq_length)\n\n self.train_ds, self.test_ds = map(_prepare_ds, (\"train\", \"test\"))\n\n def forward(self, input_ids):\n mask = (input_ids != 0).float()\n (logits,) = self.model(input_ids, mask)\n return logits\n\n def training_step(self, batch, batch_idx):\n logits = self(batch[\"input_ids\"])\n loss = self.loss(logits, batch[\"label\"]).mean()\n return {\"loss\": loss, \"log\": {\"train_loss\": loss}}\n\n def validation_step(self, batch, batch_idx):\n logits = self(batch[\"input_ids\"])\n loss = self.loss(logits, batch[\"label\"])\n\n # accuracy\n correct = 0\n _, predicted = torch.max(logits, 1)\n correct += predicted.eq(batch[\"label\"]).sum().item()\n acc = torch.tensor(100.0 * (correct / batch[\"label\"].size(0)))\n if self.on_gpu:\n acc = acc.cuda(batch[\"input_ids\"].device.index)\n\n return {\"loss\": loss, \"acc\": acc}\n\n def validation_epoch_end(self, outputs):\n loss = torch.cat([output[\"loss\"] for output in outputs], 0).mean()\n acc = torch.stack([output[\"acc\"] for output in outputs], 0).mean()\n out = {\"val_loss\": loss, \"val_acc\": acc}\n return {**out, \"log\": out}\n\n def train_dataloader(self) -> torch.utils.data.DataLoader:\n return torch.utils.data.DataLoader(\n self.train_ds, batch_size=self.batch_size, drop_last=True, shuffle=True\n )\n\n def val_dataloader(self) -> torch.utils.data.DataLoader:\n return torch.utils.data.DataLoader(\n self.test_ds, batch_size=self.batch_size, drop_last=False, shuffle=False\n )\n\n def configure_optimizers(self) -> torch.optim:\n return torch.optim.SGD(self.parameters(), lr=self.lr, momentum=self.momentum)\n\n @staticmethod\n def add_model_specific_args(parent_parser) -> ArgumentParser:\n parser = 
ArgumentParser(parents=[parent_parser])\n parser.add_argument(\"--epoch\", default=10, type=int)\n parser.add_argument(\n \"-j\",\n \"--workers\",\n default=4,\n type=int,\n metavar=\"N\",\n help=\"number of data loading workers (default: 4)\",\n )\n parser.add_argument(\n \"-b\",\n \"--batch-size\",\n default=8,\n type=int,\n metavar=\"N\",\n help=\"mini-batch size (default: 8), this is the total \"\n \"batch size of all GPUs on the current node when \"\n \"using Data Parallel or Distributed Data Parallel\",\n )\n parser.add_argument(\n \"--lr\",\n \"--learning-rate\",\n default=0.001,\n type=float,\n metavar=\"LR\",\n help=\"initial learning rate\",\n dest=\"lr\",\n )\n parser.add_argument(\n \"--momentum\", default=0.9, type=float, metavar=\"M\", help=\"momentum\"\n )\n\n parser.add_argument(\"--pretrain\", type=str, default=\"bert-base-uncased\")\n parser.add_argument(\"--seq_length\", default=512)\n parser.add_argument(\"--split\", default=\"train\")\n return parser\n\n\ndef main(args: Namespace) -> None:\n if args.seed is not None:\n pl.seed_everything(args.seed)\n\n if args.distributed_backend == \"ddp\":\n # When using a single GPU per process and per\n # DistributedDataParallel, we need to divide the batch size\n # ourselves based on the total number of GPUs we have\n args.batch_size = int(args.batch_size / max(1, args.gpus))\n args.workers = int(args.workers / max(1, args.gpus))\n\n model = IMDBSentimentClassifier(args)\n trainer = pl.Trainer.from_argparse_args(args)\n\n if args.evaluate:\n trainer.test(model)\n else:\n trainer.fit(model)\n\n\ndef run_cli() -> None:\n parent_parser = ArgumentParser(\n add_help=False\n ) # MUST add_help=False in order to conflict among child parsers\n parent_parser = pl.Trainer.add_argparse_args(\n parent_parser\n ) # extends existing argparse by default 'Trainer' attributes.\n parent_parser.add_argument(\n \"-d\", \"--debug\", action=\"store_true\", help=\"turn on debugging mode\"\n )\n parent_parser.add_argument(\n \"-e\",\n \"--evaluate\",\n dest=\"evaluate\",\n action=\"store_true\",\n help=\"evaluate model on validation set\",\n )\n parent_parser.add_argument(\n \"--seed\", type=int, default=711, help=\"seed for initializing training.\"\n )\n parser = IMDBSentimentClassifier.add_model_specific_args(parent_parser)\n parser.set_defaults(profiler=True, deterministic=True, max_epochs=90, gpus=1)\n args = parser.parse_args()\n main(args)\n\n\nif __name__ == \"__main__\":\n run_cli()\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.max", "torch.cat", "torch.utils.data.DataLoader", "torch.tensor", "torch.stack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
potato1996/IMGCaptioning
[ "23eff63e506cb785e2e4483f37f830ecc54edc3b" ]
[ "Decoder.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.models as models\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\nfrom torch.autograd import Variable\n\n\nclass Decoder(nn.Module):\n \"\"\" Decoder part(training) -- A RNN Decoder to produce the target captioning \"\"\"\n\n def __init__(self, vocab_size, input_size=512, hidden_size=512, num_layers=1, max_dec_len=16, drop_rate=0.2):\n \"\"\"\n Args:\n vocab_size (int) - Size of the vocabulary => given by xxx.py\n input_size (int) - Default: 512 - Size of the input to the LSTM\n hidden_size (int) - Default: 512 - Size of the output(and also the size of hidden state) of the LSTM\n num_layers (int) - Default: 1 - Number of layers in LSTM\n max_dec_len (int) - Default: 16 - Max decoding length\n drop_rate (float) - Default: 0.2 - drop out rate\n \n Returns:\n None\n \"\"\"\n super(Decoder, self).__init__()\n \"\"\" For LSTM, embedding size = input size, output size = state size = hidden size \"\"\"\n\n self.vocab_size = vocab_size\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.max_dec_len = max_dec_len\n self.drop_rate = drop_rate\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\n \"\"\"1. input embedding layer convert the input word index to a vector - word2vec\"\"\"\n self.input_embedding = nn.Embedding(vocab_size, input_size)\n\n \"\"\"2. LSTM layers, we will need to feed the output from Encoder as the first input to the LSTM, follow by <start> and other words\"\"\"\n self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)\n\n \"\"\"3. A single FC layer at the output of LSTM, mapping back into word\"\"\"\n self.output_fc = nn.Linear(hidden_size, vocab_size)\n\n \"\"\"4. Drop out layer before the laster FC\"\"\"\n self.dropout = nn.Dropout(self.drop_rate)\n\n self.init_weights()\n\n def init_weights(self):\n initrange = 0.1\n\n self.input_embedding.weight.data.uniform_(-initrange, initrange)\n\n self.output_fc.weight.data.uniform_(-initrange, initrange)\n self.output_fc.bias.data.fill_(0)\n\n def forward(self, img_embedding, input_caption, input_caption_lengths):\n \"\"\"\n Args:\n img_embedding (Tensor) - with size (batch_size, input_size)\n input_caption (Tensor) - with size (batch_size, max_seq_len)\n input_caption_lengths (List) - Indicate the VALID lengths in the second dimension in input_caption. Thus len(input_caption_lengths) should be batch_size\n \n Returns:\n outputs (Tensor) - result of current batch of sequences, with size (batch_size, max_seq_len + 1, hidden_size)\n \"\"\"\n\n # 0. Size checking\n batch_size = img_embedding.size(0)\n assert img_embedding.size(1) == self.input_size, \"ERROR: img embedding size mismatch\"\n assert input_caption.size(0) == batch_size, \"ERROR: input caption batch size mismatch\"\n assert len(input_caption_lengths) == batch_size, \"ERROR: input_caption_lengths size mismatch\"\n\n # 1. Embed input caption(indices) into features\n input_embedding = self.input_embedding(input_caption) # (batch, max_len, input_size)\n\n # 2. put image features as the first input\n embeddings = torch.cat((img_embedding.unsqueeze(1), input_embedding), 1) # (batch, max_len + 1, input_size)\n\n # (3). Wo don't need to sort them here. We have already sorted them in out data loader \n # input_caption_lengths, perm_index = input_caption_lengths.sort(0, decending=True)\n # embeddings = embeddings[perm_index]\n\n # 4. 
Pack the sequence length into the input of LSTM\n packed = pack_padded_sequence(embeddings, input_caption_lengths, batch_first=True)\n\n # 5. flow through LSTM\n outputs, _ = self.lstm(packed)\n\n # 6. Unpack the sequence\n outputs, _ = pad_packed_sequence(outputs, batch_first=True) # (batch, max_len + 1, hidden_size)\n\n # (7). Corresponding to 3, we don't need to sort them back\n # _, unperm_index = perm_index.sort(0)\n # outputs = outputs[unperm_index] \n\n # 8. map back into vocab..\n outputs = self.output_fc(self.dropout(outputs)) # (batch, max_len + 1, vocab_size)\n\n # Maybe we will need to put softmax here?\n\n return outputs\n\n def sample(self, img_embedding, beam_width = 3):\n \"\"\" \n Inference code for Decoder\n - due to the nature of LSTM, we need to use a complete different buch of code\n \"\"\"\n # During inference we only allows batch_size = 1\n assert img_embedding.size(0) == 1, \"ERROR: only allows batch_size=1 at inference time\"\n \n if beam_width > 1:\n return self.sample_beam(img_embedding, beam_width)\n\n \"\"\" The codes below uses greedy search \"\"\"\n\n hiddens = None\n prediction_ids = []\n inputs = img_embedding.unsqueeze(1) # (1, 1, input_size)\n for i in range(self.max_dec_len + 1):\n \"\"\" produce the prediction of current symbol \"\"\"\n if i == 0:\n outputs, hiddens = self.lstm(inputs)\n else:\n if i == 1:\n # Assuming that 1 is the index of <start>\n inputs = torch.tensor([1], dtype=torch.long).to(self.device)\n inputs = inputs.unsqueeze(1) # (1, 1)\n inputs = self.input_embedding(inputs) # (1, 1, input_size)\n\n outputs, hiddens = self.lstm(inputs, hiddens) # (1, 1, hidden_size)\n\n outputs = self.output_fc(outputs.squeeze(1)) # (1, vocab_size)\n _, predicted = outputs.max(1) # (1)\n prediction_ids.append(predicted.cpu().data.tolist()[0])\n\n \"\"\" feed current symbol as the input of the next symbol \"\"\"\n inputs = self.input_embedding(predicted.view(1, 1)) # (1, 1, input_size)\n #inputs = inputs.unsqueeze(1) # (1, 1, input_size)\n\n return prediction_ids\n \n def sample_beam(self, img_embedding, beam_width):\n hypos = []\n \n # 1. the first input should be image\n inputs = img_embedding.unsqueeze(1) # (1, 1, input_size)\n\n # 2. run the first input through the lstm\n _, hiddens = self.lstm(inputs)\n\n # 3. The first input of sentense is <start>(1)\n inputs = torch.tensor([1], dtype=torch.long).to(self.device)\n inputs = inputs.unsqueeze(1) # (1, 1)\n inputs = self.input_embedding(inputs) # (1, 1, input_size)\n\n # 4. 
run the lstm through start\n outputs, hiddens = self.lstm(inputs, hiddens) # (1, 1, hidden_size)\n outputs = self.output_fc(outputs.squeeze(1)) #(1, vocab_size)\n\n # Get the starters right after <start>\n track_table = [[] for i in range(beam_width)]\n\n outputs = outputs.view(-1)\n scores = -F.log_softmax(outputs)\n curr_beam_scores, topk_idx = scores.topk(beam_width, largest=False) #[beam_width]\n\n next_word_idx = topk_idx % self.vocab_size\n prev_beam_id = topk_idx / self.vocab_size\n\n for beam_id in range(beam_width):\n track_table[beam_id].append((next_word_idx[beam_id].cpu().data.tolist(), prev_beam_id[beam_id].cpu().data.tolist()))\n hiddens = (hiddens[0].expand((1, beam_width, self.hidden_size)), hiddens[1].expand((1, beam_width, self.hidden_size)))\n \n # start beam search\n for seq_id in range(self.max_dec_len):\n # calculate next input\n inputs = next_word_idx.unsqueeze(1) #(beam, 1)\n inputs = self.input_embedding(inputs) #(beam, 1, input_size)\n\n # calculate next hidden\n next_hidden_0 = torch.zeros(hiddens[0].size()).to(self.device)\n next_hidden_1 = torch.zeros(hiddens[1].size()).to(self.device)\n for beam_id in range(beam_width):\n next_hidden_0[0][beam_id][:] = hiddens[0][0][prev_beam_id[beam_id]][:]\n next_hidden_1[0][beam_id][:] = hiddens[1][0][prev_beam_id[beam_id]][:]\n\n # run through lstm\n outputs, hiddens = self.lstm(inputs, (next_hidden_0, next_hidden_1))\n outputs = self.output_fc(outputs.squeeze(1))\n scores = -F.log_softmax(outputs, dim=1) # (beam, vocab_size)\n \n scores = curr_beam_scores.view(beam_width, 1).expand(beam_width, self.vocab_size) * scores\n\n # We have reached the max_dec_len, now back tracking\n if seq_id == self.max_dec_len-1:\n for beam_id in range(beam_width):\n prediction_ids = []\n next_beam_id = beam_id\n for track_seq_id in range(seq_id, -1, -1):\n track_word, track_beam_id = track_table[next_beam_id][track_seq_id]\n prediction_ids.append(track_word)\n next_beam_id = track_beam_id\n prediction_ids.reverse()\n hypos.append((scores[beam_id][2].cpu().data.tolist(), prediction_ids))\n break\n\n \n curr_beam_scores, topk_idx = scores.view(-1).topk(beam_width, largest=False) # (beam)\n \n next_word_idx = topk_idx % self.vocab_size\n prev_beam_id = topk_idx / self.vocab_size\n \n for beam_id in range(beam_width):\n track_table[beam_id].append((next_word_idx[beam_id].cpu().data.tolist(), prev_beam_id[beam_id].cpu().data.tolist()))\n\n # back tracking, if we meet end\n if next_word_idx[beam_id].cpu().data.tolist() == 2 or next_word_idx[beam_id].cpu().data.tolist() == 19:\n prediction_ids = []\n next_beam_id = beam_id\n for track_seq_id in range(seq_id + 1, -1, -1):\n track_word, track_beam_id = track_table[next_beam_id][track_seq_id]\n prediction_ids.append(track_word)\n next_beam_id = track_beam_id\n prediction_ids.reverse()\n hypos.append((curr_beam_scores[beam_id].cpu().data.tolist(), prediction_ids))\n \n hypos.sort(key=lambda x:x[0])\n return hypos[0][1]\n" ]
[ [ "torch.nn.Dropout", "torch.nn.functional.log_softmax", "torch.nn.LSTM", "torch.nn.utils.rnn.pack_padded_sequence", "torch.nn.Embedding", "torch.tensor", "torch.nn.Linear", "torch.nn.utils.rnn.pad_packed_sequence", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
03pie/SMPCUP2017
[ "956f97fce8620b3b0c35e6b3757347ede30c64ba", "956f97fce8620b3b0c35e6b3757347ede30c64ba" ]
[ "make_idf.py", "feature_extraction2.py" ]
[ "from sklearn.feature_extraction.text import TfidfVectorizer\nimport pandas as pd\nfrom segmentation import segment, stop_words\n# make_Tfidfvectorizer\ndef make_idf(corpus):\n\tvectorizer = TfidfVectorizer(stop_words=stop_words)\n\tvectorizer.fit_transform(corpus)\n\treturn vectorizer\n\nif __name__ == '__main__':\n with open(\"./data/blog_article_original.txt\", \"r\", encoding='utf-8') as fblog:\n text = fblog.readlines()\n # segmentation\n corpus = [' '.join(line) for line in segment(text)]\n # make idf.txt\n vec = make_idf(corpus)\n pd.DataFrame({'col1':vec.get_feature_names(), 'col2':vec.idf_}).to_csv(\"./model/idf.txt\", encoding='utf-8', sep=' ', index=None, header=None)\n", "import pandas as pd\nfrom datetime import datetime\n\nimport numpy as np\nimport scipy.stats as ss\nfrom sklearn import preprocessing\n\n\n\ndata_root = '/media/jyhkylin/本地磁盘1/study/数据挖掘竞赛/SMPCUP2017/'\npost_data = pd.read_table(data_root+'SMPCUP2017dataset/2_Post.txt' ,sep='\\001' ,names=['userID' ,'blogID' ,'date'])\nbrowse_data = pd.read_table(data_root+'SMPCUP2017dataset/3_Browse.txt' ,sep='\\001' ,names=['userID' ,'blogID' ,'date'])\ncomment_data = pd.read_table(data_root+'SMPCUP2017dataset/4_Comment.txt' ,sep='\\001' ,names=['userID' ,'blogID' ,'date'])\nvoteup_data = pd.read_table(data_root+'SMPCUP2017dataset/5_Vote-up.txt' ,sep='\\001' ,names=['userID' ,'blogID' ,'date'])\nvotedown_data = pd.read_table(data_root+'SMPCUP2017dataset/6_Vote-down.txt' ,sep='\\001' ,names=['userID' ,'blogID' ,'date'])\nfavorite_data = pd.read_table(data_root+'SMPCUP2017dataset/7_Favorite.txt' ,sep='\\001' ,names=['userID' ,'blogID' ,'date'])\nfollow_data = pd.read_table(data_root+'SMPCUP2017dataset/8_Follow.txt' ,sep='\\001' ,names=['userID1' ,'userID2'])\nletter_data = pd.read_table(data_root+'SMPCUP2017dataset/9_Letter.txt' ,sep='\\001' ,names=['userID1' ,'userID2' ,'date'])\n\nnames = locals()\nmainAct = ['post' ,'browse']\nsecondAct = ['comment' ,'voteup' ,'votedown' ,'favorite']\nrelAct = ['follow' ,'letter']\npassiveAct = ['browse' ,'comment' ,'voteup' ,'votedown' ,'favorite' ,'follow']\nuserList = list()\nfor act in mainAct+secondAct:\n userList= userList + names['%s_data'%act]['userID'].values.tolist() \nuserList = list(set(userList))\nactData = pd.DataFrame(index=userList)\nactData = actData.sort_index()\n\n#user-month form\nfor act in mainAct+secondAct:\n try:\n names['%s_data'%act]['date'] = names['%s_data'%act]['date'].map(lambda x: datetime.strptime(x ,'%Y-%m-%d %H:%M:%S.0') )\n except:\n try:\n names['%s_data'%act]['date'] = names['%s_data'%act]['date'].map(lambda x: datetime.strptime(x ,'%Y%m%d %H:%M:%S') )\n except:\n names['%s_data'%act]['date'] = names['%s_data'%act]['date'].map(lambda x: datetime.strptime(x ,'%Y-%m-%d %H:%M:%S') )\n names['%s_data'%act]['month'] = names['%s_data'%act]['date'].map(lambda x: x.month)\n names['%s_data'%act] = pd.DataFrame(names['%s_data'%act] ,columns=['userID' ,'blogID' ,'month'])\n names['%s_data'%act]['category'] = act\n\nfor act in mainAct+secondAct:\n names['%sTimeM'%act] = names['%s_data'%act].groupby(['userID' ,'month']).size().unstack()\n names['%sTimeM'%act] = names['%sTimeM'%act].fillna(0)\n\nletter_data['date'] = letter_data['date'].map(lambda x: datetime.strptime(x ,'%Y-%m-%d %H:%M:%S.0') )\nletter_data['date'] = letter_data['date'].map(lambda x: x.month)\nletter_data = letter_data.rename(columns={'date':'month'})\nletter_data = letter_data.drop_duplicates()\nvoteup_data = voteup_data.drop_duplicates()\nvotedown_data = votedown_data.drop_duplicates()\n\n#month 
matrix of convolution\nfor act in secondAct:\n names['%sTimePre'%act] = names['%sTimeM'%act]\nfor act in secondAct:\n names['%sTimeM'%act] = names['%sTimeM'%act] / browseTimeM\n names['%sTimeM'%act] = names['%sTimeM'%act].dropna(how='all')\n names['%sTimeM'%act] = names['%sTimeM'%act].fillna(0)\n names['%sTimeM'%act][names['%sTimeM'%act]>1] = 1\nsecondActSumTimeM = commentTimeM + voteupTimeM + votedownTimeM + favoriteTimeM\nsecondActSumTimeM = secondActSumTimeM.fillna(0)\nsecondActSumTimeM = commentTimeM.add(secondActSumTimeM ,fill_value=0) + \\\n voteupTimeM.add(secondActSumTimeM ,fill_value=0) + votedownTimeM.add(secondActSumTimeM ,fill_value=0) \\\n + favoriteTimeM.add(secondActSumTimeM ,fill_value=0)\n \nsecondActSumTimePre = commentTimePre + voteupTimePre + votedownTimePre + favoriteTimePre\nsecondActSumTimePre = secondActSumTimePre.fillna(0)\nsecondActSumTimePre = commentTimePre.add(secondActSumTimePre ,fill_value=0) + \\\n voteupTimePre.add(secondActSumTimePre ,fill_value=0) + votedownTimePre.add(secondActSumTimePre ,fill_value=0) \\\n + favoriteTimePre.add(secondActSumTimePre ,fill_value=0)\nsecondAct.append('secondActSum')\n\n#all behavior\nmainActSumTimeM = postTimeM + browseTimeM\nmainActSumTimeM = mainActSumTimeM.fillna(0)\nmainActSumTimeM = postTimeM.add(mainActSumTimeM ,fill_value=0) + browseTimeM.add(mainActSumTimeM ,fill_value=0)\nmainAct.append('mainActSum')\n\nallActSumTimeM = secondActSumTimePre + mainActSumTimeM\nallActSumTimeM = allActSumTimeM.fillna(0)\nallActSumTimeM = mainActSumTimeM.add(allActSumTimeM ,fill_value=0) + secondActSumTimePre.add(allActSumTimeM ,fill_value=0)\nmainAct.append('allActSum')\n\n\n#firt and second half year statistic \nfor act in mainAct:\n actData['%sFirstYear'%act] = names['%sTimeM'%act][1]\n actData['%sSecondYear'%act] = names['%sTimeM'%act][7]\n for i in range(2,7):\n actData['%sFirstYear'%act] += names['%sTimeM'%act][i]\n actData['%sSecondYear'%act] = names['%sTimeM'%act][i+6]\n\nfor act in secondAct:\n actData['%sFirstYear'%act] = names['%sTimeM'%act][1]\n actData['%sSecondYear'%act] = names['%sTimeM'%act][7]\n for i in range(2,7):\n actData['%sFirstYear'%act] += names['%sTimeM'%act][i]\n actData['%sSecondYear'%act] = names['%sTimeM'%act][i+6]\n\nfor act in secondAct:\n actData['%sPreFirstYear'%act] = names['%sTimePre'%act][1]\n actData['%sPreSecondYear'%act] = names['%sTimePre'%act][7]\n for i in range(2,7):\n actData['%sPreFirstYear'%act] += names['%sTimePre'%act][i]\n actData['%sPreSecondYear'%act] = names['%sTimePre'%act][i+6]\n\nactData = actData.fillna(0)\nstadata = pd.read_csv(data_root+'SMPCUP2017dataset/actStatisticData.csv').sort_index()\nstadata_new = pd.merge(actData ,stadata ,left_index=True ,right_on='userID' ,how='left')\n#stadataScale_new = stadata_new.apply(lambda x: (x - np.median(x)) / (np.std(x)))\n\nstadata_new.to_csv(data_root+'SMPCUP2017dataset/actStatisticData_new1.csv' ,index=False)\n#stadataScale_new.to_csv(data_root+'SMPCUP2017dataset/actStatisticDataScale_new1.csv' ,index=False)\n\n\n\n" ]
[ [ "sklearn.feature_extraction.text.TfidfVectorizer" ], [ "pandas.read_table", "pandas.merge", "pandas.read_csv", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
christophschuhmann/BLIP
[ "498f963762db65e7290eea02573e1749f955b3d0" ]
[ "caps.py" ]
[ "#@title Captioning Images of various Types { vertical-output: true }\n#hide\nimport os\nimport glob\nrep_pen=1.4\nfiles= glob.glob(\"./images/*.jpg\")\ntarget_dir= \"./captions/\"\ntry:\n os.mkdir(target_dir)\nexcept:\n pass\n\n\nfrom PIL import Image\nimport numpy as np\nimport torch\nimport clip\nimport language_tool_python\n\n\n\n\ndef cos_sim_2d(x, y):\n norm_x = x / np.linalg.norm(x, axis=1, keepdims=True)\n norm_y = y / np.linalg.norm(y, axis=1, keepdims=True)\n return np.matmul(norm_x, norm_y.T)\n\n\ndef clip_rank(image_pil,text_list, model,preprocess, clip_model=\"ViT-L/14\"):\n\n\n #device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n #model, preprocess = clip.load(clip_model, device=device)\n #model2, preprocess2 = clip.load(\"RN50x64\", device=device)\n\n \n\n\n similarities= []\n image = preprocess(image_pil).unsqueeze(0).to(device)\n #image2 = preprocess2(image_pil).unsqueeze(0).to(device)\n\n with torch.no_grad():\n image_features = model.encode_image(image).cpu().detach().numpy()\n #image_features2 = model2.encode_image(image2).cpu().detach().numpy()\n\n\n \n #print(cos_sim_2d(text_features, image_features))\n for txt in text_list:\n text = clip.tokenize(txt ).to(device)\n text_features = model.encode_text(text).cpu().detach().numpy()\n\n\n #text_features2 = model2.encode_text(text).cpu().detach().numpy()\n sim_= float(cos_sim_2d(text_features, image_features)[0]) \n\n #sim_= float(cos_sim_2d(text_features, image_features)[0]) + float(cos_sim_2d(text_features2, image_features2)[0])\n similarities.append(sim_)\n return similarities\n\n\n\n\n\nimport sys\nif 'google.colab' in sys.modules:\n print('Running in Colab.')\n #!pip3 install transformers==4.15.0 timm==0.4.12 fairscale==0.4.4\n #!git clone https://github.com/salesforce/BLIP\n #%cd BLIP\nfrom PIL import Image\nimport requests\nimport torch\nfrom torchvision import transforms\nfrom torchvision.transforms.functional import InterpolationMode\nimport time\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\nfrom models.blip import blip_decoder\nimport glob\nimage_size = 384\ntransform = transforms.Compose([\n transforms.Resize((image_size,image_size),interpolation=InterpolationMode.BICUBIC),\n transforms.ToTensor(),\n transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))\n ]) \n\n\n\n\n\ntool = language_tool_python.LanguageTool('en-US')\n\nmodel_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_large_caption.pth'\n \nmodel = blip_decoder(pretrained=model_url, image_size=384, vit='large')\nmodel.eval()\nmodel = model.to(device)\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\nmodel_vit_L, preprocess_vit_L = clip.load(\"ViT-L/14\", device=device)\nmodel_res64, preprocess_res64 = clip.load(\"RN50x64\", device=device)\n\nwith torch.no_grad():\n for f in files[:40]:\n start= time.time()\n print(f)\n raw_image = Image.open(f).convert('RGB') \n w,h = raw_image.size\n\n #display(raw_image.resize((200,int(200* h/w))))\n raw_image.save(target_dir+f.split(\"/\")[-1])\n image = transform(raw_image).unsqueeze(0).to(device) \n \n\n captions = []\n print(\"time before BLIP\")\n print(time.time()-start)\n start_blip= time.time()\n for topP in [0.1, 0.2, 0.3, 0.4, 0.5,0.6, 0.7]:\n #[0.05,0.1, 0.15, 0.2,0.25, 0.3,0.35, 0.4, 0.45, 0.5,0.55, 0.6,0.65, 0.7,0.75, 0.8,0.85, 0.9, 0.95]\n\n \n\n caption = model.generate(image, sample=True, max_length=30, min_length=10,top_p=topP,repetition_penalty=rep_pen)\n #def 
generate(self, image, sample=False, num_beams=3, max_length=30, min_length=10, top_p=0.9, repetition_penalty=1.0)\n captions.append(caption)\n\n for beam_n in [1,2,3,4,5,6,7,8]:\n #[0.05,0.1, 0.15, 0.2,0.25, 0.3,0.35, 0.4, 0.45, 0.5,0.55, 0.6,0.65, 0.7,0.75, 0.8,0.85, 0.9, 0.95]\n\n\n\n caption = model.generate(image, sample=False, num_beams=beam_n, max_length=30, min_length=10,repetition_penalty=rep_pen)\n #def generate(self, image, sample=False, num_beams=3, max_length=30, min_length=10, top_p=0.9, repetition_penalty=1.0)\n captions.append(caption)\n\n\n\n\n\n for topP in [0.1, 0.2, 0.3, 0.4, 0.5,0.6, 0.7]:\n #[0.05,0.1, 0.15, 0.2,0.25, 0.3,0.35, 0.4, 0.45, 0.5,0.55, 0.6,0.65, 0.7,0.75, 0.8,0.85, 0.9, 0.95]\n\n\n\n caption = model.generate(image, sample=True, max_length=45, min_length=30,top_p=topP,repetition_penalty=rep_pen)\n #def generate(self, image, sample=False, num_beams=3, max_length=30, min_length=10, top_p=0.9, repetition_penalty=1.0)\n captions.append(caption)\n\n for beam_n in [1,2,3,4,5,6,7,8]:\n #[0.05,0.1, 0.15, 0.2,0.25, 0.3,0.35, 0.4, 0.45, 0.5,0.55, 0.6,0.65, 0.7,0.75, 0.8,0.85, 0.9, 0.95]\n\n\n\n caption = model.generate(image, sample=False, num_beams=beam_n, max_length=45, min_length=30,repetition_penalty=rep_pen)\n #def generate(self, image, sample=False, num_beams=3, max_length=30, min_length=10, top_p=0.9, repetition_penalty=1.0)\n captions.append(caption)\n print(\"After BLIP\")\n print(time.time()-start_blip)\n \"\"\"\n for topP in [0.1, 0.2, 0.3, 0.4, 0.5,0.6, 0.7,0.8]:\n #[0.05,0.1, 0.15, 0.2,0.25, 0.3,0.35, 0.4, 0.45, 0.5,0.55, 0.6,0.65, 0.7,0.75, 0.8,0.85, 0.9, 0.95]\n\n with torch.no_grad():\n\n caption = model.generate(image, sample=True, max_length=60, min_length=45,top_p=topP,repetition_penalty=rep_pen)\n #def generate(self, image, sample=False, num_beams=3, max_length=30, min_length=10, top_p=0.9, repetition_penalty=1.0)\n captions.append(caption)\n\n for beam_n in [1,2,3,4,5,6]:\n \n #[0.05,0.1, 0.15, 0.2,0.25, 0.3,0.35, 0.4, 0.45, 0.5,0.55, 0.6,0.65, 0.7,0.75, 0.8,0.85, 0.9, 0.95]\n\n with torch.no_grad():\n\n caption = model.generate(image, sample=False, num_beams=beam_n, max_length=60, min_length=45,repetition_penalty=rep_pen)\n #def generate(self, image, sample=False, num_beams=3, max_length=30, min_length=10, top_p=0.9, repetition_penalty=1.0)\n captions.append(caption)\n \"\"\"\n best_cannidates=[]\n start_clip=time.time()\n sims= clip_rank(raw_image,captions,model_vit_L,preprocess_vit_L )\n argmax_ = np.argmax(np.asarray(sims))\n #print(\"Caption with highest sim\")\n #print (captions[argmax_][0])\n best_cannidates.append(captions[argmax_][0])\n #print(sims[argmax_])\n del sims[argmax_]\n del captions[argmax_]\n argmax_ = np.argmax(np.asarray(sims))\n #print(\"Caption with 2nd highest sim\")\n #print (captions[argmax_][0])\n best_cannidates.append(captions[argmax_][0])\n #print(sims[argmax_])\n del sims[argmax_]\n del captions[argmax_]\n argmax_ = np.argmax(np.asarray(sims))\n #print(\"Caption with 3nd highest sim\")\n #print (captions[argmax_][0])\n best_cannidates.append(captions[argmax_][0])\n del sims[argmax_]\n del captions[argmax_]\n argmax_ = np.argmax(np.asarray(sims))\n #print(\"Caption with 3nd highest sim\")\n #print (captions[argmax_][0])\n best_cannidates.append(captions[argmax_][0])\n #print(sims[argmax_])\n\n sims= clip_rank(raw_image,best_cannidates,model_res64,preprocess_res64)\n print(\"After CLIP\")\n print(time.time()-start_clip)\n start_textpro=time.time()\n argmax_ = np.argmax(np.asarray(sims))\n print(\"BEST CAPTION AFTER 
RANKING WITH CLIP ViT L 14 & RESNET50x64:\")\n print (best_cannidates[argmax_])\n text_result= best_cannidates[argmax_]+\"\\n\"\n matches = tool.check(text_result)\n text_result = language_tool_python.utils.correct(text_result, matches)\n\n del sims[argmax_]\n del best_cannidates[argmax_]\n argmax_ = np.argmax(np.asarray(sims))\n\n\n print(\"2ND BEST CAPTION AFTER RANKING WITH CLIP ViT L 14 & RESNET50x64:\")\n print (best_cannidates[argmax_])\n\n\n text_file = open(target_dir+f.split(\"/\")[-1].split(\".\")[0]+\".txt\", \"w\")\n \n matches = tool.check(best_cannidates[argmax_])\n final_text =text_result + language_tool_python.utils.correct(best_cannidates[argmax_], matches)\n #write string to file\n n = text_file.write(final_text)\n print(\"After grammar correction\")\n print(final_text)\n print(time.time()- start_textpro) \n #close file\n text_file.close()\n print(time.time()-start)\n\n" ]
[ [ "numpy.asarray", "numpy.matmul", "numpy.linalg.norm", "torch.no_grad", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
Hyperion-shuo/Agent-Ticket
[ "c9df0eba1250ac5c0b8372c191374c020f586b42" ]
[ "Wang/BrainDDPG.py" ]
[ "import tensorflow as tf\nimport numpy as np\nimport gym\nimport time\n\n\nnp.random.seed(1)\ntf.set_random_seed(1)\n\n##################### hyper parameters ####################\n\n# MAX_EPISODES = 200\n# MAX_EP_STEPS = 200\n# LR_A = 0.001 # learning rate for actor\n# LR_C = 0.001 # learning rate for critic\n# GAMMA = 0.9 # reward discount\nREPLACEMENT = [\n dict(name='soft', tau=0.01),\n dict(name='hard', rep_iter_a=600, rep_iter_c=500)\n][0] # you can try different target replacement strategies\nMEMORY_CAPACITY = 10000\n\nBATCH_SIZE = 32\nRENDER = False\nOUTPUT_GRAPH = False\nENV_NAME = 'Pendulum-v0'\n\n############################### DDPG #####################################\nclass DDPG(object):\n def __init__(self, a_dim, s_dim, a_bound, LR_A=0.001, LR_C=0.001, GAMMA=0.9 ,TAU=0.01):\n self.memory = np.zeros((MEMORY_CAPACITY, s_dim * 2 + a_dim + 1), dtype=np.float32)\n self.pointer = 0\n self.sess = tf.Session()\n\n self.a_dim, self.s_dim, self.a_bound = a_dim, s_dim, a_bound,\n self.S = tf.placeholder(tf.float32, [None, s_dim], 's')\n self.S_ = tf.placeholder(tf.float32, [None, s_dim], 's_')\n self.R = tf.placeholder(tf.float32, [None, 1], 'r')\n self.MEMORY_CAPACITY = MEMORY_CAPACITY\n self.var = 0.25\n\n self.a = self._build_a(self.S, )\n q = self._build_c(self.S, self.a, )\n a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Actor')\n c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Critic')\n ema = tf.train.ExponentialMovingAverage(decay=1 - TAU) # soft replacement\n\n def ema_getter(getter, name, *args, **kwargs):\n return ema.average(getter(name, *args, **kwargs))\n\n target_update = [ema.apply(a_params), ema.apply(c_params)] # soft update operation\n a_ = self._build_a(self.S_, reuse=True, custom_getter=ema_getter) # replaced target parameters\n q_ = self._build_c(self.S_, a_, reuse=True, custom_getter=ema_getter)\n\n a_loss = - tf.reduce_mean(q) # maximize the q\n self.atrain = tf.train.AdamOptimizer(LR_A).minimize(a_loss, var_list=a_params)\n\n with tf.control_dependencies(target_update): # soft replacement happened at here\n q_target = self.R + GAMMA * q_\n td_error = tf.losses.mean_squared_error(labels=q_target, predictions=q)\n self.ctrain = tf.train.AdamOptimizer(LR_C).minimize(td_error, var_list=c_params)\n\n self.sess.run(tf.global_variables_initializer())\n\n def choose_action(self, s):\n\n action_prob = self.sess.run(self.a, {self.S: s})[0]\n action_prob = np.clip(np.random.normal(action_prob, self.var), 0, 1)\n p = np.array([1-action_prob,action_prob])\n print(p)\n print(\"Var:\",self.var)\n action = np.random.choice(range(2), p=p.ravel())\n\n return action\n\n def learn(self):\n indices = np.random.choice(MEMORY_CAPACITY, size=BATCH_SIZE)\n bt = self.memory[indices, :]\n bs = bt[:, :self.s_dim]\n ba = bt[:, self.s_dim: self.s_dim + self.a_dim]\n br = bt[:, -self.s_dim - 1: -self.s_dim]\n bs_ = bt[:, -self.s_dim:]\n\n self.var *= .9995\n\n self.sess.run(self.atrain, {self.S: bs})\n self.sess.run(self.ctrain, {self.S: bs, self.a: ba, self.R: br, self.S_: bs_})\n\n def store_transition(self, s, a, r, s_):\n transition = np.hstack((s, a, [r], s_))\n index = self.pointer % MEMORY_CAPACITY # replace the old memory with new memory\n self.memory[index, :] = transition\n self.pointer += 1\n\n def _build_a(self, s, reuse=None, custom_getter=None):\n trainable = True if reuse is None else False\n with tf.variable_scope('Actor', reuse=reuse, custom_getter=custom_getter):\n l1 = tf.layers.dense(s, 30, activation=tf.nn.relu, name='l1', 
trainable=trainable)\n # l2 = tf.layers.dense(l1, self.a_dim, activation=tf.nn.relu, name='a', trainable=trainable)\n l2 = tf.layers.dense(l1, 15, activation=tf.nn.relu, name='a', trainable=trainable)\n a = tf.layers.dense(\n inputs=l2,\n units=2, # output units\n activation=tf.nn.softmax, # get action probabilities\n kernel_initializer=tf.random_normal_initializer(0., .1), # weights\n bias_initializer=tf.constant_initializer(0.1), # biases\n name='acts_prob'\n )\n # print(tf.multiply(l2, self.a_bound, name='scaled_a'))\n # print(np.array(a[:,1:2]).shape)\n return a[:,1:2]\n\n def _build_c(self, s, a, reuse=None, custom_getter=None):\n trainable = True if reuse is None else False\n with tf.variable_scope('Critic', reuse=reuse, custom_getter=custom_getter):\n n_l1 = 30\n w1_s = tf.get_variable('w1_s', [self.s_dim, n_l1], trainable=trainable)\n w1_a = tf.get_variable('w1_a', [self.a_dim, n_l1], trainable=trainable)\n b1 = tf.get_variable('b1', [1, n_l1], trainable=trainable)\n net = tf.nn.relu(tf.matmul(s, w1_s) + tf.matmul(a, w1_a) + b1)\n return tf.layers.dense(net, 1, trainable=trainable) # Q(s,a)" ]
[ [ "tensorflow.get_variable", "tensorflow.control_dependencies", "tensorflow.train.ExponentialMovingAverage", "tensorflow.train.AdamOptimizer", "numpy.hstack", "tensorflow.get_collection", "tensorflow.layers.dense", "tensorflow.Session", "tensorflow.random_normal_initializer", "numpy.zeros", "tensorflow.matmul", "numpy.random.choice", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.set_random_seed", "numpy.array", "tensorflow.losses.mean_squared_error", "numpy.random.seed", "tensorflow.reduce_mean", "tensorflow.constant_initializer", "numpy.random.normal", "tensorflow.variable_scope" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
guilhermevarela/ilu
[ "e4db9744c28f9e04ae82c884f131ee8cd9601cc8" ]
[ "tests/ql/test_dpq.py" ]
[ "import os\nimport pickle\nimport unittest\n\nimport numpy as np\nfrom ilurl.core.params import QLParams, Bounds\nfrom ilurl.core.ql.dpq import DPQ\n\n\nclass TestDPQUpdate(unittest.TestCase):\n '''Tests update Q\n Builds a simple process MDP\n\n actions:\n left ((0,)) and right ((1,))\n\n states:\n terminal ((0, 0), (1, 1))\n start ((1, 0))\n\n rewards:\n -1, 0, +1\n (1,) (1,)\n ---> +0 --->+1\n (0, 0) (0, 1) (1, 0) (1, 1)\n -1 <--- +0 <---\n (0,) (0,)\n '''\n def setUp(self):\n ql_params = QLParams(\n alpha=0.5,\n gamma=1.0,\n states=('count',),\n actions=('fast_green', 'slow_green')\n )\n # This shouldn't be used like that but\n # outside testing scenarios\n ql_params.states = Bounds(rank=2, depth=2)\n ql_params.actions = Bounds(rank=1, depth=2)\n self.dpq = DPQ(ql_params)\n # self.rands: list[5000]\n # With 5000 3-digit random numbers (0,1)\n with open('tests/data/rands.pickle', 'rb') as f:\n self.rands = pickle.load(f)\n\n def test_update(self):\n # episodes\n ri = 0\n for i in range(500):\n state = (0, 1)\n # rotate random numbers\n ri = 0 if ri == len(self.rands) else ri\n\n for r in self.rands[ri:]:\n actions, values = zip(*self.dpq.Q[state].items())\n idx = np.argmax(values)\n if r < 0.1:\n # choose randomly == flip bit\n idx = 0 if idx == 1 else 0\n\n action = actions[idx]\n\n # act using list\n if state == (0, 1):\n if action == (0, ):\n reward = -1\n next_state = (0, 0)\n else:\n reward = 0\n next_state = (1, 0)\n elif state == (1, 0):\n if action == (0, ):\n reward = -1\n next_state = (0, 1)\n else:\n reward = 1\n next_state = (1, 1)\n\n self.dpq.update(state, action, reward, next_state)\n state = next_state\n ri += 1\n # terminal states\n if state in ((0, 0), (1, 1)):\n break\n self.assertLess(self.dpq.Q[(0, 1)][(0, )], -0.999)\n self.assertGreater(self.dpq.Q[(0, 1)][(1, )], 0.9999999999)\n self.assertLess(self.dpq.Q[(1, 0)][(0, )], 1e-3)\n self.assertGreater(self.dpq.Q[(1, 0)][(0, )], -1e-3)\n self.assertGreater(self.dpq.Q[(1, 0)][(1, )], 0.999)\n" ]
[ [ "numpy.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
xymyeah/models
[ "7bc7f4e1a1800efd15de9b90c054c9ab5aba4ad8" ]
[ "fluid/DeepASR/train.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\nimport os\nimport numpy as np\nimport argparse\nimport time\n\nimport paddle.v2.fluid as fluid\nimport data_utils.augmentor.trans_mean_variance_norm as trans_mean_variance_norm\nimport data_utils.augmentor.trans_add_delta as trans_add_delta\nimport data_utils.augmentor.trans_splice as trans_splice\nimport data_utils.data_reader as reader\nfrom data_utils.util import lodtensor_to_ndarray\nfrom model_utils.model import stacked_lstmp_model\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\"Training for stacked LSTMP model.\")\n parser.add_argument(\n '--batch_size',\n type=int,\n default=32,\n help='The sequence number of a batch data. (default: %(default)d)')\n parser.add_argument(\n '--minimum_batch_size',\n type=int,\n default=1,\n help='The minimum sequence number of a batch data. '\n '(default: %(default)d)')\n parser.add_argument(\n '--stacked_num',\n type=int,\n default=5,\n help='Number of lstmp layers to stack. (default: %(default)d)')\n parser.add_argument(\n '--proj_dim',\n type=int,\n default=512,\n help='Project size of lstmp unit. (default: %(default)d)')\n parser.add_argument(\n '--hidden_dim',\n type=int,\n default=1024,\n help='Hidden size of lstmp unit. (default: %(default)d)')\n parser.add_argument(\n '--pass_num',\n type=int,\n default=100,\n help='Epoch number to train. (default: %(default)d)')\n parser.add_argument(\n '--print_per_batches',\n type=int,\n default=100,\n help='Interval to print training accuracy. (default: %(default)d)')\n parser.add_argument(\n '--learning_rate',\n type=float,\n default=0.002,\n help='Learning rate used to train. (default: %(default)f)')\n parser.add_argument(\n '--device',\n type=str,\n default='GPU',\n choices=['CPU', 'GPU'],\n help='The device type. (default: %(default)s)')\n parser.add_argument(\n '--parallel', action='store_true', help='If set, run in parallel.')\n parser.add_argument(\n '--mean_var',\n type=str,\n default='data/global_mean_var_search26kHr',\n help=\"The path for feature's global mean and variance. \"\n \"(default: %(default)s)\")\n parser.add_argument(\n '--train_feature_lst',\n type=str,\n default='data/feature.lst',\n help='The feature list path for training. (default: %(default)s)')\n parser.add_argument(\n '--train_label_lst',\n type=str,\n default='data/label.lst',\n help='The label list path for training. (default: %(default)s)')\n parser.add_argument(\n '--val_feature_lst',\n type=str,\n default='data/val_feature.lst',\n help='The feature list path for validation. (default: %(default)s)')\n parser.add_argument(\n '--val_label_lst',\n type=str,\n default='data/val_label.lst',\n help='The label list path for validation. (default: %(default)s)')\n parser.add_argument(\n '--init_model_path',\n type=str,\n default=None,\n help=\"The model (checkpoint) path which the training resumes from. \"\n \"If None, train the model from scratch. (default: %(default)s)\")\n parser.add_argument(\n '--checkpoints',\n type=str,\n default='./checkpoints',\n help=\"The directory for saving checkpoints. Do not save checkpoints \"\n \"if set to ''. (default: %(default)s)\")\n parser.add_argument(\n '--infer_models',\n type=str,\n default='./infer_models',\n help=\"The directory for saving inference models. Do not save inference \"\n \"models if set to ''. 
(default: %(default)s)\")\n args = parser.parse_args()\n return args\n\n\ndef print_arguments(args):\n print('----------- Configuration Arguments -----------')\n for arg, value in sorted(vars(args).iteritems()):\n print('%s: %s' % (arg, value))\n print('------------------------------------------------')\n\n\ndef train(args):\n \"\"\"train in loop.\n \"\"\"\n\n # paths check\n if args.init_model_path is not None and \\\n not os.path.exists(args.init_model_path):\n raise IOError(\"Invalid initial model path!\")\n if args.checkpoints != '' and not os.path.exists(args.checkpoints):\n os.mkdir(args.checkpoints)\n if args.infer_models != '' and not os.path.exists(args.infer_models):\n os.mkdir(args.infer_models)\n\n prediction, avg_cost, accuracy = stacked_lstmp_model(\n hidden_dim=args.hidden_dim,\n proj_dim=args.proj_dim,\n stacked_num=args.stacked_num,\n class_num=1749,\n parallel=args.parallel)\n\n optimizer = fluid.optimizer.Momentum(\n learning_rate=args.learning_rate, momentum=0.9)\n optimizer.minimize(avg_cost)\n\n # program for test\n test_program = fluid.default_main_program().clone()\n with fluid.program_guard(test_program):\n test_program = fluid.io.get_inference_program([avg_cost, accuracy])\n\n place = fluid.CPUPlace() if args.device == 'CPU' else fluid.CUDAPlace(0)\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n\n # resume training if initial model provided.\n if args.init_model_path is not None:\n fluid.io.load_persistables(exe, args.init_model_path)\n\n ltrans = [\n trans_add_delta.TransAddDelta(2, 2),\n trans_mean_variance_norm.TransMeanVarianceNorm(args.mean_var),\n trans_splice.TransSplice()\n ]\n\n feature_t = fluid.LoDTensor()\n label_t = fluid.LoDTensor()\n\n # validation\n def test(exe):\n # If test data not found, return invalid cost and accuracy\n if not (os.path.exists(args.val_feature_lst) and\n os.path.exists(args.val_label_lst)):\n return -1.0, -1.0\n # test data reader\n test_data_reader = reader.DataReader(args.val_feature_lst,\n args.val_label_lst)\n test_data_reader.set_transformers(ltrans)\n test_costs, test_accs = [], []\n for batch_id, batch_data in enumerate(\n test_data_reader.batch_iterator(args.batch_size,\n args.minimum_batch_size)):\n # load_data\n (features, labels, lod) = batch_data\n feature_t.set(features, place)\n feature_t.set_lod([lod])\n label_t.set(labels, place)\n label_t.set_lod([lod])\n\n cost, acc = exe.run(test_program,\n feed={\"feature\": feature_t,\n \"label\": label_t},\n fetch_list=[avg_cost, accuracy],\n return_numpy=False)\n test_costs.append(lodtensor_to_ndarray(cost)[0])\n test_accs.append(lodtensor_to_ndarray(acc)[0])\n return np.mean(test_costs), np.mean(test_accs)\n\n # train data reader\n train_data_reader = reader.DataReader(args.train_feature_lst,\n args.train_label_lst, -1)\n train_data_reader.set_transformers(ltrans)\n # train\n for pass_id in xrange(args.pass_num):\n pass_start_time = time.time()\n for batch_id, batch_data in enumerate(\n train_data_reader.batch_iterator(args.batch_size,\n args.minimum_batch_size)):\n # load_data\n (features, labels, lod) = batch_data\n feature_t.set(features, place)\n feature_t.set_lod([lod])\n label_t.set(labels, place)\n label_t.set_lod([lod])\n\n cost, acc = exe.run(fluid.default_main_program(),\n feed={\"feature\": feature_t,\n \"label\": label_t},\n fetch_list=[avg_cost, accuracy],\n return_numpy=False)\n\n if batch_id > 0 and (batch_id % args.print_per_batches == 0):\n print(\"\\nBatch %d, train cost: %f, train acc: %f\" %\n (batch_id, 
lodtensor_to_ndarray(cost)[0],\n lodtensor_to_ndarray(acc)[0]))\n # save the latest checkpoint\n if args.checkpoints != '':\n model_path = os.path.join(args.checkpoints,\n \"deep_asr.latest.checkpoint\")\n fluid.io.save_persistables(exe, model_path)\n else:\n sys.stdout.write('.')\n sys.stdout.flush()\n # run test\n val_cost, val_acc = test(exe)\n\n # save checkpoint per pass\n if args.checkpoints != '':\n model_path = os.path.join(\n args.checkpoints,\n \"deep_asr.pass_\" + str(pass_id) + \".checkpoint\")\n fluid.io.save_persistables(exe, model_path)\n # save inference model\n if args.infer_models != '':\n model_path = os.path.join(\n args.infer_models,\n \"deep_asr.pass_\" + str(pass_id) + \".infer.model\")\n fluid.io.save_inference_model(model_path, [\"feature\"],\n [prediction], exe)\n # cal pass time\n pass_end_time = time.time()\n time_consumed = pass_end_time - pass_start_time\n # print info at pass end\n print(\"\\nPass %d, time consumed: %f s, val cost: %f, val acc: %f\\n\" %\n (pass_id, time_consumed, val_cost, val_acc))\n\n\nif __name__ == '__main__':\n args = parse_args()\n print_arguments(args)\n\n train(args)\n" ]
[ [ "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sdban/tf-quant-finance
[ "dfe6b80d7c1146ae51ceb3ced92a83d1d4520697" ]
[ "tf_quant_finance/rates/analytics/cashflows.py" ]
[ "# Lint as: python3\n# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Collection of functions to compute properties of cashflows.\"\"\"\n\nimport tensorflow.compat.v2 as tf\n\n\ndef present_value(cashflows,\n discount_factors,\n dtype=None,\n name=None):\n \"\"\"Computes present value of a stream of cashflows given discount factors.\n\n\n ```python\n\n # 2 and 3 year bonds with 1000 face value and 4%, 6% semi-annual coupons.\n # Note that the first four entries in the cashflows are the cashflows of\n # the first bond (group=0) and the next six are the cashflows of the second\n # bond (group=1).\n cashflows = [[20, 20, 20, 1020, 0, 0],\n [30, 30, 30, 30, 30, 1030]]\n\n # Corresponding discount factors for the cashflows\n discount_factors = [[0.96, 0.93, 0.9, 0.87, 1.0, 1.0],\n [0.97, 0.95, 0.93, 0.9, 0.88, 0.86]]\n\n present_values = present_value(\n cashflows, discount_factors, dtype=np.float64)\n # Expected: [943.2, 1024.7]\n ```\n\n Args:\n cashflows: A real `Tensor` of shape `batch_shape + [n]`. The set of\n cashflows of underlyings. `n` is the number of cashflows per bond\n and `batch_shape` is the number of bonds. Bonds with different number\n of cashflows should be padded to a common number `n`.\n discount_factors: A `Tensor` of the same `dtype` as `cashflows` and of\n compatible shape. The set of discount factors corresponding to the\n cashflows.\n dtype: `tf.Dtype`. If supplied the dtype for the input and output `Tensor`s.\n Default value: None which maps to the default dtype inferred from\n `cashflows`.\n name: Python str. The name to give to the ops created by this function.\n Default value: None which maps to 'present_value'.\n\n Returns:\n Real `Tensor` of shape `batch_shape`. The present values of the cashflows.\n \"\"\"\n name = name or 'present_value'\n with tf.name_scope(name):\n cashflows = tf.convert_to_tensor(cashflows, dtype=dtype, name='cashflows')\n dtype = dtype or cashflows.dtype\n discount_factors = tf.convert_to_tensor(\n discount_factors, dtype=dtype, name='discount_factors')\n discounted = cashflows * discount_factors\n return tf.math.reduce_sum(discounted, axis=-1)\n\n\ndef pv_from_yields(cashflows,\n times,\n yields,\n groups=None,\n dtype=None,\n name=None):\n \"\"\"Computes present value of cashflows given yields.\n\n For a more complete description of the terminology as well as the mathematics\n of pricing bonds, see Ref [1]. In particular, note that `yields` here refers\n to the yield of the bond as defined in Section 4.4 of Ref [1]. This is\n sometimes also referred to as the internal rate of return of a bond.\n\n #### Example\n\n The following example demonstrates the present value computation for two\n bonds. Both bonds have 1000 face value with semi-annual coupons. The first\n bond has 4% coupon rate and 2 year expiry. The second has 6% coupon rate and\n 3 year expiry. 
The yields to maturity (ytm) are 7% and 5% respectively.\n\n ```python\n dtype = np.float64\n\n # The first element is the ytm of the first bond and the second is the\n # yield of the second bond.\n yields_to_maturity = np.array([0.07, 0.05], dtype=dtype)\n\n # 2 and 3 year bonds with 1000 face value and 4%, 6% semi-annual coupons.\n # Note that the first four entries in the cashflows are the cashflows of\n # the first bond (group=0) and the next six are the cashflows of the second\n # bond (group=1).\n cashflows = np.array([20, 20, 20, 1020, 30, 30, 30, 30, 30, 1030],\n dtype=dtype)\n\n # The times of the cashflows.\n times = np.array([0.5, 1, 1.5, 2, 0.5, 1, 1.50, 2, 2.5, 3], dtype=dtype)\n\n # Group entries take values between 0 and 1 (inclusive) as there are two\n # bonds. One needs to assign each of the cashflow entries to one group or\n # the other.\n groups = np.array([0] * 4 + [1] * 6)\n\n # Produces [942.712, 1025.778] as the values of the two bonds.\n present_values = pv_from_yields(\n cashflows, times, yields_to_maturity, groups=groups, dtype=dtype)\n ```\n\n #### References:\n\n [1]: John C. Hull. Options, Futures and Other Derivatives. Ninth Edition.\n June 2006.\n\n Args:\n cashflows: Real rank 1 `Tensor` of size `n`. The set of cashflows underlying\n the bonds.\n times: Real positive rank 1 `Tensor` of size `n`. The set of times at which\n the corresponding cashflows occur quoted in years.\n yields: Real rank 1 `Tensor` of size `1` if `groups` is None or of size `k`\n if the maximum value in the `groups` is of `k-1`. The continuously\n compounded yields to maturity/internal rate of returns corresponding to\n each of the cashflow groups. The `i`th component is the yield to apply to\n all the cashflows with group label `i` if `groups` is not None. If\n `groups` is None, then this is a `Tensor` of size `[1]` and the only\n component is the yield that applies to all the cashflows.\n groups: Optional int `Tensor` of size `n` containing values between 0 and\n `k-1` where `k` is the number of related cashflows.\n Default value: None. This implies that all the cashflows are treated as a\n single group.\n dtype: `tf.Dtype`. If supplied the dtype for the input and output `Tensor`s.\n Default value: None which maps to the default dtype inferred from\n `cashflows`.\n name: Python str. The name to give to the ops created by this function.\n Default value: None which maps to 'pv_from_yields'.\n\n Returns:\n Real rank 1 `Tensor` of size `k` if groups is not `None` else of size `[1]`.\n The present value of the cashflows. 
The `i`th component is the present\n value of the cashflows in group `i` or to the entirety of the cashflows\n if `groups` is None.\n \"\"\"\n with tf.compat.v1.name_scope(\n name,\n default_name='pv_from_yields',\n values=[cashflows, times, yields, groups]):\n cashflows = tf.convert_to_tensor(cashflows, dtype=dtype, name='cashflows')\n times = tf.convert_to_tensor(times, dtype=dtype, name='times')\n yields = tf.convert_to_tensor(yields, dtype=dtype, name='yields')\n cashflow_yields = yields\n if groups is not None:\n groups = tf.convert_to_tensor(groups, name='groups')\n cashflow_yields = tf.gather(yields, groups)\n discounted = cashflows * tf.math.exp(-times * cashflow_yields)\n if groups is not None:\n return tf.math.segment_sum(discounted, groups)\n return tf.math.reduce_sum(discounted, keepdims=True)\n\n\ndef yields_from_pv(cashflows,\n times,\n present_values,\n groups=None,\n tolerance=1e-8,\n max_iterations=10,\n dtype=None,\n name=None):\n \"\"\"Computes yields to maturity from present values of cashflows.\n\n For a complete description of the terminology as well as the mathematics\n of computing bond yields, see Ref [1]. Note that `yields` here refers\n to the yield of the bond as defined in Section 4.4 of Ref [1]. This is\n sometimes also referred to as the internal rate of return of a bond.\n\n #### Example\n\n The following example demonstrates the yield computation for two\n bonds. Both bonds have 1000 face value with semi-annual coupons. The first\n bond has 4% coupon rate and 2 year expiry. The second has 6% coupon rate and\n 3 year expiry. The true yields to maturity (ytm) are 7% and 5% respectively.\n\n ```python\n dtype = np.float64\n\n # The first element is the present value (PV) of the first bond and the\n # second is the PV of the second bond.\n present_values = np.array([942.71187528177757, 1025.7777300221542],\n dtype=dtype)\n\n # 2 and 3 year bonds with 1000 face value and 4%, 6% semi-annual coupons.\n # Note that the first four entries in the cashflows are the cashflows of\n # the first bond (group=0) and the next six are the cashflows of the second\n # bond (group=1).\n cashflows = np.array([20, 20, 20, 1020, 30, 30, 30, 30, 30, 1030],\n dtype=dtype)\n\n # The times of the cashflows.\n times = np.array([0.5, 1, 1.5, 2, 0.5, 1, 1.50, 2, 2.5, 3], dtype=dtype)\n\n # Group entries take values between 0 and 1 (inclusive) as there are two\n # bonds. One needs to assign each of the cashflow entries to one group or\n # the other.\n groups = np.array([0] * 4 + [1] * 6)\n\n # Expected yields = [0.07, 0.05]\n yields = yields_from_pv(\n cashflows, times, present_values, groups=groups, dtype=dtype)\n ```\n\n #### References:\n\n [1]: John C. Hull. Options, Futures and Other Derivatives. Ninth Edition.\n June 2006.\n\n Args:\n cashflows: Real rank 1 `Tensor` of size `n`. The set of cashflows underlying\n the bonds.\n times: Real positive rank 1 `Tensor` of size `n`. The set of times at which\n the corresponding cashflows occur quoted in years.\n present_values: Real rank 1 `Tensor` of size `k` where `k-1` is the maximum\n value in the `groups` arg if supplied. If `groups` is not supplied, then\n this is a `Tensor` of size `1`. The present values corresponding to each\n of the cashflow groups. 
The `i`th component is the present value of all\n the cashflows with group label `i` (or the present value of all the\n cashflows if `groups=None`).\n groups: Optional int `Tensor` of size `n` containing values between 0 and\n `k-1` where `k` is the number of related cashflows.\n Default value: None. This implies that all the cashflows are treated as a\n single group.\n tolerance: Positive real scalar `Tensor`. The tolerance for the estimated\n yields. The yields are computed using a Newton root finder. The iterations\n stop when the inferred yields change by less than this tolerance or the\n maximum iterations are exhausted (whichever is earlier).\n Default value: 1e-8.\n max_iterations: Positive scalar int `Tensor`. The maximum number of\n iterations to use to compute the yields. The iterations stop when the max\n iterations is exhausted or the tolerance is reached (whichever is\n earlier). Supply `None` to remove the limit on the number of iterations.\n Default value: 10.\n dtype: `tf.Dtype`. If supplied the dtype for the input and output `Tensor`s.\n Default value: None which maps to the default dtype inferred from\n `cashflows`.\n name: Python str. The name to give to the ops created by this function.\n Default value: None which maps to 'yields_from_pv'.\n\n Returns:\n Real rank 1 `Tensor` of size `k`. The yield to maturity of the cashflows.\n The `i`th component is the yield to maturity of the cashflows in group\n `i`.\n \"\"\"\n with tf.compat.v1.name_scope(\n name,\n default_name='yields_from_pv',\n values=[\n cashflows, times, present_values, groups, tolerance, max_iterations\n ]):\n cashflows = tf.convert_to_tensor(cashflows, dtype=dtype, name='cashflows')\n times = tf.convert_to_tensor(times, dtype=dtype, name='times')\n present_values = tf.convert_to_tensor(\n present_values, dtype=dtype, name='present_values')\n if groups is None:\n groups = tf.zeros_like(cashflows, dtype=tf.int32, name='groups')\n else:\n groups = tf.convert_to_tensor(groups, name='groups')\n\n def pv_and_duration(yields):\n cashflow_yields = tf.gather(yields, groups)\n discounted = cashflows * tf.math.exp(-times * cashflow_yields)\n durations = tf.math.segment_sum(discounted * times, groups)\n pvs = tf.math.segment_sum(discounted, groups)\n return pvs, durations\n\n yields0 = tf.zeros_like(present_values)\n\n def _cond(should_stop, yields):\n del yields\n return tf.math.logical_not(should_stop)\n\n def _body(should_stop, yields):\n del should_stop\n pvs, durations = pv_and_duration(yields)\n delta_yields = (pvs - present_values) / durations\n next_should_stop = (tf.math.reduce_max(tf.abs(delta_yields)) <= tolerance)\n return (next_should_stop, yields + delta_yields)\n\n loop_vars = (tf.convert_to_tensor(False), yields0)\n _, estimated_yields = tf.while_loop(\n _cond,\n _body,\n loop_vars,\n shape_invariants=(tf.TensorShape([]), tf.TensorShape([None])),\n maximum_iterations=max_iterations,\n parallel_iterations=1)\n return estimated_yields\n\n__all__ = ['present_value', 'pv_from_yields', 'yields_from_pv']\n" ]
[ [ "tensorflow.compat.v2.zeros_like", "tensorflow.compat.v2.math.exp", "tensorflow.compat.v2.TensorShape", "tensorflow.compat.v2.name_scope", "tensorflow.compat.v2.math.reduce_sum", "tensorflow.compat.v2.convert_to_tensor", "tensorflow.compat.v2.compat.v1.name_scope", "tensorflow.compat.v2.gather", "tensorflow.compat.v2.math.logical_not", "tensorflow.compat.v2.math.segment_sum", "tensorflow.compat.v2.abs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
davidharvey1986/rrg
[ "26b4658f14279af21af1a61d57e9936daf315a71", "26b4658f14279af21af1a61d57e9936daf315a71" ]
[ "lib/RRGtools/cluster_member_removal.py", "lib/RRGtools/astro_tools.py" ]
[ "'''\nRemove cluster members from catlogue\n\nThis algorithm will take 2 filters from HST and match the catalogues\nIt will then look and the color-magnitude plot and find the red sequence.\nFrom this it wil remove galaxies that appear to be in the cluster from the catalogue\n\n\nIt requires that the pyRRG code has been run on two different bands of the same cluster\n'''\n\nfrom astropy.io import fits\nimport RRGtools as tools\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfrom numpy.lib.recfunctions import append_fields as append_rec\nimport os as os\ndef clusterMemberRemove( red_band, blue_band, outname='cluster_mem_rem.fits'):\n '''\n INPUTS:\n - red_band is the name of the observation in the redder band\n - blue_band is the name of the observation in the bluer band\n\n OUTPUTS\n - a joint catalogue with the two magnitudes, but with the cluster members \n removed.\n\n NOTE: It changes the name of the magnitudes within the structure, so the \n name of the file is assumed to be ${OBJECTNAME}_${FILTER}_drz_sci.fits\n '''\n if not ( os.path.isfile( red_band ) & \\\n os.path.isfile( blue_band ) ):\n raise ValueError(\"File not found\")\n \n \n joint_catalogue = tools.run_match( red_band, blue_band)\n\n #This assumes the name of the file is in the order ${OBJECTNAME}_${FILTER}_drz_sci.fits\n red_band_filter = red_band.split('_')[1]\n blue_band_filter = blue_band.split('_')[1]\n joint_catalogue[1].data = append_rec( joint_catalogue[1].data, \\\n red_band_filter, \\\n joint_catalogue[1].data['MAG_AUTO_1'], \\\n usemask=False, asrecarray=True)\n joint_catalogue[1].data = append_rec( joint_catalogue[1].data,\\\n blue_band_filter, \\\n joint_catalogue[1].data['MAG_AUTO_2'], \\\n usemask=False, asrecarray=True)\n\n color = joint_catalogue[1].data[red_band_filter] - \\\n joint_catalogue[1].data[blue_band_filter]\n plt.plot( joint_catalogue[1].data[red_band_filter], color, 'b*')\n plt.xlim(10,30)\n plt.ylim(-10,10)\n plt.show(block=False)\n\n\n while True:\n UpperThreshold = np.float(input('Please input the upper threshold of the red-sequence: '))\n LowerThreshold = np.float(input('Please input the lower threshold of the red-sequence: '))\n MagThreshold = np.float(input('Please input the magnitude threshold of the red-sequence: '))\n\n clusterMembers = (color < UpperThreshold) & \\\n (color > LowerThreshold) & \\\n (joint_catalogue[1].data[red_band_filter] < MagThreshold)\n plt.plot( joint_catalogue[1].data[red_band_filter], color, 'b*')\n\n plt.plot( joint_catalogue[1].data[red_band_filter][clusterMembers], \\\n color[clusterMembers], 'r*')\n plt.ylim(np.min([-2,LowerThreshold*2.]), np.max([2,UpperThreshold*2.]))\n plt.xlim(10, np.max([MagThreshold*1.5,28]))\n plt.draw()\n done = input('Are you happy with this? 
(Yes or No): ')\n if done == 'Yes':\n break\n plt.close()\n\n final_data = joint_catalogue[1].data[ clusterMembers == False ]\n final_cat_names = np.array(joint_catalogue[1].data.columns.names)\n joint_columns = []\n \n for i in range(len(final_cat_names)):\n if '_1' in final_cat_names[i]:\n final_cat_name = '_'.join(final_cat_names[i].split('_')[:-1])\n else:\n final_cat_name = final_cat_names[i]\n joint_columns.append( \\\n fits.Column(name=final_cat_name, \\\n array=final_data[final_cat_names[i]], \\\n format=final_data[final_cat_names[i]].dtype))\n \n final_cat = fits.BinTableHDU.from_columns(joint_columns)\n \n final_cat.writeto( outname, clobber=True)\n return fits.open(outname)\n \n \n \n \n", "import numpy as np\n\ndef ra_separation( ra1, dec1, ra2, dec2, abs=False):\n '''\n ;PURPOSE : TO DETERMINE THE SEPARATION OF TWO POSITIONS\n ; ASSUMES SMALL ANGLES\n\n\n ;INPUTS : \n ; RA1 : VECTOR OR SCALAR OFTHE RIGHT ASCENSION OF \n ; THE FIRST POSITION IN DEGREES\n ; RA2 : VECTOR OR SCALAR OFTHE RIGHT ASCENSION OF \n ; THE SECOND POSITION IN DEGREES\n ; DEC : THE DECLINATION OF THE TWO POSITIONS\n\n\n ;KEYWORDS :\n ; DEC1 : DECLINATION OF THE SECOND HALO, THE DEFAULT\n ; IS TO HAVE THE HALOS AT THE SAME DECLINATION\n ; ABS : RETURN THE ABSOLUTE VALUE \n\n \n ;RETURNS : \n ; SEPARATION : THE ANGULAR SEPARATION OF THE TWO HALOS,\n ; POSITIVE IS POSITIVE IN THE SKY (negative east)\n '''\n #convert to radians first\n \n ra1_rad = ra1*np.pi/180.\n ra2_rad = ra2*np.pi/180.\n\n dec1_rad = dec1*np.pi/180\n dec2_rad = dec2*np.pi/180\n\n #using the small angle approximation\n\n separation = np.sqrt( ((ra1_rad-ra2_rad)*np.cos(dec1_rad))**2+\n (dec1_rad-dec2_rad)**2)*206265.\n\n \n if abs == False:\n try:\n separation[ ra1 > ra2 ] *= -1\n except:\n if ra1 > ra2:\n separation *= -1\n\n\n \n \n return separation\n\n \ndef bin_etang( radial, etang, nbins=20, \\\n cut=None, xlim=None, log_bin=False,\n weight=None, **kwargs):\n '''\n PURPOSE : Program to bin up the tangential ellipiticity\n\n INPUTS :\n - DIST_RAD : The distance the gal is away from the x-axis\n - DIST_LONG : The distance the gal is away from the y-axis\n - E1 : The componet of the shape parallel and perp to the x-axis\n - E2 : The component of the shape 45 deg the x-axis\n - ANGLE : The angle the galaxy is wrt to x-axis\n \n KEYWORDS :\n - NBINS : Number of bins\n - CUT : The distance to cut at \n\n '''\n #Get the radial range of the data\n if xlim is None:\n xlim = [np.min(radial), np.max(radial)]\n\n #Determine the tangential shear\n if weight is None:\n weight = np.ones( len(etang), float)\n \n if log_bin:\n logspace = np.linspace(np.log10(xlim[0]), np.log10(xlim[1]), nbins+1)\n bins = 10**(logspace)\n else:\n bins = np.linspace( xlim[0], xlim[1], nbins+1)\n\n e_binned = np.zeros((2, nbins), float)\n \n for i in range(nbins):\n\n ind = (radial > bins[i]) & (radial < bins[i+1]) \n e_binned[0, i] = np.sum( etang[ ind ]*weight[ ind ] )/np.sum( weight[ ind ])\n \n e_binned[1, i] = np.std( etang[ind] )/np.sqrt(len(etang[ind]))\n\n \n return (bins[:-1]+bins[1:])/2., e_binned\n" ]
[ [ "numpy.min", "matplotlib.pyplot.ylim", "matplotlib.pyplot.draw", "matplotlib.pyplot.plot", "numpy.max", "matplotlib.pyplot.xlim", "matplotlib.pyplot.close", "numpy.array", "numpy.lib.recfunctions.append_fields", "matplotlib.pyplot.show" ], [ "numpy.linspace", "numpy.min", "numpy.cos", "numpy.max", "numpy.std", "numpy.log10", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [ "1.11", "1.19", "1.24", "1.16", "1.23", "1.20", "1.7", "1.12", "1.21", "1.22", "1.14", "1.6", "1.13", "1.9", "1.17", "1.10", "1.18", "1.15", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lintondf/COVIDtoTimeSeries
[ "b676a87ecf414cae3bea742e946d0e6458d641cf" ]
[ "covid/Analysis2.py" ]
[ "'''\nCreated on Apr 9, 2020\n\n@author: D. F. Linton, Blue Lightning Development, LLC\n'''\n\nimport os\nfrom pathlib import Path\nimport warnings\nimport io\nimport requests\nimport urllib.parse\nimport numpy as np\nimport scipy.stats as stats\nfrom scipy.stats import norm\nfrom seasonal import fit_seasons, adjust_seasons, fit_trend\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nfrom pandas.plotting import register_matplotlib_converters\nimport statsmodels.api as sm\nfrom sklearn import linear_model\nfrom sklearn.metrics import mean_squared_error, r2_score\n\nfrom datetime import datetime\nfrom jinja2 import Environment, FileSystemLoader, select_autoescape\n\nfrom scipy.optimize.minpack import curve_fit\nfrom scipy.integrate import trapz\nfrom sortedcontainers import SortedSet\nfrom Population import loadPopulation, loadStatePopulations\nfrom Deaths import updateDeaths\nfrom IHME import IHME\n# from astropy.wcs.docstrings import row\n\n# from adjustText import adjust_text \n\nregister_matplotlib_converters()\n\nY_UPPER = 1.10 # upper y limit for DDGR charts\nnD = 3 # 3DRR\nhome = os.path.expanduser('~')\npathToRepository = home + '/GITHUB/COVID-19'\nihme = IHME(home)\n\ncountries = ['Afghanistan', 'Albania', 'Algeria', 'Andorra', 'Angola', 'Antigua and Barbuda', 'Argentina', 'Armenia', 'Aruba', 'Australia', 'Austria', 'Azerbaijan', 'Bahamas', 'Bahamas, The', 'Bahrain', 'Bangladesh', 'Barbados', 'Belarus', 'Belgium', 'Belize', 'Benin', 'Bhutan', 'Bolivia', 'Bosnia and Herzegovina', 'Botswana', 'Brazil', 'Brunei', 'Bulgaria', 'Burkina Faso', 'Burma', 'Burundi', 'Cabo Verde', 'Cambodia', 'Cameroon', 'Canada', 'Cape Verde', 'Cayman Islands', 'Central African Republic', 'Chad', 'Channel Islands', 'Chile', 'China', 'Colombia', 'Congo (Brazzaville)', 'Congo (Kinshasa)', 'Costa Rica', \"Cote d'Ivoire\", 'Croatia', 'Cruise Ship', 'Cuba', 'Curacao', 'Cyprus', 'Czech Republic', 'Czechia', 'Denmark', 'Diamond Princess', 'Djibouti', 'Dominica', 'Dominican Republic', 'East Timor', 'Ecuador', 'Egypt', 'El Salvador', 'Equatorial Guinea', 'Eritrea', 'Estonia', 'Eswatini', 'Ethiopia', 'Faroe Islands', 'Fiji', 'Finland', 'France', 'French Guiana', 'Gabon', 'Gambia', 'Gambia, The', 'Georgia', 'Germany', 'Ghana', 'Gibraltar', 'Greece', 'Greenland', 'Grenada', 'Guadeloupe', 'Guam', 'Guatemala', 'Guernsey', 'Guinea', 'Guinea-Bissau', 'Guyana', 'Haiti', 'Holy See', 'Honduras', 'Hong Kong', 'Hong Kong SAR', 'Hungary', 'Iceland', 'India', 'Indonesia', 'Iran', 'Iran (Islamic Republic of)', 'Iraq', 'Ireland', 'Israel', 'Italy', 'Ivory Coast', 'Jamaica', 'Japan', 'Jersey', 'Jordan', 'Kazakhstan', 'Kenya', 'Korea, South', 'Kosovo', 'Kuwait', 'Kyrgyzstan', 'Laos', 'Latvia', 'Lebanon', 'Liberia', 'Libya', 'Liechtenstein', 'Lithuania', 'Luxembourg', 'MS Zaandam', 'Macao SAR', 'Macau', 'Madagascar', 'Mainland China', 'Malaysia', 'Maldives', 'Mali', 'Malta', 'Martinique', 'Mauritania', 'Mauritius', 'Mayotte', 'Mexico', 'Moldova', 'Monaco', 'Mongolia', 'Montenegro', 'Morocco', 'Mozambique', 'Namibia', 'Nepal', 'Netherlands', 'New Zealand', 'Nicaragua', 'Niger', 'Nigeria', 'North Ireland', 'North Macedonia', 'Norway', 'Oman', 'Others', 'Pakistan', 'Palestine', 'Panama', 'Papua New Guinea', 'Paraguay', 'Peru', 'Philippines', 'Poland', 'Portugal', 'Puerto Rico', 'Qatar', 'Republic of Ireland', 'Republic of Korea', 'Republic of Moldova', 'Republic of the Congo', 'Reunion', 'Romania', 'Russia', 'Russian Federation', 'Rwanda', 'Saint Barthelemy', 'Saint Kitts and Nevis', 'Saint 
Lucia', 'Saint Martin', 'Saint Vincent and the Grenadines', 'San Marino', 'Saudi Arabia', 'Senegal', 'Serbia', 'Seychelles', 'Sierra Leone', 'Singapore', 'Slovakia', 'Slovenia', 'Somalia', 'South Africa', 'South Korea', 'Spain', 'Sri Lanka', 'St. Martin', 'Sudan', 'Suriname', 'Sweden', 'Switzerland', 'Syria', 'Taipei and environs', 'Taiwan', 'Taiwan*', 'Tanzania', 'Thailand', 'The Bahamas', 'The Gambia', 'Timor-Leste', 'Togo', 'Trinidad and Tobago', 'Tunisia', 'Turkey', 'UK', 'Uganda', 'Ukraine', 'United Arab Emirates', 'United Kingdom', 'Uruguay', 'US', 'Uzbekistan', 'Vatican City', 'Venezuela', 'Viet Nam', 'Vietnam', 'West Bank and Gaza', 'Zambia', 'Zimbabwe', 'occupied Palestinian territory']\nstates2 = {\n 'AK': 'Alaska',\n 'AL': 'Alabama',\n 'AR': 'Arkansas',\n 'AS': 'American Samoa',\n 'AZ': 'Arizona',\n 'CA': 'California',\n 'CO': 'Colorado',\n 'CT': 'Connecticut',\n 'DC': 'District of Columbia',\n 'DE': 'Delaware',\n 'FL': 'Florida',\n 'GA': 'Georgia',\n 'GU': 'Guam',\n 'HI': 'Hawaii',\n 'IA': 'Iowa',\n 'ID': 'Idaho',\n 'IL': 'Illinois',\n 'IN': 'Indiana',\n 'KS': 'Kansas',\n 'KY': 'Kentucky',\n 'LA': 'Louisiana',\n 'MA': 'Massachusetts',\n 'MD': 'Maryland',\n 'ME': 'Maine',\n 'MI': 'Michigan',\n 'MN': 'Minnesota',\n 'MO': 'Missouri',\n 'MP': 'Northern Mariana Islands',\n 'MS': 'Mississippi',\n 'MT': 'Montana',\n 'NA': 'National',\n 'NC': 'North Carolina',\n 'ND': 'North Dakota',\n 'NE': 'Nebraska',\n 'NH': 'New Hampshire',\n 'NJ': 'New Jersey',\n 'NM': 'New Mexico',\n 'NV': 'Nevada',\n 'NY': 'New York',\n 'OH': 'Ohio',\n 'OK': 'Oklahoma',\n 'OR': 'Oregon',\n 'PA': 'Pennsylvania',\n 'PR': 'Puerto Rico',\n 'RI': 'Rhode Island',\n 'SC': 'South Carolina',\n 'SD': 'South Dakota',\n 'TN': 'Tennessee',\n 'TX': 'Texas',\n 'UT': 'Utah',\n 'VA': 'Virginia',\n 'VI': 'Virgin Islands',\n 'VT': 'Vermont',\n 'WA': 'Washington',\n 'WI': 'Wisconsin',\n 'WV': 'West Virginia',\n 'WY': 'Wyoming'\n}\n\n\nclass Analysis():\n def __init__(self):\n self.top10StatesTable = ''\n self.top10CountriesTable = ''\n self.statesLinks = np.array([],dtype=str)\n self.countriesLinks = np.array([],dtype=str)\n \n\n def sigmoid(self, x, L ,x0, k, b):\n y = L / (1 + np.exp(-k*(x-x0)))+b\n return (y)\n \n def analyze(self, cases, population, which=None, verbose=False, links = None): \n link = (\"[%s](https://github.com/lintondf/COVIDtoTimeSeries/raw/master/analysis/%s/%s.png)\" %\n (cases.columns[0], which, urllib.parse.quote(cases.columns[0]) ) )\n if not verbose :\n print(cases.columns[0])\n cases['Ln'] = np.log(cases[cases.columns[0]])\n lnCases = cases[['Ln']].dropna()\n if (len(lnCases) < 10) :\n return None, None, None, None, links\n reg = linear_model.LinearRegression()\n T = np.asarray(((lnCases.index-lnCases.index[0]).days))\n T = T.reshape(-1, 1)\n Y = np.asarray(lnCases['Ln']).reshape(-1, 1)\n \n lowess = sm.nonparametric.lowess\n trend = lowess(Y[:,0], T[:,0], frac=0.5, it=10)[:,1]\n\n# try:\n# ydata = np.exp(trend)\n# xdata = T[:,0] - T[0,0]\n# idx = ydata > population\n# ydata = ydata[idx]\n# xdata = xdata[idx]\n# p0 = [max(ydata), np.median(xdata),1,min(ydata)] # this is an mandatory initial guess\n# \n# popt, pcov = curve_fit(self.sigmoid, xdata, ydata,p0, method='dogbox', maxfev=5000)\n# print(cases.columns[0], ydata[-1], popt)\n# print(np.sqrt(np.diag(pcov)))\n# f,a = plt.subplots()\n# a.plot(xdata, ydata, 'r.')\n# xz = np.linspace(xdata[0], 2*xdata[-1], 25)\n# z = self.sigmoid( xz, popt[0], popt[1], popt[2], popt[3] )\n# a.plot(xz,z, 'b-')\n# a.set_title(cases.columns[0])\n# plt.show()\n# except:\n# 
print(cases.columns[0], \"?\")\n Z = np.exp(Y) # very roundabout way to get non-zero values\n y3raw = (Z[nD:,0] / Z[:-nD,0])\n y3ddr = lowess(y3raw, T[nD:,0], frac=0.5, it=10)[:,1]\n # np.savetxt('../' + cases.columns[0] + '.csv', trend, delimiter=',') \n x3ddr = lnCases.index[-len(T[nD:,0]):] # T[3:]\n if (len(T) < 10) :\n print(cases.columns[0], len(T), 'days of history')\n return None, None, None, None, links\n else:\n t = T[-3:] - T[-1]\n t = np.hstack([t,t**2])\n reg.fit(t, y3ddr[-3:])\n intercept = reg.intercept_\n coef = reg.coef_\n y_pred = reg.predict(t)\n \n overall = ( (trend[-1] - trend[0]) / (T[-1] - T[0]));\n direction = 'D'\n if (y3ddr[-1] > y3ddr[-5]) :\n direction = 'A'\n scaledTrend = np.exp(trend) / population\n def rpt(y3 : float) -> str:\n ddgr = y3**(1/nD)\n if ddgr <= 1.0 :\n ddgr = 1.0\n days = ' --'\n elif (ddgr > 1.0) :\n ndays = int(np.log(2)/np.log(ddgr))\n if ndays > 99:\n days = ' **'\n else : \n days = '%3d' % ndays\n return '%7.3f/%s' % (ddgr, days)\n row = ''\n row += ('|%-15s| %3d ' % (link, len(T))) \n row += ('| %6.0f| %10.3f' % (np.exp(trend[-1]), scaledTrend[-1]))\n row += ('| %s| %s| %s| %s |' % (rpt(y3ddr[-7]), rpt(y3ddr[-3]), rpt(y3ddr[-2]), rpt(y3ddr[-1])))\n row += ('\\n')\n if verbose :\n if (which == 'states') :\n self.top10StatesTable += row\n else:\n self.top10CountriesTable += row\n else:\n if not links is None:\n links = np.append( links, np.array([row], dtype=str) )\n \n return scaledTrend, x3ddr, y3ddr, y3raw, links\n\n def plotOneState( self, path, state, pop, which, links, compare=False ):\n scaled, x3ddr, y3ddr, y3raw, links = self.analyze( state, pop, which=which, verbose=False, links=links ) # smoothed trend/population (M), x and y for smoothed 3-day death ratios\n if scaled is None:\n return links\n values = np.asarray(state[[state.columns[0]]].values)\n if (compare) :\n# fig, (ax1, axi) = plt.subplots(2, figsize=(8,10.5), sharex=False)\n fig = plt.figure(constrained_layout=False, figsize=(8,10.5))\n spec1 = gridspec.GridSpec(ncols=1, nrows=2, figure=fig)\n ax1 = fig.add_subplot(spec1[0, 0]) #, sharex=False)\n ax2 = ax1.twinx()\n spec2 = gridspec.GridSpec(ncols=1, nrows=2, figure=fig, hspace=0.30)\n axi = fig.add_subplot(spec2[1, 0]) #, sharex=False) \n ax1.get_shared_x_axes().remove(axi) \n ax2.get_shared_x_axes().remove(axi) \n axi.get_shared_x_axes().remove(ax1) \n axi.get_shared_x_axes().remove(ax2) \n else:\n fig, ax1 = plt.subplots()\n ax2 = ax1.twinx()\n# fig.autofmt_xdate()\n plt.setp(ax1.get_xticklabels(), rotation=30, ha='right')\n ax1.set_title('%s - %d Deaths' % (state.columns[0], values[-1]))\n ax1.grid(True)\n ax2.set_ylim(1, Y_UPPER)\n color = 'red'\n ax1.semilogy(state.index[:], (scaled), color=color, label='Deaths/1M (Left)') # \n ax1.semilogy(state.index[:], (values/pop), linestyle='', markeredgecolor='none', marker='.', color=color)\n ax1.annotate('%5.1f' % (values[-1]/pop),\n xy=(state.index[-1], values[-1]/pop), xycoords='data',\n xytext=(-10, -30), textcoords='offset points',\n arrowprops=dict(arrowstyle=\"->\"))\n ax2.plot( x3ddr, y3ddr**(1/nD), color='blue', label='DDGR (Right)')\n ax2.plot( x3ddr, y3raw**(1/nD), color='blue', linestyle='', markeredgecolor='none', marker='.' 
)\n ax2.annotate('%5.3f' % (y3raw[-1]**(1/nD)),\n xy=(x3ddr[-1], y3raw[-1]**(1/nD)), xycoords='data',\n xytext=(-10, 30), textcoords='offset points',\n arrowprops=dict(arrowstyle=\"->\"))\n ax1.legend(loc='upper left')\n ax2.legend(loc='center right')\n if compare:\n ihme.plot( state.columns[0], axi)\n# print('ax1', ax1 )\n# print(sorted(map(tuple, ax1.get_shared_x_axes())))\n# print('ax2', ax2 )\n# print(sorted(map(tuple, ax2.get_shared_x_axes())))\n# print('axi', axi )\n# print(sorted(map(tuple, axi.get_shared_x_axes())))\n plt.setp(axi.get_xticklabels(), rotation=30, ha='right')\n plt.draw()\n fig.savefig(path+\"/\"+state.columns[0]+\".png\")\n plt.close()\n return links\n \n def plotCasesVsDeaths(self, deaths, cases, population, xlim, ylim, which=None):\n fig, ax1 = plt.subplots()\n texts = []\n if which is None:\n states = deaths.columns;\n else :\n states = which\n for s in states :\n if not s in cases.columns:\n continue;\n if not s in population:\n continue;\n d = deaths[s] / population[s]\n d = d[d > 50]\n if len(d) == 0:\n continue;\n c = cases[s] / population[s]\n c = c[c > 1000]\n if len(c) == 0:\n continue;\n first = c.index[0]\n if d.index[0] > first:\n first = d.index[0]\n last = c.index[-1]\n if d.index[-1] < last:\n last = d.index[-1]\n color = next(ax1._get_lines.prop_cycler)['color']\n if which is None:\n ax1.loglog( np.asarray(c[first:last].values), np.asarray(d[first:last].values), '-', color='gray')\n ax1.loglog(c[last], d[last], '.', color=color)\n text = ax1.annotate(s, color=color,\n xy=(c[last], d[last]), xycoords='data',\n xytext=(10, 30), textcoords='offset points',\n arrowprops=dict(arrowstyle=\"->\", color=color))\n if len(texts) == 0 :\n texts = [text]\n else :\n texts.append(text)\n fig.canvas.draw() \n# print(texts, texts.get_window_extent() )\n# very slow; pushes label out of frame...\n# adjust_text(texts, arrowprops=dict(arrowstyle='->', color='gray'))\n ax1.grid(True)\n ax1.set_xlabel('Cases per Million')\n ax1.set_ylabel('Deaths per Million')\n# ax1.set_xlim(50, xlim)\n# ax1.set_ylim(1000, ylim)\n plt.show();\n \n def main(self, reload, pathToRepository, outPath):\n if (reload) :\n f, g, fc, gc = updateDeaths(pathToRepository, pull=False)\n f.sort_values(f.index[-1], axis=1,ascending=False,inplace=True)\n g.sort_values(g.index[-1], axis=1,ascending=False,inplace=True)\n f.to_csv(outPath + \"/data/states.csv\")\n g.to_csv(outPath + \"/data/countries.csv\")\n fc.to_csv(outPath + \"/data/states-cases.csv\")\n gc.to_csv(outPath + \"/data/countries-cases.csv\")\n else :\n f = pd.read_csv(outPath + \"/data/states.csv\", parse_dates=True, index_col=0)\n g = pd.read_csv(outPath + \"/data/countries.csv\", parse_dates=True, index_col=0)\n fc = pd.read_csv(outPath + \"/data/states-cases.csv\", parse_dates=True, index_col=0)\n gc = pd.read_csv(outPath + \"/data/countries-cases.csv\", parse_dates=True, index_col=0)\n \n print(f.index[-1])\n \n statesPopulation = loadStatePopulations();\n countriesPopulation = loadPopulation();\n countriesPopulation['US'] = countriesPopulation['United States']\n \n# self.plotCasesVsDeaths( g, gc, countriesPopulation, 5000, 500 )\n# self.plotCasesVsDeaths( f, fc, statesPopulation, 5000, 500 )\n\n population = statesPopulation;\n # print('%-15s N %10s %10s %7s %7s %7s %7s' % ('State', 'Deaths', 'Per 1M', 'DDGR[-7]', 'DDGR[-3]', 'DDGR[-2]', 'DDGR[-1]'))\n header1 = (\"|State|Days|Deaths|Deaths/1M|DDGR[6:7]|DDGR[2:3]|DDGR[1:2]|DDGR[0:1]|\\n\")\n header2 = (\"|:--|--:|--:|--:|--:|--:|--:|--:|\\n\")\n self.statesLinks = np.append( 
self.statesLinks, np.array([header1, header2], dtype=str) )\n self.top10StatesTable = header1\n self.top10StatesTable += header2\n # fig, (ax1, ax2) = plt.subplots(1,2)\n # plt.grid(True)\n fig1, ax1 = plt.subplots() # 10 states death rates\n fig2, ax2 = plt.subplots() # 10 states DDGRs\n# fig1.autofmt_xdate()\n# fig2.autofmt_xdate()\n \n ax1.set_title('Highest 10 States - Deaths/Million Population')\n ax1.grid(True)\n ax2.set_title('Highest 10 States - Lowess Smoothed DDGRs')\n ax2.grid(True)\n for i in range(0,10) :\n x = f[[f.columns[i]]]\n x = x[(x.T != 0).any()].apply(pd.to_numeric, errors='coerce')\n pop = population[x.columns[0]]\n scaled, x3ddr, y3ddr, __, __ = self.analyze( x, pop, 'states', verbose=True ) # smoothed trend/population (M), x and y for smoothed 3-day death ratios\n color = next(ax1._get_lines.prop_cycler)['color']\n # label = '%s : %6.1f' % (f.columns[i], scaled[-1])\n label = f.columns[i]\n ax1.semilogy(x.index[:], (scaled), label=label, color=color) # \n ax1.semilogy(x.index[:], (np.asarray(x[[x.columns[0]]].values)/pop), linestyle='', markeredgecolor='none', marker='.', color=color)\n # label = '%s : %6.1f' % (f.columns[i], y3ddr[-1]**(1/nD))\n ax2.plot( x3ddr, y3ddr**(1/nD), color=color, label=label)\n print()\n \n # generate charts for all states \n for name in f.columns.sort_values():\n x = f[[name]]\n x = x[(x.T != 0).any()].apply(pd.to_numeric, errors='coerce')\n if name in population:\n pop = population[x.columns[0]]\n compare = True\n if (name == 'Puerto Rico') or (name == 'Wyoming') :\n compare = False\n self.statesLinks = self.plotOneState(outPath + \"/analysis/states\", x, pop, \n which='states', links=self.statesLinks, compare=compare)\n \n \n # print('%-15s N %10s %10s %6s %6s %6s' % ('Country', 'Deaths', 'Per 1M', 'DDR[-3]', 'DDR[-2]', 'DDR[-1]'))\n header1 = (\"|Country|Days|Deaths|Deaths/1M|DDGR[6:7]|DDGR[2:3]|DDGR[1:2]|DDGR[0:1]|\\n\")\n header2 = (\"|:--|--:|--:|--:|--:|--:|--:|--:|\\n\")\n self.countriesLinks = np.append( self.countriesLinks, np.array([header1, header2], dtype=str) ) \n self.top10CountriesTable = header1\n self.top10CountriesTable += header2\n population = countriesPopulation;\n population['US'] = population['United States']\n \n x = g[['US']]\n x = x[(x.T != 0).any()].apply(pd.to_numeric, errors='coerce')\n pop = population[x.columns[0]]\n scaled, x3ddr, y3ddr, __, __ = self.analyze( x, pop, 'countries' ) # smoothed trend/population (M)\n color = 'Black'\n ax1.semilogy(x.index[:], (scaled), label=x.columns[0], color=color) # \n ax1.semilogy(x.index[:], (np.asarray(x[[x.columns[0]]].values)/pop), linestyle='', markeredgecolor='none', marker='.', color=color)\n ax2.plot( x3ddr, y3ddr**(1/nD), color=color, label=x.columns[0])\n \n ax1.legend(loc='upper left')\n ax2.legend(loc='upper left')\n ax2.set_ylim(1, Y_UPPER)\n fig1.savefig(outPath+\"/analysis/States10WorstDeathRates.png\")\n fig2.savefig(outPath+\"/analysis/States10WorstDDGR.png\")\n plt.close()\n \n fig3, ax3 = plt.subplots() # 10 countries death rates\n fig4, ax4 = plt.subplots() # 10 countries DDGRs\n ax3.set_xticklabels(ax3.get_xticklabels(), rotation=90) # fig3.autofmt_xdate()\n ax4.set_xticklabels(ax4.get_xticklabels(), rotation=90) # fig4.autofmt_xdate()\n ax3.set_title('Highest 10 Countries - Deaths/Million Population')\n ax3.grid(True)\n ax4.set_title('Highest 10 Countries - Lowess Smoothed DDGRs')\n ax4.grid(True)\n \n for i in range(0,10) :\n x = g[[g.columns[i]]]\n x = x[(x.T != 0).any()].apply(pd.to_numeric, errors='coerce')\n pop = population[x.columns[0]]\n 
scaled, x3ddr, y3ddr, __, __ = self.analyze( x, pop, 'countries', verbose=True ) # smoothed trend/population (M)\n color = next(ax3._get_lines.prop_cycler)['color']\n ax3.semilogy(x.index[:], (scaled), label=g.columns[i], color=color) # \n ax3.semilogy(x.index[:], (np.asarray(x[[x.columns[0]]].values)/pop), linestyle='', markeredgecolor='none', marker='.', color=color)\n ax4.plot( x3ddr, y3ddr**(1/nD), color=color, label=g.columns[i])\n ax3.legend(loc='upper left')\n ax4.legend(loc='upper left')\n ax4.set_ylim(1, Y_UPPER)\n fig3.savefig(outPath+\"/analysis/Countries10WorstDeathRates.png\")\n fig4.savefig(outPath+\"/analysis/Countries10WorstDDGR.png\")\n plt.close()\n \n # generate charts for all countries\n for name in g.columns.sort_values():\n x = g[[name]]\n x = x[(x.T != 0).any()].apply(pd.to_numeric, errors='coerce')\n if name in population:\n pop = population[x.columns[0]]\n self.countriesLinks = self.plotOneState(outPath + \"/analysis/countries\", x, pop, \n which='countries', links=self.countriesLinks, compare=name == 'US')\n # os.system('git -C %s commit -a -m \"daily update\"' % outPath)\n # os.system('git -C %s push' % outPath)\n\n\nif __name__ == '__main__':\n outPath = home + \"/GITHUB/COVIDtoTimeSeries\"\n env = Environment(\n loader=FileSystemLoader(outPath + '/covid/templates'),\n autoescape=select_autoescape(['html', 'xml'])\n )\n template = env.get_template(\"ANALYSIS.mdt\")\n analysis = Analysis() \n\n result = os.popen('git -C %s pull' % pathToRepository).read()\n\n reload = not result.startswith('Already up to date.')\n if reload:\n os.system('git -C %s pull' % outPath)\n\n analysis.main(reload, pathToRepository, outPath)\n \n statesContent = ''\n for state in analysis.statesLinks:\n statesContent += state\n countriesContent = ''\n for state in analysis.countriesLinks:\n countriesContent += state\n fields = dict();\n fields.update({'date': datetime.date(datetime.now())})\n fields.update({'top10StatesTable': analysis.top10StatesTable})\n fields.update({'top10CountriesTable': analysis.top10CountriesTable})\n fields.update({'statesLinks': statesContent})\n fields.update({'countriesLinks': countriesContent}) \n f = open(outPath + '/analysis/ANALYSIS.md', 'w') \n print(template.render(fields), file=f)\n f.close()\n if reload :\n os.system('git -C %s commit -a -m \"daily update\"' % outPath)\n os.system('git -C %s push' % outPath)\n" ]
[ [ "numpy.hstack", "numpy.log", "pandas.read_csv", "numpy.asarray", "matplotlib.pyplot.subplots", "matplotlib.pyplot.draw", "sklearn.linear_model.LinearRegression", "pandas.plotting.register_matplotlib_converters", "matplotlib.pyplot.close", "numpy.exp", "matplotlib.gridspec.GridSpec", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
aloaberasturi/biLSTM_NER
[ "ad7442d1aab17edb9cfe0f1e49c7ea3342b368a8" ]
[ "src/nnerc_learner.py" ]
[ "#!usr/bin/python3\nimport json\nimport numpy as np\nfrom pathlib import Path\nfrom nnerc_utils import load_data, encode_words, j_dump, load_glove, classify_token, embedding_matrix, what_capital\nfrom nnerc_common import traindata_file, valdata_file, NetConfig\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.models import Model, Input\nfrom keras.layers import LSTM, Embedding, Dense, TimeDistributed, Dropout, Bidirectional, concatenate\nfrom keras_contrib.layers import CRF\nfrom keras_contrib.utils import save_load_utils\nfrom keras_contrib.metrics import crf_accuracy\nfrom keras.activations import softmax\nfrom keras_contrib.losses import crf_loss\nfrom keras.callbacks import EarlyStopping\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.utils import to_categorical\nfrom keras.optimizers import RMSprop, Nadam\nfrom collections import OrderedDict\nfrom nltk import pos_tag\nfrom matplotlib import pyplot\nfrom ast import literal_eval\n\ndef learn(traindir, validationdir, version):\n '''\n Learns a NN model using traindir as training data , and validationdir\n as validation data . Saves learnt model in a file named modelname\n \n Parameters:\n -----------\n traindir: pathlib.Path\n Path to train directory\n \n validationdir: pathlib.Path\n Path to validation directory\n\n version: int\n\n Returns:\n --------\n modelname: str\n '''\n\n modelname = \"LSTM-CRF_%i\" % version\n config = NetConfig(version)\n\n # ******** UNCOMMENT THIS SECTION TO LOAD DATA AND STORE IT *******\n\n # load the data from .xml files. This also stores the training and \n # validation data in a permanent .json files\n\n # load_data(traindir)\n # load_data(validationdir)\n\n # *****************************************************************\n \n # create indexes from training data\n\n idx = create_indexs(traindata_file, max_len_sentences=75, max_len_words=50)\n\n # build network\n\n model = build_network(idx, config)\n\n # write on file containing the summary\n\n dump_summary(model, modelname)\n\n # encode datasets\n\n Xtrain = encode_words(traindata_file, idx)\n Ytrain = encode_tags(traindata_file, idx)\n Xval = encode_words(valdata_file, idx)\n Yval = encode_tags(valdata_file, idx)\n\n # train model and save it\n\n\n history = model.fit(Xtrain, Ytrain, validation_data=(Xval, Yval),\n verbose=1, \n batch_size=config.batch, \n epochs=config.epochs\n # callbacks=[EarlyStopping(\n # monitor='val_loss', \n # patience=3, mode='min', \n # restore_best_weights=True\n # )\n # ]\n )\n \n # plot train and validation loss\n\n pyplot.plot(history.history['loss'])\n pyplot.plot(history.history['val_loss'])\n pyplot.title('model train vs validation loss')\n pyplot.ylabel('loss')\n pyplot.xlabel('epoch')\n pyplot.legend(['train', 'validation'], loc='upper right')\n pyplot.show()\n\n # save indexes for later use in prediction\n\n save_model_and_indexs(model, idx, modelname)\n\n return modelname\n\ndef create_indexs(j_file, max_len_sentences, max_len_words):\n '''\n Receives a dataset produced by load data, and the maximum\n length in a sentence.\n Creates a set of words seen in the data and a set of BIO\n tags. Enumerates those sets, assigning a unique integer to each\n element. Returns these mappings in a single dictionary, with an\n additional entry for the given max length value.\n Other embeddings explored: lowercased words, non-lowercased words, \n PoS tags and char embedding. 
This last one has a shape, for each \n sentence in the set, of \n\n Parameters:\n -----------\n j_file: pathlib.Path\n Path to .json file containing train data in dict form \n\n max_len_sentences: int\n Maximum sentence length\n \n max_len_words: int\n Maximum word length\n\n\n Returns:\n --------\n dict\n '''\n # load data as an ordered dict to get always the same ordering\n\n # with j_file.open('r') as f:\n # my_dict = json.load(f, object_pairs_hook=OrderedDict)\n with j_file.open('r') as f:\n my_dict = json.load(f)\n\n sentences = [[w[0] for w in s] for s in my_dict.values()]\n\n pos_list = list( set( [ pos[1] for s in sentences for pos in pos_tag(s) ] ) )\n w_list = list(set([w[0] for v in my_dict.values() for w in v]))\n chars_list = list(set([c for w in w_list for c in w ]))\n case_list = list(set([what_capital(w) for w in w_list]))\n type_list = list(set([classify_token(w) for w in w_list]))\n t_list = list(set([w[3] for v in my_dict.values() for w in v ])) \n\n pos_d = {j: i + 2 for i,j in enumerate(pos_list)}\n pos_d['<PAD>'] = 0\n pos_d['<UNK>'] = 1\n\n words_d = {j: i + 2 for i,j in enumerate(w_list)}\n words_d['<PAD>'] = 0\n words_d['<UNK>'] = 1\n\n case_d = {j: i + 1 for i,j in enumerate(case_list)}# unknown is already considered\n case_d['<PAD>'] = 0\n\n type_d = {j : i + 1 for i,j in enumerate(type_list)} # unknown is already considered\n type_d['<PAD>'] = 0\n\n chars_d = {j : i + 1 for i,j in enumerate(chars_list)}\n chars_d['<PAD>'] = 0\n chars_d['<UNK>'] = 1\n\n tags_d = {j: i + 1 for i,j in enumerate(t_list)}\n tags_d['<PAD>'] = 0\n\n return {'chars' : chars_d,\n 'words' : words_d,\n 'case' : case_d,\n 'pos' : pos_d,\n 'type' : type_d,\n 'tags' : tags_d, \n 'max_len_sentences' : max_len_sentences, \n 'max_len_words' : max_len_words}\n\n\ndef build_network(idx, config):\n '''\n Builds the nn. 
Receives the index dictionary with the encondings \n of words and tags , and the maximum length of sentences\n\n Parameters:\n -----------\n idx: dict\n\n config: NetConfig instance\n Contains configuration of the neural network\n \n Returns:\n -------- \n model: neural network\n '''\n\n # sizes\n\n n_pos = len(idx['pos']) # UNK & PAD considered\n n_case = len(idx['case']) # PAD considered\n n_type = len(idx['type']) # PAD considered\n n_chars = len(idx['chars']) # UNK & PAD considered\n n_words = len(idx['words']) # UNK & PAD considered\n n_tags = len(idx['tags']) # PAD considered\n max_len_sentences = idx['max_len_sentences'] \n max_len_words = idx['max_len_words'] \n\n # ************************************************\n\n # architectural parameters\n\n pre_trained = config.pre_trained\n w_embedding = config.w_embedding\n c_embedding = config.c_embedding\n lstm_char_units = config.lstm_char_units\n lstm_main_units = config.lstm_main_units\n dense_units = config.dense_units\n return_sequences = config.return_sequences\n mask_zero = config.mask_zero\n activation = config.activation\n\n #training parameters\n\n dropout = config.dropout\n rcrr_dropout = config.rcrr_dropout\n optimizer = config.optimizer\n loss = config.loss\n metrics = config.metrics\n\n #********************************************************\n\n # create network layers\n\n # type embedding\n #---------------#\n\n type_inp = Input(shape=(max_len_sentences,))\n type_emb = Embedding(\n input_dim=n_type,\n output_dim=w_embedding,\n input_length=max_len_sentences,\n mask_zero=mask_zero)(type_inp)\n\n # pos embedding\n #--------------#\n\n pos_inp = Input(shape=(max_len_sentences,))\n pos_emb = Embedding(\n input_dim=n_pos,\n output_dim=w_embedding,\n input_length=max_len_sentences,\n mask_zero=mask_zero)(pos_inp)\n\n # capitalization words embedding\n #--------------------------# \n\n case_inp = Input(shape=(max_len_sentences,))\n case_emb = Embedding(\n input_dim=n_case,\n output_dim=w_embedding,\n input_length=max_len_sentences,\n mask_zero=mask_zero)(case_inp)\n\n # word embedding\n # --------------#\n\n word_inp = Input(shape=(max_len_sentences,))\n \n if pre_trained: \n\n # word embedding option (1): load pre-trained embeddings \n # and create the customized weights matrix according to our dataset\n\n word_emb = Embedding(\n input_dim=n_words, \n output_dim=w_embedding,\n weights=[embedding_matrix(idx, n_words, w_embedding)], \n trainable=False)(word_inp)\n\n else:\n\n # word embedding option (2): random embedding\n\n word_emb = Embedding(\n input_dim=n_words, \n output_dim=w_embedding,\n input_length=max_len_sentences, \n mask_zero=mask_zero)(word_inp) \n\n\n #char embedding + char biLSTM\n #----------------------------\n\n char_inp = Input(shape=(max_len_sentences, max_len_words)) \n\n char_emb = TimeDistributed(\n Embedding(\n input_dim=n_chars,\n output_dim=c_embedding,\n input_length=max_len_words,\n mask_zero=mask_zero)\n )(char_inp) \n\n char_biLSTM = TimeDistributed(\n Bidirectional(LSTM(\n units=lstm_char_units, \n return_sequences=False,\n recurrent_dropout=rcrr_dropout, \n dropout=dropout))\n )(char_emb) \n \n # main LSTM\n #---------#\n\n model = concatenate([\n word_emb, \n char_biLSTM,\n case_emb,\n # pos_emb, \n # type_emb\n ]\n )\n\n\n # model = Dropout(dropout)(model)\n\n model = Bidirectional(LSTM(units=lstm_main_units, return_sequences=return_sequences,\n recurrent_dropout=rcrr_dropout, dropout=dropout))(model)\n\n model = TimeDistributed(Dense(units=dense_units, activation=activation))(model) \n\n 
model = TimeDistributed(Dense(units=n_tags, activation=activation))(model) \n \n # model = Dropout(dropout)(model)\n\n # CRF layer\n #----------\n\n crf = CRF(n_tags)\n\n out = crf(model) \n \n # create and compile model\n\n model = Model([\n word_inp, \n char_inp,\n case_inp,\n # pos_inp, \n # type_inp, \n \n ], out)\n\n \n if str.lower(optimizer) == 'nadam': \n optimizer = Nadam()\n\n model.compile(optimizer=optimizer, loss=crf_loss, metrics=[crf_accuracy])\n\n\n return model\n\ndef dump_summary(model, modelname):\n '''\n Receives a model and its name. Calls keras' model.summary() and \n dumps it into a txt file.\n\n Parameters:\n -----------\n model: neural network\n\n modelname: str\n\n Returns:\n --------\n None\n '''\n \n with open('./models/%s_summary.txt' % modelname,'w') as mf:\n \n # Pass the file handle in as a lambda function to make it callable \n\n model.summary(print_fn=lambda s: mf.write(s + '\\n'))\n\ndef encode_tags(j_file, idx):\n '''\n Receives a dataset produced by load data, and the index\n dictionary produced by create indexs. Returns the dataset \n as a list of sentences. Each sentence is a list of integers,\n corresponding to the code of the BIO tag for each word. \n If the sentence is shorter than max len it is padded with the\n code for <PAD>.\n\n Parameters:\n -----------\n j_file: pathlib.Path\n path to .json file containing the train data\n \n idx: dict\n Dictionary with the indexes returned by create_indexs\n\n Returns:\n --------\n np.array: list of encoded sentences\n '''\n\n # load data as an ordered dict to get always same ordering\n\n with j_file.open('r') as f:\n data = json.load(f, object_pairs_hook=OrderedDict)\n\n # convert the tags to integers \n Y = [\n [\n idx['tags'][e[3]] for e in entities\n ] for entities in data.values()\n ]\n\n # if the sentence is shorter than max_len, we pad it \n\n Y = pad_sequences(maxlen=idx['max_len_sentences'], sequences=Y, \n padding='post', value=idx['tags']['<PAD>'])\n\n # one-hot encoding\n \n return np.array([to_categorical(i, num_classes=len(idx['tags'])) for i in Y])\n\n\ndef save_model_and_indexs(model, idx, filename):\n '''\n Receives a trained model, an index dictionary, and a string.\n Stores the model in a file named filename.nn, and the\n indexs in a file named filename.idx\n\n Parameters:\n -----------\n model: Trained neural network\n\n idx: dict\n\n filename: str\n\n Returns:\n --------\n None\n '''\n\n path_to_model = Path('./models/%s.nn' % filename)\n path_to_indxs = Path('./models/%s.idx'% filename)\n\n # save model\n model.save(path_to_model.__str__())\n\n # save indexes\n with path_to_indxs.open('w') as f:\n json.dump(idx,f)\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MatheusZickuhr/python-neat
[ "ce80175dcbfcf7a159499cb90d904605cd4c78a3" ]
[ "python_ne/core/neural_network/neural_network.py" ]
[ "import numpy as np\n\nimport python_ne.core.neural_network.saving as saving\nfrom python_ne.core.neural_network import activations\nfrom python_ne.core.neural_network.dense_layer import DenseLayer\n\n\nclass NeuralNetwork:\n\n def __init__(self):\n self.layers = []\n\n def initialize(self):\n \"\"\"\n Initialize all nn layers.\n\n This method is not required to be called. Layer initialization will be performed when predict is callled.\n Can be used to initialize the layers without calling predict.\n \"\"\"\n\n input_shape = None\n for index, layer in enumerate(self.layers):\n if index == 0:\n # dont need to set input shape here, first layer will always have an input shape\n layer.initialize()\n else:\n # current layer input shape = prev layer neuron count\n layer.input_shape = input_shape\n layer.initialize()\n\n # next layer input shape\n input_shape = (layer.units,)\n\n def predict(self, xs):\n input_shape = None\n output = None\n for index, layer in enumerate(self.layers):\n layer.input_shape = layer.input_shape if index == 0 else input_shape\n layer.initialize()\n input_shape = (layer.units,)\n output = layer.feedforward(xs if index == 0 else output)\n\n return output\n\n def add(self, layer):\n self.layers.append(layer)\n\n def save(self, file_path):\n saving.save_as_json(self, file_path)\n\n @staticmethod\n def load(file_path):\n layers = saving.load_from_json(file_path)\n\n nn = NeuralNetwork()\n\n for layer in layers:\n nn.add(\n DenseLayer(\n units=layer['units'],\n input_shape=tuple(layer['input_shape']),\n activation=layer['activation'],\n weights=(np.array(layer['weights']), np.array(layer['bias']))\n )\n )\n\n return nn\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ArlindKadra/Auto-PyTorch
[ "6e72d5ba088981b89371f29773d243a211a4d068" ]
[ "baselines/refit_experiment.py" ]
[ "import argparse\nimport json\nimport logging\nlogging.basicConfig(level=logging.DEBUG)\nimport os\nimport random\n\nimport hpbandster.core.result as hpres\nimport numpy as np\nimport openml\n\nfrom data.loader import Loader\nfrom worker import XGBoostWorker, TabNetWorker\n\n\nparser = argparse.ArgumentParser(\n description='Baseline refit experiment.'\n)\nparser.add_argument(\n '--run_id',\n type=str,\n help='The run id of the optimization run.',\n default='Baseline',\n)\nparser.add_argument(\n '--working_directory',\n type=str,\n help='The working directory where results will be stored.',\n default='.',\n)\nparser.add_argument(\n '--model',\n type=str,\n help='Which model to use for the experiment.',\n default='tabnet',\n)\nparser.add_argument(\n '--task_id',\n type=int,\n help='Minimum budget used during the optimization.',\n default=233109,\n)\nparser.add_argument(\n '--seed',\n type=int,\n help='Seed used for the experiment.',\n default=11,\n)\nparser.add_argument(\n '--nr_threads',\n type=int,\n help='Number of threads for one worker.',\n default=2,\n)\nargs = parser.parse_args()\n\nnp.random.seed(args.seed)\nrandom.seed(args.seed)\n\nloader = Loader(task_id=args.task_id, val_fraction=0)\nnr_classes = int(openml.datasets.get_dataset(loader.get_dataset_id()).qualities['NumberOfClasses'])\n\nworker_choices = {\n 'tabnet': TabNetWorker,\n 'xgboost': XGBoostWorker,\n}\n\nmodel_worker = worker_choices[args.model]\n\nif args.model == 'tabnet':\n param = model_worker.get_parameters(\n seed=args.seed,\n )\nelse:\n param = model_worker.get_parameters(\n nr_classes=nr_classes,\n seed=args.seed,\n nr_threads=args.nr_threads,\n )\n\nprint(f'Refit experiment started with task id: {args.task_id}')\nrun_directory = os.path.join(\n args.working_directory,\n f'{args.task_id}',\n f'{args.seed}',\n)\nos.makedirs(run_directory, exist_ok=True)\n\nworker = model_worker(\n args.run_id,\n param=param,\n splits=loader.get_splits(),\n categorical_information=loader.categorical_information,\n nameserver='127.0.0.1',\n)\n\nresult = hpres.logged_results_to_HBS_result(run_directory)\nall_runs = result.get_all_runs()\nid2conf = result.get_id2config_mapping()\n\ninc_id = result.get_incumbent_id()\ninc_runs = result.get_runs_by_id(inc_id)\ninc_config = id2conf[inc_id]['config']\nprint(f\"Best Configuration So far {inc_config}\")\nrefit_result = worker.refit(inc_config)\nwith open(os.path.join(run_directory, 'refit_result.json'), 'w') as file:\n json.dump(refit_result, file)\n" ]
[ [ "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ningyuwhut/dqn-tensorflow-annotated
[ "3051bf418a06ba659170b738c03651383699c80b" ]
[ "main.py" ]
[ "from __future__ import print_function\nimport random\nimport tensorflow as tf\n\nfrom dqn.agent import Agent\nfrom dqn.environment import GymEnvironment, SimpleGymEnvironment\nfrom config import get_config\n\nflags = tf.app.flags\n\n# Model\nflags.DEFINE_string('model', 'm1', 'Type of model')\nflags.DEFINE_boolean('dueling', False, 'Whether to use dueling deep q-network')\nflags.DEFINE_boolean('double_q', False, 'Whether to use double q-learning')\n\n# Environment\nflags.DEFINE_string('env_name', 'Breakout-v0', 'The name of gym environment to use')\nflags.DEFINE_integer('action_repeat', 4, 'The number of action to be repeated')\n\n# Etc\nflags.DEFINE_boolean('use_gpu', False, 'Whether to use gpu or not')\nflags.DEFINE_string('gpu_fraction', '1/1', 'idx / # of gpu fraction e.g. 1/3, 2/3, 3/3')\nflags.DEFINE_boolean('display', False, 'Whether to do display the game screen or not')\nflags.DEFINE_boolean('is_train', True, 'Whether to do training or testing')\nflags.DEFINE_integer('random_seed', 123, 'Value of random seed')\n\nFLAGS = flags.FLAGS\n\n# Set random seed\ntf.set_random_seed(FLAGS.random_seed)\nrandom.seed(FLAGS.random_seed)\n\nif FLAGS.gpu_fraction == '':\n raise ValueError(\"--gpu_fraction should be defined\")\n\ndef calc_gpu_fraction(fraction_string):\n idx, num = fraction_string.split('/')\n idx, num = float(idx), float(num)\n\n fraction = 1 / (num - idx + 1)\n print(\" [*] GPU : %.4f\" % fraction)\n return fraction\n\ndef main(_):\n gpu_options = tf.GPUOptions(\n per_process_gpu_memory_fraction=calc_gpu_fraction(FLAGS.gpu_fraction))\n\n with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:\n config = get_config(FLAGS) or FLAGS\n\n if config.env_type == 'simple':\n env = SimpleGymEnvironment(config)\n else:\n env = GymEnvironment(config)\n\n if not tf.test.is_gpu_available() and FLAGS.use_gpu:\n raise Exception(\"use_gpu flag is true when no GPUs are available\")\n\n if not FLAGS.use_gpu:\n config.cnn_format = 'NHWC'\n\n agent = Agent(config, env, sess)\n\n if FLAGS.is_train:\n agent.train()\n else:\n agent.play()\n\nif __name__ == '__main__':\n tf.app.run()\n" ]
[ [ "tensorflow.set_random_seed", "tensorflow.test.is_gpu_available", "tensorflow.ConfigProto", "tensorflow.app.run" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
Miguel-Antonm/deploy_DL_space_weather_forecast
[ "d94ec77ebe1b2d6c952c2a9f5515d872a62b73e2" ]
[ "swfd/load_model.py" ]
[ "# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/02_load_model.ipynb (unless otherwise specified).\n\n__all__ = ['NORMALIZE', 'HORIZONS', 'COLUMNS', 'loadModel', 'formatDate', 'getCsvData', 'getNameRunFolders',\n 'singlePrediction', 'ensemblePrediction', 'ensembleMeanVar', 'modelPrediction', 'getAllHorizonPrediction']\n\n# Cell\nimport torch\nimport yaml\nfrom fastcore.script import *\nfrom .resources import *\nimport os\nimport csv\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport datetime\nimport pandas as pd\n\nNORMALIZE=707.6\nHORIZONS=[3,5,7,10,14,21,27]\nCOLUMNS=['Date','H3','H5','H7','H10','H14','H21','H27']\n\n# Cell\ndef loadModel(pathrun):\n pathyaml=str(pathrun)+\"config.yaml\"\n stacktype=[]\n pathpth=str(pathrun)+\"models/best.pth\"\n config=yaml.safe_load(open(str(pathyaml)))\n for i in range(0,config[\"nb_stacks\"][\"value\"]):\n stacktype.append(NBeatsNet.GENERIC_BLOCK)\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n model = NBeatsNet(\n device=device,\n stack_types=stacktype,\n nb_blocks_per_stack=config[\"nb_blocks_per_stack\"][\"value\"],\n forecast_length=config[\"horizon\"][\"value\"],\n backcast_length=config[\"lookback\"][\"value\"],\n thetas_dim=(7, 8, 7, 8, 7, 8, 7, 8, 7, 8, 7, 8, 7, 8, 7, 8, 7, 8, 7, 8, 7, 8, 7, 8, 7, 8, 7, 8, 7, 8),\n share_weights_in_stack=False,\n hidden_layer_units=64\n #,nb_harmonics=None\n )\n checkpoint = torch.load(pathpth,map_location=device)\n model.load_state_dict(checkpoint['model_state_dict'])\n model.eval()\n return model\n\n\n# Cell\ndef formatDate(date):\n return date.strftime(\"%Y-%m-%d\")\n\n# Cell\n#Default startdate = yesterday\ndef getCsvData(lookback,startdate=(datetime.datetime.now()-datetime.timedelta(1)) ):\n pathfoldercsv=getInfo(\"csvdirectory\")\n csvname=\"sfuData.csv\"\n datalist=[]\n date=formatDate(startdate)\n start=False\n try:\n with open(str(pathfoldercsv)+str(csvname)) as csv_file:\n try:\n forecastcount=0\n for row in list(csv.reader(csv_file, delimiter=',')):\n if(date==row[0]):\n start=True\n if(forecastcount<lookback and start):\n datalist=np.append(datalist,float(row[1]))\n forecastcount=forecastcount+1\n finally:\n csv_file.close()\n except (IOError, ValueError, EOFError) as e:\n print(e)\n return datalist\n\n\n# Cell\ndef getNameRunFolders(horizon,runshorizonfolder):\n runshorizon_directories=runshorizonfolder+\"ensembleH\"+str(horizon)\n runs_directories=[] #runshorizon_directories\n directories = os.listdir(str(runshorizon_directories))\n for i in directories:\n pathrun=runshorizon_directories+\"/\"+str(i)+\"/\"\n runs_directories.append(pathrun)\n\n return runs_directories\n\n\n# Cell\n# Startdate!=gendate stardate=daybefore(gendate)\ndef singlePrediction(filepath,startdate):\n net=loadModel(str(filepath))\n lookback=net.backcast_length\n data=getCsvData(lookback,startdate)\n row = torch.Tensor([data[::-1]/NORMALIZE])\n backcast,forecast = net(row)\n preds_tensor=(forecast.detach().numpy())*NORMALIZE\n return preds_tensor[0].reshape(1,net.forecast_length)\n\n\n# Cell\ndef ensemblePrediction(horizon,startdate):\n prediction=np.empty((0,horizon))\n folderpath=getInfo(\"pthdirectory\")\n runspath=getNameRunFolders(horizon,folderpath)\n for run in runspath:\n prediction=np.append(prediction,singlePrediction(run,startdate),axis=0)\n return prediction\n\n# Cell\ndef ensembleMeanVar(prediction):\n meanlist=[]\n stdlist=[]\n for row in prediction.T:\n meanlist.append(row.mean())\n stdlist.append(row.std())\n return np.array(meanlist),np.array(stdlist)\n\n# Cell\ndef 
modelPrediction(horizon,startdate):\n    prediction=ensemblePrediction(horizon,startdate)\n    mean,std=ensembleMeanVar(prediction)\n    return prediction,mean,std\n\n\n# Cell\ndef getAllHorizonPrediction(date):\n    allpredicts=[]\n    allpredicts.append(date)\n    for horizon in HORIZONS:\n        try:\n            predict=ensemblePrediction(horizon,date)\n            mean,std=ensembleMeanVar(predict)\n            allpredicts.append([mean,std])\n        except TypeError: # Exception raised when the date is invalid\n            print(\"error at horizon\",horizon, \" on day\",date)\n            allpredicts.append([])\n    return np.matrix(allpredicts,dtype=\"object\")\n" ]
[ [ "numpy.matrix", "torch.Tensor", "torch.load", "torch.cuda.is_available", "numpy.array", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
smartswdeveloper/pandas
[ "a9be7153fff15a9e28793dd54327b5342c34be51" ]
[ "pandas/tests/reshape/test_concat.py" ]
[ "from collections import OrderedDict, abc, deque\nimport datetime as dt\nfrom datetime import datetime\nfrom decimal import Decimal\nfrom io import StringIO\nfrom itertools import combinations\nfrom warnings import catch_warnings\n\nimport dateutil\nimport numpy as np\nfrom numpy.random import randn\nimport pytest\n\nfrom pandas.core.dtypes.dtypes import CategoricalDtype\n\nimport pandas as pd\nfrom pandas import (\n Categorical,\n DataFrame,\n DatetimeIndex,\n Index,\n MultiIndex,\n Series,\n Timestamp,\n concat,\n date_range,\n isna,\n read_csv,\n)\nimport pandas._testing as tm\nfrom pandas.core.arrays import SparseArray\nfrom pandas.core.construction import create_series_with_explicit_dtype\nfrom pandas.tests.extension.decimal import to_decimal\n\n\[email protected](params=[True, False])\ndef sort(request):\n \"\"\"Boolean sort keyword for concat and DataFrame.append.\"\"\"\n return request.param\n\n\nclass TestConcatAppendCommon:\n \"\"\"\n Test common dtype coercion rules between concat and append.\n \"\"\"\n\n def setup_method(self, method):\n\n dt_data = [\n pd.Timestamp(\"2011-01-01\"),\n pd.Timestamp(\"2011-01-02\"),\n pd.Timestamp(\"2011-01-03\"),\n ]\n tz_data = [\n pd.Timestamp(\"2011-01-01\", tz=\"US/Eastern\"),\n pd.Timestamp(\"2011-01-02\", tz=\"US/Eastern\"),\n pd.Timestamp(\"2011-01-03\", tz=\"US/Eastern\"),\n ]\n\n td_data = [\n pd.Timedelta(\"1 days\"),\n pd.Timedelta(\"2 days\"),\n pd.Timedelta(\"3 days\"),\n ]\n\n period_data = [\n pd.Period(\"2011-01\", freq=\"M\"),\n pd.Period(\"2011-02\", freq=\"M\"),\n pd.Period(\"2011-03\", freq=\"M\"),\n ]\n\n self.data = {\n \"bool\": [True, False, True],\n \"int64\": [1, 2, 3],\n \"float64\": [1.1, np.nan, 3.3],\n \"category\": pd.Categorical([\"X\", \"Y\", \"Z\"]),\n \"object\": [\"a\", \"b\", \"c\"],\n \"datetime64[ns]\": dt_data,\n \"datetime64[ns, US/Eastern]\": tz_data,\n \"timedelta64[ns]\": td_data,\n \"period[M]\": period_data,\n }\n\n def _check_expected_dtype(self, obj, label):\n \"\"\"\n Check whether obj has expected dtype depending on label\n considering not-supported dtypes\n \"\"\"\n if isinstance(obj, pd.Index):\n if label == \"bool\":\n assert obj.dtype == \"object\"\n else:\n assert obj.dtype == label\n elif isinstance(obj, pd.Series):\n if label.startswith(\"period\"):\n assert obj.dtype == \"Period[M]\"\n else:\n assert obj.dtype == label\n else:\n raise ValueError\n\n def test_dtypes(self):\n # to confirm test case covers intended dtypes\n for typ, vals in self.data.items():\n self._check_expected_dtype(pd.Index(vals), typ)\n self._check_expected_dtype(pd.Series(vals), typ)\n\n def test_concatlike_same_dtypes(self):\n # GH 13660\n for typ1, vals1 in self.data.items():\n\n vals2 = vals1\n vals3 = vals1\n\n if typ1 == \"category\":\n exp_data = pd.Categorical(list(vals1) + list(vals2))\n exp_data3 = pd.Categorical(list(vals1) + list(vals2) + list(vals3))\n else:\n exp_data = vals1 + vals2\n exp_data3 = vals1 + vals2 + vals3\n\n # ----- Index ----- #\n\n # index.append\n res = pd.Index(vals1).append(pd.Index(vals2))\n exp = pd.Index(exp_data)\n tm.assert_index_equal(res, exp)\n\n # 3 elements\n res = pd.Index(vals1).append([pd.Index(vals2), pd.Index(vals3)])\n exp = pd.Index(exp_data3)\n tm.assert_index_equal(res, exp)\n\n # index.append name mismatch\n i1 = pd.Index(vals1, name=\"x\")\n i2 = pd.Index(vals2, name=\"y\")\n res = i1.append(i2)\n exp = pd.Index(exp_data)\n tm.assert_index_equal(res, exp)\n\n # index.append name match\n i1 = pd.Index(vals1, name=\"x\")\n i2 = pd.Index(vals2, name=\"x\")\n res = 
i1.append(i2)\n exp = pd.Index(exp_data, name=\"x\")\n tm.assert_index_equal(res, exp)\n\n # cannot append non-index\n with pytest.raises(TypeError, match=\"all inputs must be Index\"):\n pd.Index(vals1).append(vals2)\n\n with pytest.raises(TypeError, match=\"all inputs must be Index\"):\n pd.Index(vals1).append([pd.Index(vals2), vals3])\n\n # ----- Series ----- #\n\n # series.append\n res = pd.Series(vals1).append(pd.Series(vals2), ignore_index=True)\n exp = pd.Series(exp_data)\n tm.assert_series_equal(res, exp, check_index_type=True)\n\n # concat\n res = pd.concat([pd.Series(vals1), pd.Series(vals2)], ignore_index=True)\n tm.assert_series_equal(res, exp, check_index_type=True)\n\n # 3 elements\n res = pd.Series(vals1).append(\n [pd.Series(vals2), pd.Series(vals3)], ignore_index=True\n )\n exp = pd.Series(exp_data3)\n tm.assert_series_equal(res, exp)\n\n res = pd.concat(\n [pd.Series(vals1), pd.Series(vals2), pd.Series(vals3)],\n ignore_index=True,\n )\n tm.assert_series_equal(res, exp)\n\n # name mismatch\n s1 = pd.Series(vals1, name=\"x\")\n s2 = pd.Series(vals2, name=\"y\")\n res = s1.append(s2, ignore_index=True)\n exp = pd.Series(exp_data)\n tm.assert_series_equal(res, exp, check_index_type=True)\n\n res = pd.concat([s1, s2], ignore_index=True)\n tm.assert_series_equal(res, exp, check_index_type=True)\n\n # name match\n s1 = pd.Series(vals1, name=\"x\")\n s2 = pd.Series(vals2, name=\"x\")\n res = s1.append(s2, ignore_index=True)\n exp = pd.Series(exp_data, name=\"x\")\n tm.assert_series_equal(res, exp, check_index_type=True)\n\n res = pd.concat([s1, s2], ignore_index=True)\n tm.assert_series_equal(res, exp, check_index_type=True)\n\n # cannot append non-index\n msg = (\n r\"cannot concatenate object of type '.+'; \"\n \"only Series and DataFrame objs are valid\"\n )\n with pytest.raises(TypeError, match=msg):\n pd.Series(vals1).append(vals2)\n\n with pytest.raises(TypeError, match=msg):\n pd.Series(vals1).append([pd.Series(vals2), vals3])\n\n with pytest.raises(TypeError, match=msg):\n pd.concat([pd.Series(vals1), vals2])\n\n with pytest.raises(TypeError, match=msg):\n pd.concat([pd.Series(vals1), pd.Series(vals2), vals3])\n\n def test_concatlike_dtypes_coercion(self):\n # GH 13660\n for typ1, vals1 in self.data.items():\n for typ2, vals2 in self.data.items():\n\n vals3 = vals2\n\n # basically infer\n exp_index_dtype = None\n exp_series_dtype = None\n\n if typ1 == typ2:\n # same dtype is tested in test_concatlike_same_dtypes\n continue\n elif typ1 == \"category\" or typ2 == \"category\":\n # TODO: suspicious\n continue\n\n # specify expected dtype\n if typ1 == \"bool\" and typ2 in (\"int64\", \"float64\"):\n # series coerces to numeric based on numpy rule\n # index doesn't because bool is object dtype\n exp_series_dtype = typ2\n elif typ2 == \"bool\" and typ1 in (\"int64\", \"float64\"):\n exp_series_dtype = typ1\n elif (\n typ1 == \"datetime64[ns, US/Eastern]\"\n or typ2 == \"datetime64[ns, US/Eastern]\"\n or typ1 == \"timedelta64[ns]\"\n or typ2 == \"timedelta64[ns]\"\n ):\n exp_index_dtype = object\n exp_series_dtype = object\n\n exp_data = vals1 + vals2\n exp_data3 = vals1 + vals2 + vals3\n\n # ----- Index ----- #\n\n # index.append\n res = pd.Index(vals1).append(pd.Index(vals2))\n exp = pd.Index(exp_data, dtype=exp_index_dtype)\n tm.assert_index_equal(res, exp)\n\n # 3 elements\n res = pd.Index(vals1).append([pd.Index(vals2), pd.Index(vals3)])\n exp = pd.Index(exp_data3, dtype=exp_index_dtype)\n tm.assert_index_equal(res, exp)\n\n # ----- Series ----- #\n\n # series.append\n res 
= pd.Series(vals1).append(pd.Series(vals2), ignore_index=True)\n exp = pd.Series(exp_data, dtype=exp_series_dtype)\n tm.assert_series_equal(res, exp, check_index_type=True)\n\n # concat\n res = pd.concat([pd.Series(vals1), pd.Series(vals2)], ignore_index=True)\n tm.assert_series_equal(res, exp, check_index_type=True)\n\n # 3 elements\n res = pd.Series(vals1).append(\n [pd.Series(vals2), pd.Series(vals3)], ignore_index=True\n )\n exp = pd.Series(exp_data3, dtype=exp_series_dtype)\n tm.assert_series_equal(res, exp)\n\n res = pd.concat(\n [pd.Series(vals1), pd.Series(vals2), pd.Series(vals3)],\n ignore_index=True,\n )\n tm.assert_series_equal(res, exp)\n\n def test_concatlike_common_coerce_to_pandas_object(self):\n # GH 13626\n # result must be Timestamp/Timedelta, not datetime.datetime/timedelta\n dti = pd.DatetimeIndex([\"2011-01-01\", \"2011-01-02\"])\n tdi = pd.TimedeltaIndex([\"1 days\", \"2 days\"])\n\n exp = pd.Index(\n [\n pd.Timestamp(\"2011-01-01\"),\n pd.Timestamp(\"2011-01-02\"),\n pd.Timedelta(\"1 days\"),\n pd.Timedelta(\"2 days\"),\n ]\n )\n\n res = dti.append(tdi)\n tm.assert_index_equal(res, exp)\n assert isinstance(res[0], pd.Timestamp)\n assert isinstance(res[-1], pd.Timedelta)\n\n dts = pd.Series(dti)\n tds = pd.Series(tdi)\n res = dts.append(tds)\n tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))\n assert isinstance(res.iloc[0], pd.Timestamp)\n assert isinstance(res.iloc[-1], pd.Timedelta)\n\n res = pd.concat([dts, tds])\n tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))\n assert isinstance(res.iloc[0], pd.Timestamp)\n assert isinstance(res.iloc[-1], pd.Timedelta)\n\n def test_concatlike_datetimetz(self, tz_aware_fixture):\n tz = tz_aware_fixture\n # GH 7795\n dti1 = pd.DatetimeIndex([\"2011-01-01\", \"2011-01-02\"], tz=tz)\n dti2 = pd.DatetimeIndex([\"2012-01-01\", \"2012-01-02\"], tz=tz)\n\n exp = pd.DatetimeIndex(\n [\"2011-01-01\", \"2011-01-02\", \"2012-01-01\", \"2012-01-02\"], tz=tz\n )\n\n res = dti1.append(dti2)\n tm.assert_index_equal(res, exp)\n\n dts1 = pd.Series(dti1)\n dts2 = pd.Series(dti2)\n res = dts1.append(dts2)\n tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))\n\n res = pd.concat([dts1, dts2])\n tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))\n\n @pytest.mark.parametrize(\"tz\", [\"UTC\", \"US/Eastern\", \"Asia/Tokyo\", \"EST5EDT\"])\n def test_concatlike_datetimetz_short(self, tz):\n # GH#7795\n ix1 = pd.date_range(start=\"2014-07-15\", end=\"2014-07-17\", freq=\"D\", tz=tz)\n ix2 = pd.DatetimeIndex([\"2014-07-11\", \"2014-07-21\"], tz=tz)\n df1 = pd.DataFrame(0, index=ix1, columns=[\"A\", \"B\"])\n df2 = pd.DataFrame(0, index=ix2, columns=[\"A\", \"B\"])\n\n exp_idx = pd.DatetimeIndex(\n [\"2014-07-15\", \"2014-07-16\", \"2014-07-17\", \"2014-07-11\", \"2014-07-21\"],\n tz=tz,\n )\n exp = pd.DataFrame(0, index=exp_idx, columns=[\"A\", \"B\"])\n\n tm.assert_frame_equal(df1.append(df2), exp)\n tm.assert_frame_equal(pd.concat([df1, df2]), exp)\n\n def test_concatlike_datetimetz_to_object(self, tz_aware_fixture):\n tz = tz_aware_fixture\n # GH 13660\n\n # different tz coerces to object\n dti1 = pd.DatetimeIndex([\"2011-01-01\", \"2011-01-02\"], tz=tz)\n dti2 = pd.DatetimeIndex([\"2012-01-01\", \"2012-01-02\"])\n\n exp = pd.Index(\n [\n pd.Timestamp(\"2011-01-01\", tz=tz),\n pd.Timestamp(\"2011-01-02\", tz=tz),\n pd.Timestamp(\"2012-01-01\"),\n pd.Timestamp(\"2012-01-02\"),\n ],\n dtype=object,\n )\n\n res = dti1.append(dti2)\n tm.assert_index_equal(res, exp)\n\n dts1 = pd.Series(dti1)\n dts2 = 
pd.Series(dti2)\n res = dts1.append(dts2)\n tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))\n\n res = pd.concat([dts1, dts2])\n tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))\n\n # different tz\n dti3 = pd.DatetimeIndex([\"2012-01-01\", \"2012-01-02\"], tz=\"US/Pacific\")\n\n exp = pd.Index(\n [\n pd.Timestamp(\"2011-01-01\", tz=tz),\n pd.Timestamp(\"2011-01-02\", tz=tz),\n pd.Timestamp(\"2012-01-01\", tz=\"US/Pacific\"),\n pd.Timestamp(\"2012-01-02\", tz=\"US/Pacific\"),\n ],\n dtype=object,\n )\n\n res = dti1.append(dti3)\n # tm.assert_index_equal(res, exp)\n\n dts1 = pd.Series(dti1)\n dts3 = pd.Series(dti3)\n res = dts1.append(dts3)\n tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))\n\n res = pd.concat([dts1, dts3])\n tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))\n\n def test_concatlike_common_period(self):\n # GH 13660\n pi1 = pd.PeriodIndex([\"2011-01\", \"2011-02\"], freq=\"M\")\n pi2 = pd.PeriodIndex([\"2012-01\", \"2012-02\"], freq=\"M\")\n\n exp = pd.PeriodIndex([\"2011-01\", \"2011-02\", \"2012-01\", \"2012-02\"], freq=\"M\")\n\n res = pi1.append(pi2)\n tm.assert_index_equal(res, exp)\n\n ps1 = pd.Series(pi1)\n ps2 = pd.Series(pi2)\n res = ps1.append(ps2)\n tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))\n\n res = pd.concat([ps1, ps2])\n tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))\n\n def test_concatlike_common_period_diff_freq_to_object(self):\n # GH 13221\n pi1 = pd.PeriodIndex([\"2011-01\", \"2011-02\"], freq=\"M\")\n pi2 = pd.PeriodIndex([\"2012-01-01\", \"2012-02-01\"], freq=\"D\")\n\n exp = pd.Index(\n [\n pd.Period(\"2011-01\", freq=\"M\"),\n pd.Period(\"2011-02\", freq=\"M\"),\n pd.Period(\"2012-01-01\", freq=\"D\"),\n pd.Period(\"2012-02-01\", freq=\"D\"),\n ],\n dtype=object,\n )\n\n res = pi1.append(pi2)\n tm.assert_index_equal(res, exp)\n\n ps1 = pd.Series(pi1)\n ps2 = pd.Series(pi2)\n res = ps1.append(ps2)\n tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))\n\n res = pd.concat([ps1, ps2])\n tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))\n\n def test_concatlike_common_period_mixed_dt_to_object(self):\n # GH 13221\n # different datetimelike\n pi1 = pd.PeriodIndex([\"2011-01\", \"2011-02\"], freq=\"M\")\n tdi = pd.TimedeltaIndex([\"1 days\", \"2 days\"])\n exp = pd.Index(\n [\n pd.Period(\"2011-01\", freq=\"M\"),\n pd.Period(\"2011-02\", freq=\"M\"),\n pd.Timedelta(\"1 days\"),\n pd.Timedelta(\"2 days\"),\n ],\n dtype=object,\n )\n\n res = pi1.append(tdi)\n tm.assert_index_equal(res, exp)\n\n ps1 = pd.Series(pi1)\n tds = pd.Series(tdi)\n res = ps1.append(tds)\n tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))\n\n res = pd.concat([ps1, tds])\n tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))\n\n # inverse\n exp = pd.Index(\n [\n pd.Timedelta(\"1 days\"),\n pd.Timedelta(\"2 days\"),\n pd.Period(\"2011-01\", freq=\"M\"),\n pd.Period(\"2011-02\", freq=\"M\"),\n ],\n dtype=object,\n )\n\n res = tdi.append(pi1)\n tm.assert_index_equal(res, exp)\n\n ps1 = pd.Series(pi1)\n tds = pd.Series(tdi)\n res = tds.append(ps1)\n tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))\n\n res = pd.concat([tds, ps1])\n tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))\n\n def test_concat_categorical(self):\n # GH 13524\n\n # same categories -> category\n s1 = pd.Series([1, 2, np.nan], dtype=\"category\")\n s2 = pd.Series([2, 1, 2], dtype=\"category\")\n\n exp = pd.Series([1, 2, np.nan, 2, 1, 2], 
dtype=\"category\")\n tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)\n tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)\n\n # partially different categories => not-category\n s1 = pd.Series([3, 2], dtype=\"category\")\n s2 = pd.Series([2, 1], dtype=\"category\")\n\n exp = pd.Series([3, 2, 2, 1])\n tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)\n tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)\n\n # completely different categories (same dtype) => not-category\n s1 = pd.Series([10, 11, np.nan], dtype=\"category\")\n s2 = pd.Series([np.nan, 1, 3, 2], dtype=\"category\")\n\n exp = pd.Series([10, 11, np.nan, np.nan, 1, 3, 2], dtype=\"object\")\n tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)\n tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)\n\n def test_union_categorical_same_categories_different_order(self):\n # https://github.com/pandas-dev/pandas/issues/19096\n a = pd.Series(Categorical([\"a\", \"b\", \"c\"], categories=[\"a\", \"b\", \"c\"]))\n b = pd.Series(Categorical([\"a\", \"b\", \"c\"], categories=[\"b\", \"a\", \"c\"]))\n result = pd.concat([a, b], ignore_index=True)\n expected = pd.Series(\n Categorical([\"a\", \"b\", \"c\", \"a\", \"b\", \"c\"], categories=[\"a\", \"b\", \"c\"])\n )\n tm.assert_series_equal(result, expected)\n\n def test_concat_categorical_coercion(self):\n # GH 13524\n\n # category + not-category => not-category\n s1 = pd.Series([1, 2, np.nan], dtype=\"category\")\n s2 = pd.Series([2, 1, 2])\n\n exp = pd.Series([1, 2, np.nan, 2, 1, 2], dtype=\"object\")\n tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)\n tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)\n\n # result shouldn't be affected by 1st elem dtype\n exp = pd.Series([2, 1, 2, 1, 2, np.nan], dtype=\"object\")\n tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)\n tm.assert_series_equal(s2.append(s1, ignore_index=True), exp)\n\n # all values are not in category => not-category\n s1 = pd.Series([3, 2], dtype=\"category\")\n s2 = pd.Series([2, 1])\n\n exp = pd.Series([3, 2, 2, 1])\n tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)\n tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)\n\n exp = pd.Series([2, 1, 3, 2])\n tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)\n tm.assert_series_equal(s2.append(s1, ignore_index=True), exp)\n\n # completely different categories => not-category\n s1 = pd.Series([10, 11, np.nan], dtype=\"category\")\n s2 = pd.Series([1, 3, 2])\n\n exp = pd.Series([10, 11, np.nan, 1, 3, 2], dtype=\"object\")\n tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)\n tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)\n\n exp = pd.Series([1, 3, 2, 10, 11, np.nan], dtype=\"object\")\n tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)\n tm.assert_series_equal(s2.append(s1, ignore_index=True), exp)\n\n # different dtype => not-category\n s1 = pd.Series([10, 11, np.nan], dtype=\"category\")\n s2 = pd.Series([\"a\", \"b\", \"c\"])\n\n exp = pd.Series([10, 11, np.nan, \"a\", \"b\", \"c\"])\n tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)\n tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)\n\n exp = pd.Series([\"a\", \"b\", \"c\", 10, 11, np.nan])\n tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)\n tm.assert_series_equal(s2.append(s1, ignore_index=True), exp)\n\n # if normal series only contains NaN-likes => 
not-category\n s1 = pd.Series([10, 11], dtype=\"category\")\n s2 = pd.Series([np.nan, np.nan, np.nan])\n\n exp = pd.Series([10, 11, np.nan, np.nan, np.nan])\n tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)\n tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)\n\n exp = pd.Series([np.nan, np.nan, np.nan, 10, 11])\n tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)\n tm.assert_series_equal(s2.append(s1, ignore_index=True), exp)\n\n def test_concat_categorical_3elem_coercion(self):\n # GH 13524\n\n # mixed dtypes => not-category\n s1 = pd.Series([1, 2, np.nan], dtype=\"category\")\n s2 = pd.Series([2, 1, 2], dtype=\"category\")\n s3 = pd.Series([1, 2, 1, 2, np.nan])\n\n exp = pd.Series([1, 2, np.nan, 2, 1, 2, 1, 2, 1, 2, np.nan], dtype=\"float\")\n tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp)\n tm.assert_series_equal(s1.append([s2, s3], ignore_index=True), exp)\n\n exp = pd.Series([1, 2, 1, 2, np.nan, 1, 2, np.nan, 2, 1, 2], dtype=\"float\")\n tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp)\n tm.assert_series_equal(s3.append([s1, s2], ignore_index=True), exp)\n\n # values are all in either category => not-category\n s1 = pd.Series([4, 5, 6], dtype=\"category\")\n s2 = pd.Series([1, 2, 3], dtype=\"category\")\n s3 = pd.Series([1, 3, 4])\n\n exp = pd.Series([4, 5, 6, 1, 2, 3, 1, 3, 4])\n tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp)\n tm.assert_series_equal(s1.append([s2, s3], ignore_index=True), exp)\n\n exp = pd.Series([1, 3, 4, 4, 5, 6, 1, 2, 3])\n tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp)\n tm.assert_series_equal(s3.append([s1, s2], ignore_index=True), exp)\n\n # values are all in either category => not-category\n s1 = pd.Series([4, 5, 6], dtype=\"category\")\n s2 = pd.Series([1, 2, 3], dtype=\"category\")\n s3 = pd.Series([10, 11, 12])\n\n exp = pd.Series([4, 5, 6, 1, 2, 3, 10, 11, 12])\n tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp)\n tm.assert_series_equal(s1.append([s2, s3], ignore_index=True), exp)\n\n exp = pd.Series([10, 11, 12, 4, 5, 6, 1, 2, 3])\n tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp)\n tm.assert_series_equal(s3.append([s1, s2], ignore_index=True), exp)\n\n def test_concat_categorical_multi_coercion(self):\n # GH 13524\n\n s1 = pd.Series([1, 3], dtype=\"category\")\n s2 = pd.Series([3, 4], dtype=\"category\")\n s3 = pd.Series([2, 3])\n s4 = pd.Series([2, 2], dtype=\"category\")\n s5 = pd.Series([1, np.nan])\n s6 = pd.Series([1, 3, 2], dtype=\"category\")\n\n # mixed dtype, values are all in categories => not-category\n exp = pd.Series([1, 3, 3, 4, 2, 3, 2, 2, 1, np.nan, 1, 3, 2])\n res = pd.concat([s1, s2, s3, s4, s5, s6], ignore_index=True)\n tm.assert_series_equal(res, exp)\n res = s1.append([s2, s3, s4, s5, s6], ignore_index=True)\n tm.assert_series_equal(res, exp)\n\n exp = pd.Series([1, 3, 2, 1, np.nan, 2, 2, 2, 3, 3, 4, 1, 3])\n res = pd.concat([s6, s5, s4, s3, s2, s1], ignore_index=True)\n tm.assert_series_equal(res, exp)\n res = s6.append([s5, s4, s3, s2, s1], ignore_index=True)\n tm.assert_series_equal(res, exp)\n\n def test_concat_categorical_ordered(self):\n # GH 13524\n\n s1 = pd.Series(pd.Categorical([1, 2, np.nan], ordered=True))\n s2 = pd.Series(pd.Categorical([2, 1, 2], ordered=True))\n\n exp = pd.Series(pd.Categorical([1, 2, np.nan, 2, 1, 2], ordered=True))\n tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)\n 
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)\n\n exp = pd.Series(\n pd.Categorical([1, 2, np.nan, 2, 1, 2, 1, 2, np.nan], ordered=True)\n )\n tm.assert_series_equal(pd.concat([s1, s2, s1], ignore_index=True), exp)\n tm.assert_series_equal(s1.append([s2, s1], ignore_index=True), exp)\n\n def test_concat_categorical_coercion_nan(self):\n # GH 13524\n\n # some edge cases\n # category + not-category => not category\n s1 = pd.Series(np.array([np.nan, np.nan], dtype=np.float64), dtype=\"category\")\n s2 = pd.Series([np.nan, 1])\n\n exp = pd.Series([np.nan, np.nan, np.nan, 1])\n tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)\n tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)\n\n s1 = pd.Series([1, np.nan], dtype=\"category\")\n s2 = pd.Series([np.nan, np.nan])\n\n exp = pd.Series([1, np.nan, np.nan, np.nan], dtype=\"float\")\n tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)\n tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)\n\n # mixed dtype, all nan-likes => not-category\n s1 = pd.Series([np.nan, np.nan], dtype=\"category\")\n s2 = pd.Series([np.nan, np.nan])\n\n exp = pd.Series([np.nan, np.nan, np.nan, np.nan])\n tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)\n tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)\n tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)\n tm.assert_series_equal(s2.append(s1, ignore_index=True), exp)\n\n # all category nan-likes => category\n s1 = pd.Series([np.nan, np.nan], dtype=\"category\")\n s2 = pd.Series([np.nan, np.nan], dtype=\"category\")\n\n exp = pd.Series([np.nan, np.nan, np.nan, np.nan], dtype=\"category\")\n\n tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)\n tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)\n\n def test_concat_categorical_empty(self):\n # GH 13524\n\n s1 = pd.Series([], dtype=\"category\")\n s2 = pd.Series([1, 2], dtype=\"category\")\n\n tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), s2)\n tm.assert_series_equal(s1.append(s2, ignore_index=True), s2)\n\n tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), s2)\n tm.assert_series_equal(s2.append(s1, ignore_index=True), s2)\n\n s1 = pd.Series([], dtype=\"category\")\n s2 = pd.Series([], dtype=\"category\")\n\n tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), s2)\n tm.assert_series_equal(s1.append(s2, ignore_index=True), s2)\n\n s1 = pd.Series([], dtype=\"category\")\n s2 = pd.Series([], dtype=\"object\")\n\n # different dtype => not-category\n tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), s2)\n tm.assert_series_equal(s1.append(s2, ignore_index=True), s2)\n tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), s2)\n tm.assert_series_equal(s2.append(s1, ignore_index=True), s2)\n\n s1 = pd.Series([], dtype=\"category\")\n s2 = pd.Series([np.nan, np.nan])\n\n # empty Series is ignored\n exp = pd.Series([np.nan, np.nan])\n tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)\n tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)\n\n tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)\n tm.assert_series_equal(s2.append(s1, ignore_index=True), exp)\n\n\nclass TestAppend:\n def test_append(self, sort, float_frame):\n mixed_frame = float_frame.copy()\n mixed_frame[\"foo\"] = \"bar\"\n\n begin_index = float_frame.index[:5]\n end_index = float_frame.index[5:]\n\n begin_frame = float_frame.reindex(begin_index)\n end_frame = 
float_frame.reindex(end_index)\n\n appended = begin_frame.append(end_frame)\n tm.assert_almost_equal(appended[\"A\"], float_frame[\"A\"])\n\n del end_frame[\"A\"]\n partial_appended = begin_frame.append(end_frame, sort=sort)\n assert \"A\" in partial_appended\n\n partial_appended = end_frame.append(begin_frame, sort=sort)\n assert \"A\" in partial_appended\n\n # mixed type handling\n appended = mixed_frame[:5].append(mixed_frame[5:])\n tm.assert_frame_equal(appended, mixed_frame)\n\n # what to test here\n mixed_appended = mixed_frame[:5].append(float_frame[5:], sort=sort)\n mixed_appended2 = float_frame[:5].append(mixed_frame[5:], sort=sort)\n\n # all equal except 'foo' column\n tm.assert_frame_equal(\n mixed_appended.reindex(columns=[\"A\", \"B\", \"C\", \"D\"]),\n mixed_appended2.reindex(columns=[\"A\", \"B\", \"C\", \"D\"]),\n )\n\n def test_append_empty(self, float_frame):\n empty = DataFrame()\n\n appended = float_frame.append(empty)\n tm.assert_frame_equal(float_frame, appended)\n assert appended is not float_frame\n\n appended = empty.append(float_frame)\n tm.assert_frame_equal(float_frame, appended)\n assert appended is not float_frame\n\n def test_append_overlap_raises(self, float_frame):\n msg = \"Indexes have overlapping values\"\n with pytest.raises(ValueError, match=msg):\n float_frame.append(float_frame, verify_integrity=True)\n\n def test_append_new_columns(self):\n # see gh-6129: new columns\n df = DataFrame({\"a\": {\"x\": 1, \"y\": 2}, \"b\": {\"x\": 3, \"y\": 4}})\n row = Series([5, 6, 7], index=[\"a\", \"b\", \"c\"], name=\"z\")\n expected = DataFrame(\n {\n \"a\": {\"x\": 1, \"y\": 2, \"z\": 5},\n \"b\": {\"x\": 3, \"y\": 4, \"z\": 6},\n \"c\": {\"z\": 7},\n }\n )\n result = df.append(row)\n tm.assert_frame_equal(result, expected)\n\n def test_append_length0_frame(self, sort):\n df = DataFrame(columns=[\"A\", \"B\", \"C\"])\n df3 = DataFrame(index=[0, 1], columns=[\"A\", \"B\"])\n df5 = df.append(df3, sort=sort)\n\n expected = DataFrame(index=[0, 1], columns=[\"A\", \"B\", \"C\"])\n tm.assert_frame_equal(df5, expected)\n\n def test_append_records(self):\n arr1 = np.zeros((2,), dtype=(\"i4,f4,a10\"))\n arr1[:] = [(1, 2.0, \"Hello\"), (2, 3.0, \"World\")]\n\n arr2 = np.zeros((3,), dtype=(\"i4,f4,a10\"))\n arr2[:] = [(3, 4.0, \"foo\"), (5, 6.0, \"bar\"), (7.0, 8.0, \"baz\")]\n\n df1 = DataFrame(arr1)\n df2 = DataFrame(arr2)\n\n result = df1.append(df2, ignore_index=True)\n expected = DataFrame(np.concatenate((arr1, arr2)))\n tm.assert_frame_equal(result, expected)\n\n # rewrite sort fixture, since we also want to test default of None\n def test_append_sorts(self, sort):\n df1 = pd.DataFrame({\"a\": [1, 2], \"b\": [1, 2]}, columns=[\"b\", \"a\"])\n df2 = pd.DataFrame({\"a\": [1, 2], \"c\": [3, 4]}, index=[2, 3])\n\n with tm.assert_produces_warning(None):\n result = df1.append(df2, sort=sort)\n\n # for None / True\n expected = pd.DataFrame(\n {\"b\": [1, 2, None, None], \"a\": [1, 2, 1, 2], \"c\": [None, None, 3, 4]},\n columns=[\"a\", \"b\", \"c\"],\n )\n if sort is False:\n expected = expected[[\"b\", \"a\", \"c\"]]\n tm.assert_frame_equal(result, expected)\n\n def test_append_different_columns(self, sort):\n df = DataFrame(\n {\n \"bools\": np.random.randn(10) > 0,\n \"ints\": np.random.randint(0, 10, 10),\n \"floats\": np.random.randn(10),\n \"strings\": [\"foo\", \"bar\"] * 5,\n }\n )\n\n a = df[:5].loc[:, [\"bools\", \"ints\", \"floats\"]]\n b = df[5:].loc[:, [\"strings\", \"ints\", \"floats\"]]\n\n appended = a.append(b, sort=sort)\n assert 
isna(appended[\"strings\"][0:4]).all()\n assert isna(appended[\"bools\"][5:]).all()\n\n def test_append_many(self, sort, float_frame):\n chunks = [\n float_frame[:5],\n float_frame[5:10],\n float_frame[10:15],\n float_frame[15:],\n ]\n\n result = chunks[0].append(chunks[1:])\n tm.assert_frame_equal(result, float_frame)\n\n chunks[-1] = chunks[-1].copy()\n chunks[-1][\"foo\"] = \"bar\"\n result = chunks[0].append(chunks[1:], sort=sort)\n tm.assert_frame_equal(result.loc[:, float_frame.columns], float_frame)\n assert (result[\"foo\"][15:] == \"bar\").all()\n assert result[\"foo\"][:15].isna().all()\n\n def test_append_preserve_index_name(self):\n # #980\n df1 = DataFrame(columns=[\"A\", \"B\", \"C\"])\n df1 = df1.set_index([\"A\"])\n df2 = DataFrame(data=[[1, 4, 7], [2, 5, 8], [3, 6, 9]], columns=[\"A\", \"B\", \"C\"])\n df2 = df2.set_index([\"A\"])\n\n result = df1.append(df2)\n assert result.index.name == \"A\"\n\n indexes_can_append = [\n pd.RangeIndex(3),\n pd.Index([4, 5, 6]),\n pd.Index([4.5, 5.5, 6.5]),\n pd.Index(list(\"abc\")),\n pd.CategoricalIndex(\"A B C\".split()),\n pd.CategoricalIndex(\"D E F\".split(), ordered=True),\n pd.IntervalIndex.from_breaks([7, 8, 9, 10]),\n pd.DatetimeIndex(\n [\n dt.datetime(2013, 1, 3, 0, 0),\n dt.datetime(2013, 1, 3, 6, 10),\n dt.datetime(2013, 1, 3, 7, 12),\n ]\n ),\n ]\n\n indexes_cannot_append_with_other = [\n pd.MultiIndex.from_arrays([\"A B C\".split(), \"D E F\".split()])\n ]\n\n all_indexes = indexes_can_append + indexes_cannot_append_with_other\n\n @pytest.mark.parametrize(\"index\", all_indexes, ids=lambda x: type(x).__name__)\n def test_append_same_columns_type(self, index):\n # GH18359\n\n # df wider than ser\n df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=index)\n ser_index = index[:2]\n ser = pd.Series([7, 8], index=ser_index, name=2)\n result = df.append(ser)\n expected = pd.DataFrame(\n [[1.0, 2.0, 3.0], [4, 5, 6], [7, 8, np.nan]], index=[0, 1, 2], columns=index\n )\n tm.assert_frame_equal(result, expected)\n\n # ser wider than df\n ser_index = index\n index = index[:2]\n df = pd.DataFrame([[1, 2], [4, 5]], columns=index)\n ser = pd.Series([7, 8, 9], index=ser_index, name=2)\n result = df.append(ser)\n expected = pd.DataFrame(\n [[1, 2, np.nan], [4, 5, np.nan], [7, 8, 9]],\n index=[0, 1, 2],\n columns=ser_index,\n )\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"df_columns, series_index\",\n combinations(indexes_can_append, r=2),\n ids=lambda x: type(x).__name__,\n )\n def test_append_different_columns_types(self, df_columns, series_index):\n # GH18359\n # See also test 'test_append_different_columns_types_raises' below\n # for errors raised when appending\n\n df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=df_columns)\n ser = pd.Series([7, 8, 9], index=series_index, name=2)\n\n result = df.append(ser)\n idx_diff = ser.index.difference(df_columns)\n combined_columns = Index(df_columns.tolist()).append(idx_diff)\n expected = pd.DataFrame(\n [\n [1.0, 2.0, 3.0, np.nan, np.nan, np.nan],\n [4, 5, 6, np.nan, np.nan, np.nan],\n [np.nan, np.nan, np.nan, 7, 8, 9],\n ],\n index=[0, 1, 2],\n columns=combined_columns,\n )\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"index_can_append\", indexes_can_append, ids=lambda x: type(x).__name__\n )\n @pytest.mark.parametrize(\n \"index_cannot_append_with_other\",\n indexes_cannot_append_with_other,\n ids=lambda x: type(x).__name__,\n )\n def test_append_different_columns_types_raises(\n self, index_can_append, 
index_cannot_append_with_other\n ):\n # GH18359\n # Dataframe.append will raise if MultiIndex appends\n # or is appended to a different index type\n #\n # See also test 'test_append_different_columns_types' above for\n # appending without raising.\n\n df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=index_can_append)\n ser = pd.Series([7, 8, 9], index=index_cannot_append_with_other, name=2)\n msg = (\n r\"Expected tuple, got (int|long|float|str|\"\n r\"pandas._libs.interval.Interval)|\"\n r\"object of type '(int|float|Timestamp|\"\n r\"pandas._libs.interval.Interval)' has no len\\(\\)|\"\n )\n with pytest.raises(TypeError, match=msg):\n df.append(ser)\n\n df = pd.DataFrame(\n [[1, 2, 3], [4, 5, 6]], columns=index_cannot_append_with_other\n )\n ser = pd.Series([7, 8, 9], index=index_can_append, name=2)\n\n with pytest.raises(TypeError, match=msg):\n df.append(ser)\n\n def test_append_dtype_coerce(self, sort):\n\n # GH 4993\n # appending with datetime will incorrectly convert datetime64\n\n df1 = DataFrame(\n index=[1, 2],\n data=[dt.datetime(2013, 1, 1, 0, 0), dt.datetime(2013, 1, 2, 0, 0)],\n columns=[\"start_time\"],\n )\n df2 = DataFrame(\n index=[4, 5],\n data=[\n [dt.datetime(2013, 1, 3, 0, 0), dt.datetime(2013, 1, 3, 6, 10)],\n [dt.datetime(2013, 1, 4, 0, 0), dt.datetime(2013, 1, 4, 7, 10)],\n ],\n columns=[\"start_time\", \"end_time\"],\n )\n\n expected = concat(\n [\n Series(\n [\n pd.NaT,\n pd.NaT,\n dt.datetime(2013, 1, 3, 6, 10),\n dt.datetime(2013, 1, 4, 7, 10),\n ],\n name=\"end_time\",\n ),\n Series(\n [\n dt.datetime(2013, 1, 1, 0, 0),\n dt.datetime(2013, 1, 2, 0, 0),\n dt.datetime(2013, 1, 3, 0, 0),\n dt.datetime(2013, 1, 4, 0, 0),\n ],\n name=\"start_time\",\n ),\n ],\n axis=1,\n sort=sort,\n )\n result = df1.append(df2, ignore_index=True, sort=sort)\n if sort:\n expected = expected[[\"end_time\", \"start_time\"]]\n else:\n expected = expected[[\"start_time\", \"end_time\"]]\n\n tm.assert_frame_equal(result, expected)\n\n def test_append_missing_column_proper_upcast(self, sort):\n df1 = DataFrame({\"A\": np.array([1, 2, 3, 4], dtype=\"i8\")})\n df2 = DataFrame({\"B\": np.array([True, False, True, False], dtype=bool)})\n\n appended = df1.append(df2, ignore_index=True, sort=sort)\n assert appended[\"A\"].dtype == \"f8\"\n assert appended[\"B\"].dtype == \"O\"\n\n def test_append_empty_frame_to_series_with_dateutil_tz(self):\n # GH 23682\n date = Timestamp(\"2018-10-24 07:30:00\", tz=dateutil.tz.tzutc())\n s = Series({\"date\": date, \"a\": 1.0, \"b\": 2.0})\n df = DataFrame(columns=[\"c\", \"d\"])\n result = df.append(s, ignore_index=True)\n # n.b. 
it's not clear to me that expected is correct here.\n # It's possible that the `date` column should have\n # datetime64[ns, tz] dtype for both result and expected.\n # that would be more consistent with new columns having\n # their own dtype (float for a and b, datetime64ns, tz for date).\n expected = DataFrame(\n [[np.nan, np.nan, 1.0, 2.0, date]],\n columns=[\"c\", \"d\", \"a\", \"b\", \"date\"],\n dtype=object,\n )\n # These columns get cast to object after append\n expected[\"a\"] = expected[\"a\"].astype(float)\n expected[\"b\"] = expected[\"b\"].astype(float)\n tm.assert_frame_equal(result, expected)\n\n\nclass TestConcatenate:\n def test_concat_copy(self):\n df = DataFrame(np.random.randn(4, 3))\n df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))\n df3 = DataFrame({5: \"foo\"}, index=range(4))\n\n # These are actual copies.\n result = concat([df, df2, df3], axis=1, copy=True)\n\n for b in result._mgr.blocks:\n assert b.values.base is None\n\n # These are the same.\n result = concat([df, df2, df3], axis=1, copy=False)\n\n for b in result._mgr.blocks:\n if b.is_float:\n assert b.values.base is df._mgr.blocks[0].values.base\n elif b.is_integer:\n assert b.values.base is df2._mgr.blocks[0].values.base\n elif b.is_object:\n assert b.values.base is not None\n\n # Float block was consolidated.\n df4 = DataFrame(np.random.randn(4, 1))\n result = concat([df, df2, df3, df4], axis=1, copy=False)\n for b in result._mgr.blocks:\n if b.is_float:\n assert b.values.base is None\n elif b.is_integer:\n assert b.values.base is df2._mgr.blocks[0].values.base\n elif b.is_object:\n assert b.values.base is not None\n\n def test_concat_with_group_keys(self):\n df = DataFrame(np.random.randn(4, 3))\n df2 = DataFrame(np.random.randn(4, 4))\n\n # axis=0\n df = DataFrame(np.random.randn(3, 4))\n df2 = DataFrame(np.random.randn(4, 4))\n\n result = concat([df, df2], keys=[0, 1])\n exp_index = MultiIndex.from_arrays(\n [[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]\n )\n expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)\n tm.assert_frame_equal(result, expected)\n\n result = concat([df, df], keys=[0, 1])\n exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])\n expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)\n tm.assert_frame_equal(result, expected)\n\n # axis=1\n df = DataFrame(np.random.randn(4, 3))\n df2 = DataFrame(np.random.randn(4, 4))\n\n result = concat([df, df2], keys=[0, 1], axis=1)\n expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)\n tm.assert_frame_equal(result, expected)\n\n result = concat([df, df], keys=[0, 1], axis=1)\n expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)\n tm.assert_frame_equal(result, expected)\n\n def test_concat_keys_specific_levels(self):\n df = DataFrame(np.random.randn(10, 4))\n pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]\n level = [\"three\", \"two\", \"one\", \"zero\"]\n result = concat(\n pieces,\n axis=1,\n keys=[\"one\", \"two\", \"three\"],\n levels=[level],\n names=[\"group_key\"],\n )\n\n tm.assert_index_equal(result.columns.levels[0], Index(level, name=\"group_key\"))\n tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))\n\n assert result.columns.names == [\"group_key\", None]\n\n def test_concat_dataframe_keys_bug(self, sort):\n t1 = DataFrame(\n {\"value\": Series([1, 2, 3], index=Index([\"a\", \"b\", \"c\"], name=\"id\"))}\n )\n t2 = DataFrame({\"value\": Series([7, 8], index=Index([\"a\", \"b\"], 
name=\"id\"))})\n\n # it works\n result = concat([t1, t2], axis=1, keys=[\"t1\", \"t2\"], sort=sort)\n assert list(result.columns) == [(\"t1\", \"value\"), (\"t2\", \"value\")]\n\n def test_concat_series_partial_columns_names(self):\n # GH10698\n foo = Series([1, 2], name=\"foo\")\n bar = Series([1, 2])\n baz = Series([4, 5])\n\n result = concat([foo, bar, baz], axis=1)\n expected = DataFrame(\n {\"foo\": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=[\"foo\", 0, 1]\n )\n tm.assert_frame_equal(result, expected)\n\n result = concat([foo, bar, baz], axis=1, keys=[\"red\", \"blue\", \"yellow\"])\n expected = DataFrame(\n {\"red\": [1, 2], \"blue\": [1, 2], \"yellow\": [4, 5]},\n columns=[\"red\", \"blue\", \"yellow\"],\n )\n tm.assert_frame_equal(result, expected)\n\n result = concat([foo, bar, baz], axis=1, ignore_index=True)\n expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"mapping\", [\"mapping\", \"dict\"])\n def test_concat_mapping(self, mapping, non_dict_mapping_subclass):\n constructor = dict if mapping == \"dict\" else non_dict_mapping_subclass\n frames = constructor(\n {\n \"foo\": DataFrame(np.random.randn(4, 3)),\n \"bar\": DataFrame(np.random.randn(4, 3)),\n \"baz\": DataFrame(np.random.randn(4, 3)),\n \"qux\": DataFrame(np.random.randn(4, 3)),\n }\n )\n\n sorted_keys = list(frames.keys())\n\n result = concat(frames)\n expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)\n tm.assert_frame_equal(result, expected)\n\n result = concat(frames, axis=1)\n expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)\n tm.assert_frame_equal(result, expected)\n\n keys = [\"baz\", \"foo\", \"bar\"]\n result = concat(frames, keys=keys)\n expected = concat([frames[k] for k in keys], keys=keys)\n tm.assert_frame_equal(result, expected)\n\n def test_concat_ignore_index(self, sort):\n frame1 = DataFrame(\n {\"test1\": [\"a\", \"b\", \"c\"], \"test2\": [1, 2, 3], \"test3\": [4.5, 3.2, 1.2]}\n )\n frame2 = DataFrame({\"test3\": [5.2, 2.2, 4.3]})\n frame1.index = Index([\"x\", \"y\", \"z\"])\n frame2.index = Index([\"x\", \"y\", \"q\"])\n\n v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)\n\n nan = np.nan\n expected = DataFrame(\n [\n [nan, nan, nan, 4.3],\n [\"a\", 1, 4.5, 5.2],\n [\"b\", 2, 3.2, 2.2],\n [\"c\", 3, 1.2, nan],\n ],\n index=Index([\"q\", \"x\", \"y\", \"z\"]),\n )\n if not sort:\n expected = expected.loc[[\"x\", \"y\", \"z\", \"q\"]]\n\n tm.assert_frame_equal(v1, expected)\n\n def test_concat_multiindex_with_keys(self):\n index = MultiIndex(\n levels=[[\"foo\", \"bar\", \"baz\", \"qux\"], [\"one\", \"two\", \"three\"]],\n codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],\n names=[\"first\", \"second\"],\n )\n frame = DataFrame(\n np.random.randn(10, 3),\n index=index,\n columns=Index([\"A\", \"B\", \"C\"], name=\"exp\"),\n )\n result = concat([frame, frame], keys=[0, 1], names=[\"iteration\"])\n\n assert result.index.names == (\"iteration\",) + index.names\n tm.assert_frame_equal(result.loc[0], frame)\n tm.assert_frame_equal(result.loc[1], frame)\n assert result.index.nlevels == 3\n\n def test_concat_multiindex_with_tz(self):\n # GH 6606\n df = DataFrame(\n {\n \"dt\": [\n datetime(2014, 1, 1),\n datetime(2014, 1, 2),\n datetime(2014, 1, 3),\n ],\n \"b\": [\"A\", \"B\", \"C\"],\n \"c\": [1, 2, 3],\n \"d\": [4, 5, 6],\n }\n )\n df[\"dt\"] = df[\"dt\"].apply(lambda d: Timestamp(d, tz=\"US/Pacific\"))\n df = df.set_index([\"dt\", 
\"b\"])\n\n exp_idx1 = DatetimeIndex(\n [\"2014-01-01\", \"2014-01-02\", \"2014-01-03\"] * 2, tz=\"US/Pacific\", name=\"dt\"\n )\n exp_idx2 = Index([\"A\", \"B\", \"C\"] * 2, name=\"b\")\n exp_idx = MultiIndex.from_arrays([exp_idx1, exp_idx2])\n expected = DataFrame(\n {\"c\": [1, 2, 3] * 2, \"d\": [4, 5, 6] * 2}, index=exp_idx, columns=[\"c\", \"d\"]\n )\n\n result = concat([df, df])\n tm.assert_frame_equal(result, expected)\n\n def test_concat_multiindex_with_none_in_index_names(self):\n # GH 15787\n index = pd.MultiIndex.from_product([[1], range(5)], names=[\"level1\", None])\n df = pd.DataFrame({\"col\": range(5)}, index=index, dtype=np.int32)\n\n result = concat([df, df], keys=[1, 2], names=[\"level2\"])\n index = pd.MultiIndex.from_product(\n [[1, 2], [1], range(5)], names=[\"level2\", \"level1\", None]\n )\n expected = pd.DataFrame(\n {\"col\": list(range(5)) * 2}, index=index, dtype=np.int32\n )\n tm.assert_frame_equal(result, expected)\n\n result = concat([df, df[:2]], keys=[1, 2], names=[\"level2\"])\n level2 = [1] * 5 + [2] * 2\n level1 = [1] * 7\n no_name = list(range(5)) + list(range(2))\n tuples = list(zip(level2, level1, no_name))\n index = pd.MultiIndex.from_tuples(tuples, names=[\"level2\", \"level1\", None])\n expected = pd.DataFrame({\"col\": no_name}, index=index, dtype=np.int32)\n tm.assert_frame_equal(result, expected)\n\n def test_concat_keys_and_levels(self):\n df = DataFrame(np.random.randn(1, 3))\n df2 = DataFrame(np.random.randn(1, 4))\n\n levels = [[\"foo\", \"baz\"], [\"one\", \"two\"]]\n names = [\"first\", \"second\"]\n result = concat(\n [df, df2, df, df2],\n keys=[(\"foo\", \"one\"), (\"foo\", \"two\"), (\"baz\", \"one\"), (\"baz\", \"two\")],\n levels=levels,\n names=names,\n )\n expected = concat([df, df2, df, df2])\n exp_index = MultiIndex(\n levels=levels + [[0]],\n codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],\n names=names + [None],\n )\n expected.index = exp_index\n\n tm.assert_frame_equal(result, expected)\n\n # no names\n result = concat(\n [df, df2, df, df2],\n keys=[(\"foo\", \"one\"), (\"foo\", \"two\"), (\"baz\", \"one\"), (\"baz\", \"two\")],\n levels=levels,\n )\n assert result.index.names == (None,) * 3\n\n # no levels\n result = concat(\n [df, df2, df, df2],\n keys=[(\"foo\", \"one\"), (\"foo\", \"two\"), (\"baz\", \"one\"), (\"baz\", \"two\")],\n names=[\"first\", \"second\"],\n )\n assert result.index.names == (\"first\", \"second\", None)\n tm.assert_index_equal(\n result.index.levels[0], Index([\"baz\", \"foo\"], name=\"first\")\n )\n\n def test_concat_keys_levels_no_overlap(self):\n # GH #1406\n df = DataFrame(np.random.randn(1, 3), index=[\"a\"])\n df2 = DataFrame(np.random.randn(1, 4), index=[\"b\"])\n\n msg = \"Values not found in passed level\"\n with pytest.raises(ValueError, match=msg):\n concat([df, df], keys=[\"one\", \"two\"], levels=[[\"foo\", \"bar\", \"baz\"]])\n\n msg = \"Key one not in level\"\n with pytest.raises(ValueError, match=msg):\n concat([df, df2], keys=[\"one\", \"two\"], levels=[[\"foo\", \"bar\", \"baz\"]])\n\n def test_concat_rename_index(self):\n a = DataFrame(\n np.random.rand(3, 3),\n columns=list(\"ABC\"),\n index=Index(list(\"abc\"), name=\"index_a\"),\n )\n b = DataFrame(\n np.random.rand(3, 3),\n columns=list(\"ABC\"),\n index=Index(list(\"abc\"), name=\"index_b\"),\n )\n\n result = concat([a, b], keys=[\"key0\", \"key1\"], names=[\"lvl0\", \"lvl1\"])\n\n exp = concat([a, b], keys=[\"key0\", \"key1\"], names=[\"lvl0\"])\n names = list(exp.index.names)\n names[1] = \"lvl1\"\n 
exp.index.set_names(names, inplace=True)\n\n tm.assert_frame_equal(result, exp)\n assert result.index.names == exp.index.names\n\n def test_crossed_dtypes_weird_corner(self):\n columns = [\"A\", \"B\", \"C\", \"D\"]\n df1 = DataFrame(\n {\n \"A\": np.array([1, 2, 3, 4], dtype=\"f8\"),\n \"B\": np.array([1, 2, 3, 4], dtype=\"i8\"),\n \"C\": np.array([1, 2, 3, 4], dtype=\"f8\"),\n \"D\": np.array([1, 2, 3, 4], dtype=\"i8\"),\n },\n columns=columns,\n )\n\n df2 = DataFrame(\n {\n \"A\": np.array([1, 2, 3, 4], dtype=\"i8\"),\n \"B\": np.array([1, 2, 3, 4], dtype=\"f8\"),\n \"C\": np.array([1, 2, 3, 4], dtype=\"i8\"),\n \"D\": np.array([1, 2, 3, 4], dtype=\"f8\"),\n },\n columns=columns,\n )\n\n appended = df1.append(df2, ignore_index=True)\n expected = DataFrame(\n np.concatenate([df1.values, df2.values], axis=0), columns=columns\n )\n tm.assert_frame_equal(appended, expected)\n\n df = DataFrame(np.random.randn(1, 3), index=[\"a\"])\n df2 = DataFrame(np.random.randn(1, 4), index=[\"b\"])\n result = concat([df, df2], keys=[\"one\", \"two\"], names=[\"first\", \"second\"])\n assert result.index.names == (\"first\", \"second\")\n\n def test_dups_index(self):\n # GH 4771\n\n # single dtypes\n df = DataFrame(\n np.random.randint(0, 10, size=40).reshape(10, 4),\n columns=[\"A\", \"A\", \"C\", \"C\"],\n )\n\n result = concat([df, df], axis=1)\n tm.assert_frame_equal(result.iloc[:, :4], df)\n tm.assert_frame_equal(result.iloc[:, 4:], df)\n\n result = concat([df, df], axis=0)\n tm.assert_frame_equal(result.iloc[:10], df)\n tm.assert_frame_equal(result.iloc[10:], df)\n\n # multi dtypes\n df = concat(\n [\n DataFrame(np.random.randn(10, 4), columns=[\"A\", \"A\", \"B\", \"B\"]),\n DataFrame(\n np.random.randint(0, 10, size=20).reshape(10, 2), columns=[\"A\", \"C\"]\n ),\n ],\n axis=1,\n )\n\n result = concat([df, df], axis=1)\n tm.assert_frame_equal(result.iloc[:, :6], df)\n tm.assert_frame_equal(result.iloc[:, 6:], df)\n\n result = concat([df, df], axis=0)\n tm.assert_frame_equal(result.iloc[:10], df)\n tm.assert_frame_equal(result.iloc[10:], df)\n\n # append\n result = df.iloc[0:8, :].append(df.iloc[8:])\n tm.assert_frame_equal(result, df)\n\n result = df.iloc[0:8, :].append(df.iloc[8:9]).append(df.iloc[9:10])\n tm.assert_frame_equal(result, df)\n\n expected = concat([df, df], axis=0)\n result = df.append(df)\n tm.assert_frame_equal(result, expected)\n\n def test_with_mixed_tuples(self, sort):\n # 10697\n # columns have mixed tuples, so handle properly\n df1 = DataFrame({\"A\": \"foo\", (\"B\", 1): \"bar\"}, index=range(2))\n df2 = DataFrame({\"B\": \"foo\", (\"B\", 1): \"bar\"}, index=range(2))\n\n # it works\n concat([df1, df2], sort=sort)\n\n def test_handle_empty_objects(self, sort):\n df = DataFrame(np.random.randn(10, 4), columns=list(\"abcd\"))\n\n baz = df[:5].copy()\n baz[\"foo\"] = \"bar\"\n empty = df[5:5]\n\n frames = [baz, empty, empty, df[5:]]\n concatted = concat(frames, axis=0, sort=sort)\n\n expected = df.reindex(columns=[\"a\", \"b\", \"c\", \"d\", \"foo\"])\n expected[\"foo\"] = expected[\"foo\"].astype(\"O\")\n expected.loc[0:4, \"foo\"] = \"bar\"\n\n tm.assert_frame_equal(concatted, expected)\n\n # empty as first element with time series\n # GH3259\n df = DataFrame(\n dict(A=range(10000)), index=date_range(\"20130101\", periods=10000, freq=\"s\")\n )\n empty = DataFrame()\n result = concat([df, empty], axis=1)\n tm.assert_frame_equal(result, df)\n result = concat([empty, df], axis=1)\n tm.assert_frame_equal(result, df)\n\n result = concat([df, empty])\n 
tm.assert_frame_equal(result, df)\n result = concat([empty, df])\n tm.assert_frame_equal(result, df)\n\n def test_concat_mixed_objs(self):\n\n # concat mixed series/frames\n # G2385\n\n # axis 1\n index = date_range(\"01-Jan-2013\", periods=10, freq=\"H\")\n arr = np.arange(10, dtype=\"int64\")\n s1 = Series(arr, index=index)\n s2 = Series(arr, index=index)\n df = DataFrame(arr.reshape(-1, 1), index=index)\n\n expected = DataFrame(\n np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]\n )\n result = concat([df, df], axis=1)\n tm.assert_frame_equal(result, expected)\n\n expected = DataFrame(\n np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]\n )\n result = concat([s1, s2], axis=1)\n tm.assert_frame_equal(result, expected)\n\n expected = DataFrame(\n np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]\n )\n result = concat([s1, s2, s1], axis=1)\n tm.assert_frame_equal(result, expected)\n\n expected = DataFrame(\n np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3]\n )\n result = concat([s1, df, s2, s2, s1], axis=1)\n tm.assert_frame_equal(result, expected)\n\n # with names\n s1.name = \"foo\"\n expected = DataFrame(\n np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[\"foo\", 0, 0]\n )\n result = concat([s1, df, s2], axis=1)\n tm.assert_frame_equal(result, expected)\n\n s2.name = \"bar\"\n expected = DataFrame(\n np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[\"foo\", 0, \"bar\"]\n )\n result = concat([s1, df, s2], axis=1)\n tm.assert_frame_equal(result, expected)\n\n # ignore index\n expected = DataFrame(\n np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]\n )\n result = concat([s1, df, s2], axis=1, ignore_index=True)\n tm.assert_frame_equal(result, expected)\n\n # axis 0\n expected = DataFrame(\n np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0]\n )\n result = concat([s1, df, s2])\n tm.assert_frame_equal(result, expected)\n\n expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0])\n result = concat([s1, df, s2], ignore_index=True)\n tm.assert_frame_equal(result, expected)\n\n def test_empty_dtype_coerce(self):\n\n # xref to #12411\n # xref to #12045\n # xref to #11594\n # see below\n\n # 10571\n df1 = DataFrame(data=[[1, None], [2, None]], columns=[\"a\", \"b\"])\n df2 = DataFrame(data=[[3, None], [4, None]], columns=[\"a\", \"b\"])\n result = concat([df1, df2])\n expected = df1.dtypes\n tm.assert_series_equal(result.dtypes, expected)\n\n def test_dtype_coerceion(self):\n\n # 12411\n df = DataFrame({\"date\": [pd.Timestamp(\"20130101\").tz_localize(\"UTC\"), pd.NaT]})\n\n result = concat([df.iloc[[0]], df.iloc[[1]]])\n tm.assert_series_equal(result.dtypes, df.dtypes)\n\n # 12045\n import datetime\n\n df = DataFrame(\n {\"date\": [datetime.datetime(2012, 1, 1), datetime.datetime(1012, 1, 2)]}\n )\n result = concat([df.iloc[[0]], df.iloc[[1]]])\n tm.assert_series_equal(result.dtypes, df.dtypes)\n\n # 11594\n df = DataFrame({\"text\": [\"some words\"] + [None] * 9})\n result = concat([df.iloc[[0]], df.iloc[[1]]])\n tm.assert_series_equal(result.dtypes, df.dtypes)\n\n def test_concat_series(self):\n\n ts = tm.makeTimeSeries()\n ts.name = \"foo\"\n\n pieces = [ts[:5], ts[5:15], ts[15:]]\n\n result = concat(pieces)\n tm.assert_series_equal(result, ts)\n assert result.name == ts.name\n\n result = concat(pieces, keys=[0, 1, 2])\n expected = ts.copy()\n\n ts.index = DatetimeIndex(np.array(ts.index.values, dtype=\"M8[ns]\"))\n\n exp_codes = [np.repeat([0, 1, 2], [len(x) for x in 
pieces]), np.arange(len(ts))]\n exp_index = MultiIndex(levels=[[0, 1, 2], ts.index], codes=exp_codes)\n expected.index = exp_index\n tm.assert_series_equal(result, expected)\n\n def test_concat_series_axis1(self, sort=sort):\n ts = tm.makeTimeSeries()\n\n pieces = [ts[:-2], ts[2:], ts[2:-2]]\n\n result = concat(pieces, axis=1)\n expected = DataFrame(pieces).T\n tm.assert_frame_equal(result, expected)\n\n result = concat(pieces, keys=[\"A\", \"B\", \"C\"], axis=1)\n expected = DataFrame(pieces, index=[\"A\", \"B\", \"C\"]).T\n tm.assert_frame_equal(result, expected)\n\n # preserve series names, #2489\n s = Series(randn(5), name=\"A\")\n s2 = Series(randn(5), name=\"B\")\n\n result = concat([s, s2], axis=1)\n expected = DataFrame({\"A\": s, \"B\": s2})\n tm.assert_frame_equal(result, expected)\n\n s2.name = None\n result = concat([s, s2], axis=1)\n tm.assert_index_equal(result.columns, Index([\"A\", 0], dtype=\"object\"))\n\n # must reindex, #2603\n s = Series(randn(3), index=[\"c\", \"a\", \"b\"], name=\"A\")\n s2 = Series(randn(4), index=[\"d\", \"a\", \"b\", \"c\"], name=\"B\")\n result = concat([s, s2], axis=1, sort=sort)\n expected = DataFrame({\"A\": s, \"B\": s2})\n tm.assert_frame_equal(result, expected)\n\n def test_concat_series_axis1_names_applied(self):\n # ensure names argument is not ignored on axis=1, #23490\n s = Series([1, 2, 3])\n s2 = Series([4, 5, 6])\n result = concat([s, s2], axis=1, keys=[\"a\", \"b\"], names=[\"A\"])\n expected = DataFrame(\n [[1, 4], [2, 5], [3, 6]], columns=pd.Index([\"a\", \"b\"], name=\"A\")\n )\n tm.assert_frame_equal(result, expected)\n\n result = concat([s, s2], axis=1, keys=[(\"a\", 1), (\"b\", 2)], names=[\"A\", \"B\"])\n expected = DataFrame(\n [[1, 4], [2, 5], [3, 6]],\n columns=MultiIndex.from_tuples([(\"a\", 1), (\"b\", 2)], names=[\"A\", \"B\"]),\n )\n tm.assert_frame_equal(result, expected)\n\n def test_concat_single_with_key(self):\n df = DataFrame(np.random.randn(10, 4))\n\n result = concat([df], keys=[\"foo\"])\n expected = concat([df, df], keys=[\"foo\", \"bar\"])\n tm.assert_frame_equal(result, expected[:10])\n\n def test_concat_exclude_none(self):\n df = DataFrame(np.random.randn(10, 4))\n\n pieces = [df[:5], None, None, df[5:]]\n result = concat(pieces)\n tm.assert_frame_equal(result, df)\n with pytest.raises(ValueError, match=\"All objects passed were None\"):\n concat([None, None])\n\n def test_concat_datetime64_block(self):\n from pandas.core.indexes.datetimes import date_range\n\n rng = date_range(\"1/1/2000\", periods=10)\n\n df = DataFrame({\"time\": rng})\n\n result = concat([df, df])\n assert (result.iloc[:10][\"time\"] == rng).all()\n assert (result.iloc[10:][\"time\"] == rng).all()\n\n def test_concat_timedelta64_block(self):\n from pandas import to_timedelta\n\n rng = to_timedelta(np.arange(10), unit=\"s\")\n\n df = DataFrame({\"time\": rng})\n\n result = concat([df, df])\n assert (result.iloc[:10][\"time\"] == rng).all()\n assert (result.iloc[10:][\"time\"] == rng).all()\n\n def test_concat_keys_with_none(self):\n # #1649\n df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])\n\n result = concat(dict(a=None, b=df0, c=df0[:2], d=df0[:1], e=df0))\n expected = concat(dict(b=df0, c=df0[:2], d=df0[:1], e=df0))\n tm.assert_frame_equal(result, expected)\n\n result = concat(\n [None, df0, df0[:2], df0[:1], df0], keys=[\"a\", \"b\", \"c\", \"d\", \"e\"]\n )\n expected = concat([df0, df0[:2], df0[:1], df0], keys=[\"b\", \"c\", \"d\", \"e\"])\n tm.assert_frame_equal(result, expected)\n\n def test_concat_bug_1719(self):\n 
ts1 = tm.makeTimeSeries()\n ts2 = tm.makeTimeSeries()[::2]\n\n # to join with union\n # these two are of different length!\n left = concat([ts1, ts2], join=\"outer\", axis=1)\n right = concat([ts2, ts1], join=\"outer\", axis=1)\n\n assert len(left) == len(right)\n\n def test_concat_bug_2972(self):\n ts0 = Series(np.zeros(5))\n ts1 = Series(np.ones(5))\n ts0.name = ts1.name = \"same name\"\n result = concat([ts0, ts1], axis=1)\n\n expected = DataFrame({0: ts0, 1: ts1})\n expected.columns = [\"same name\", \"same name\"]\n tm.assert_frame_equal(result, expected)\n\n def test_concat_bug_3602(self):\n\n # GH 3602, duplicate columns\n df1 = DataFrame(\n {\n \"firmNo\": [0, 0, 0, 0],\n \"prc\": [6, 6, 6, 6],\n \"stringvar\": [\"rrr\", \"rrr\", \"rrr\", \"rrr\"],\n }\n )\n df2 = DataFrame(\n {\"C\": [9, 10, 11, 12], \"misc\": [1, 2, 3, 4], \"prc\": [6, 6, 6, 6]}\n )\n expected = DataFrame(\n [\n [0, 6, \"rrr\", 9, 1, 6],\n [0, 6, \"rrr\", 10, 2, 6],\n [0, 6, \"rrr\", 11, 3, 6],\n [0, 6, \"rrr\", 12, 4, 6],\n ]\n )\n expected.columns = [\"firmNo\", \"prc\", \"stringvar\", \"C\", \"misc\", \"prc\"]\n\n result = concat([df1, df2], axis=1)\n tm.assert_frame_equal(result, expected)\n\n def test_concat_inner_join_empty(self):\n # GH 15328\n df_empty = pd.DataFrame()\n df_a = pd.DataFrame({\"a\": [1, 2]}, index=[0, 1], dtype=\"int64\")\n df_expected = pd.DataFrame({\"a\": []}, index=[], dtype=\"int64\")\n\n for how, expected in [(\"inner\", df_expected), (\"outer\", df_a)]:\n result = pd.concat([df_a, df_empty], axis=1, join=how)\n tm.assert_frame_equal(result, expected)\n\n def test_concat_series_axis1_same_names_ignore_index(self):\n dates = date_range(\"01-Jan-2013\", \"01-Jan-2014\", freq=\"MS\")[0:-1]\n s1 = Series(randn(len(dates)), index=dates, name=\"value\")\n s2 = Series(randn(len(dates)), index=dates, name=\"value\")\n\n result = concat([s1, s2], axis=1, ignore_index=True)\n expected = Index([0, 1])\n\n tm.assert_index_equal(result.columns, expected)\n\n def test_concat_iterables(self):\n # GH8645 check concat works with tuples, list, generators, and weird\n # stuff like deque and custom iterables\n df1 = DataFrame([1, 2, 3])\n df2 = DataFrame([4, 5, 6])\n expected = DataFrame([1, 2, 3, 4, 5, 6])\n tm.assert_frame_equal(concat((df1, df2), ignore_index=True), expected)\n tm.assert_frame_equal(concat([df1, df2], ignore_index=True), expected)\n tm.assert_frame_equal(\n concat((df for df in (df1, df2)), ignore_index=True), expected\n )\n tm.assert_frame_equal(concat(deque((df1, df2)), ignore_index=True), expected)\n\n class CustomIterator1:\n def __len__(self) -> int:\n return 2\n\n def __getitem__(self, index):\n try:\n return {0: df1, 1: df2}[index]\n except KeyError as err:\n raise IndexError from err\n\n tm.assert_frame_equal(pd.concat(CustomIterator1(), ignore_index=True), expected)\n\n class CustomIterator2(abc.Iterable):\n def __iter__(self):\n yield df1\n yield df2\n\n tm.assert_frame_equal(pd.concat(CustomIterator2(), ignore_index=True), expected)\n\n def test_concat_invalid(self):\n\n # trying to concat a ndframe with a non-ndframe\n df1 = tm.makeCustomDataframe(10, 2)\n for obj in [1, dict(), [1, 2], (1, 2)]:\n\n msg = (\n f\"cannot concatenate object of type '{type(obj)}'; \"\n \"only Series and DataFrame objs are valid\"\n )\n with pytest.raises(TypeError, match=msg):\n concat([df1, obj])\n\n def test_concat_invalid_first_argument(self):\n df1 = tm.makeCustomDataframe(10, 2)\n df2 = tm.makeCustomDataframe(10, 2)\n msg = (\n \"first argument must be an iterable of pandas \"\n 
'objects, you passed an object of type \"DataFrame\"'\n )\n with pytest.raises(TypeError, match=msg):\n concat(df1, df2)\n\n # generator ok though\n concat(DataFrame(np.random.rand(5, 5)) for _ in range(3))\n\n # text reader ok\n # GH6583\n data = \"\"\"index,A,B,C,D\nfoo,2,3,4,5\nbar,7,8,9,10\nbaz,12,13,14,15\nqux,12,13,14,15\nfoo2,12,13,14,15\nbar2,12,13,14,15\n\"\"\"\n\n reader = read_csv(StringIO(data), chunksize=1)\n result = concat(reader, ignore_index=True)\n expected = read_csv(StringIO(data))\n tm.assert_frame_equal(result, expected)\n\n def test_concat_NaT_series(self):\n # GH 11693\n # test for merging NaT series with datetime series.\n x = Series(\n date_range(\"20151124 08:00\", \"20151124 09:00\", freq=\"1h\", tz=\"US/Eastern\")\n )\n y = Series(pd.NaT, index=[0, 1], dtype=\"datetime64[ns, US/Eastern]\")\n expected = Series([x[0], x[1], pd.NaT, pd.NaT])\n\n result = concat([x, y], ignore_index=True)\n tm.assert_series_equal(result, expected)\n\n # all NaT with tz\n expected = Series(pd.NaT, index=range(4), dtype=\"datetime64[ns, US/Eastern]\")\n result = pd.concat([y, y], ignore_index=True)\n tm.assert_series_equal(result, expected)\n\n # without tz\n x = pd.Series(pd.date_range(\"20151124 08:00\", \"20151124 09:00\", freq=\"1h\"))\n y = pd.Series(pd.date_range(\"20151124 10:00\", \"20151124 11:00\", freq=\"1h\"))\n y[:] = pd.NaT\n expected = pd.Series([x[0], x[1], pd.NaT, pd.NaT])\n result = pd.concat([x, y], ignore_index=True)\n tm.assert_series_equal(result, expected)\n\n # all NaT without tz\n x[:] = pd.NaT\n expected = pd.Series(pd.NaT, index=range(4), dtype=\"datetime64[ns]\")\n result = pd.concat([x, y], ignore_index=True)\n tm.assert_series_equal(result, expected)\n\n def test_concat_tz_frame(self):\n df2 = DataFrame(\n dict(\n A=pd.Timestamp(\"20130102\", tz=\"US/Eastern\"),\n B=pd.Timestamp(\"20130603\", tz=\"CET\"),\n ),\n index=range(5),\n )\n\n # concat\n df3 = pd.concat([df2.A.to_frame(), df2.B.to_frame()], axis=1)\n tm.assert_frame_equal(df2, df3)\n\n def test_concat_tz_series(self):\n # gh-11755: tz and no tz\n x = Series(date_range(\"20151124 08:00\", \"20151124 09:00\", freq=\"1h\", tz=\"UTC\"))\n y = Series(date_range(\"2012-01-01\", \"2012-01-02\"))\n expected = Series([x[0], x[1], y[0], y[1]], dtype=\"object\")\n result = concat([x, y], ignore_index=True)\n tm.assert_series_equal(result, expected)\n\n # gh-11887: concat tz and object\n x = Series(date_range(\"20151124 08:00\", \"20151124 09:00\", freq=\"1h\", tz=\"UTC\"))\n y = Series([\"a\", \"b\"])\n expected = Series([x[0], x[1], y[0], y[1]], dtype=\"object\")\n result = concat([x, y], ignore_index=True)\n tm.assert_series_equal(result, expected)\n\n # see gh-12217 and gh-12306\n # Concatenating two UTC times\n first = pd.DataFrame([[datetime(2016, 1, 1)]])\n first[0] = first[0].dt.tz_localize(\"UTC\")\n\n second = pd.DataFrame([[datetime(2016, 1, 2)]])\n second[0] = second[0].dt.tz_localize(\"UTC\")\n\n result = pd.concat([first, second])\n assert result[0].dtype == \"datetime64[ns, UTC]\"\n\n # Concatenating two London times\n first = pd.DataFrame([[datetime(2016, 1, 1)]])\n first[0] = first[0].dt.tz_localize(\"Europe/London\")\n\n second = pd.DataFrame([[datetime(2016, 1, 2)]])\n second[0] = second[0].dt.tz_localize(\"Europe/London\")\n\n result = pd.concat([first, second])\n assert result[0].dtype == \"datetime64[ns, Europe/London]\"\n\n # Concatenating 2+1 London times\n first = pd.DataFrame([[datetime(2016, 1, 1)], [datetime(2016, 1, 2)]])\n first[0] = 
first[0].dt.tz_localize(\"Europe/London\")\n\n second = pd.DataFrame([[datetime(2016, 1, 3)]])\n second[0] = second[0].dt.tz_localize(\"Europe/London\")\n\n result = pd.concat([first, second])\n assert result[0].dtype == \"datetime64[ns, Europe/London]\"\n\n # Concat'ing 1+2 London times\n first = pd.DataFrame([[datetime(2016, 1, 1)]])\n first[0] = first[0].dt.tz_localize(\"Europe/London\")\n\n second = pd.DataFrame([[datetime(2016, 1, 2)], [datetime(2016, 1, 3)]])\n second[0] = second[0].dt.tz_localize(\"Europe/London\")\n\n result = pd.concat([first, second])\n assert result[0].dtype == \"datetime64[ns, Europe/London]\"\n\n def test_concat_tz_series_with_datetimelike(self):\n # see gh-12620: tz and timedelta\n x = [\n pd.Timestamp(\"2011-01-01\", tz=\"US/Eastern\"),\n pd.Timestamp(\"2011-02-01\", tz=\"US/Eastern\"),\n ]\n y = [pd.Timedelta(\"1 day\"), pd.Timedelta(\"2 day\")]\n result = concat([pd.Series(x), pd.Series(y)], ignore_index=True)\n tm.assert_series_equal(result, pd.Series(x + y, dtype=\"object\"))\n\n # tz and period\n y = [pd.Period(\"2011-03\", freq=\"M\"), pd.Period(\"2011-04\", freq=\"M\")]\n result = concat([pd.Series(x), pd.Series(y)], ignore_index=True)\n tm.assert_series_equal(result, pd.Series(x + y, dtype=\"object\"))\n\n def test_concat_tz_series_tzlocal(self):\n # see gh-13583\n x = [\n pd.Timestamp(\"2011-01-01\", tz=dateutil.tz.tzlocal()),\n pd.Timestamp(\"2011-02-01\", tz=dateutil.tz.tzlocal()),\n ]\n y = [\n pd.Timestamp(\"2012-01-01\", tz=dateutil.tz.tzlocal()),\n pd.Timestamp(\"2012-02-01\", tz=dateutil.tz.tzlocal()),\n ]\n\n result = concat([pd.Series(x), pd.Series(y)], ignore_index=True)\n tm.assert_series_equal(result, pd.Series(x + y))\n assert result.dtype == \"datetime64[ns, tzlocal()]\"\n\n @pytest.mark.parametrize(\"tz1\", [None, \"UTC\"])\n @pytest.mark.parametrize(\"tz2\", [None, \"UTC\"])\n @pytest.mark.parametrize(\"s\", [pd.NaT, pd.Timestamp(\"20150101\")])\n def test_concat_NaT_dataframes_all_NaT_axis_0(self, tz1, tz2, s):\n # GH 12396\n\n # tz-naive\n first = pd.DataFrame([[pd.NaT], [pd.NaT]]).apply(\n lambda x: x.dt.tz_localize(tz1)\n )\n second = pd.DataFrame([s]).apply(lambda x: x.dt.tz_localize(tz2))\n\n result = pd.concat([first, second], axis=0)\n expected = pd.DataFrame(pd.Series([pd.NaT, pd.NaT, s], index=[0, 1, 0]))\n expected = expected.apply(lambda x: x.dt.tz_localize(tz2))\n if tz1 != tz2:\n expected = expected.astype(object)\n\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"tz1\", [None, \"UTC\"])\n @pytest.mark.parametrize(\"tz2\", [None, \"UTC\"])\n def test_concat_NaT_dataframes_all_NaT_axis_1(self, tz1, tz2):\n # GH 12396\n\n first = pd.DataFrame(pd.Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1))\n second = pd.DataFrame(pd.Series([pd.NaT]).dt.tz_localize(tz2), columns=[1])\n expected = pd.DataFrame(\n {\n 0: pd.Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1),\n 1: pd.Series([pd.NaT, pd.NaT]).dt.tz_localize(tz2),\n }\n )\n result = pd.concat([first, second], axis=1)\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"tz1\", [None, \"UTC\"])\n @pytest.mark.parametrize(\"tz2\", [None, \"UTC\"])\n def test_concat_NaT_series_dataframe_all_NaT(self, tz1, tz2):\n # GH 12396\n\n # tz-naive\n first = pd.Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1)\n second = pd.DataFrame(\n [\n [pd.Timestamp(\"2015/01/01\", tz=tz2)],\n [pd.Timestamp(\"2016/01/01\", tz=tz2)],\n ],\n index=[2, 3],\n )\n\n expected = pd.DataFrame(\n [\n pd.NaT,\n pd.NaT,\n pd.Timestamp(\"2015/01/01\", tz=tz2),\n 
pd.Timestamp(\"2016/01/01\", tz=tz2),\n ]\n )\n if tz1 != tz2:\n expected = expected.astype(object)\n\n result = pd.concat([first, second])\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"tz\", [None, \"UTC\"])\n def test_concat_NaT_dataframes(self, tz):\n # GH 12396\n\n first = pd.DataFrame([[pd.NaT], [pd.NaT]])\n first = first.apply(lambda x: x.dt.tz_localize(tz))\n second = pd.DataFrame(\n [[pd.Timestamp(\"2015/01/01\", tz=tz)], [pd.Timestamp(\"2016/01/01\", tz=tz)]],\n index=[2, 3],\n )\n expected = pd.DataFrame(\n [\n pd.NaT,\n pd.NaT,\n pd.Timestamp(\"2015/01/01\", tz=tz),\n pd.Timestamp(\"2016/01/01\", tz=tz),\n ]\n )\n\n result = pd.concat([first, second], axis=0)\n tm.assert_frame_equal(result, expected)\n\n def test_concat_period_series(self):\n x = Series(pd.PeriodIndex([\"2015-11-01\", \"2015-12-01\"], freq=\"D\"))\n y = Series(pd.PeriodIndex([\"2015-10-01\", \"2016-01-01\"], freq=\"D\"))\n expected = Series([x[0], x[1], y[0], y[1]], dtype=\"Period[D]\")\n result = concat([x, y], ignore_index=True)\n tm.assert_series_equal(result, expected)\n\n def test_concat_period_multiple_freq_series(self):\n x = Series(pd.PeriodIndex([\"2015-11-01\", \"2015-12-01\"], freq=\"D\"))\n y = Series(pd.PeriodIndex([\"2015-10-01\", \"2016-01-01\"], freq=\"M\"))\n expected = Series([x[0], x[1], y[0], y[1]], dtype=\"object\")\n result = concat([x, y], ignore_index=True)\n tm.assert_series_equal(result, expected)\n assert result.dtype == \"object\"\n\n def test_concat_period_other_series(self):\n x = Series(pd.PeriodIndex([\"2015-11-01\", \"2015-12-01\"], freq=\"D\"))\n y = Series(pd.PeriodIndex([\"2015-11-01\", \"2015-12-01\"], freq=\"M\"))\n expected = Series([x[0], x[1], y[0], y[1]], dtype=\"object\")\n result = concat([x, y], ignore_index=True)\n tm.assert_series_equal(result, expected)\n assert result.dtype == \"object\"\n\n # non-period\n x = Series(pd.PeriodIndex([\"2015-11-01\", \"2015-12-01\"], freq=\"D\"))\n y = Series(pd.DatetimeIndex([\"2015-11-01\", \"2015-12-01\"]))\n expected = Series([x[0], x[1], y[0], y[1]], dtype=\"object\")\n result = concat([x, y], ignore_index=True)\n tm.assert_series_equal(result, expected)\n assert result.dtype == \"object\"\n\n x = Series(pd.PeriodIndex([\"2015-11-01\", \"2015-12-01\"], freq=\"D\"))\n y = Series([\"A\", \"B\"])\n expected = Series([x[0], x[1], y[0], y[1]], dtype=\"object\")\n result = concat([x, y], ignore_index=True)\n tm.assert_series_equal(result, expected)\n assert result.dtype == \"object\"\n\n def test_concat_empty_series(self):\n # GH 11082\n s1 = pd.Series([1, 2, 3], name=\"x\")\n s2 = pd.Series(name=\"y\", dtype=\"float64\")\n res = pd.concat([s1, s2], axis=1)\n exp = pd.DataFrame(\n {\"x\": [1, 2, 3], \"y\": [np.nan, np.nan, np.nan]},\n index=pd.Index([0, 1, 2], dtype=\"O\"),\n )\n tm.assert_frame_equal(res, exp)\n\n s1 = pd.Series([1, 2, 3], name=\"x\")\n s2 = pd.Series(name=\"y\", dtype=\"float64\")\n res = pd.concat([s1, s2], axis=0)\n # name will be reset\n exp = pd.Series([1, 2, 3])\n tm.assert_series_equal(res, exp)\n\n # empty Series with no name\n s1 = pd.Series([1, 2, 3], name=\"x\")\n s2 = pd.Series(name=None, dtype=\"float64\")\n res = pd.concat([s1, s2], axis=1)\n exp = pd.DataFrame(\n {\"x\": [1, 2, 3], 0: [np.nan, np.nan, np.nan]},\n columns=[\"x\", 0],\n index=pd.Index([0, 1, 2], dtype=\"O\"),\n )\n tm.assert_frame_equal(res, exp)\n\n @pytest.mark.parametrize(\"tz\", [None, \"UTC\"])\n @pytest.mark.parametrize(\"values\", [[], [1, 2, 3]])\n def test_concat_empty_series_timelike(self, tz, 
values):\n # GH 18447\n\n first = Series([], dtype=\"M8[ns]\").dt.tz_localize(tz)\n dtype = None if values else np.float64\n second = Series(values, dtype=dtype)\n\n expected = DataFrame(\n {\n 0: pd.Series([pd.NaT] * len(values), dtype=\"M8[ns]\").dt.tz_localize(tz),\n 1: values,\n }\n )\n result = concat([first, second], axis=1)\n tm.assert_frame_equal(result, expected)\n\n def test_default_index(self):\n # is_series and ignore_index\n s1 = pd.Series([1, 2, 3], name=\"x\")\n s2 = pd.Series([4, 5, 6], name=\"y\")\n res = pd.concat([s1, s2], axis=1, ignore_index=True)\n assert isinstance(res.columns, pd.RangeIndex)\n exp = pd.DataFrame([[1, 4], [2, 5], [3, 6]])\n # use check_index_type=True to check the result have\n # RangeIndex (default index)\n tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)\n\n # is_series and all inputs have no names\n s1 = pd.Series([1, 2, 3])\n s2 = pd.Series([4, 5, 6])\n res = pd.concat([s1, s2], axis=1, ignore_index=False)\n assert isinstance(res.columns, pd.RangeIndex)\n exp = pd.DataFrame([[1, 4], [2, 5], [3, 6]])\n exp.columns = pd.RangeIndex(2)\n tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)\n\n # is_dataframe and ignore_index\n df1 = pd.DataFrame({\"A\": [1, 2], \"B\": [5, 6]})\n df2 = pd.DataFrame({\"A\": [3, 4], \"B\": [7, 8]})\n\n res = pd.concat([df1, df2], axis=0, ignore_index=True)\n exp = pd.DataFrame([[1, 5], [2, 6], [3, 7], [4, 8]], columns=[\"A\", \"B\"])\n tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)\n\n res = pd.concat([df1, df2], axis=1, ignore_index=True)\n exp = pd.DataFrame([[1, 5, 3, 7], [2, 6, 4, 8]])\n tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)\n\n def test_concat_multiindex_rangeindex(self):\n # GH13542\n # when multi-index levels are RangeIndex objects\n # there is a bug in concat with objects of len 1\n\n df = DataFrame(np.random.randn(9, 2))\n df.index = MultiIndex(\n levels=[pd.RangeIndex(3), pd.RangeIndex(3)],\n codes=[np.repeat(np.arange(3), 3), np.tile(np.arange(3), 3)],\n )\n\n res = concat([df.iloc[[2, 3, 4], :], df.iloc[[5], :]])\n exp = df.iloc[[2, 3, 4, 5], :]\n tm.assert_frame_equal(res, exp)\n\n def test_concat_multiindex_dfs_with_deepcopy(self):\n # GH 9967\n from copy import deepcopy\n\n example_multiindex1 = pd.MultiIndex.from_product([[\"a\"], [\"b\"]])\n example_dataframe1 = pd.DataFrame([0], index=example_multiindex1)\n\n example_multiindex2 = pd.MultiIndex.from_product([[\"a\"], [\"c\"]])\n example_dataframe2 = pd.DataFrame([1], index=example_multiindex2)\n\n example_dict = {\"s1\": example_dataframe1, \"s2\": example_dataframe2}\n expected_index = pd.MultiIndex(\n levels=[[\"s1\", \"s2\"], [\"a\"], [\"b\", \"c\"]],\n codes=[[0, 1], [0, 0], [0, 1]],\n names=[\"testname\", None, None],\n )\n expected = pd.DataFrame([[0], [1]], index=expected_index)\n result_copy = pd.concat(deepcopy(example_dict), names=[\"testname\"])\n tm.assert_frame_equal(result_copy, expected)\n result_no_copy = pd.concat(example_dict, names=[\"testname\"])\n tm.assert_frame_equal(result_no_copy, expected)\n\n def test_categorical_concat_append(self):\n cat = Categorical([\"a\", \"b\"], categories=[\"a\", \"b\"])\n vals = [1, 2]\n df = DataFrame({\"cats\": cat, \"vals\": vals})\n cat2 = Categorical([\"a\", \"b\", \"a\", \"b\"], categories=[\"a\", \"b\"])\n vals2 = [1, 2, 1, 2]\n exp = DataFrame({\"cats\": cat2, \"vals\": vals2}, index=Index([0, 1, 0, 1]))\n\n tm.assert_frame_equal(pd.concat([df, df]), exp)\n 
tm.assert_frame_equal(df.append(df), exp)\n\n # GH 13524 can concat different categories\n cat3 = Categorical([\"a\", \"b\"], categories=[\"a\", \"b\", \"c\"])\n vals3 = [1, 2]\n df_different_categories = DataFrame({\"cats\": cat3, \"vals\": vals3})\n\n res = pd.concat([df, df_different_categories], ignore_index=True)\n exp = DataFrame({\"cats\": list(\"abab\"), \"vals\": [1, 2, 1, 2]})\n tm.assert_frame_equal(res, exp)\n\n res = df.append(df_different_categories, ignore_index=True)\n tm.assert_frame_equal(res, exp)\n\n def test_categorical_concat_dtypes(self):\n\n # GH8143\n index = [\"cat\", \"obj\", \"num\"]\n cat = Categorical([\"a\", \"b\", \"c\"])\n obj = Series([\"a\", \"b\", \"c\"])\n num = Series([1, 2, 3])\n df = pd.concat([Series(cat), obj, num], axis=1, keys=index)\n\n result = df.dtypes == \"object\"\n expected = Series([False, True, False], index=index)\n tm.assert_series_equal(result, expected)\n\n result = df.dtypes == \"int64\"\n expected = Series([False, False, True], index=index)\n tm.assert_series_equal(result, expected)\n\n result = df.dtypes == \"category\"\n expected = Series([True, False, False], index=index)\n tm.assert_series_equal(result, expected)\n\n def test_categorical_concat(self, sort):\n # See GH 10177\n df1 = DataFrame(\n np.arange(18, dtype=\"int64\").reshape(6, 3), columns=[\"a\", \"b\", \"c\"]\n )\n\n df2 = DataFrame(np.arange(14, dtype=\"int64\").reshape(7, 2), columns=[\"a\", \"c\"])\n\n cat_values = [\"one\", \"one\", \"two\", \"one\", \"two\", \"two\", \"one\"]\n df2[\"h\"] = Series(Categorical(cat_values))\n\n res = pd.concat((df1, df2), axis=0, ignore_index=True, sort=sort)\n exp = DataFrame(\n {\n \"a\": [0, 3, 6, 9, 12, 15, 0, 2, 4, 6, 8, 10, 12],\n \"b\": [\n 1,\n 4,\n 7,\n 10,\n 13,\n 16,\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n ],\n \"c\": [2, 5, 8, 11, 14, 17, 1, 3, 5, 7, 9, 11, 13],\n \"h\": [None] * 6 + cat_values,\n }\n )\n tm.assert_frame_equal(res, exp)\n\n def test_categorical_concat_gh7864(self):\n # GH 7864\n # make sure ordering is preserved\n df = DataFrame({\"id\": [1, 2, 3, 4, 5, 6], \"raw_grade\": list(\"abbaae\")})\n df[\"grade\"] = Categorical(df[\"raw_grade\"])\n df[\"grade\"].cat.set_categories([\"e\", \"a\", \"b\"])\n\n df1 = df[0:3]\n df2 = df[3:]\n\n tm.assert_index_equal(df[\"grade\"].cat.categories, df1[\"grade\"].cat.categories)\n tm.assert_index_equal(df[\"grade\"].cat.categories, df2[\"grade\"].cat.categories)\n\n dfx = pd.concat([df1, df2])\n tm.assert_index_equal(df[\"grade\"].cat.categories, dfx[\"grade\"].cat.categories)\n\n dfa = df1.append(df2)\n tm.assert_index_equal(df[\"grade\"].cat.categories, dfa[\"grade\"].cat.categories)\n\n def test_categorical_concat_preserve(self):\n\n # GH 8641 series concat not preserving category dtype\n # GH 13524 can concat different categories\n s = Series(list(\"abc\"), dtype=\"category\")\n s2 = Series(list(\"abd\"), dtype=\"category\")\n\n exp = Series(list(\"abcabd\"))\n res = pd.concat([s, s2], ignore_index=True)\n tm.assert_series_equal(res, exp)\n\n exp = Series(list(\"abcabc\"), dtype=\"category\")\n res = pd.concat([s, s], ignore_index=True)\n tm.assert_series_equal(res, exp)\n\n exp = Series(list(\"abcabc\"), index=[0, 1, 2, 0, 1, 2], dtype=\"category\")\n res = pd.concat([s, s])\n tm.assert_series_equal(res, exp)\n\n a = Series(np.arange(6, dtype=\"int64\"))\n b = Series(list(\"aabbca\"))\n\n df2 = DataFrame({\"A\": a, \"B\": b.astype(CategoricalDtype(list(\"cab\")))})\n res = pd.concat([df2, df2])\n exp = DataFrame(\n {\n \"A\": 
pd.concat([a, a]),\n \"B\": pd.concat([b, b]).astype(CategoricalDtype(list(\"cab\"))),\n }\n )\n tm.assert_frame_equal(res, exp)\n\n def test_categorical_index_preserver(self):\n\n a = Series(np.arange(6, dtype=\"int64\"))\n b = Series(list(\"aabbca\"))\n\n df2 = DataFrame(\n {\"A\": a, \"B\": b.astype(CategoricalDtype(list(\"cab\")))}\n ).set_index(\"B\")\n result = pd.concat([df2, df2])\n expected = DataFrame(\n {\n \"A\": pd.concat([a, a]),\n \"B\": pd.concat([b, b]).astype(CategoricalDtype(list(\"cab\"))),\n }\n ).set_index(\"B\")\n tm.assert_frame_equal(result, expected)\n\n # wrong categories\n df3 = DataFrame(\n {\"A\": a, \"B\": Categorical(b, categories=list(\"abe\"))}\n ).set_index(\"B\")\n msg = \"categories must match existing categories when appending\"\n with pytest.raises(TypeError, match=msg):\n pd.concat([df2, df3])\n\n def test_concat_categoricalindex(self):\n # GH 16111, categories that aren't lexsorted\n categories = [9, 0, 1, 2, 3]\n\n a = pd.Series(1, index=pd.CategoricalIndex([9, 0], categories=categories))\n b = pd.Series(2, index=pd.CategoricalIndex([0, 1], categories=categories))\n c = pd.Series(3, index=pd.CategoricalIndex([1, 2], categories=categories))\n\n result = pd.concat([a, b, c], axis=1)\n\n exp_idx = pd.CategoricalIndex([9, 0, 1, 2], categories=categories)\n exp = pd.DataFrame(\n {\n 0: [1, 1, np.nan, np.nan],\n 1: [np.nan, 2, 2, np.nan],\n 2: [np.nan, np.nan, 3, 3],\n },\n columns=[0, 1, 2],\n index=exp_idx,\n )\n tm.assert_frame_equal(result, exp)\n\n def test_concat_order(self):\n # GH 17344\n dfs = [pd.DataFrame(index=range(3), columns=[\"a\", 1, None])]\n dfs += [\n pd.DataFrame(index=range(3), columns=[None, 1, \"a\"]) for i in range(100)\n ]\n\n result = pd.concat(dfs, sort=True).columns\n expected = dfs[0].columns\n tm.assert_index_equal(result, expected)\n\n def test_concat_datetime_timezone(self):\n # GH 18523\n idx1 = pd.date_range(\"2011-01-01\", periods=3, freq=\"H\", tz=\"Europe/Paris\")\n idx2 = pd.date_range(start=idx1[0], end=idx1[-1], freq=\"H\")\n df1 = pd.DataFrame({\"a\": [1, 2, 3]}, index=idx1)\n df2 = pd.DataFrame({\"b\": [1, 2, 3]}, index=idx2)\n result = pd.concat([df1, df2], axis=1)\n\n exp_idx = (\n DatetimeIndex(\n [\n \"2011-01-01 00:00:00+01:00\",\n \"2011-01-01 01:00:00+01:00\",\n \"2011-01-01 02:00:00+01:00\",\n ],\n freq=\"H\",\n )\n .tz_convert(\"UTC\")\n .tz_convert(\"Europe/Paris\")\n )\n\n expected = pd.DataFrame(\n [[1, 1], [2, 2], [3, 3]], index=exp_idx, columns=[\"a\", \"b\"]\n )\n\n tm.assert_frame_equal(result, expected)\n\n idx3 = pd.date_range(\"2011-01-01\", periods=3, freq=\"H\", tz=\"Asia/Tokyo\")\n df3 = pd.DataFrame({\"b\": [1, 2, 3]}, index=idx3)\n result = pd.concat([df1, df3], axis=1)\n\n exp_idx = DatetimeIndex(\n [\n \"2010-12-31 15:00:00+00:00\",\n \"2010-12-31 16:00:00+00:00\",\n \"2010-12-31 17:00:00+00:00\",\n \"2010-12-31 23:00:00+00:00\",\n \"2011-01-01 00:00:00+00:00\",\n \"2011-01-01 01:00:00+00:00\",\n ]\n )\n\n expected = pd.DataFrame(\n [\n [np.nan, 1],\n [np.nan, 2],\n [np.nan, 3],\n [1, np.nan],\n [2, np.nan],\n [3, np.nan],\n ],\n index=exp_idx,\n columns=[\"a\", \"b\"],\n )\n\n tm.assert_frame_equal(result, expected)\n\n # GH 13783: Concat after resample\n result = pd.concat(\n [df1.resample(\"H\").mean(), df2.resample(\"H\").mean()], sort=True\n )\n expected = pd.DataFrame(\n {\"a\": [1, 2, 3] + [np.nan] * 3, \"b\": [np.nan] * 3 + [1, 2, 3]},\n index=idx1.append(idx1),\n )\n tm.assert_frame_equal(result, expected)\n\n def test_concat_different_extension_dtypes_upcasts(self):\n a = 
pd.Series(pd.core.arrays.integer_array([1, 2]))\n b = pd.Series(to_decimal([1, 2]))\n\n result = pd.concat([a, b], ignore_index=True)\n expected = pd.Series([1, 2, Decimal(1), Decimal(2)], dtype=object)\n tm.assert_series_equal(result, expected)\n\n def test_concat_odered_dict(self):\n # GH 21510\n expected = pd.concat(\n [pd.Series(range(3)), pd.Series(range(4))], keys=[\"First\", \"Another\"]\n )\n result = pd.concat(\n OrderedDict(\n [(\"First\", pd.Series(range(3))), (\"Another\", pd.Series(range(4)))]\n )\n )\n tm.assert_series_equal(result, expected)\n\n\[email protected](\"pdt\", [pd.Series, pd.DataFrame])\[email protected](\"dt\", np.sctypes[\"float\"])\ndef test_concat_no_unnecessary_upcast(dt, pdt):\n # GH 13247\n dims = pdt(dtype=object).ndim\n\n dfs = [\n pdt(np.array([1], dtype=dt, ndmin=dims)),\n pdt(np.array([np.nan], dtype=dt, ndmin=dims)),\n pdt(np.array([5], dtype=dt, ndmin=dims)),\n ]\n x = pd.concat(dfs)\n assert x.values.dtype == dt\n\n\[email protected](\"pdt\", [create_series_with_explicit_dtype, pd.DataFrame])\[email protected](\"dt\", np.sctypes[\"int\"])\ndef test_concat_will_upcast(dt, pdt):\n with catch_warnings(record=True):\n dims = pdt().ndim\n dfs = [\n pdt(np.array([1], dtype=dt, ndmin=dims)),\n pdt(np.array([np.nan], ndmin=dims)),\n pdt(np.array([5], dtype=dt, ndmin=dims)),\n ]\n x = pd.concat(dfs)\n assert x.values.dtype == \"float64\"\n\n\ndef test_concat_empty_and_non_empty_frame_regression():\n # GH 18178 regression test\n df1 = pd.DataFrame({\"foo\": [1]})\n df2 = pd.DataFrame({\"foo\": []})\n expected = pd.DataFrame({\"foo\": [1.0]})\n result = pd.concat([df1, df2])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_concat_empty_and_non_empty_series_regression():\n # GH 18187 regression test\n s1 = pd.Series([1])\n s2 = pd.Series([], dtype=object)\n\n expected = s1\n result = pd.concat([s1, s2])\n tm.assert_series_equal(result, expected)\n\n\ndef test_concat_sorts_columns(sort):\n # GH-4588\n df1 = pd.DataFrame({\"a\": [1, 2], \"b\": [1, 2]}, columns=[\"b\", \"a\"])\n df2 = pd.DataFrame({\"a\": [3, 4], \"c\": [5, 6]})\n\n # for sort=True/None\n expected = pd.DataFrame(\n {\"a\": [1, 2, 3, 4], \"b\": [1, 2, None, None], \"c\": [None, None, 5, 6]},\n columns=[\"a\", \"b\", \"c\"],\n )\n\n if sort is False:\n expected = expected[[\"b\", \"a\", \"c\"]]\n\n # default\n with tm.assert_produces_warning(None):\n result = pd.concat([df1, df2], ignore_index=True, sort=sort)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_concat_sorts_index(sort):\n df1 = pd.DataFrame({\"a\": [1, 2, 3]}, index=[\"c\", \"a\", \"b\"])\n df2 = pd.DataFrame({\"b\": [1, 2]}, index=[\"a\", \"b\"])\n\n # For True/None\n expected = pd.DataFrame(\n {\"a\": [2, 3, 1], \"b\": [1, 2, None]}, index=[\"a\", \"b\", \"c\"], columns=[\"a\", \"b\"]\n )\n if sort is False:\n expected = expected.loc[[\"c\", \"a\", \"b\"]]\n\n # Warn and sort by default\n with tm.assert_produces_warning(None):\n result = pd.concat([df1, df2], axis=1, sort=sort)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_concat_inner_sort(sort):\n # https://github.com/pandas-dev/pandas/pull/20613\n df1 = pd.DataFrame({\"a\": [1, 2], \"b\": [1, 2], \"c\": [1, 2]}, columns=[\"b\", \"a\", \"c\"])\n df2 = pd.DataFrame({\"a\": [1, 2], \"b\": [3, 4]}, index=[3, 4])\n\n with tm.assert_produces_warning(None):\n # unset sort should *not* warn for inner join\n # since that never sorted\n result = pd.concat([df1, df2], sort=sort, join=\"inner\", ignore_index=True)\n\n expected = pd.DataFrame({\"b\": [1, 2, 3, 4], 
\"a\": [1, 2, 1, 2]}, columns=[\"b\", \"a\"])\n if sort is True:\n expected = expected[[\"a\", \"b\"]]\n tm.assert_frame_equal(result, expected)\n\n\ndef test_concat_aligned_sort():\n # GH-4588\n df = pd.DataFrame({\"c\": [1, 2], \"b\": [3, 4], \"a\": [5, 6]}, columns=[\"c\", \"b\", \"a\"])\n result = pd.concat([df, df], sort=True, ignore_index=True)\n expected = pd.DataFrame(\n {\"a\": [5, 6, 5, 6], \"b\": [3, 4, 3, 4], \"c\": [1, 2, 1, 2]},\n columns=[\"a\", \"b\", \"c\"],\n )\n tm.assert_frame_equal(result, expected)\n\n result = pd.concat([df, df[[\"c\", \"b\"]]], join=\"inner\", sort=True, ignore_index=True)\n expected = expected[[\"b\", \"c\"]]\n tm.assert_frame_equal(result, expected)\n\n\ndef test_concat_aligned_sort_does_not_raise():\n # GH-4588\n # We catch TypeErrors from sorting internally and do not re-raise.\n df = pd.DataFrame({1: [1, 2], \"a\": [3, 4]}, columns=[1, \"a\"])\n expected = pd.DataFrame({1: [1, 2, 1, 2], \"a\": [3, 4, 3, 4]}, columns=[1, \"a\"])\n result = pd.concat([df, df], ignore_index=True, sort=True)\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"s1name,s2name\", [(np.int64(190), (43, 0)), (190, (43, 0))])\ndef test_concat_series_name_npscalar_tuple(s1name, s2name):\n # GH21015\n s1 = pd.Series({\"a\": 1, \"b\": 2}, name=s1name)\n s2 = pd.Series({\"c\": 5, \"d\": 6}, name=s2name)\n result = pd.concat([s1, s2])\n expected = pd.Series({\"a\": 1, \"b\": 2, \"c\": 5, \"d\": 6})\n tm.assert_series_equal(result, expected)\n\n\ndef test_concat_categorical_tz():\n # GH-23816\n a = pd.Series(pd.date_range(\"2017-01-01\", periods=2, tz=\"US/Pacific\"))\n b = pd.Series([\"a\", \"b\"], dtype=\"category\")\n result = pd.concat([a, b], ignore_index=True)\n expected = pd.Series(\n [\n pd.Timestamp(\"2017-01-01\", tz=\"US/Pacific\"),\n pd.Timestamp(\"2017-01-02\", tz=\"US/Pacific\"),\n \"a\",\n \"b\",\n ]\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_concat_categorical_unchanged():\n # GH-12007\n # test fix for when concat on categorical and float\n # coerces dtype categorical -> float\n df = pd.DataFrame(pd.Series([\"a\", \"b\", \"c\"], dtype=\"category\", name=\"A\"))\n ser = pd.Series([0, 1, 2], index=[0, 1, 3], name=\"B\")\n result = pd.concat([df, ser], axis=1)\n expected = pd.DataFrame(\n {\n \"A\": pd.Series([\"a\", \"b\", \"c\", np.nan], dtype=\"category\"),\n \"B\": pd.Series([0, 1, np.nan, 2], dtype=\"float\"),\n }\n )\n tm.assert_equal(result, expected)\n\n\ndef test_concat_datetimeindex_freq():\n # GH 3232\n # Monotonic index result\n dr = pd.date_range(\"01-Jan-2013\", periods=100, freq=\"50L\", tz=\"UTC\")\n data = list(range(100))\n expected = pd.DataFrame(data, index=dr)\n result = pd.concat([expected[:50], expected[50:]])\n tm.assert_frame_equal(result, expected)\n\n # Non-monotonic index result\n result = pd.concat([expected[50:], expected[:50]])\n expected = pd.DataFrame(data[50:] + data[:50], index=dr[50:].append(dr[:50]))\n expected.index._data.freq = None\n tm.assert_frame_equal(result, expected)\n\n\ndef test_concat_empty_df_object_dtype():\n # GH 9149\n df_1 = pd.DataFrame({\"Row\": [0, 1, 1], \"EmptyCol\": np.nan, \"NumberCol\": [1, 2, 3]})\n df_2 = pd.DataFrame(columns=df_1.columns)\n result = pd.concat([df_1, df_2], axis=0)\n expected = df_1.astype(object)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_concat_sparse():\n # GH 23557\n a = pd.Series(SparseArray([0, 1, 2]))\n expected = pd.DataFrame(data=[[0, 0], [1, 1], [2, 2]]).astype(\n pd.SparseDtype(np.int64, 0)\n )\n result = pd.concat([a, a], 
axis=1)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_concat_dense_sparse():\n # GH 30668\n a = pd.Series(pd.arrays.SparseArray([1, None]), dtype=float)\n b = pd.Series([1], dtype=float)\n expected = pd.Series(data=[1, None, 1], index=[0, 1, 0]).astype(\n pd.SparseDtype(np.float64, None)\n )\n result = pd.concat([a, b], axis=0)\n tm.assert_series_equal(result, expected)\n\n\[email protected](\"test_series\", [True, False])\ndef test_concat_copy_index(test_series, axis):\n # GH 29879\n if test_series:\n ser = Series([1, 2])\n comb = concat([ser, ser], axis=axis, copy=True)\n assert comb.index is not ser.index\n else:\n df = DataFrame([[1, 2], [3, 4]], columns=[\"a\", \"b\"])\n comb = concat([df, df], axis=axis, copy=True)\n assert comb.index is not df.index\n assert comb.columns is not df.columns\n\n\ndef test_concat_multiindex_datetime_object_index():\n # https://github.com/pandas-dev/pandas/issues/11058\n s = Series(\n [\"a\", \"b\"],\n index=MultiIndex.from_arrays(\n [[1, 2], Index([dt.date(2013, 1, 1), dt.date(2014, 1, 1)], dtype=\"object\")],\n names=[\"first\", \"second\"],\n ),\n )\n s2 = Series(\n [\"a\", \"b\"],\n index=MultiIndex.from_arrays(\n [[1, 2], Index([dt.date(2013, 1, 1), dt.date(2015, 1, 1)], dtype=\"object\")],\n names=[\"first\", \"second\"],\n ),\n )\n expected = DataFrame(\n [[\"a\", \"a\"], [\"b\", np.nan], [np.nan, \"b\"]],\n index=MultiIndex.from_arrays(\n [\n [1, 2, 2],\n DatetimeIndex(\n [\"2013-01-01\", \"2014-01-01\", \"2015-01-01\"],\n dtype=\"datetime64[ns]\",\n freq=None,\n ),\n ],\n names=[\"first\", \"second\"],\n ),\n )\n result = concat([s, s2], axis=1)\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"keys\", [[\"e\", \"f\", \"f\"], [\"f\", \"e\", \"f\"]])\ndef test_duplicate_keys(keys):\n # GH 33654\n df = DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]})\n s1 = Series([7, 8, 9], name=\"c\")\n s2 = Series([10, 11, 12], name=\"d\")\n result = concat([df, s1, s2], axis=1, keys=keys)\n expected_values = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]]\n expected_columns = pd.MultiIndex.from_tuples(\n [(keys[0], \"a\"), (keys[0], \"b\"), (keys[1], \"c\"), (keys[2], \"d\")]\n )\n expected = DataFrame(expected_values, columns=expected_columns)\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\n \"obj\",\n [\n tm.SubclassedDataFrame({\"A\": np.arange(0, 10)}),\n tm.SubclassedSeries(np.arange(0, 10), name=\"A\"),\n ],\n)\ndef test_concat_preserves_subclass(obj):\n # GH28330 -- preserve subclass\n\n result = concat([obj, obj])\n assert isinstance(result, type(obj))\n" ]
[ [ "pandas._testing.assert_almost_equal", "pandas.Series", "pandas.PeriodIndex", "pandas.RangeIndex", "pandas.tests.extension.decimal.to_decimal", "pandas.MultiIndex.from_tuples", "pandas.DataFrame", "numpy.concatenate", "pandas.core.indexes.datetimes.date_range", "numpy.random.randn", "pandas.core.arrays.SparseArray", "pandas.isna", "pandas._testing.assert_frame_equal", "numpy.random.randint", "numpy.arange", "pandas._testing.makeTimeSeries", "pandas.Index", "pandas.DatetimeIndex", "pandas._testing.makeCustomDataframe", "pandas.core.arrays.integer_array", "pandas._testing.assert_series_equal", "numpy.repeat", "numpy.zeros", "pandas._testing.assert_index_equal", "pandas.concat", "pandas._testing.assert_produces_warning", "pandas.MultiIndex", "pandas.Categorical", "pandas.Timedelta", "numpy.int64", "pandas.MultiIndex.from_product", "numpy.random.rand", "pandas.date_range", "pandas.SparseDtype", "numpy.array", "pandas.CategoricalIndex", "pandas._testing.assert_equal", "pandas.TimedeltaIndex", "pandas.arrays.SparseArray", "numpy.tile", "pandas.MultiIndex.from_arrays", "numpy.ones", "pandas.IntervalIndex.from_breaks", "pandas.Period", "pandas.Timestamp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dotrungkien/face_recognition
[ "52c552c4f73850e62db88d0dc7271d73e4150180" ]
[ "cifar10_input.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Routine for decoding the CIFAR-10 binary file format.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport tensorflow as tf\n\n# Process images of this size. Note that this differs from the original CIFAR\n# image size of 32 x 32. If one alters this number, then the entire model\n# architecture will change and any model would need to be retrained.\nIMAGE_SIZE = 24\n\n# Global constants describing the CIFAR-10 data set.\nNUM_CLASSES = 27\n# NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50000\n# NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 10000\n\nNUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50\nNUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 10\n\n\ndef read_cifar10(filename_queue):\n \"\"\"Reads and parses examples from CIFAR10 data files.\n Recommendation: if you want N-way read parallelism, call this function\n N times. This will give you N independent Readers reading different\n files & positions within those files, which will give better mixing of\n examples.\n Args:\n filename_queue: A queue of strings with the filenames to read from.\n Returns:\n An object representing a single example, with the following fields:\n height: number of rows in the result (32)\n width: number of columns in the result (32)\n depth: number of color channels in the result (3)\n key: a scalar string Tensor describing the filename & record number\n for this example.\n label: an int32 Tensor with the label in the range 0..9.\n uint8image: a [height, width, depth] uint8 Tensor with the image data\n \"\"\"\n\n class CIFAR10Record(object):\n pass\n result = CIFAR10Record()\n\n # Dimensions of the images in the CIFAR-10 dataset.\n # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the\n # input format.\n label_bytes = 1 # 2 for CIFAR-100\n result.height = 32\n result.width = 32\n result.depth = 3\n image_bytes = result.height * result.width * result.depth\n # Every record consists of a label followed by the image, with a\n # fixed number of bytes for each.\n record_bytes = label_bytes + image_bytes\n\n # Read a record, getting filenames from the filename_queue. 
No\n # header or footer in the CIFAR-10 format, so we leave header_bytes\n # and footer_bytes at their default of 0.\n reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)\n result.key, value = reader.read(filename_queue)\n\n # Convert from a string to a vector of uint8 that is record_bytes long.\n record_bytes = tf.decode_raw(value, tf.uint8)\n\n # The first bytes represent the label, which we convert from uint8->int32.\n # result.label = tf.cast(\n # tf.slice(record_bytes, [0], [label_bytes]), tf.int32)\n result.label = tf.cast(tf.slice(record_bytes, [0], [label_bytes]), tf.int32)\n\n # The remaining bytes after the label represent the image, which we reshape\n # from [depth * height * width] to [depth, height, width].\n # depth_major = tf.reshape(tf.slice(record_bytes, [label_bytes], [label_bytes + image_bytes]), [result.depth, result.height, result.width])\n depth_major = tf.reshape(tf.slice(record_bytes, [label_bytes], [image_bytes]), [result.depth, result.height, result.width])\n\n # Convert from [depth, height, width] to [height, width, depth].\n result.uint8image = tf.transpose(depth_major, [1, 2, 0])\n\n return result\n\n\ndef _generate_image_and_label_batch(image, label, min_queue_examples,\n batch_size, shuffle):\n \"\"\"Construct a queued batch of images and labels.\n Args:\n image: 3-D Tensor of [height, width, 3] of type.float32.\n label: 1-D Tensor of type.int32\n min_queue_examples: int32, minimum number of samples to retain\n in the queue that provides of batches of examples.\n batch_size: Number of images per batch.\n shuffle: boolean indicating whether to use a shuffling queue.\n Returns:\n images: Images. 4D tensor of [batch_size, height, width, 3] size.\n labels: Labels. 1D tensor of [batch_size] size.\n \"\"\"\n # Create a queue that shuffles the examples, and then\n # read 'batch_size' images + labels from the example queue.\n num_preprocess_threads = 16\n if shuffle:\n images, label_batch = tf.train.shuffle_batch(\n [image, label],\n batch_size=batch_size,\n num_threads=num_preprocess_threads,\n capacity=min_queue_examples + 3 * batch_size,\n min_after_dequeue=min_queue_examples)\n else:\n images, label_batch = tf.train.batch(\n [image, label],\n batch_size=batch_size,\n num_threads=num_preprocess_threads,\n capacity=min_queue_examples + 3 * batch_size)\n\n # Display the training images in the visualizer.\n tf.summary.image('images', images)\n\n return images, tf.reshape(label_batch, [batch_size])\n\n\ndef distorted_inputs(data_dir, batch_size):\n \"\"\"Construct distorted input for CIFAR training using the Reader ops.\n Args:\n data_dir: Path to the CIFAR-10 data directory.\n batch_size: Number of images per batch.\n Returns:\n images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.\n labels: Labels. 1D tensor of [batch_size] size.\n \"\"\"\n #filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i)\n # for i in xrange(1, 6)]\n filenames = [os.path.join(data_dir, 'train_batch.bin')]\n for f in filenames:\n if not tf.gfile.Exists(f):\n raise ValueError('Failed to find file: ' + f)\n\n # Create a queue that produces the filenames to read.\n filename_queue = tf.train.string_input_producer(filenames)\n\n # Read examples from files in the filename queue.\n read_input = read_cifar10(filename_queue)\n reshaped_image = tf.cast(read_input.uint8image, tf.float32)\n\n height = IMAGE_SIZE\n width = IMAGE_SIZE\n\n # Image processing for training the network. 
Note the many random\n # distortions applied to the image.\n\n # Randomly crop a [height, width] section of the image.\n distorted_image = tf.random_crop(reshaped_image, [height, width, 3])\n\n # Randomly flip the image horizontally.\n distorted_image = tf.image.random_flip_left_right(distorted_image)\n\n # Because these operations are not commutative, consider randomizing\n # the order their operation.\n distorted_image = tf.image.random_brightness(distorted_image,\n max_delta=63)\n distorted_image = tf.image.random_contrast(distorted_image,\n lower=0.2, upper=1.8)\n\n # Subtract off the mean and divide by the variance of the pixels.\n float_image = tf.image.per_image_standardization(distorted_image)\n\n # Set the shapes of tensors.\n float_image.set_shape([height, width, 3])\n read_input.label.set_shape([1])\n\n # Ensure that the random shuffling has good mixing properties.\n min_fraction_of_examples_in_queue = 0.4\n min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *\n min_fraction_of_examples_in_queue)\n print ('Filling queue with %d CIFAR images before starting to train. '\n 'This will take a few minutes.' % min_queue_examples)\n\n # Generate a batch of images and labels by building up a queue of examples.\n return _generate_image_and_label_batch(float_image, read_input.label,\n min_queue_examples, batch_size,\n shuffle=True)\n\n\ndef inputs(eval_data, data_dir, batch_size):\n \"\"\"Construct input for CIFAR evaluation using the Reader ops.\n Args:\n eval_data: bool, indicating if one should use the train or eval data set.\n data_dir: Path to the CIFAR-10 data directory.\n batch_size: Number of images per batch.\n Returns:\n images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.\n labels: Labels. 1D tensor of [batch_size] size.\n \"\"\"\n if not eval_data:\n filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i)\n for i in xrange(1, 6)]\n num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN\n else:\n filenames = [os.path.join(data_dir, 'test_batch.bin')]\n num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL\n\n for f in filenames:\n if not tf.gfile.Exists(f):\n raise ValueError('Failed to find file: ' + f)\n\n # Create a queue that produces the filenames to read.\n filename_queue = tf.train.string_input_producer(filenames)\n\n # Read examples from files in the filename queue.\n read_input = read_cifar10(filename_queue)\n reshaped_image = tf.cast(read_input.uint8image, tf.float32)\n\n height = IMAGE_SIZE\n width = IMAGE_SIZE\n\n # Image processing for evaluation.\n # Crop the central [height, width] of the image.\n resized_image = tf.image.resize_image_with_crop_or_pad(reshaped_image,\n height, width)\n\n # Subtract off the mean and divide by the variance of the pixels.\n float_image = tf.image.per_image_standardization(resized_image)\n\n # Set the shapes of tensors.\n float_image.set_shape([height, width, 3])\n read_input.label.set_shape([1])\n\n # Ensure that the random shuffling has good mixing properties.\n min_fraction_of_examples_in_queue = 0.4\n min_queue_examples = int(num_examples_per_epoch *\n min_fraction_of_examples_in_queue)\n\n # Generate a batch of images and labels by building up a queue of examples.\n return _generate_image_and_label_batch(float_image, read_input.label,\n min_queue_examples, batch_size,\n shuffle=False)\n\n" ]
[ [ "tensorflow.image.resize_image_with_crop_or_pad", "tensorflow.image.random_brightness", "tensorflow.transpose", "tensorflow.image.random_flip_left_right", "tensorflow.image.random_contrast", "tensorflow.slice", "tensorflow.summary.image", "tensorflow.decode_raw", "tensorflow.cast", "tensorflow.reshape", "tensorflow.gfile.Exists", "tensorflow.FixedLengthRecordReader", "tensorflow.random_crop", "tensorflow.train.string_input_producer", "tensorflow.image.per_image_standardization", "tensorflow.train.batch", "tensorflow.train.shuffle_batch" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
mo6zes/Reproducing-Deep-Fair-Clustering
[ "91f915436821eb05cdd021d3e9eb050a248fe993" ]
[ "dfc.py" ]
[ "import torch\nimport wandb\nfrom sklearn.metrics import normalized_mutual_info_score\nfrom torch import nn\nfrom torch.nn import Parameter\n\nfrom adverserial import AdversarialNetwork, adv_loss\nfrom dataloader import mnist_usps\nfrom eval import predict, cluster_accuracy, balance, tsne_visualization\nfrom utils import set_seed, AverageMeter, aff, target_distribution, inv_lr_scheduler\nimport matplotlib.pyplot as plt\n\n\nclass ClusterAssignment(nn.Module):\n def __init__(self, cluster_number, embedding_dimension, alpha, cluster_centers):\n \"\"\"\n Module to handle the soft assignment, for a description see in 3.1.1. in Xie/Girshick/Farhadi,\n where the Student's t-distribution is used measure similarity between feature vector and each\n cluster centroid.\n\n Args:\n cluster_number: number of clusters\n embedding_dimension: embedding dimension of feature vectors\n alpha: representing the degrees of freedom in the t-distribution, default 1.0\n cluster_centers: clusters centers to initialise, if None then use Xavier uniform\n \"\"\"\n\n super(ClusterAssignment, self).__init__()\n self.embedding_dimension = embedding_dimension\n self.cluster_number = cluster_number\n self.alpha = alpha\n if cluster_centers is None:\n initial_cluster_centers = torch.zeros(\n self.cluster_number,\n self.embedding_dimension,\n dtype=torch.float\n )\n nn.init.xavier_uniform_(initial_cluster_centers)\n else:\n initial_cluster_centers = cluster_centers\n self.cluster_centers = Parameter(initial_cluster_centers)\n\n def forward(self, batch):\n \"\"\"\n Compute the soft assignment for a batch of feature vectors, returning a batch of assignments\n for each cluster.\n Args:\n batch (FloatTensor): [batch size, embedding dimension]\n\n Returns:\n FloatTensor [batch size, number of clusters]\n \"\"\"\n norm_squared = torch.sum((batch.unsqueeze(1) - self.cluster_centers) ** 2, 2)\n numerator = 1.0 / (1.0 + (norm_squared / self.alpha))\n\n return numerator / torch.sum(numerator, dim=1, keepdim=True)\n\n\nclass DFC(nn.Module):\n def __init__(self, cluster_number, hidden_dimension, alpha=1):\n \"\"\"\n Module which holds all the moving parts of the DEC algorithm, as described in\n Xie/Girshick/Farhadi; this includes the AutoEncoder stage and the ClusterAssignment stage.\n Args:\n cluster_number: number of clusters\n hidden_dimension: hidden dimension, output of the encoder\n alpha: parameter representing the degrees of freedom in the t-distribution, default = 1\n \"\"\"\n\n super(DFC, self).__init__()\n self.hidden_dimension = hidden_dimension\n self.cluster_number = cluster_number\n self.alpha = alpha\n self.assignment = ClusterAssignment(cluster_number, self.hidden_dimension, alpha, cluster_centers=None)\n\n def forward(self, batch):\n \"\"\"\n Compute the cluster assignment using the ClusterAssignment after running the batch\n through the encoder part of the associated AutoEncoder module.\n Args:\n batch (FloatTensor): [batch size, embedding dimension]\n Returns:\n FloatTensor: [batch_size, number of clusters]\n \"\"\"\n return self.assignment(batch)\n\n def get_parameters(self):\n return [{\"params\": self.parameters(), \"lr_mult\": 1}]\n\n\ndef train(args, dataloader_list, encoder, encoder_group_0=None, encoder_group_1=None, dfc_group_0=None,\n dfc_group_1=None, device='cpu', centers=None, get_loss_trade_off=lambda step: (10, 10, 10), save_name='DFC'):\n \"\"\"Trains DFC and optionally the critic,\n\n automatically saves when finished training\n\n Args:\n args: Namespace object which contains config set from 
argument parser\n {\n lr,\n seed,\n iters,\n log_dir,\n test_interval,\n adv_multiplier,\n dfc_hidden_dim\n }\n dataloader_list (list): this list may consist of only 1 dataloader or multiple\n encoder: Encoder to use\n encoder_group_0: Optional pre-trained golden standard model\n encoder_group_1: Optional pre-trained golden standard model\n dfc_group_0: Optional cluster centers file obtained with encoder_group_0\n dfc_group_1: Optional cluster centers file obtained with encoder_group_1\n device: Device configuration\n centers: Initial centers clusters if available\n get_loss_trade_off: Proportional importance of individual loss functions\n save_name: Prefix for save files\n Returns:\n DFC: A trained DFC model\n \"\"\"\n\n set_seed(args.seed)\n if args.half_tensor:\n torch.set_default_tensor_type('torch.HalfTensor')\n\n dfc = DFC(cluster_number=args.cluster_number, hidden_dimension=args.dfc_hidden_dim).to(device)\n wandb.watch(dfc)\n\n critic = AdversarialNetwork(in_feature=args.cluster_number,\n hidden_size=32,\n max_iter=args.iters,\n lr_mult=args.adv_multiplier).to(device)\n wandb.watch(critic)\n\n if not (centers is None):\n cluster_centers = centers.clone().detach().requires_grad_(True).to(device)\n with torch.no_grad():\n print(\"loading clustering centers...\")\n dfc.state_dict()['assignment.cluster_centers'].copy_(cluster_centers)\n\n encoder_param = encoder.get_parameters() if args.encoder_type == 'vae' else [\n {\"params\": encoder.parameters(), \"lr_mult\": 1}]\n optimizer = torch.optim.Adam(dfc.get_parameters() + encoder_param + critic.get_parameters(), lr=args.dec_lr,\n weight_decay=5e-4)\n\n criterion_c = nn.KLDivLoss(reduction=\"sum\")\n criterion_p = nn.MSELoss(reduction=\"sum\")\n C_LOSS = AverageMeter()\n F_LOSS = AverageMeter()\n P_LOSS = AverageMeter()\n\n partition_loss_enabled = True\n if not encoder_group_0 or not encoder_group_1 or not dfc_group_0 or not dfc_group_1:\n print(\"Missing Golden Standard models, switching to DEC mode instead of DFC.\")\n partition_loss_enabled = False\n\n if partition_loss_enabled:\n encoder_group_0.eval(), encoder_group_1.eval()\n dfc_group_0.eval(), dfc_group_1.eval()\n\n print(\"Start training\")\n assert 0 < len(dataloader_list) < 3\n len_image_0 = len(dataloader_list[0])\n len_image_1 = len(dataloader_list[1]) if len(dataloader_list) == 2 else None\n for step in range(args.iters):\n encoder.train()\n dfc.train()\n\n if step % len_image_0 == 0:\n iter_image_0 = iter(dataloader_list[0])\n if len_image_1 and step % len_image_1 == 0:\n iter_image_1 = iter(dataloader_list[1])\n\n image_0, _ = iter_image_0.__next__()\n image_0 = image_0.to(device)\n if not (len_image_1 is None):\n image_1, _ = iter_image_1.__next__()\n image_1 = image_1.to(device)\n image = torch.cat((image_0, image_1), dim=0)\n else:\n image_1 = None\n image = torch.cat((image_0,), dim=0)\n\n if args.encoder_type == 'vae':\n z, _, _ = encoder(image)\n elif args.encoder_type == 'resnet50':\n z = encoder(image)\n\n else:\n raise Exception('Wrong encoder type, how did you get this far in running the code?')\n output = dfc(z)\n features_enc_0 = encoder_group_0(image_0)[0] if args.encoder_type == 'vae' else encoder_group_0(image_0)\n predict_0 = dfc_group_0(features_enc_0)\n features_enc_1 = encoder_group_1(image_1)[0] if args.encoder_type == 'vae' else encoder_group_1(image_1)\n predict_1 = dfc_group_1(features_enc_1) if not (image_1 is None) else None\n\n output_0, output_1 = output[0:args.bs, :], output[args.bs:args.bs * 2, :] if not (predict_1 is None) else None\n 
target_0, target_1 = target_distribution(output_0).detach(), target_distribution(output_1).detach() if not (\n predict_1 is None) else None\n\n # Equaition (5) in the paper\n # output_0 and output_1 are probability distribution P of samples being assinged to a class in k\n # target_0 and target_1 are auxiliary distribuion Q calculated based on P. Eqation (4) in the paper\n if not (output_1 is None):\n clustering_loss = 0.5 * criterion_c(output_0.log(), target_0) + 0.5 * criterion_c(output_1.log(), target_1)\n else:\n clustering_loss = criterion_c(output_0.log(), target_0)\n\n # Equation (2) in the paper\n # output = D(A(F(X)))\n # critic is the distribuition of categorical sensitive subgroup variable G (?)\n if len(dataloader_list) > 1:\n fair_loss, critic_acc = adv_loss(output, critic, device=device)\n else:\n fair_loss, critic_acc = 0, 0\n\n if partition_loss_enabled:\n # Equation (3) in the paper\n # output_0 and output_1 are the output of the pretrained encoder\n # predict_0 and predict_1 are the soft cluster assignments of the DFC.\n # loss is high if the outputs and predictions (and this the cluster structures) differ.\n if not (predict_1 is None):\n partition_loss = 0.5 * criterion_p(aff(output_0), aff(predict_0).detach()) \\\n + 0.5 * criterion_p(aff(output_1), aff(predict_1).detach())\n else:\n partition_loss = criterion_p(aff(output_0), aff(predict_0).detach())\n else:\n partition_loss = 0\n\n loss_trade_off = get_loss_trade_off(step)\n if args.encoder_type == 'resnet50' and args.dataset == 'office_31': # alpha_s\n loss_trade_off = list(loss_trade_off)\n loss_trade_off[1] = ((512 / 128) ** 2) * (31 / 10)\n\n total_loss = loss_trade_off[0] * fair_loss + loss_trade_off[1] * partition_loss + loss_trade_off[\n 2] * clustering_loss\n\n optimizer = inv_lr_scheduler(optimizer, args.lr, step, args.iters)\n optimizer.zero_grad()\n total_loss.backward()\n optimizer.step()\n\n C_LOSS.update(clustering_loss)\n F_LOSS.update(fair_loss)\n P_LOSS.update(partition_loss)\n\n wandb.log({f\"{save_name} Train C Loss Avg\": C_LOSS.avg, f\"{save_name} Train F Loss Avg\": F_LOSS.avg,\n f\"{save_name} Train P Loss Avg\": P_LOSS.avg, f\"{save_name} step\": step,\n f\"{save_name} Critic ACC\": critic_acc})\n wandb.log({f\"{save_name} Train C Loss Cur\": C_LOSS.val, f\"{save_name} Train F Loss Cur\": F_LOSS.val,\n f\"{save_name} Train P Loss Cur\": P_LOSS.val, f\"{save_name} step\": step})\n\n if step % args.test_interval == args.test_interval - 1 or step == 0:\n\n predicted, labels = predict(dataloader_list, encoder, dfc, device=device, encoder_type=args.encoder_type)\n predicted, labels = predicted.cpu().numpy(), labels.numpy()\n _, accuracy = cluster_accuracy(predicted, labels, args.cluster_number)\n nmi = normalized_mutual_info_score(labels, predicted, average_method=\"arithmetic\")\n bal, en_0, en_1 = balance(predicted, len_image_0, k=args.cluster_number)\n\n wandb.log(\n {f\"{save_name} Train Accuracy\": accuracy, f\"{save_name} Train NMI\": nmi, f\"{save_name} Train Bal\": bal,\n f\"{save_name} Train Entropy 0\": en_0,\n f\"{save_name} Train Entropy 1\": en_1, f\"{save_name} step\": step})\n\n print(\"Step:[{:03d}/{:03d}] \"\n \"Acc:{:2.3f};\"\n \"NMI:{:1.3f};\"\n \"Bal:{:1.3f};\"\n \"En:{:1.3f}/{:1.3f};\"\n \"Clustering.loss:{C_Loss.avg:3.2f};\"\n \"Fairness.loss:{F_Loss.avg:3.2f};\"\n \"Partition.loss:{P_Loss.avg:3.2f};\".format(step + 1, args.iters, accuracy, nmi, bal, en_0,\n en_1, C_Loss=C_LOSS, F_Loss=F_LOSS, P_Loss=P_LOSS))\n\n # log tsne visualisation\n if args.encoder_type == \"vae\":\n 
tsne_img = tsne_visualization(dataloader_list, encoder, args.cluster_number,\n encoder_type=args.encoder_type, device=device)\n\n if not (tsne_img is None):\n wandb.log({f\"{save_name} TSNE\": plt, f\"{save_name} step\": step})\n\n torch.save(dfc.state_dict(), f'{args.log_dir}DFC_{save_name}.pth')\n\n if len(dataloader_list) > 1:\n torch.save(critic.state_dict(), f'{args.log_dir}CRITIC_{save_name}.pth')\n\n return dfc\n" ]
[ [ "torch.set_default_tensor_type", "torch.nn.Parameter", "torch.nn.KLDivLoss", "torch.zeros", "torch.cat", "torch.sum", "torch.no_grad", "torch.nn.init.xavier_uniform_", "sklearn.metrics.normalized_mutual_info_score", "torch.nn.MSELoss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Hoa-Lab/audiogram
[ "91fe29ebb1d460ef97bbb8014bf529ff443a134c" ]
[ "b03_model/a04_dl_02_score.py" ]
[ "import pandas as pd\nimport numpy as np\nimport pickle\nimport tensorflow.keras as keras\nfrom sklearn.metrics import f1_score, accuracy_score\nfrom pathlib import Path\nfrom sklearn.metrics import confusion_matrix\n\n#--------------------------------------------------------\nfd_out='./out/a04_dl_02_score'\nfd_mod='./out/a04_dl_01_mod'\nfd_in='./out/a04_dl_00_pp'\n\n#-------------------------------------------------------\nPath(fd_out).mkdir(exist_ok=True, parents=True)\n\n#-------------------------------------------------------\ndef get_score(X, y, model):\n\t#pred\n\ty_pred=model.predict(X)\n\ty_pred=np.argmax(y_pred, axis=1)\n\tdf_mtx=pd.DataFrame(confusion_matrix(y, y_pred, labels=None), columns=['None_p', 'Tumor_p'], index=['None_t', 'Tumor_t'])\n\t#score\n\tf1=f1_score(y, y_pred)\n\taccu=accuracy_score(y, y_pred)\n\treturn df_mtx, f1, accu\n\n####################################################################\n#train\nX=np.load(f'{fd_in}/X_train.npy')\ny=np.load(f'{fd_in}/y_train.npy')\n\nl_data=[]\nfor i in range(3):\n\t#pp\n\tmodel=keras.models.load_model(f'{fd_mod}/mod_{i}')\n\t#get score\n\tdf, f1, accu=get_score(X, y, model)\n\tdf.to_csv(f'{fd_out}/train_{i}.csv')\n\tl_data.append((f'dl-{i}', f1, accu))\ndf=pd.DataFrame(l_data, columns=['mod', 'f1', 'accu'])\t\ndf.to_csv(f'{fd_out}/score_train.csv', index=False)\n\n#---------------------------------------------------------\n#test\nX=np.load(f'{fd_in}/X_test.npy')\ny=np.load(f'{fd_in}/y_test.npy')\n\nl_data=[]\nfor i in range(3):\n\t#pp\n\tmodel=keras.models.load_model(f'{fd_mod}/mod_{i}')\n\t#get score\n\tdf, f1, accu=get_score(X, y, model)\n\tdf.to_csv(f'{fd_out}/test_{i}.csv')\n\tl_data.append((f'dl-{i}', f1, accu))\ndf=pd.DataFrame(l_data, columns=['mod', 'f1', 'accu'])\t\ndf.to_csv(f'{fd_out}/score_test.csv', index=False)\n\n\n\n\n\n\n\n\n" ]
[ [ "tensorflow.keras.models.load_model", "sklearn.metrics.confusion_matrix", "pandas.DataFrame", "numpy.argmax", "numpy.load", "sklearn.metrics.f1_score", "sklearn.metrics.accuracy_score" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] } ]
ac2sherry/DAT210x-ac2sherry
[ "c8ded1109dda5ac59b07f1608e2cb7e7d9396ac5" ]
[ "Module6/assignment1.py" ]
[ "import matplotlib as mpl\nimport matplotlib.pyplot as plt\n\nimport pandas as pd\nimport numpy as np \nimport time\n\n\n# \n# INFO: Your Parameters.\n# You can adjust them after completing the lab\nC = 1\nkernel = 'linear'\niterations = 5000 # TODO: Change to 200000 once you get to Question#2\n\n#\n# INFO: You can set this to false if you want to\n# draw the full square matrix\nFAST_DRAW = True\n\n\n\ndef drawPlots(model, X_train, X_test, y_train, y_test, wintitle='Figure 1'):\n # INFO: A convenience function for you\n # You can use this to break any higher-dimensional space down\n # And view cross sections of it.\n\n # If this line throws an error, use plt.style.use('ggplot') instead\n mpl.style.use('ggplot') # Look Pretty\n\n padding = 3\n resolution = 0.5\n max_2d_score = 0\n\n y_colors = ['#ff0000', '#00ff00', '#0000ff']\n my_cmap = mpl.colors.ListedColormap(['#ffaaaa', '#aaffaa', '#aaaaff'])\n colors = [y_colors[i] for i in y_train]\n num_columns = len(X_train.columns)\n\n fig = plt.figure()\n fig.canvas.set_window_title(wintitle)\n \n cnt = 0\n for col in range(num_columns):\n for row in range(num_columns):\n # Easy out\n if FAST_DRAW and col > row:\n cnt += 1\n continue\n\n ax = plt.subplot(num_columns, num_columns, cnt + 1)\n plt.xticks(())\n plt.yticks(())\n\n # Intersection:\n if col == row:\n plt.text(0.5, 0.5, X_train.columns[row], verticalalignment='center', horizontalalignment='center', fontsize=12)\n cnt += 1\n continue\n\n\n # Only select two features to display, then train the model\n X_train_bag = X_train.ix[:, [row,col]]\n X_test_bag = X_test.ix[:, [row,col]]\n model.fit(X_train_bag, y_train)\n\n # Create a mesh to plot in\n x_min, x_max = X_train_bag.ix[:, 0].min() - padding, X_train_bag.ix[:, 0].max() + padding\n y_min, y_max = X_train_bag.ix[:, 1].min() - padding, X_train_bag.ix[:, 1].max() + padding\n xx, yy = np.meshgrid(np.arange(x_min, x_max, resolution),\n np.arange(y_min, y_max, resolution))\n\n # Plot Boundaries\n plt.xlim(xx.min(), xx.max())\n plt.ylim(yy.min(), yy.max())\n\n # Prepare the contour\n Z = model.predict(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n plt.contourf(xx, yy, Z, cmap=my_cmap, alpha=0.8)\n plt.scatter(X_train_bag.ix[:, 0], X_train_bag.ix[:, 1], c=colors, alpha=0.5)\n\n\n score = round(model.score(X_test_bag, y_test) * 100, 3)\n plt.text(0.5, 0, \"Score: {0}\".format(score), transform = ax.transAxes, horizontalalignment='center', fontsize=8)\n max_2d_score = score if score > max_2d_score else max_2d_score\n\n cnt += 1\n\n print (\"Max 2D Score: \", max_2d_score)\n fig.set_tight_layout(True)\n\n\ndef benchmark(model, X_train, X_test, y_train, y_test, wintitle='Figure 1'):\n print ('\\n\\n' + wintitle + ' Results')\n s = time.time()\n for i in range(iterations):\n #\n # TODO: train the classifier on the training data / labels:\n #\n # .. your code here ..\n model.fit(X_train, y_train)\n print (\"{0} Iterations Training Time: \".format(iterations), time.time() - s )\n\n s = time.time()\n for i in range(iterations):\n #\n # TODO: score the classifier on the testing data / labels:\n #\n # .. your code here ..\n score = model.score(X_test, y_test)\n print (\"{0} Iterations Scoring Time: \".format(iterations), time.time() - s)\n print (\"High-Dimensionality Score: \", round((score*100), 3))\n\n\n\n# \n# TODO: Load up the wheat dataset into dataframe 'X'\n# Verify you did it properly.\n# Indices shouldn't be doubled, nor weird headers...\n#\n# .. 
your code here ..\nX = pd.read_csv(\"Datasets/wheat.data\", index_col = 'id')\n\n# INFO: An easy way to show which rows have nans in them\nprint (X[pd.isnull(X).any(axis=1)])\n\n\n# \n# TODO: Go ahead and drop any row with a nan\n#\n# .. your code here ..\nX = X.dropna(how = \"any\")\n\n\n# \n# INFO: # In the future, you might try setting the nan values to the\n# mean value of that column, the mean should only be calculated for\n# the specific class rather than across all classes, now that you\n# have the labels\n\n\n#\n# TODO: Copy the labels out of the dset into variable 'y' then Remove\n# them from X. Encode the labels, using the .map() trick we showed\n# you in Module 5 -- canadian:0, kama:1, and rosa:2\n#\n# .. your code here ..\ny = X['wheat_type']\nX = X.drop(['wheat_type'], axis = 1)\ny = y.map({'canadian':0, 'kama':1, 'rosa':2})\n\n# \n# TODO: Split your data into test / train sets\n# Your test size can be 30% with random_state 7.\n# Use variable names: X_train, X_test, y_train, y_test\n#\n# .. your code here ..\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=7)\n\n\n\n#\n# TODO: Create an SVC classifier named svc\n# Use a linear kernel, and set the C value to C\n#\n# .. your code here ..\nfrom sklearn.svm import SVC\nsvc = SVC(kernel = 'linear', C = C)\n\n#\n# TODO: Create an KNeighbors classifier named knn\n# Set the neighbor count to 5\n#\n# .. your code here ..\nfrom sklearn.neighbors import KNeighborsClassifier\nknn = KNeighborsClassifier(n_neighbors = 5)\n\n\n\n\nbenchmark(knn, X_train, X_test, y_train, y_test, 'KNeighbors')\ndrawPlots(knn, X_train, X_test, y_train, y_test, 'KNeighbors')\n\nbenchmark(svc, X_train, X_test, y_train, y_test, 'SVC')\ndrawPlots(svc, X_train, X_test, y_train, y_test, 'SVC')\n\nplt.show()\n\n\n\n#\n# BONUS: After submitting your answers, toy around with\n# gamma, kernel, and C.\n\n\n" ]
[ [ "matplotlib.pyplot.text", "matplotlib.pyplot.yticks", "pandas.read_csv", "matplotlib.pyplot.contourf", "matplotlib.pyplot.scatter", "matplotlib.style.use", "pandas.isnull", "numpy.arange", "sklearn.model_selection.train_test_split", "sklearn.neighbors.KNeighborsClassifier", "matplotlib.pyplot.subplot", "matplotlib.colors.ListedColormap", "sklearn.svm.SVC", "matplotlib.pyplot.xticks", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
hammuRawi/DeepField
[ "3b336ed110ff806316f1f6a99b212f99256a6b56" ]
[ "deepfield/datasets/datasets.py" ]
[ "# pylint: disable=too-many-lines\n\"\"\"Dataset wrappers for Fields.\"\"\"\nimport os\nimport pickle\nimport inspect\nimport numpy as np\nimport pandas as pd\nimport torch\n\nfrom torch.utils.data import Dataset\n\nfrom ..field import Field\nfrom ..field.base_component import BaseComponent\nfrom ..field.utils import recursive_insensitive_glob, hasnested, overflow_safe_mean, get_spatial_perf\nfrom .utils import get_config, STATES_KEYWORD\nfrom .transforms import ToNumpy, Normalize, Compose, RemoveBatchDimension, AddBatchDimension, \\\n Transform, NON_NORMALIZED_ATTRS\n\nSEQUENTIAL_ATTRS = ['STATES', 'CONTROL']\nTABLES_WITHOUT_INDEX = ['DENSITY']\nINVALID_VALUE_FILLER = -1\nCONTROL_TO_RESULTS_KW = {'WBHP': 'BHPT'}\n\n\ndef safe_check(comp, state, expected, default=False):\n \"\"\"Check that components's state has expected value or return default if state is not defined.\"\"\"\n try:\n return getattr(comp.state, state) == expected\n except AttributeError:\n return default\n\n\nclass FieldDataset(Dataset): # pylint: disable=too-many-instance-attributes\n \"\"\"Baseclass for dataset of fields loaded with similar configs.\"\"\"\n default_sample_attrs = {\n 'MASKS': ['ACTNUM', 'TIME'],\n 'GRID': [],\n 'ROCK': ['PORO', 'PERMX', 'PERMY', 'PERMZ'],\n 'STATES': ['PRESSURE', 'RS', 'SGAS', 'SOIL', 'SWAT'],\n 'CONTROL': ['BHPT'],\n }\n\n _attrs_sampled_as_dict = ('MASKS', 'GRID', 'TABLES')\n\n def __init__(self, src, sample_attrs=None, fmt=('dat', 'data', 'hdf5'), subset_generator=None,\n unravel_model=None, from_samples=False, allow_change_preloaded=False):\n \"\"\"\n Parameters\n ----------\n src: str, Field, FieldSample or list of Fields or FieldSamples\n Path to a directory containing fields for the dataset or preloaded Fields\n sample_attrs: dict\n Attributes to be represented in samples\n fmt: str or tuple\n Format in which fields are represented\n subset_generator: callable or None\n Function generating subsequences for sequential attrs (states, control)\n Should return array-like objects with timestep indices\n If None, full sequences will be sampled\n unravel_model: bool or None\n Either or not unravel loaded models\n If None, will be inferred from sample_attrs (set to False if 'neighbours' or 'distances' keys are presented)\n from_samples: bool\n If True, tries to load samples from previously dumped dataset (with FieldDataset.dump_samples).\n The sample_attrs will not affect the content of the loaded samples.\n The transforms will still be applied.\n \"\"\"\n # TODO: add possibility to make subsets of timesteps limited to constant control\n super().__init__()\n if isinstance(fmt, str):\n fmt = (fmt, )\n files = []\n self.root_dir = None\n self.preloaded = None\n if isinstance(src, str):\n for f in fmt:\n files += recursive_insensitive_glob(src, pattern='*.%s' % f, return_relative=True)\n self.root_dir = src\n else:\n self.preloaded = np.atleast_1d(src)\n self.fmt = fmt\n self.files = files\n self.transform = None\n self._sample_attrs = None\n self.sample_attrs = sample_attrs if sample_attrs is not None else self.default_sample_attrs\n self.from_samples = from_samples\n self.allow_change_preloaded = allow_change_preloaded\n\n self.config = get_config()\n # TODO make config dependent on the sample attrs\n self.subset_generator = subset_generator\n\n self.mean = None\n self.std = None\n self.min = None\n self.max = None\n\n self.masks_getter_map = {\n 'ACTNUM': self._get_actnum,\n 'WELL_MASK': self._get_well_mask,\n 'NAMED_WELL_MASK': self._get_named_well_mask,\n 'NEIGHBOURS': 
self._get_neighbours,\n 'INVALID_NEIGHBOURS_MASK': self._get_invalid_neighbours_mask,\n 'TIME': self._get_time,\n 'CF_MASK': self._get_connection_factors,\n 'PERF_MASK': self._get_perforation_mask\n }\n self.grid_getter_map = {\n 'DISTANCES': self._get_distances,\n 'XYZ': self._get_xyz\n }\n self.attrs_getter_map = {\n 'STATES': self._get_states,\n 'ROCK': self._get_rock,\n 'CONTROL': self._get_control\n }\n\n invalid_unravel_attrs = {\n 'MASKS': ['NEIGHBOURS'],\n 'GRID': ['DISTANCES']\n }\n if unravel_model:\n for comp, attrs in invalid_unravel_attrs.items():\n for attr in attrs:\n if hasnested(self.sample_attrs, comp, attr):\n raise ValueError('Can not unravel model and sample %s simultaneously.' % attr)\n if unravel_model is None:\n ravel = False\n for comp, attrs in invalid_unravel_attrs.items():\n for attr in attrs:\n ravel = ravel or hasnested(self.sample_attrs, comp, attr)\n unravel_model = not ravel\n self.unravel_model = unravel_model\n\n def __len__(self):\n if self.preloaded is not None:\n return len(self.preloaded)\n return len(self.files)\n\n def __getitem__(self, idx):\n if self.from_samples:\n sample = self._load_sample(idx)\n else:\n sample = self._get_sample(idx)\n if self.transform:\n sample = self.transform(sample)\n return sample\n\n def _get_sample(self, idx): # pylint: disable=too-many-branches\n \"\"\"Get sample from the dataset\n\n Parameters\n ----------\n idx: int, torch.Tensor\n Index of the field\n\n Returns\n -------\n sample: FieldSample\n \"\"\"\n # TODO time and batch dimensions are transposed\n if torch.is_tensor(idx):\n idx = idx.tolist()\n\n if self.subset_generator is not None:\n sequence_subset = list(self.subset_generator())\n if not sequence_subset:\n raise ValueError('subset_generator should not generate empty subsets!')\n else:\n sequence_subset = None\n config = self.config.copy()\n config[STATES_KEYWORD] = {'attrs': self.config[STATES_KEYWORD]['attrs'], 'subset': sequence_subset}\n if 'Aquifers' in config:\n del config['Aquifers']\n if self.preloaded is None:\n model = self._load_model(idx, config)\n else:\n if isinstance(self.preloaded[idx], FieldSample):\n return self.preloaded[idx]\n model = self._get_preloaded(idx)\n\n sample = {}\n getter_kwargs = dict(\n sequence_subset=sequence_subset, fill_invalid_neighbours=INVALID_VALUE_FILLER, neighbouring_radius=1\n )\n\n for comp, attrs in self.sample_attrs.items():\n if comp == 'MASKS':\n sample[comp] = {}\n for attr, mask_getter in self.masks_getter_map.items():\n # FIXME\n if not self.unravel_model and attr.upper() in ('CF_MASK', 'PERF_MASK'):\n continue\n sample[comp][attr] = mask_getter(model, **getter_kwargs)\n elif comp == 'GRID':\n sample[comp] = {}\n for attr in attrs:\n sample[comp][attr] = self.grid_getter_map[attr](model, **getter_kwargs)\n elif comp == 'TABLES':\n sample[comp] = {}\n sample[comp] = self._get_tables(model, attrs)\n elif comp == 'CONTROL':\n res = self.attrs_getter_map[comp](model, attrs, **getter_kwargs)\n sample[comp] = res['control']\n sample['MASKS']['CONTROL_T'] = res['t']\n else:\n sample[comp] = self.attrs_getter_map[comp](model, attrs, **getter_kwargs)\n\n sample = FieldSample(field=model, dataset=self, **sample)\n for key in list(sample.masks.keys()):\n if sample.masks[key] is None:\n del sample.masks[key]\n if not self.unravel_model:\n sample.as_ravel(inplace=True)\n return sample\n\n def _get_preloaded(self, idx):\n \"\"\"Get a field from preloaded.\"\"\"\n model = self.preloaded[idx]\n if self.allow_change_preloaded:\n if model.state.spatial != 
self.unravel_model:\n if self.unravel_model:\n model.to_spatial()\n else:\n model.ravel()\n if 'CONTROL' in self.sample_attrs:\n if not model.wells.state.all_tracks_complete:\n model.wells.drop_incomplete()\n if not model.wells.state.has_blocks:\n model.wells.get_wellblocks(model.grid)\n if not model.wells.state.full_perforation:\n model.wells.apply_perforations()\n if not model.wells.state.all_tracks_inside:\n model.wells.drop_outside()\n if model.meta['MODEL_TYPE'] == 'ECL':\n model.wells.compute_events(grid=model.grid)\n else:\n assert model.state.spatial == self.unravel_model\n if 'CONTROL' in self.sample_attrs:\n assert model.wells.state.all_tracks_complete\n assert model.wells.state.has_blocks\n assert model.wells.state.full_perforation\n assert model.wells.state.all_tracks_inside\n return model\n\n def _load_model(self, idx, config=None, force_wells_calculations=False):\n \"\"\"Loads field by index.\n\n Parameters\n ----------\n idx: int\n config: dict, optional\n Config used while loading the model\n\n Returns\n -------\n model: Field\n \"\"\"\n _, fmt = os.path.splitext(self.files[idx])\n fmt = fmt.strip('.').lower()\n\n config = self.config if config is None else config\n\n if fmt == 'hdf5':\n if 'subset' not in config[STATES_KEYWORD] or config[STATES_KEYWORD]['subset'] is None:\n config = None\n else:\n for comp in config:\n config[comp]['attrs'] = None\n\n model = Field(path=os.path.join(self.root_dir, self.files[idx]), config=config,\n encoding='auto:10000', loglevel='ERROR')\n model.load(raise_errors=False)\n\n if self.unravel_model:\n model.to_spatial()\n if 'CONTROL' in self.sample_attrs:\n if not safe_check(model.wells, 'all_tracks_complete', True) or force_wells_calculations:\n model.wells.drop_incomplete()\n if not safe_check(model.wells, 'has_blocks', True) or force_wells_calculations:\n model.wells.get_wellblocks(grid=model.grid)\n if not safe_check(model.wells, 'full_perforation', True) or force_wells_calculations:\n model.wells.apply_perforations()\n if not safe_check(model.wells, 'all_tracks_inside', True) or force_wells_calculations:\n model.wells.drop_outside()\n if model.meta['MODEL_TYPE'] == 'ECL':\n model.wells.compute_events(grid=model.grid)\n if not self.unravel_model:\n model.ravel()\n return model\n\n def _load_sample(self, idx):\n sample = FieldSample(os.path.join(self.root_dir, self.files[idx]))\n sample.load()\n return sample\n\n def _get_actnum(self, model, **kwargs):\n \"\"\"Get ACTNUM of the model\"\"\"\n _ = kwargs\n if hasattr(model.grid, 'actnum'):\n return getattr(model.grid, 'actnum').astype(np.bool)\n actnum = np.ones(model.grid.dimens, dtype=np.bool)\n return actnum if self.unravel_model else actnum.ravel(order='F')\n\n def _get_well_mask(self, model, **kwargs):\n \"\"\"Get well mask of the model.\"\"\"\n _ = kwargs\n if hasnested(self.sample_attrs, 'MASKS', 'WELL_MASK') or 'CONTROL' in self.sample_attrs:\n return model.well_mask != ''\n return None\n\n def _get_named_well_mask(self, model, **kwargs):\n \"\"\"Get well mask of the model.\"\"\"\n _ = kwargs\n if hasnested(self.sample_attrs, 'MASKS', 'NAMED_WELL_MASK') or 'CONTROL' in self.sample_attrs:\n well_mask = model.well_mask\n named_well_mask = {}\n for well in model.wells:\n named_well_mask[well.name] = well_mask == well.name\n return named_well_mask\n return None\n\n def _get_neighbours(self, model, fill_invalid_neighbours=INVALID_VALUE_FILLER, neighbouring_radius=-1, **kwargs):\n \"\"\"Get connectivity matrix of cells presented in the model.\"\"\"\n if 'MASKS' in self.sample_attrs and 
'NEIGHBOURS' in self.sample_attrs['MASKS']:\n neighbours = model.grid.get_neighbors_matrix(\n connectivity=neighbouring_radius,\n fill_value=fill_invalid_neighbours,\n ravel_index=True\n )\n # Indices are with respect to the full vectors: with active and non-active cells\n # We want indices with respect to the vector of active cells\n full_to_active_ind = kwargs['MASKS']['ACTNUM'].copy().astype(np.int)\n full_to_active_ind[full_to_active_ind == 1] = np.arange(full_to_active_ind.sum())\n full_to_active_ind = np.concatenate([full_to_active_ind, [-1]])\n neighbours = full_to_active_ind[neighbours.ravel()].reshape(neighbours.shape)\n # Neighbours should include the cell itself\n itself_ind = np.arange(neighbours.shape[0])[:, np.newaxis]\n neighbours = np.concatenate([itself_ind, neighbours], axis=1)\n return neighbours\n return None\n\n def _get_invalid_neighbours_mask(self, model, fill_invalid_neighbours=INVALID_VALUE_FILLER,\n neighbouring_radius=-1, **kwargs):\n \"\"\"Get mask of invalid neighbours (non-active or out of geometric bounds).\"\"\"\n _ = kwargs\n if hasnested(self.sample_attrs, 'GRID', 'DISTANCES'):\n neighbours = model.grid.get_neighbors_matrix(\n connectivity=neighbouring_radius,\n fill_value=fill_invalid_neighbours,\n ravel_index=True\n )\n return neighbours == INVALID_VALUE_FILLER\n return None\n\n @staticmethod\n def _get_time(model, sequence_subset=None, **kwargs):\n \"\"\"Get time in days associated with states timesteps relative to model start date.\"\"\"\n _ = kwargs\n dates = model.result_dates\n sec_in_day = 86400\n t = (dates - model.start).total_seconds().values / sec_in_day\n return t if sequence_subset is None else t[sequence_subset]\n\n def _get_connection_factors(self, model, sequence_subset=None, **kwargs):\n # FIXME calls the field's method twice\n _ = kwargs\n if sequence_subset is not None:\n res_dates = model.result_dates\n if res_dates.size:\n res_dates = res_dates[sequence_subset]\n date_range = (res_dates[0], res_dates[-1])\n else:\n date_range = None\n if hasnested(self.sample_attrs, 'MASKS', 'CF_MASK'):\n return model.get_spatial_connection_factors_and_perforation_ratio(date_range=date_range)[0]\n return None\n\n def _get_perforation_mask(self, model, sequence_subset=None, **kwargs):\n # FIXME calls the field's method twice\n _ = kwargs\n if sequence_subset is not None:\n res_dates = model.result_dates\n if res_dates.size:\n res_dates = res_dates[sequence_subset]\n date_range = (res_dates[0], res_dates[-1])\n else:\n date_range = None\n if hasnested(self.sample_attrs, 'MASKS', 'PERF_MASK'):\n return model.get_spatial_connection_factors_and_perforation_ratio(date_range=date_range)[1]\n return None\n\n @staticmethod\n def to_dates(model, t):\n \"\"\"Restore actual dates from time deltas.\"\"\"\n dates = model.start + np.array([pd.Timedelta(i, unit='day') for i in t])\n return pd.to_datetime(dates)\n\n @staticmethod\n def _get_distances(model, fill_invalid_neighbours=INVALID_VALUE_FILLER, neighbouring_radius=-1, **kwargs):\n \"\"\"Get matrix of distances for neighbouring cells.\"\"\"\n _ = kwargs\n return model.grid.calculate_neighbours_distances(\n connectivity=neighbouring_radius,\n fill_value=fill_invalid_neighbours\n )\n\n @staticmethod\n def _get_xyz(model, **kwargs):\n _ = kwargs\n return model.grid.xyz\n\n def _get_states(self, model, attrs, sequence_subset=None, **kwargs):\n \"\"\"Get stacked states sequence.\"\"\"\n _ = kwargs\n if (self.preloaded is not None) and (sequence_subset is not None):\n return np.stack([getattr(model.states, 
attr)[sequence_subset] for attr in attrs], axis=1)\n return np.stack([getattr(model.states, attr) for attr in attrs], axis=1)\n\n @staticmethod\n def _get_rock(model, attrs, **kwargs):\n \"\"\"Get stacked rock attributes.\"\"\"\n _ = kwargs\n return np.stack([getattr(model.rock, attr) for attr in attrs], axis=0)\n\n @staticmethod\n def _get_tables(model, attrs):\n \"\"\"Get sample table data\"\"\"\n return {\n attr: getattr(model.tables, attr).to_numpy() if attr in TABLES_WITHOUT_INDEX\n else getattr(model.tables, attr).to_numpy(include_index=True) for attr in attrs\n }\n\n @staticmethod\n def _get_control(model, attrs, sequence_subset=None, **kwargs):\n \"\"\"Get control in a spatial form (defined for all cells, meaningful values in\n perforated cells, other cells are filled with zeros) with corresponding dates.\n \"\"\"\n _ = kwargs\n if sequence_subset is not None:\n res_dates = model.result_dates\n if res_dates.size:\n res_dates = res_dates[sequence_subset]\n date_range = (res_dates[0], res_dates[-1])\n else:\n date_range = None\n filtered_attrs = attrs.copy()\n if 'PROD_PERF_MASK' in attrs:\n filtered_attrs.remove('PROD_PERF_MASK')\n if 'INJE_PERF_MASK' in attrs:\n filtered_attrs.remove('INJE_PERF_MASK')\n\n output = model.get_spatial_well_control(filtered_attrs, date_range=date_range, fill_shut=0., fill_outside=0.)\n if 'PROD_PERF_MASK' in attrs or 'INJE_PERF_MASK' in attrs:\n control = []\n i = 0\n for attr in attrs:\n if attr == 'PROD_PERF_MASK':\n control.append(get_spatial_perf(model, sequence_subset, mode='PROD'))\n elif attr == 'INJE_PERF_MASK':\n control.append(get_spatial_perf(model, sequence_subset, mode='INJE'))\n else:\n control.append(output['control'][:, i][:, None])\n i += 1\n output['control'] = np.concatenate(control, axis=1)\n return output\n\n def set_transform(self, transform):\n \"\"\"Set transforms to be applied to each sample\n\n Parameters\n ----------\n transform: class\n Class of transform to apply\n list of Classes can be used to compose several transforms\n Returns\n -------\n out: FieldDataset\n \"\"\"\n if not isinstance(transform, (list, tuple)):\n transform = [transform]\n self.transform = []\n for t in transform:\n if inspect.isclass(t) and issubclass(t, Transform):\n if issubclass(t, Normalize):\n if self.std is None or self.mean is None:\n raise RuntimeError(\"Dataset's statistics are not calculated!\")\n self.transform.append(t(\n mean=self.filtered_statistics['MEAN'],\n std=self.filtered_statistics['STD'],\n unravel_model=self.unravel_model\n ))\n else:\n self.transform.append(t())\n else:\n self.transform.append(t)\n self.transform = Compose(self.transform)\n return self\n\n def dump_samples(self, path, n_epoch=1, prefix=None, state=True, **kwargs):\n \"\"\"Dump samples from the dataset.\n\n Parameters\n ----------\n path: str\n Path to the directory for dump.\n n_epoch: int\n Number of times to pass through the dataset.\n prefix: str, None\n Prefix for dumped samples.\n state: bool\n If True, dump the state of the samples\n kwargs: dict\n Additional named arguments for sample.dump\n\n Returns\n -------\n\n \"\"\"\n if not os.path.isdir(path):\n os.mkdir(path)\n prefix = prefix + '_' if prefix is not None else ''\n i = 0\n for _ in range(n_epoch):\n for sample in self:\n sample.dump(os.path.join(path, prefix+str(i)+'.hdf5'), state=state, **kwargs)\n i += 1\n return self\n\n def convert_to_other_fmt(self, new_root_dir, new_fmt='hdf5', results_to_events=True, **kwargs):\n \"\"\"Convert dataset to a new format.\n\n Parameters\n ----------\n 
new_root_dir: str\n Directory to save converted dataset\n new_fmt: str\n Extension to use\n kwargs: dict\n Any additional named arguments passed to Field.dump\n\n Returns\n -------\n FieldDataset\n \"\"\"\n if not os.path.exists(new_root_dir):\n os.makedirs(new_root_dir)\n for i, path in enumerate(self.files):\n path, _ = os.path.splitext(path)\n if os.path.split(path)[0]:\n os.makedirs(os.path.join(new_root_dir, os.path.split(path)[0]))\n path = '.'.join([path, new_fmt])\n\n model = self._load_model(i, force_wells_calculations=True)\n if results_to_events:\n model.wells.results_to_events(grid=model.grid)\n config = None if new_fmt == 'hdf5' else self.config\n model.dump(path=os.path.join(new_root_dir, path), config=config, **kwargs)\n\n self.__init__(\n src=new_root_dir,\n sample_attrs=self.sample_attrs,\n fmt=(new_fmt, ),\n subset_generator=self.subset_generator\n )\n return self\n\n @property\n def filtered_statistics(self):\n \"\"\"Filters out non-normalized attrs and attrs, which are not presented in `sample_attrs`, from statistics.\"\"\"\n filtered_stats = dict()\n for key, value in zip(('MEAN', 'STD', 'MIN', 'MAX'), (self.mean, self.std, self.min, self.max)):\n if value is None:\n raise RuntimeError(\"Dataset's statistics are not calculated!\")\n filtered_stat = {\n comp: {} for comp in self.sample_attrs\n if comp not in NON_NORMALIZED_ATTRS and len(self.sample_attrs[comp]) > 0\n }\n for comp in filtered_stat:\n if comp not in value:\n raise ValueError('Component \"%s\" is not presented in calculated statistics.' % comp)\n for attr in self.sample_attrs[comp]:\n if attr not in value[comp]:\n raise ValueError('Attribute \"%s\" of component \"%s\" is not presented in calculated statistics.'\n % (attr, comp))\n filtered_stat[comp][attr] = value[comp][attr]\n if comp not in self._attrs_sampled_as_dict:\n filtered_stat[comp] = np.stack(\n [filtered_stat[comp][attr] for attr in self.sample_attrs[comp]]\n )\n filtered_stats[key] = filtered_stat\n return filtered_stats\n\n def calculate_statistics(self): # pylint: disable=too-many-branches\n \"\"\"Calculate mean and std values for the attributes of the dataset.\"\"\"\n # Change sampling behavior for statistics' calculation.\n subset_generator, self.subset_generator = self.subset_generator, None\n unravel_model, self.unravel_model = self.unravel_model, False\n\n mean, mean_of_squares, std, minim, maxim = {}, {}, {}, {}, {}\n for comp in self.sample_attrs:\n if comp not in NON_NORMALIZED_ATTRS:\n mean[comp] = {attr: [] for attr in self.sample_attrs[comp]}\n mean_of_squares[comp] = {attr: [] for attr in self.sample_attrs[comp]}\n std[comp] = {attr: [] for attr in self.sample_attrs[comp]}\n minim[comp] = {attr: [] for attr in self.sample_attrs[comp]}\n maxim[comp] = {attr: [] for attr in self.sample_attrs[comp]}\n\n for i in range(len(self)):\n m, m_sq, mn, mx = self._get_model_statistics(i)\n for comp in m:#pylint:disable=consider-using-dict-items\n for attr in m[comp]:\n mean[comp][attr].append(m[comp][attr])\n mean_of_squares[comp][attr].append(m_sq[comp][attr])\n minim[comp][attr].append(mn[comp][attr])\n maxim[comp][attr].append(mx[comp][attr])\n\n for comp in mean:#pylint:disable=consider-using-dict-items\n for attr in mean[comp]:\n mean[comp][attr] = np.mean(mean[comp][attr], axis=0)\n mean_of_squares[comp][attr] = np.mean(mean_of_squares[comp][attr], axis=0)\n std[comp][attr] = np.sqrt(np.abs(mean_of_squares[comp][attr] - mean[comp][attr]**2))\n minim[comp][attr] = np.min(minim[comp][attr], axis=0)\n maxim[comp][attr] = 
np.max(maxim[comp][attr], axis=0)\n\n # Recover old sampling behavior\n self.subset_generator = subset_generator\n self.unravel_model = unravel_model\n\n self.mean, self.std, self.min, self.max = mean, std, minim, maxim\n return self\n\n def _get_model_statistics(self, idx):\n \"\"\"Get mean and mean of squares for the attributes of the model.\"\"\"\n sample = self._get_sample(idx)\n mean, mean_of_squares, minim, maxim = {}, {}, {}, {}\n for comp in sample.keys():\n if comp.upper() in NON_NORMALIZED_ATTRS:\n continue\n mask = sample.masks.well_mask if comp.upper() == 'CONTROL' else None\n\n mean[comp] = dict()\n mean_of_squares[comp] = dict()\n minim[comp] = dict()\n maxim[comp] = dict()\n if comp in self._attrs_sampled_as_dict:\n ax = 0\n for attr, arr in sample[comp].items():\n if attr.upper() == 'DISTANCES':\n arr = arr.copy().astype(np.float)\n arr[sample.masks.invalid_neighbours_mask] = np.nan\n mean[comp][attr] = np.nanmean(arr, axis=ax)\n mean_of_squares[comp][attr] = np.nanmean(np.power(arr, 2), axis=ax)\n minim[comp][attr] = np.nanmin(arr, axis=ax)\n maxim[comp][attr] = np.nanmax(arr, axis=ax)\n else:\n ax = 1 if comp not in SEQUENTIAL_ATTRS else (0, 2)\n if mask is not None:\n arr = sample[comp][..., mask]\n else:\n arr = sample[comp]\n\n comp_mean = overflow_safe_mean(arr, axis=ax)\n comp_mean_of_squares = overflow_safe_mean(np.power(arr, 2), axis=ax)\n comp_min = np.min(arr, axis=ax)\n comp_max = np.max(arr, axis=ax)\n for i, attr in enumerate(self.sample_attrs[comp]):\n mean[comp][attr] = comp_mean[i]\n mean_of_squares[comp][attr] = comp_mean_of_squares[i]\n minim[comp][attr] = comp_min[i]\n maxim[comp][attr] = comp_max[i]\n\n return mean, mean_of_squares, minim, maxim\n\n def dump_statistics(self, path):\n \"\"\"Dump mean and std values of the dataset into a file.\"\"\"\n if self.std is None or self.mean is None or self.min is None or self.max is None:\n raise RuntimeError(\"Dataset's statistics are not calculated!\")\n with open(path, 'wb') as f:\n pickle.dump([self.mean, self.std, self.min, self.max], f)\n\n def load_statistics(self, path):\n \"\"\"Load mean and std values of the dataset from a file.\"\"\"\n with open(path, 'rb') as f:\n self.mean, self.std, self.min, self.max = pickle.load(f)\n for kind in ('mean', 'std', 'min', 'max'):\n stats = getattr(self, kind)\n upper_stats = {}\n for comp, value in stats.items():\n if isinstance(value, dict):\n upper_stats[comp.upper()] = {}\n for attr, arr in value.items():\n upper_stats[comp.upper()][attr.upper()] = arr\n else:\n upper_stats[comp.upper()] = value\n setattr(self, kind, upper_stats)\n if 'CONTROL' in self.sample_attrs and 'CONTROL' in self.mean:\n for kind in ('mean', 'std', 'min', 'max'):\n stats = getattr(self, kind)\n for k in stats['CONTROL']:\n if k in CONTROL_TO_RESULTS_KW and CONTROL_TO_RESULTS_KW[k] in self.sample_attrs['CONTROL']:\n stats['CONTROL'][CONTROL_TO_RESULTS_KW[k]] = stats['CONTROL'].pop(k)\n\n @property\n def sample_attrs(self):\n \"\"\"Attributes represented in the samples.\"\"\"\n return self._sample_attrs\n\n @sample_attrs.setter\n def sample_attrs(self, x):\n self._sample_attrs = {\n comp.upper(): [attr.upper() for attr in x[comp]] for comp in x\n }\n\n\nclass FieldSample(BaseComponent):\n \"\"\"Class representing the samples from the dataset.\n\n\n Parameters\n ----------\n path: str, optional\n Path to the file. 
Only HDF5 files are supported at the moment.\n field: Field, optional\n dataset: FieldDataset, optional\n state: dict, optional\n sample: dict-like, optional\n\n \"\"\"\n class _decorators:\n \"\"\"Decorators for the FieldSample.\"\"\"\n @classmethod\n def without_batch_dimension(cls, method):\n \"\"\"Decorates sample methods to be applied without the batch dimension.\"\"\"\n def decorated(instance, inplace=False, **kwargs):\n batch_dimension = instance.state.batch_dimension if hasattr(instance.state,\n 'batch_dimension') else False\n if batch_dimension:\n instance = instance.transformed(RemoveBatchDimension, inplace=inplace)\n inplace = True\n instance = method(instance, inplace=inplace, **kwargs)\n if batch_dimension:\n instance = instance.transformed(AddBatchDimension, inplace=inplace)\n return instance\n return decorated\n\n def __init__(self, path=None, field=None, dataset=None, state=None, **sample):\n super().__init__(**sample)\n self._path = path\n self._field = field\n self.sample_attrs = dataset.sample_attrs if dataset is not None else None\n self.dataset = dataset\n if state is not None:\n self.init_state(**state)\n\n def _nested_dicts_to_base_components(self, class_name, d):\n if isinstance(d, dict):\n d = BaseComponent(class_name=class_name, **d)\n for key, value in d.items():\n value = self._nested_dicts_to_base_components(key, value)\n setattr(d, key, value)\n return d\n\n def __setattr__(self, key, value):\n if key[0] != '_':\n value = self._nested_dicts_to_base_components(key.upper(), value)\n super().__setattr__(key, value)\n\n def empty_like(self):\n \"\"\"Get an empty sample with the same state and the structure of embedded BaseComponents (if any).\"\"\"\n empty = super().empty_like()\n empty = FieldSample(field=self.field, dataset=self.dataset, state=empty.state.as_dict(), **dict(empty))\n empty.sample_attrs = self.sample_attrs\n return empty\n\n def copy(self):\n \"\"\"Get a copy of the sample.\"\"\"\n copy = super().copy()\n copy.dataset = self.dataset\n copy.field = self.field\n copy.sample_attrs = self.sample_attrs\n return copy\n\n def dump(self, path, **kwargs):\n \"\"\"Dump the sample into a file.\n\n Parameters\n ----------\n path: str\n Path to the file.\n kwargs: dict\n Additional named arguments passed to BaseComponent's dump method.\n\n \"\"\"\n fname = os.path.basename(path)\n fmt = os.path.splitext(fname)[1].strip('.')\n\n if fmt.upper() == 'HDF5':\n if hasattr(self.state, 'tensor') and self.state.tensor:\n out = self.transformed(ToNumpy)\n return out.dump(path, **kwargs)\n for state, value in self.state.as_dict().items():\n if issubclass(value.__class__, BaseComponent):\n if state == 'sample_attributes':\n for k, v in value.items():\n value[k] = np.array(v, dtype='S16')\n setattr(self, state, value)\n self.set_state(**{state: 'base_component'})\n return self._dump_hdf5(path, **kwargs)\n raise NotImplementedError('File format {} not supported.'.format(fmt))\n\n def load(self, **kwargs):\n \"\"\"Load sample from a file.\n\n Parameters\n ----------\n kwargs: dict\n Additional named arguments passed to the load method.\n\n Returns\n -------\n sample: FieldSample\n Sample with loaded data.\n \"\"\"\n if self._path is None:\n raise RuntimeError('You should specify a path before loading!')\n fname = os.path.basename(self._path)\n fmt = os.path.splitext(fname)[1].strip('.')\n\n if fmt.upper() == 'HDF5':\n self._load_hdf5(self._path, **kwargs)\n else:\n raise NotImplementedError('File format {} not supported.'.format(fmt))\n for state, value in 
self.state.as_dict().items():\n if value == 'base_component':\n value = getattr(self, state)\n if state == 'sample_attributes':\n for k, v in value.items():\n value[k] = list(v.astype('U'))\n self.set_state(**{state: value})\n delattr(self, state)\n return self\n\n\n @property\n def field(self):\n \"\"\"Link to the parent field.\"\"\"\n return self._field\n\n @field.setter\n def field(self, x):\n if x is not None and not isinstance(x, Field):\n raise ValueError('Can assign only instances of the class %s!' % str(Field))\n self._field = x\n\n @property\n def dataset(self):\n \"\"\"Link to the parent dataset.\"\"\"\n return self._dataset\n\n @dataset.setter\n def dataset(self, x):\n if x is not None and not isinstance(x, FieldDataset):\n raise ValueError('Can assign only instances of the class %s!\\nGiven %s' % (str(FieldDataset), type(x)))\n self._dataset = x\n if x is not None:\n self.init_state(\n spatial=x.unravel_model,\n cropped_at_mask=None if x.unravel_model else 'ACTNUM'\n )\n try:\n self.init_state(dataset_statistics=self._nested_dicts_to_base_components(\n 'DATASET_STATISTICS', x.filtered_statistics\n ))\n except RuntimeError:\n pass\n\n def transformed(self, transforms, inplace=False):\n \"\"\"Apply a set of transforms to the sample.\n\n Parameters\n ----------\n transforms: list, tuple, Compose, Transform\n Transform to apply\n inplace: bool\n\n Returns\n -------\n sample: FieldSample\n Transformed sample.\n \"\"\"\n transforms = self._initialize_transform(transforms)\n return transforms(self, inplace=inplace)\n\n def at_wells(self, inplace=False):\n \"\"\"Crop all the spatial arrays to the perforated cells. Ravel if needed.\n\n Parameters\n ----------\n inplace: bool\n\n Returns\n -------\n sample: FieldSample\n Cropped sample.\n \"\"\"\n return self.as_ravel(inplace=inplace, crop_at_mask='WELL_MASK')\n\n @_decorators.without_batch_dimension\n def as_spatial(self, inplace=False):\n \"\"\"Transform the sample's arrays to the spatial form.\n\n Parameters\n ----------\n inplace: bool\n\n Returns\n -------\n sample: FieldSample\n \"\"\"\n raise NotImplementedError()\n\n # pylint: disable=too-many-nested-blocks\n @_decorators.without_batch_dimension\n def as_ravel(self, inplace=False, crop_at_mask='ACTNUM'):\n \"\"\"Ravel the sample's arrays.\n\n Parameters\n ----------\n inplace: bool\n\n Returns\n -------\n sample: FieldSample\n \"\"\"\n out = self if inplace else self.empty_like()\n if self.state.spatial:\n for comp in self.keys():#pylint:disable=consider-using-dict-items\n if comp.upper() == 'TABLES':\n out[comp] = self[comp]\n continue\n if comp.upper() in ('MASKS', 'GRID'):\n for attr in self[comp].keys():\n if attr.upper() in ('TIME', 'CONTROL_T'):\n out[comp][attr] = self[comp][attr]\n continue\n if attr.upper() == 'NAMED_WELL_MASK':\n for well in self[comp][attr].keys():\n new_shape = (-1,) + tuple(self[comp][attr][well].shape[3:])\n out[comp][attr][well] = \\\n self[comp][attr].reshape(attr=well, newshape=new_shape, order='F', inplace=False)\n continue\n if attr.upper() in ('CF_MASK', 'PERF_MASK'):\n new_shape = tuple(self[comp][attr].shape[:-3]) + (-1,)\n else:\n new_shape = (-1,) + tuple(self[comp][attr].shape[3:])\n out[comp][attr] = self[comp].reshape(attr=attr, newshape=new_shape, order='F', inplace=False)\n else:\n new_shape = tuple(self[comp].shape[:-3]) + (-1, )\n out[comp] = self.reshape(attr=comp, newshape=new_shape, order='F', inplace=False)\n out.set_state(spatial=False)\n if crop_at_mask != self.state.cropped_at_mask:\n if self.state.cropped_at_mask is not 
None:\n out = self._uncrop_from_mask(out, self.state.cropped_at_mask)\n out = self._crop_at_mask(out, crop_at_mask)\n return out\n\n @staticmethod\n def _crop_at_mask(obj, mask_name):\n \"\"\"Crop a sample at a given binary mask.\n\n Parameters\n ----------\n obj: FieldSample\n Sample to be cropped.\n mask_name: str\n Name of the mask from the sample['MASKS'].\n\n Returns\n -------\n obj: FieldSample\n Cropped sample.\n \"\"\"\n assert not obj.state.spatial\n mask = obj.masks[mask_name]\n if isinstance(mask, torch.Tensor):\n mask = mask.bool()\n else:\n mask = mask.astype(bool)\n for comp in obj.keys():\n if comp.upper() == 'TABLES':\n continue\n if comp.upper() in ('MASKS', 'GRID'):\n for attr in obj[comp].keys():\n if attr.upper() in ('CF_MASK', 'PERF_MASK'):\n obj[comp][attr] = obj[comp][attr][..., mask]\n elif attr.upper() == 'NAMED_WELL_MASK':\n for well in obj[comp][attr].keys():\n obj[comp][attr][well] = obj[comp][attr][well][..., mask]\n elif attr.upper() not in (mask_name.upper(), 'TIME', 'CONTROL_T') and obj[comp][attr] is not None:\n obj[comp][attr] = obj[comp][attr][mask]\n else:\n obj[comp] = obj[comp][..., mask]\n obj.set_state(cropped_at_mask=mask_name.upper())\n return obj\n\n @staticmethod\n def _uncrop_from_mask(obj, mask_name):\n \"\"\"Reverse operation to the crop_at_mask.\n\n Parameters\n ----------\n obj: FieldSample\n mask_name: str\n\n Returns\n -------\n obj: FieldSample\n \"\"\"\n raise NotImplementedError()\n\n def _initialize_transform(self, transforms):\n \"\"\"Initialize transforms before application.\"\"\"\n if not isinstance(transforms, (list, tuple, Compose)):\n transforms = [transforms]\n initialized_transforms = []\n for t in transforms:\n if inspect.isclass(t):\n if issubclass(t, Normalize):\n initialized_transforms.append(t(\n mean=self.state.dataset_statistics.mean,\n std=self.state.dataset_statistics.std,\n unravel_model=self.state.spatial\n ))\n else:\n initialized_transforms.append(t())\n else:\n initialized_transforms.append(t)\n return Compose(initialized_transforms)\n\n @property\n def sample_attrs(self):\n \"\"\"Attributes represented in the sample.\"\"\"\n return self.state.sample_attributes\n\n @sample_attrs.setter\n def sample_attrs(self, x):\n x = None if x is None else {comp.upper(): [attr.upper() for attr in x[comp]] for comp in x.keys()}\n x = self._nested_dicts_to_base_components('SAMPLE_ATTRIBUTES', x)\n if hasattr(self.state, 'SAMPLE_ATTRIBUTES'):\n self.set_state(sample_attributes=x)\n else:\n self.init_state(sample_attributes=x)\n\n @property\n def device(self):\n \"\"\"Get the sample's device (if it is in Torch format)\n\n Returns\n -------\n device: torch.device\n \"\"\"\n ref = None\n for _, value in self.items():\n if isinstance(value, BaseComponent):\n for _, arr in value.items():\n if isinstance(arr, BaseComponent):\n for _, mask in arr.items():\n ref = mask\n break\n else:\n ref = arr\n break\n else:\n ref = value\n if ref is None:\n raise RuntimeError('The sample is empty!')\n if isinstance(ref, torch.Tensor):\n return ref.device\n raise RuntimeError('The sample should be in the PyTorch format! 
Found: %s' % type(ref))\n\n def to(self, device, inplace=True):\n \"\"\"Change the sample's device (if it is in Torch format).\n\n Parameters\n ----------\n device: str, torch.device\n inplace: bool\n\n Returns\n -------\n sample: FieldSample\n Sample at the new device\n \"\"\"\n if self.device == device:\n return self if inplace else self.copy()\n out = self if inplace else self.empty_like()\n for comp, value in self.items():\n if isinstance(value, BaseComponent):\n for attr, arr in value.items():\n if isinstance(arr, BaseComponent):\n for well, mask in arr.items():\n out[comp][attr][well] = mask.to(device)\n else:\n out[comp][attr] = arr.to(device)\n else:\n out[comp] = value.to(device)\n return out\n\n\nclass SequenceSubset:\n \"\"\"Baseclass for generating subsets of sequences.\"\"\"\n def __init__(self, size, low, high, **kwargs):\n \"\"\"\n Parameters\n ----------\n size: int\n Lenght of the generated sequences\n low: int\n Minimal possible timestep (inclusive)\n high: int\n Maximal possible timestep (exclusive)\n kwargs: optional\n \"\"\"\n self.size = size\n self.low = low\n self.high = high\n _ = kwargs\n\n def __call__(self):\n \"\"\"Generate subset of timesteps.\"\"\"\n raise NotImplementedError('Abstract method is not implemented.')\n\n\nclass UniformSequenceSubset(SequenceSubset):\n \"\"\"Generator of timesteps sampled from a uniform distribution [low, high).\"\"\"\n def __call__(self):\n subset = np.random.choice(np.arange(self.low, self.high), size=self.size, replace=False)\n return np.sort(subset)\n\n\nclass RandomSubsequence(SequenceSubset):\n \"\"\"Generator of timestep subsequences.\"\"\"\n def __call__(self):\n start = np.random.randint(low=self.low, high=self.high - self.size + 1)\n subset = np.arange(start, start + self.size)\n return np.sort(subset)\n" ]
[ [ "numpy.nanmax", "pandas.to_datetime", "numpy.nanmin", "numpy.concatenate", "numpy.max", "numpy.mean", "numpy.nanmean", "numpy.random.randint", "numpy.arange", "numpy.stack", "numpy.atleast_1d", "numpy.min", "numpy.power", "torch.is_tensor", "pandas.Timedelta", "numpy.array", "numpy.abs", "numpy.sort", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jdlarsen-UA/flopy
[ "bf2c59aaa689de186bd4c80685532802ac7149cd" ]
[ "autotest/t057_test_mp7.py" ]
[ "import os\nimport shutil\nimport numpy as np\nimport flopy\n\nmodel_ws = os.path.join(\"temp\", \"t057\")\n# delete the directory if it exists\nif os.path.isdir(model_ws):\n shutil.rmtree(model_ws)\n\nexe_names = {\"mf2005\": \"mf2005\", \"mf6\": \"mf6\", \"mp7\": \"mp7\"}\nrun = True\nfor key in exe_names.keys():\n v = flopy.which(exe_names[key])\n if v is None:\n run = False\n break\n\nnper, nstp, perlen, tsmult = 1, 1, 1.0, 1.0\nnlay, nrow, ncol = 3, 21, 20\ndelr = delc = 500.0\ntop = 400.0\nbotm = [220.0, 200.0, 0.0]\nlaytyp = [1, 0, 0]\nkh = [50.0, 0.01, 200.0]\nkv = [10.0, 0.01, 20.0]\nwel_loc = (2, 10, 9)\nwel_q = -150000.0\nrch = 0.005\nriv_h = 320.0\nriv_z = 317.0\nriv_c = 1.0e5\n\nzone3 = np.ones((nrow, ncol), dtype=np.int32)\nzone3[wel_loc[1:]] = 2\nzones = [1, 1, zone3]\n\n# create particles\npartlocs = []\npartids = []\nfor i in range(nrow):\n partlocs.append((0, i, 2))\n partids.append(i)\npart0 = flopy.modpath.ParticleData(\n partlocs, structured=True, particleids=partids\n)\n# part0 = flopy.modpath.ParticleGroup.get_particledata_empty(ncells=21,\n# particleid=True)\n# part0['k'] = 0\n# part0['j'] = 2\n# part0['localx'] = 0.5\n# part0['localy'] = 0.5\n# part0['localz'] = 0.\n# part0['timeoffset'] = 0.\n# part0['drape'] = 0\n# for idx in range(part0.shape[0]):\n# part0['id'][idx] = idx\n# part0['i'][idx] = idx\npg0 = flopy.modpath.ParticleGroup(\n particlegroupname=\"PG1\", particledata=part0, filename=\"ex01a.sloc\"\n)\n\nv = [(0,), (400,)]\npids = [1, 2] # [1000, 1001]\n# part1 = flopy.modpath.ParticleGroup.create_particledata(v, drape=1,\n# particleids=pids)\npart1 = flopy.modpath.ParticleData(\n v, structured=False, drape=1, particleids=pids\n)\npg1 = flopy.modpath.ParticleGroup(\n particlegroupname=\"PG2\", particledata=part1, filename=\"ex01a.pg2.sloc\"\n)\n\nparticlegroups = [pg0, pg1]\n\ndefaultiface = {\"RECHARGE\": 6, \"ET\": 6}\ndefaultiface6 = {\"RCH\": 6, \"EVT\": 6}\n\n\ndef test_mf2005():\n # build and run MODPATH 7 with MODFLOW-2005\n build_mf2005()\n\n\ndef test_mf6():\n # build and run MODPATH 7 with MODFLOW 6\n build_mf6()\n\n\ndef test_pathline_output():\n\n # if models not run then there will be no output\n if not run:\n return\n\n fpth0 = os.path.join(model_ws, \"mf2005\", \"ex01_mf2005_mp.mppth\")\n p = flopy.utils.PathlineFile(fpth0)\n maxtime0 = p.get_maxtime()\n maxid0 = p.get_maxid()\n p0 = p.get_alldata()\n fpth1 = os.path.join(model_ws, \"mf6\", \"ex01_mf6_mp.mppth\")\n p = flopy.utils.PathlineFile(fpth1)\n maxtime1 = p.get_maxtime()\n maxid1 = p.get_maxid()\n p1 = p.get_alldata()\n\n # # check maxtimes\n # msg = 'pathline maxtime ({}) '.format(maxtime0) + \\\n # 'in {} '.format(os.path.basename(fpth0)) + \\\n # 'are not equal to the ' + \\\n # 'pathline maxtime ({}) '.format(maxtime1) + \\\n # 'in {}'.format(os.path.basename(fpth1))\n # assert maxtime0 == maxtime1, msg\n\n # check maxid\n msg = (\n f\"pathline maxid ({maxid0}) in {os.path.basename(fpth0)} are not \"\n f\"equal to the pathline maxid ({maxid1}) in {os.path.basename(fpth1)}\"\n )\n assert maxid0 == maxid1, msg\n\n # check that pathline data are approximately the same\n # names = ['x', 'y', 'z']\n # dtype = np.dtype([('x', np.float32), ('y', np.float32),\n # ('z', np.float32)])\n # for jdx, (pl0, pl1) in enumerate(zip(p0, p1)):\n # t0 = np.rec.fromarrays((pl0[name] for name in names), dtype=dtype)\n # t1 = np.rec.fromarrays((pl1[name] for name in names), dtype=dtype)\n # for name in names:\n # msg = 'pathline {} in {} '.format(jdx, os.path.basename(fpth0)) + \\\n # 'are not equal 
(within 1e-5) to the ' + \\\n # 'pathline {} in {} '.format(jdx, os.path.basename(fpth1)) + \\\n # 'for column {}.'.format(name)\n # assert np.allclose(t0[name], t1[name]), msg\n\n return\n\n\ndef test_endpoint_output():\n\n # if models not run then there will be no output\n if not run:\n return\n\n fpth0 = os.path.join(model_ws, \"mf2005\", \"ex01_mf2005_mp.mpend\")\n e = flopy.utils.EndpointFile(fpth0)\n maxtime0 = e.get_maxtime()\n maxid0 = e.get_maxid()\n maxtravel0 = e.get_maxtraveltime()\n e0 = e.get_alldata()\n fpth1 = os.path.join(model_ws, \"mf6\", \"ex01_mf6_mp.mpend\")\n e = flopy.utils.EndpointFile(fpth1)\n maxtime1 = e.get_maxtime()\n maxid1 = e.get_maxid()\n maxtravel1 = e.get_maxtraveltime()\n e1 = e.get_alldata()\n\n # check maxid\n msg = (\n f\"endpoint maxid ({maxid0}) in {os.path.basename(fpth0)} are not \"\n f\"equal to the endpoint maxid ({maxid1}) in {os.path.basename(fpth1)}\"\n )\n assert maxid0 == maxid1, msg\n\n # # check maxtravel\n # msg = 'endpoint maxtraveltime ({}) '.format(maxtravel0) + \\\n # 'in {} '.format(os.path.basename(fpth0)) + \\\n # 'are not equal to the ' + \\\n # 'endpoint maxtraveltime ({}) '.format(maxtravel1) + \\\n # 'in {}'.format(os.path.basename(fpth1))\n # assert e0 != e1, msg\n #\n # # check maxtimes\n # msg = 'endpoint maxtime ({}) '.format(maxtime0) + \\\n # 'in {} '.format(os.path.basename(fpth0)) + \\\n # 'are not equal to the ' + \\\n # 'endpoint maxtime ({}) '.format(maxtime1) + \\\n # 'in {}'.format(os.path.basename(fpth1))\n # assert e0 != e1, msg\n\n # check that endpoint data are approximately the same\n names = [\"x\", \"y\", \"z\", \"x0\", \"y0\", \"z0\"]\n dtype = np.dtype(\n [\n (\"x\", np.float32),\n (\"y\", np.float32),\n (\"z\", np.float32),\n (\"x0\", np.float32),\n (\"y0\", np.float32),\n (\"z0\", np.float32),\n ]\n )\n d = np.rec.fromarrays((e0[name] - e1[name] for name in names), dtype=dtype)\n msg = (\n f\"endpoints in {os.path.basename(fpth0)} are not equal (within 1e-5) \"\n f\"to the endpoints in {os.path.basename(fpth1)}\"\n )\n # assert not np.allclose(t0, t1), msg\n\n return\n\n\ndef build_mf2005():\n \"\"\"\n MODPATH 7 example 1 for MODFLOW-2005\n \"\"\"\n\n ws = os.path.join(model_ws, \"mf2005\")\n nm = \"ex01_mf2005\"\n exe_name = exe_names[\"mf2005\"]\n iu_cbc = 130\n m = flopy.modflow.Modflow(nm, model_ws=ws, exe_name=exe_name)\n flopy.modflow.ModflowDis(\n m,\n nlay=nlay,\n nrow=nrow,\n ncol=ncol,\n nper=nper,\n itmuni=4,\n lenuni=1,\n perlen=perlen,\n nstp=nstp,\n tsmult=tsmult,\n steady=True,\n delr=delr,\n delc=delc,\n top=top,\n botm=botm,\n )\n flopy.modflow.ModflowLpf(m, ipakcb=iu_cbc, laytyp=laytyp, hk=kh, vka=kv)\n flopy.modflow.ModflowBas(m, ibound=1, strt=top)\n # recharge\n flopy.modflow.ModflowRch(m, ipakcb=iu_cbc, rech=rch, nrchop=1)\n # wel\n wd = [i for i in wel_loc] + [wel_q]\n flopy.modflow.ModflowWel(m, ipakcb=iu_cbc, stress_period_data={0: wd})\n # river\n rd = []\n for i in range(nrow):\n rd.append([0, i, ncol - 1, riv_h, riv_c, riv_z])\n flopy.modflow.ModflowRiv(m, ipakcb=iu_cbc, stress_period_data={0: rd})\n # output control\n flopy.modflow.ModflowOc(\n m,\n stress_period_data={\n (0, 0): [\"save head\", \"save budget\", \"print head\"]\n },\n )\n flopy.modflow.ModflowPcg(m, hclose=1e-6, rclose=1e-3, iter1=100, mxiter=50)\n\n m.write_input()\n\n if run:\n success, buff = m.run_model()\n assert success, \"mf2005 model did not run\"\n\n # create modpath files\n exe_name = exe_names[\"mp7\"]\n mp = flopy.modpath.Modpath7(\n modelname=f\"{nm}_mp\", flowmodel=m, exe_name=exe_name, 
model_ws=ws\n )\n mpbas = flopy.modpath.Modpath7Bas(\n mp, porosity=0.1, defaultiface=defaultiface\n )\n mpsim = flopy.modpath.Modpath7Sim(\n mp,\n simulationtype=\"combined\",\n trackingdirection=\"forward\",\n weaksinkoption=\"pass_through\",\n weaksourceoption=\"pass_through\",\n budgetoutputoption=\"summary\",\n budgetcellnumbers=[1049, 1259],\n traceparticledata=[1, 1000],\n referencetime=[0, 0, 0.0],\n stoptimeoption=\"extend\",\n timepointdata=[500, 1000.0],\n zonedataoption=\"on\",\n zones=zones,\n particlegroups=particlegroups,\n )\n\n # write modpath datasets\n mp.write_input()\n\n # run modpath\n if run:\n success, buff = mp.run_model()\n assert success, f\"mp7 model ({mp.name}) did not run\"\n\n return\n\n\ndef build_mf6():\n \"\"\"\n MODPATH 7 example 1 for MODFLOW 6\n \"\"\"\n\n ws = os.path.join(model_ws, \"mf6\")\n nm = \"ex01_mf6\"\n exe_name = exe_names[\"mf6\"]\n\n # Create the Flopy simulation object\n sim = flopy.mf6.MFSimulation(\n sim_name=nm, exe_name=\"mf6\", version=\"mf6\", sim_ws=ws\n )\n\n # Create the Flopy temporal discretization object\n pd = (perlen, nstp, tsmult)\n tdis = flopy.mf6.modflow.mftdis.ModflowTdis(\n sim, pname=\"tdis\", time_units=\"DAYS\", nper=nper, perioddata=[pd]\n )\n\n # Create the Flopy groundwater flow (gwf) model object\n model_nam_file = f\"{nm}.nam\"\n gwf = flopy.mf6.ModflowGwf(\n sim, modelname=nm, model_nam_file=model_nam_file, save_flows=True\n )\n\n # Create the Flopy iterative model solver (ims) Package object\n ims = flopy.mf6.modflow.mfims.ModflowIms(\n sim,\n pname=\"ims\",\n complexity=\"SIMPLE\",\n inner_hclose=1e-6,\n rcloserecord=1e-3,\n outer_hclose=1e-6,\n outer_maximum=50,\n inner_maximum=100,\n )\n\n # create gwf file\n dis = flopy.mf6.modflow.mfgwfdis.ModflowGwfdis(\n gwf,\n pname=\"dis\",\n nlay=nlay,\n nrow=nrow,\n ncol=ncol,\n length_units=\"FEET\",\n delr=delr,\n delc=delc,\n top=top,\n botm=botm,\n )\n # Create the initial conditions package\n ic = flopy.mf6.modflow.mfgwfic.ModflowGwfic(gwf, pname=\"ic\", strt=top)\n\n # Create the node property flow package\n npf = flopy.mf6.modflow.mfgwfnpf.ModflowGwfnpf(\n gwf, pname=\"npf\", icelltype=laytyp, k=kh, k33=kv\n )\n\n # recharge\n flopy.mf6.modflow.mfgwfrcha.ModflowGwfrcha(gwf, recharge=rch)\n # wel\n wd = [(wel_loc, wel_q)]\n flopy.mf6.modflow.mfgwfwel.ModflowGwfwel(\n gwf, maxbound=1, stress_period_data={0: wd}\n )\n # river\n rd = []\n for i in range(nrow):\n rd.append([(0, i, ncol - 1), riv_h, riv_c, riv_z])\n flopy.mf6.modflow.mfgwfriv.ModflowGwfriv(gwf, stress_period_data={0: rd})\n # Create the output control package\n headfile = f\"{nm}.hds\"\n head_record = [headfile]\n budgetfile = f\"{nm}.cbb\"\n budget_record = [budgetfile]\n saverecord = [(\"HEAD\", \"ALL\"), (\"BUDGET\", \"ALL\")]\n oc = flopy.mf6.modflow.mfgwfoc.ModflowGwfoc(\n gwf,\n pname=\"oc\",\n saverecord=saverecord,\n head_filerecord=head_record,\n budget_filerecord=budget_record,\n )\n\n # Write the datasets\n sim.write_simulation()\n\n # Run the simulation\n if run:\n success, buff = sim.run_simulation()\n assert success, \"mf6 model did not run\"\n\n # create modpath files\n exe_name = exe_names[\"mp7\"]\n mp = flopy.modpath.Modpath7(\n modelname=f\"{nm}_mp\", flowmodel=gwf, exe_name=exe_name, model_ws=ws\n )\n mpbas = flopy.modpath.Modpath7Bas(\n mp, porosity=0.1, defaultiface=defaultiface6\n )\n mpsim = flopy.modpath.Modpath7Sim(\n mp,\n simulationtype=\"combined\",\n trackingdirection=\"forward\",\n weaksinkoption=\"pass_through\",\n weaksourceoption=\"pass_through\",\n 
budgetoutputoption=\"summary\",\n budgetcellnumbers=[1049, 1259],\n traceparticledata=[1, 1000],\n referencetime=[0, 0, 0.0],\n stoptimeoption=\"extend\",\n timepointdata=[500, 1000.0],\n zonedataoption=\"on\",\n zones=zones,\n particlegroups=particlegroups,\n )\n\n # write modpath datasets\n mp.write_input()\n\n # run modpath\n if run:\n success, buff = mp.run_model()\n assert success, f\"mp7 model ({mp.name}) did not run\"\n\n return\n\n\nif __name__ == \"__main__\":\n test_mf2005()\n test_mf6()\n test_pathline_output()\n test_endpoint_output()\n" ]
[ [ "numpy.rec.fromarrays", "numpy.dtype", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
theofpa/continual-object-instances
[ "630ab4b115e5bf6004a26855a7af24e37372e5bb" ]
[ "src/train.py" ]
[ "import torch\nfrom tqdm import tqdm\n\nfrom utils import device, args\nfrom utils import save_model, send_to_device, print_train_progress\nfrom metrics import evaluation\n\n\ndef train(model, criterion, train_loader, query_loader, gallery_loader, optimizer, experiment_name):\n for epoch in range(args.n_epochs):\n train_loss, metric = train_epoch(\n model, criterion, optimizer, train_loader)\n print_train_progress(epoch, train_loss, metric)\n if epoch % args.print_every == 0:\n evaluation(model, query_loader, gallery_loader)\n save_model(model, experiment_name)\n\n\ndef continuous_train(old_model, model, criterion, train_loader, query_loader, gallery_loader, optimizer, experiment_name):\n for epoch in range(args.n_epochs):\n if args.continuous_learning_method == \"naive\":\n train_loss, metric = train_epoch(\n model, criterion, optimizer, train_loader)\n elif args.continuous_learning_method == \"finetune\":\n train_loss, metric = train_epoch(\n model, criterion, optimizer, train_loader)\n elif args.continuous_learning_method == \"lfl\":\n train_loss, metric = train_lfl_epoch(\n old_model, model, criterion, optimizer, train_loader)\n elif args.continuous_learning_method == \"lwf\":\n train_loss, metric = train_lfl_epoch(\n old_model, model, criterion, optimizer, train_loader)\n elif args.continuous_learning_method == \"ewc\":\n train_loss, metric = train_ewc_epoch(\n old_model, model, criterion, optimizer, train_loader)\n else:\n raise ValueError(\n \"Provided Continual Learning method does not exist\")\n print_train_progress(epoch, train_loss, metric)\n save_model(model, experiment_name)\n\n\ndef train_epoch(model, criterion, optimizer, dataloader):\n model.train()\n total_loss = 0\n total_metrics = 0\n for idx, data_items in enumerate(tqdm(dataloader)):\n optimizer.zero_grad()\n data_items = send_to_device(data_items, device)\n\n b, c, h, w = data_items[\"neg\"].size()\n data_items[\"neg\"] = data_items[\"neg\"].view(\n b*args.neg_samples, int(c/args.neg_samples), h, w)\n\n anchor, pos, neg = model(\n data_items[\"anchor\"], data_items[\"pos\"], data_items[\"neg\"])\n loss, metric = criterion(\n anchor=anchor, pos=pos, neg=neg, targets=data_items[\"anchor_target\"])\n total_loss += loss.item()\n total_metrics += metric\n loss.backward()\n\n torch.nn.utils.clip_grad_norm_(model.parameters(), 10)\n optimizer.step()\n\n total_loss /= len(dataloader)\n if args.task_method == \"regression\":\n metric = total_metrics/len(dataloader)\n else:\n metric = total_metrics/len(dataloader.dataset)\n return total_loss, metric\n\n\ndef train_lfl_epoch(old_model, model, criterion, optimizer, dataloader):\n old_model.eval()\n model.train()\n total_loss = 0\n total_metrics = 0\n for idx, data_items in enumerate(tqdm(dataloader)):\n optimizer.zero_grad()\n data_items = send_to_device(data_items, device)\n\n b, c, h, w = data_items[\"neg\"].size()\n data_items[\"neg\"] = data_items[\"neg\"].view(\n b*args.neg_samples, int(c/args.neg_samples), h, w)\n\n anchor, pos, neg = model(\n data_items[\"anchor\"], data_items[\"pos\"], data_items[\"neg\"])\n with torch.no_grad():\n old_anchor = old_model.get_embedding(data_items[\"anchor\"])\n loss, metric = criterion(old_anchor=old_anchor, anchor=anchor,\n pos=pos, neg=neg, targets=data_items[\"anchor_target\"])\n\n total_loss += loss.item()\n loss.backward()\n total_metrics += metric\n\n torch.nn.utils.clip_grad_norm_(model.parameters(), 10)\n optimizer.step()\n\n total_loss /= len(dataloader)\n if args.task_method == \"regression\":\n metric = 
total_metrics/len(dataloader)\n else:\n metric = total_metrics/len(dataloader.dataset)\n return total_loss, metric\n\n\ndef train_ewc_epoch(old_model, model, criterion, optimizer, dataloader):\n old_model.eval()\n model.train()\n total_loss = 0\n total_metrics = 0\n\n criterion.update_models(old_model, model)\n criterion.update_fisher(dataloader)\n data = []\n for idx, data_items in enumerate(tqdm(dataloader)):\n optimizer.zero_grad()\n data_items = send_to_device(data_items, device)\n\n b, c, h, w = data_items[\"neg\"].size()\n data_items[\"neg\"] = data_items[\"neg\"].view(\n b*args.neg_samples, int(c/args.neg_samples), h, w)\n\n anchor, pos, neg = model(\n data_items[\"anchor\"], data_items[\"pos\"], data_items[\"neg\"])\n loss, metric = criterion(\n anchor=anchor, pos=pos, neg=neg, targets=data_items[\"anchor_target\"])\n\n total_loss += loss.item()\n loss.backward()\n total_metrics += metric\n\n torch.nn.utils.clip_grad_norm_(model.parameters(), 10)\n optimizer.step()\n data.append(data_items)\n\n total_loss /= len(dataloader)\n if args.task_method == \"regression\":\n metric = total_metrics/len(dataloader)\n else:\n metric = total_metrics/len(dataloader.dataset)\n return total_loss, metric\n" ]
[ [ "torch.no_grad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jhyuklee/piqa
[ "a38b94eb1e5146720ca443d3e8bebb93bd3c32f9" ]
[ "squad/piqa_evaluate.py" ]
[ "\"\"\" Official alpha evaluation script for PIQA (inherited from SQuAD v1.1 evaluation script).\"\"\"\nfrom __future__ import print_function\n\nimport os\nfrom collections import Counter\nimport string\nimport re\nimport argparse\nimport json\nimport sys\n\nimport scipy.sparse\nimport numpy as np\n\n\ndef normalize_answer(s):\n \"\"\"Lower text and remove punctuation, articles and extra whitespace.\"\"\"\n\n def remove_articles(text):\n return re.sub(r'\\b(a|an|the)\\b', ' ', text)\n\n def white_space_fix(text):\n return ' '.join(text.split())\n\n def remove_punc(text):\n exclude = set(string.punctuation)\n return ''.join(ch for ch in text if ch not in exclude)\n\n def lower(text):\n return text.lower()\n\n return white_space_fix(remove_articles(remove_punc(lower(s))))\n\n\ndef f1_score(prediction, ground_truth):\n prediction_tokens = normalize_answer(prediction).split()\n ground_truth_tokens = normalize_answer(ground_truth).split()\n common = Counter(prediction_tokens) & Counter(ground_truth_tokens)\n num_same = sum(common.values())\n if num_same == 0:\n return 0\n precision = 1.0 * num_same / len(prediction_tokens)\n recall = 1.0 * num_same / len(ground_truth_tokens)\n f1 = (2 * precision * recall) / (precision + recall)\n return f1\n\n\ndef exact_match_score(prediction, ground_truth):\n return (normalize_answer(prediction) == normalize_answer(ground_truth))\n\n\ndef metric_max_over_ground_truths(metric_fn, prediction, ground_truths):\n scores_for_ground_truths = []\n for ground_truth in ground_truths:\n score = metric_fn(prediction, ground_truth)\n scores_for_ground_truths.append(score)\n return max(scores_for_ground_truths)\n\n\ndef evaluate(dataset, predictions):\n f1 = exact_match = total = 0\n for article in dataset:\n for paragraph in article['paragraphs']:\n for qa in paragraph['qas']:\n total += 1\n if qa['id'] not in predictions:\n message = 'Unanswered question ' + qa['id'] + \\\n ' will receive score 0.'\n print(message, file=sys.stderr)\n continue\n ground_truths = list(map(lambda x: x['text'], qa['answers']))\n prediction = predictions[qa['id']]\n exact_match += metric_max_over_ground_truths(\n exact_match_score, prediction, ground_truths)\n f1 += metric_max_over_ground_truths(\n f1_score, prediction, ground_truths)\n\n exact_match = 100.0 * exact_match / total\n f1 = 100.0 * f1 / total\n\n return {'exact_match': exact_match, 'f1': f1}\n\n\ndef get_q2c(dataset):\n q2c = {}\n for article in dataset:\n for para_idx, paragraph in enumerate(article['paragraphs']):\n cid = '%s_%d' % (article['title'], para_idx)\n for qa in paragraph['qas']:\n q2c[qa['id']] = cid\n return q2c\n\n\ndef get_predictions(context_emb_dir, question_emb_dir, q2c, sparse=False, progress=False):\n if progress:\n from tqdm import tqdm\n else:\n tqdm = lambda x: x\n predictions = {}\n for id_, cid in tqdm(q2c.items()):\n q_emb_path = os.path.join(question_emb_dir, '%s.npz' % id_)\n c_emb_path = os.path.join(context_emb_dir, '%s.npz' % cid)\n c_json_path = os.path.join(context_emb_dir, '%s.json' % cid)\n\n if not os.path.exists(q_emb_path):\n continue\n\n load = scipy.sparse.load_npz if sparse else np.load\n q_emb = load(q_emb_path) # shape = [M, d], d is the embedding size.\n c_emb = load(c_emb_path) # shape = [N, d], d is the embedding size.\n\n with open(c_json_path, 'r') as fp:\n phrases = json.load(fp)\n\n if sparse:\n sim = c_emb * q_emb.T\n m = sim.max(1)\n m = np.squeeze(np.array(m.todense()), 1)\n else:\n q_emb = q_emb['arr_0']\n c_emb = c_emb['arr_0']\n sim = np.matmul(c_emb, q_emb.T)\n m = 
sim.max(1)\n\n argmax = m.argmax(0)\n predictions[id_] = phrases[argmax]\n \n # Dump piqa_pred\n # with open('test/piqa_pred.json', 'w') as f:\n # f.write(json.dumps(predictions))\n\n return predictions\n\n\nif __name__ == '__main__':\n expected_version = '1.1'\n parser = argparse.ArgumentParser(\n description='Evaluation for SQuAD ' + expected_version)\n parser.add_argument('dataset_file', help='Dataset file')\n parser.add_argument('context_emb_dir', help='Context embedding directory')\n parser.add_argument('question_emb_dir', help='Question embedding directory')\n parser.add_argument('--sparse', default=False, action='store_true',\n help='Whether the embeddings are scipy.sparse or pure numpy.')\n parser.add_argument('--progress', default=False, action='store_true', help='Show progress bar. Requires `tqdm`.')\n args = parser.parse_args()\n with open(args.dataset_file) as dataset_file:\n dataset_json = json.load(dataset_file)\n if (dataset_json['version'] != expected_version):\n print('Evaluation expects v-' + expected_version +\n ', but got dataset with v-' + dataset_json['version'],\n file=sys.stderr)\n dataset = dataset_json['data']\n q2c = get_q2c(dataset)\n predictions = get_predictions(args.context_emb_dir, args.question_emb_dir, q2c, sparse=args.sparse,\n progress=args.progress)\n print(json.dumps(evaluate(dataset, predictions)))\n" ]
[ [ "numpy.matmul" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ndoo/depthai-face-to-mqtt
[ "bbcf5c87401e155593f1bddffeb835e59f12a6aa" ]
[ "main.py" ]
[ "# coding=utf-8\nimport os\nfrom pathlib import Path\nfrom queue import Queue\nimport argparse\nfrom time import monotonic\nimport datetime\nimport throttle\nimport logging\n\nimport cv2\nimport depthai\nimport numpy as np\nfrom imutils.video import FPS\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"-p\", \"--preview\", action=\"store_true\",\n help=\"preview camera\")\nparser.add_argument(\"-b\", \"--database-dir\", type=str, default=\"databases\",\n help=\"path to save/load recognition databases (default: %(default)s)\")\nparser.add_argument(\"-n\", \"--no-enroll\", action=\"store_true\",\n help=\"do not auto-enroll\")\nparser.add_argument(\"-t\", \"--throttle\", type=int, default=10,\n help=\"seconds to throttle recognition alerts (default: %(default)d)\")\nparser.add_argument(\"-v\", \"--verbose\", action=\"store_true\",\n help=\"increase output verbosity\")\nparser.add_argument(\"-r\", \"--recognize-threshold\", type=int, default=85,\n help=\"confidence percentage to meet or exceed for recognition (default: %(default)d)\")\nparser.add_argument(\"-c\", \"--continue-threshold\", type=int, default=75,\n help=\"confidence percentage to meet or exceed to continue enroll (re-recognition) (default: %(default)d)\")\n\nargs = parser.parse_args()\n\npreview = args.preview\nnoenroll = args.no_enroll\nthrottle_secs = args.throttle\ndb_dir = args.database_dir\nrecognize_conf = args.recognize_threshold / 100\nenroll_conf = args.continue_threshold / 100\n\nlog_level = logging.DEBUG if args.verbose else logging.INFO\nlogging.basicConfig(level=log_level,\n format='%(asctime)s - %(levelname)s - %(message)s')\n\[email protected](throttle_secs, 1)\ndef face_detected(name):\n logging.info(f\"Face detected was called\")\n print(f\"{name}\\a\")\n\ndef to_planar(arr: np.ndarray, shape: tuple):\n return cv2.resize(arr, shape).transpose((2, 0, 1)).flatten()\n\ndef to_nn_result(nn_data):\n return np.array(nn_data.getFirstLayerFp16())\n\ndef run_nn(x_in, x_out, in_dict):\n nn_data = depthai.NNData()\n for key in in_dict:\n nn_data.setLayer(key, in_dict[key])\n x_in.send(nn_data)\n return x_out.tryGet()\n\ndef frame_norm(frame, *xy_vals):\n return (\n np.clip(np.array(xy_vals), 0, 1) * np.array(frame * (len(xy_vals) // 2))[::-1]\n ).astype(int)\n\ndef correction(frame, angle=None, invert=False):\n h, w = frame.shape[:2]\n center = (w // 2, h // 2)\n mat = cv2.getRotationMatrix2D(center, angle, 1)\n affine = cv2.invertAffineTransform(mat).astype(\"float32\")\n corr = cv2.warpAffine(\n frame,\n mat,\n (w, h),\n flags=cv2.INTER_CUBIC,\n borderMode=cv2.BORDER_CONSTANT,\n )\n if invert:\n return corr, affine\n return corr\n\ndef cosine_distance(a, b):\n if a.shape != b.shape:\n raise RuntimeError(\"array {} shape not match {}\".format(a.shape, b.shape))\n a_norm = np.linalg.norm(a)\n b_norm = np.linalg.norm(b)\n similarity = np.dot(a, b.T) / (a_norm * b_norm)\n return similarity\n\ndef enroll(name, face_frame, results, labels, db_dic):\n\n if name in db_dic:\n db_ = db_dic[name]\n if len(db_) >= 200:\n logging.info(f\"Not enrolling {name}, too many records.\")\n return\n db_.append(np.array(results))\n else:\n labels.add(name)\n db_ = np.array(results)\n db_dic[name] = db_\n # Save an image if newly enrolling\n logging.info(f\"Enrolling new face as {name}\")\n cc = face_frame.copy()\n cv2.imwrite(f\"{db_dir}/{name}.jpg\", cc)\n\n if not os.path.exists(db_dir):\n os.mkdir(db_dir)\n np.savez_compressed(f\"{db_dir}/{name}\", *db_)\n\ndef read_db(labels):\n for file in os.listdir(db_dir):\n filename = 
os.path.splitext(file)\n if filename[1] == \".npz\":\n label = filename[0]\n labels.add(label)\n db_dic = {}\n for label in list(labels):\n with np.load(f\"{db_dir}/{label}.npz\") as db:\n db_dic[label] = [db[j] for j in db.files]\n return db_dic\n\n\nclass DepthAI:\n def __init__(\n self\n ):\n logging.debug(\"Loading pipeline...\")\n self.fps_cam = FPS()\n self.fps_nn = FPS()\n self.create_pipeline()\n self.start_pipeline()\n self.fontScale = 1\n self.lineType = 0\n\n def create_pipeline(self):\n logging.debug(\"Creating pipeline...\")\n self.pipeline = depthai.Pipeline()\n\n # ColorCamera\n logging.debug(\"Creating Color Camera...\")\n self.cam = self.pipeline.createColorCamera()\n self.cam.setPreviewSize(self._cam_size[1], self._cam_size[0])\n self.cam.setResolution(\n depthai.ColorCameraProperties.SensorResolution.THE_4_K\n )\n self.cam.setInterleaved(False)\n self.cam.setBoardSocket(depthai.CameraBoardSocket.RGB)\n self.cam.setColorOrder(depthai.ColorCameraProperties.ColorOrder.BGR)\n\n self.cam_xout = self.pipeline.createXLinkOut()\n self.cam_xout.setStreamName(\"preview\")\n self.cam.preview.link(self.cam_xout.input)\n\n self.create_nns()\n\n logging.info(\"Pipeline created.\")\n\n def create_nns(self):\n pass\n\n def create_nn(self, model_path: str, model_name: str, first: bool = False):\n \"\"\"\n\n :param model_path: model path\n :param model_name: model abbreviation\n :param first: Is it the first model\n :return:\n \"\"\"\n # NeuralNetwork\n logging.debug(f\"Creating {model_path} Neural Network...\")\n model_nn = self.pipeline.createNeuralNetwork()\n model_nn.setBlobPath(str(Path(f\"{model_path}\").resolve().absolute()))\n model_nn.input.setBlocking(False)\n if first:\n logging.debug(\"linked cam.preview to model_nn.input\")\n self.cam.preview.link(model_nn.input)\n else:\n model_in = self.pipeline.createXLinkIn()\n model_in.setStreamName(f\"{model_name}_in\")\n model_in.out.link(model_nn.input)\n\n model_nn_xout = self.pipeline.createXLinkOut()\n model_nn_xout.setStreamName(f\"{model_name}_nn\")\n model_nn.out.link(model_nn_xout.input)\n\n def create_mobilenet_nn(\n self,\n model_path: str,\n model_name: str,\n conf: float = 0.5,\n first: bool = False,\n ):\n \"\"\"\n\n :param model_path: model name\n :param model_name: model abbreviation\n :param conf: confidence threshold\n :param first: Is it the first model\n :return:\n \"\"\"\n # NeuralNetwork\n logging.debug(f\"Creating {model_path} MobileNet Neural Network...\")\n model_nn = self.pipeline.createMobileNetDetectionNetwork()\n model_nn.setBlobPath(str(Path(f\"{model_path}\").resolve().absolute()))\n model_nn.setConfidenceThreshold(conf)\n model_nn.input.setBlocking(False)\n\n if first:\n self.cam.preview.link(model_nn.input)\n else:\n model_in = self.pipeline.createXLinkIn()\n model_in.setStreamName(f\"{model_name}_in\")\n model_in.out.link(model_nn.input)\n\n model_nn_xout = self.pipeline.createXLinkOut()\n model_nn_xout.setStreamName(f\"{model_name}_nn\")\n model_nn.out.link(model_nn_xout.input)\n\n def start_pipeline(self):\n try:\n logging.info(\"Starting pipeline...\")\n self.device = depthai.Device(self.pipeline)\n except Exception as e:\n logging.critical(\"Could not create pipeline: %s\", str(e))\n exit(1)\n\n self.start_nns()\n\n self.preview = self.device.getOutputQueue(\n name=\"preview\", maxSize=4, blocking=False\n )\n\n def start_nns(self):\n pass\n\n def put_text(self, text, dot, color=(0, 0, 255), font_scale=None,\n line_type=None):\n font_scale = font_scale if font_scale else self.fontScale\n line_type = 
line_type if line_type else self.lineType\n dot = tuple(dot[:2])\n cv2.putText(\n img=self.debug_frame,\n text=text,\n org=dot,\n fontFace=cv2.FONT_HERSHEY_COMPLEX,\n fontScale=font_scale,\n color=color,\n lineType=line_type,\n )\n\n def draw_bbox(self, bbox, color):\n cv2.rectangle(\n img=self.debug_frame,\n pt1=(bbox[0], bbox[1]),\n pt2=(bbox[2], bbox[3]),\n color=color,\n thickness=2,\n )\n\n def parse(self):\n if preview:\n self.debug_frame = self.frame.copy()\n\n s = self.parse_fun()\n # if s :\n # raise StopIteration()\n if preview:\n cv2.imshow(\n \"Camera_view\",\n self.debug_frame,\n )\n self.fps_cam.update()\n if cv2.waitKey(1) == ord(\"q\"):\n cv2.destroyAllWindows()\n self.fps_cam.stop()\n self.fps_nn.stop()\n logging.debug(\n f\"FPS_CAMERA: {self.fps_cam.fps():.2f} , FPS_NN: {self.fps_nn.fps():.2f}\"\n )\n raise StopIteration()\n\n def run_camera(self):\n while True:\n in_rgb = self.preview.tryGet()\n if in_rgb is not None:\n shape = (3, in_rgb.getHeight(), in_rgb.getWidth())\n self.frame = (\n in_rgb.getData().reshape(shape).transpose(1, 2, 0).astype(np.uint8)\n )\n self.frame = np.ascontiguousarray(self.frame)\n try:\n self.parse()\n except StopIteration:\n break\n\n @property\n def cam_size(self):\n return self._cam_size\n\n @cam_size.setter\n def cam_size(self, v):\n self._cam_size = v\n\n def run(self):\n self.fps_cam.start()\n self.fps_nn.start()\n self.run_camera()\n del self.device\n\n\nclass Main(DepthAI):\n def __init__(self):\n self.cam_size = (300, 300)\n super(Main, self).__init__()\n self.face_frame_corr = Queue()\n self.face_frame = Queue()\n self.face_coords = Queue()\n self.labels = set()\n self.db_dic = read_db(self.labels)\n\n def create_nns(self):\n\n self.create_mobilenet_nn(\n \"models/face-detection-retail-0005_openvino_2021.4_4shave.blob\",\n \"mfd\",\n first=True,\n conf=0.9, # Raised to prevent auto-enroll of non-faces\n )\n\n self.create_nn(\n \"models/head-pose-estimation-adas-0001_openvino_2021.4_4shave.blob\",\n \"head_pose\",\n )\n self.create_nn(\n \"models/face-recognition-mobilefacenet-arcface_2021.2_4shave.blob\",\n \"arcface\",\n )\n\n def start_nns(self):\n self.mfd_nn = self.device.getOutputQueue(\"mfd_nn\", 4, False)\n self.head_pose_in = self.device.getInputQueue(\"head_pose_in\", 4, False)\n self.head_pose_nn = self.device.getOutputQueue(\"head_pose_nn\", 4, False)\n self.arcface_in = self.device.getInputQueue(\"arcface_in\", 4, False)\n self.arcface_nn = self.device.getOutputQueue(\"arcface_nn\", 4, False)\n\n def run_face_mn(self):\n nn_data = self.mfd_nn.tryGet()\n if nn_data is None:\n return False\n\n bboxes = nn_data.detections\n for bbox in bboxes:\n face_coord = frame_norm(\n self.frame.shape[:2], *[bbox.xmin, bbox.ymin, bbox.xmax, bbox.ymax]\n )\n self.face_frame.put(\n self.frame[face_coord[1] : face_coord[3], face_coord[0] : face_coord[2]]\n )\n self.face_coords.put(face_coord)\n if preview:\n self.draw_bbox(face_coord, (10, 245, 10))\n\n return True\n\n def run_head_pose(self):\n while self.face_frame.qsize():\n face_frame = self.face_frame.get()\n nn_data = run_nn(\n self.head_pose_in,\n self.head_pose_nn,\n {\"data\": to_planar(face_frame, (60, 60))},\n )\n if nn_data is None:\n return False\n\n out = np.array(nn_data.getLayerFp16(\"angle_r_fc\"))\n self.face_frame_corr.put(correction(face_frame, -out[0]))\n\n return True\n\n def run_arcface(self):\n while self.face_frame_corr.qsize():\n face_coords = self.face_coords.get()\n face_frame = self.face_frame_corr.get()\n\n nn_data = run_nn(\n self.arcface_in,\n 
self.arcface_nn,\n {\"data\": to_planar(face_frame, (112, 112))},\n )\n\n if nn_data is None:\n return False\n self.fps_nn.update()\n results = to_nn_result(nn_data)\n\n conf = []\n max_ = 0\n label_ = None\n for label in list(self.labels):\n for j in self.db_dic.get(label):\n conf_ = cosine_distance(j, results)\n if conf_ > max_:\n max_ = conf_\n label_ = label\n conf.append((max_, label_))\n \n name = conf[0]\n\n if name[0] >= recognize_conf:\n # Use debug log level to minimize screen scroll\n logging.debug(f\"Face detected: {name[1]}; confidence: {name[0] * 100:.2f}%\\a\")\n face_detected(name[1])\n\n if name[0] >= enroll_conf:\n logging.info(f\"Face detected, updating enrolment: {name[1]}; confidence: {name[0] * 100:.2f}%\\a\")\n enroll(name[1], face_frame, results, self.labels,\n self.db_dic)\n\n if name[0] < 0.2:\n logging.info(f\"Face detected, enrolling: {name[1]}; confidence: {name[0] * 100:.2f}%\\a\")\n enroll(datetime.datetime.now().isoformat().replace(':','_'),\n face_frame, results, self.labels, self.db_dic)\n\n if preview:\n self.put_text(\n f\"name:{name[1]}\",\n (face_coords[0], face_coords[1] - 35),\n (244, 0, 255),\n )\n self.put_text(\n f\"conf:{name[0] * 100:.2f}%\",\n (face_coords[0], face_coords[1] - 10),\n (244, 0, 255),\n )\n\n return True\n\n def parse_fun(self):\n if self.run_face_mn():\n if self.run_head_pose():\n if self.run_arcface():\n return True\n\n\nif __name__ == \"__main__\":\n Main().run()\n" ]
[ [ "numpy.dot", "numpy.ascontiguousarray", "numpy.linalg.norm", "numpy.savez_compressed", "numpy.load", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Lambda-School-Labs/yelp-dataset-challenge-2-ds
[ "fa7cace65e502e335bdb804519e2b3ec5b6e77c6" ]
[ "wordcloudapi/wordcloudapp/timeseries.py" ]
[ "import numpy as np\nimport pandas as pd\nimport ast\nfrom collections import Counter\nfrom .models import DB, reviews\n\ndef wc_count(docs):\n \"\"\"Count the occurance of each word and rank\n \"\"\"\n total=len(docs)\n wc = pd.DataFrame({'word':docs, 'count':np.ones(len(docs))})\n wc = wc.groupby('word').sum()\n wc['pct_total'] = wc['count']/total\n wc['rank'] = wc['count'].rank(method='first', ascending=False)\n return wc.sort_values(by='rank').nlargest(30, 'count')\n\n\ndef timeseries(bus_id):\n\n result = reviews.query.with_entities(reviews.token, reviews.date, \\\n reviews.stars).filter_by(business_id=bus_id)\n df = pd.read_sql(sql = result.statement, con = DB.engine)\n df['token'] = df['token'].apply(lambda x: [i.strip('{').strip('}') for i in x.split(',')])\n print(df['token'].iloc[0])\n filtered = df.sort_values('date')\n filtered = filtered.reset_index()\n filtered['bins'] = pd.qcut(filtered.index, q=10, precision=0)\n new_df = filtered.groupby('bins').agg({'token': 'sum', \\\n 'stars': 'mean', 'date': lambda x: x.iloc[-1]})\n\n counts = []\n for i in range(len(new_df)):\n wc_df = wc_count(new_df['token'].values[i])\n wc_df['date'] = new_df['date'].values[i]\n wc_df['star_review'] = new_df['stars'].values[i]\n counts.append(wc_df)\n\n df_final = pd.concat(counts)\n df_final['date'] = df_final['date'].astype(str)\n df_final = df_final.reset_index()\n output = (df_final.groupby(['date'], as_index=True)\n .apply(lambda x: x[['word','count','pct_total','rank',\\\n 'star_review']].to_dict('r'))\n .to_json()).replace(\"'\", \"\")\n\n return output" ]
[ [ "pandas.concat", "pandas.qcut", "pandas.read_sql" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
VCAT19/torch-ngp
[ "dcbfe061b30808875a80f12a10a383b51b35f121" ]
[ "nerf/gui.py" ]
[ "import torch\nimport numpy as np\nimport dearpygui.dearpygui as dpg\nfrom scipy.spatial.transform import Rotation as R\n\nfrom nerf.utils import *\n\n\nclass OrbitCamera:\n def __init__(self, W, H, r=2, fovy=60):\n self.W = W\n self.H = H\n self.radius = r # camera distance from center\n self.fovy = fovy # in degree\n self.center = np.array([0, 0, 0], dtype=np.float32) # look at this point\n self.rot = R.from_quat([1, 0, 0, 0]) # init camera matrix: [[1, 0, 0], [0, -1, 0], [0, 0, 1]] (to suit ngp convention)\n self.up = np.array([0, 1, 0], dtype=np.float32) # need to be normalized!\n\n # pose\n @property\n def pose(self):\n # first move camera to radius\n res = np.eye(4, dtype=np.float32)\n res[2, 3] -= self.radius\n # rotate\n rot = np.eye(4, dtype=np.float32)\n rot[:3, :3] = self.rot.as_matrix()\n res = rot @ res\n # translate\n res[:3, 3] -= self.center\n return res\n \n # intrinsics\n @property\n def intrinsics(self):\n res = np.eye(3, dtype=np.float32)\n focal = self.H / (2 * np.tan(np.radians(self.fovy) / 2))\n res[0, 0] = res[1, 1] = focal\n res[0, 2] = self.W // 2\n res[1, 2] = self.H // 2\n return res\n \n def orbit(self, dx, dy):\n # rotate along camera up/side axis!\n side = self.rot.as_matrix()[:3, 0] # why this is side --> ? # already normalized.\n rotvec_x = self.up * np.radians(-0.1 * dx)\n rotvec_y = side * np.radians(-0.1 * dy)\n self.rot = R.from_rotvec(rotvec_x) * R.from_rotvec(rotvec_y) * self.rot\n\n # wrong: rotate along global x/y axis\n #self.rot = R.from_euler('xy', [-dy * 0.1, -dx * 0.1], degrees=True) * self.rot\n \n def scale(self, delta):\n self.radius *= 1.1 ** (-delta)\n\n def pan(self, dx, dy, dz=0):\n # pan in camera coordinate system (careful on the sensitivity!)\n self.center += 0.001 * self.rot.as_matrix()[:3, :3] @ np.array([dx, dy, dz])\n\n # wrong: pan in global coordinate system\n #self.center += 0.001 * np.array([-dx, -dy, dz])\n \n\n\nclass NeRFGUI:\n def __init__(self, opt, trainer, debug=True):\n self.opt = opt\n self.W = opt.W\n self.H = opt.H\n self.cam = OrbitCamera(opt.W, opt.H, r=opt.radius)\n self.trainer = trainer\n self.debug = debug\n self.bg_color = None # rendering bg color (TODO)\n self.training = False\n self.step = 0 # training step \n\n self.render_buffer = np.zeros((self.W, self.H, 3), dtype=np.float32)\n self.need_update = True # camera moved, should reset accumulation\n self.spp = 1 # sample per pixel\n\n dpg.create_context()\n self.register_dpg()\n self.test_step()\n \n\n def __del__(self):\n dpg.destroy_context()\n\n\n def train_step(self):\n\n starter, ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)\n starter.record()\n\n outputs = self.trainer.train_gui(self.trainer.train_loader)\n\n ender.record()\n torch.cuda.synchronize()\n t = starter.elapsed_time(ender)\n\n self.step += 1\n self.need_update = True\n\n dpg.set_value(\"_log_train_time\", f'{t:.4f}ms')\n dpg.set_value(\"_log_train_log\", f'step = {self.step: 5d}, loss = {outputs[\"loss\"]:.4f}, lr = {outputs[\"lr\"]:.6f}')\n\n \n def test_step(self):\n # TODO: seems we have to move data from GPU --> CPU --> GPU?\n # TODO: dynamic rendering resolution to keep it fluent.\n\n if self.need_update or self.spp < self.opt.max_spp:\n \n starter, ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)\n starter.record()\n\n outputs = self.trainer.test_gui(self.cam.pose, self.cam.intrinsics, self.W, self.H, self.bg_color, self.spp)\n\n ender.record()\n torch.cuda.synchronize()\n t = starter.elapsed_time(ender)\n\n if 
self.need_update:\n self.render_buffer = outputs['image']\n self.spp = 1\n self.need_update = False\n else:\n self.render_buffer = (self.render_buffer * self.spp + outputs['image']) / (self.spp + 1)\n self.spp += 1\n\n dpg.set_value(\"_log_infer_time\", f'{t:.4f}ms')\n dpg.set_value(\"_log_spp\", self.spp)\n dpg.set_value(\"_texture\", self.render_buffer)\n\n \n def register_dpg(self):\n\n ### register texture \n\n with dpg.texture_registry(show=False):\n dpg.add_raw_texture(self.W, self.H, self.render_buffer, format=dpg.mvFormat_Float_rgb, tag=\"_texture\")\n\n ### register window\n\n with dpg.window(tag=\"_primary_window\", width=self.W, height=self.H):\n dpg.add_image(\"_texture\")\n\n dpg.set_primary_window(\"_primary_window\", True)\n\n\n\n with dpg.window(label=\"Control\", tag=\"_control_window\", width=400, height=250):\n\n # button theme\n with dpg.theme() as theme_button:\n with dpg.theme_component(dpg.mvButton):\n dpg.add_theme_color(dpg.mvThemeCol_Button, (23, 3, 18))\n dpg.add_theme_color(dpg.mvThemeCol_ButtonHovered, (51, 3, 47))\n dpg.add_theme_color(dpg.mvThemeCol_ButtonActive, (83, 18, 83))\n dpg.add_theme_style(dpg.mvStyleVar_FrameRounding, 5)\n dpg.add_theme_style(dpg.mvStyleVar_FramePadding, 3, 3)\n\n # time\n if not self.opt.test:\n with dpg.group(horizontal=True):\n dpg.add_text(\"Train time: \")\n dpg.add_text(\"no data\", tag=\"_log_train_time\") \n\n with dpg.group(horizontal=True):\n dpg.add_text(\"Infer time: \")\n dpg.add_text(\"no data\", tag=\"_log_infer_time\")\n \n with dpg.group(horizontal=True):\n dpg.add_text(\"SPP: \")\n dpg.add_text(\"1\", tag=\"_log_spp\")\n\n # train button\n if not self.opt.test:\n with dpg.collapsing_header(label=\"Train\", default_open=True):\n with dpg.group(horizontal=True):\n dpg.add_text(\"Train: \")\n\n def callback_train(sender, app_data):\n if self.training:\n self.training = False\n dpg.configure_item(\"_button_train\", label=\"start\")\n else:\n self.training = True\n dpg.configure_item(\"_button_train\", label=\"stop\")\n\n dpg.add_button(label=\"start\", tag=\"_button_train\", callback=callback_train)\n dpg.bind_item_theme(\"_button_train\", theme_button)\n\n def callback_reset(sender, app_data):\n @torch.no_grad()\n def weight_reset(m: nn.Module):\n reset_parameters = getattr(m, \"reset_parameters\", None)\n if callable(reset_parameters):\n m.reset_parameters()\n self.trainer.model.apply(fn=weight_reset)\n self.trainer.model.reset_extra_state() # for cuda_ray density_grid and step_counter\n self.need_update = True\n\n dpg.add_button(label=\"reset\", tag=\"_button_reset\", callback=callback_reset)\n dpg.bind_item_theme(\"_button_reset\", theme_button)\n\n\n with dpg.group(horizontal=True):\n dpg.add_text(\"Checkpoint: \")\n\n def callback_save(sender, app_data):\n self.trainer.save_checkpoint(full=True, best=False)\n self.trainer.epoch += 1 # use epoch to indicate different calls.\n dpg.set_value(\"_log_ckpt\", \"saved \" + os.path.basename(self.trainer.stats[\"checkpoints\"][-1]))\n\n dpg.add_button(label=\"save\", tag=\"_button_save\", callback=callback_save)\n dpg.bind_item_theme(\"_button_save\", theme_button)\n\n dpg.add_text(\"\", tag=\"_log_ckpt\")\n\n\n with dpg.group(horizontal=True):\n dpg.add_text(\"Log: \")\n dpg.add_text(\"\", tag=\"_log_train_log\")\n\n \n \n # rendering options\n with dpg.collapsing_header(label=\"Options\"):\n # bg_color picker\n def callback_change_bg(sender, app_data):\n self.bg_color = torch.tensor(app_data[:3], dtype=torch.float32) # only need RGB in [0, 1]\n self.need_update = 
True\n\n dpg.add_color_edit((255, 255, 255), label=\"Background Color\", width=200, tag=\"_color_editor\", no_alpha=True, callback=callback_change_bg)\n\n # fov slider\n def callback_set_fovy(sender, app_data):\n self.cam.fovy = app_data\n self.need_update = True\n\n dpg.add_slider_int(label=\"FoV (vertical)\", min_value=1, max_value=120, format=\"%d deg\", default_value=self.cam.fovy, callback=callback_set_fovy)\n\n # debug info\n if self.debug:\n with dpg.collapsing_header(label=\"Debug\"):\n # pose\n dpg.add_separator()\n dpg.add_text(\"Camera Pose:\")\n dpg.add_text(str(self.cam.pose), tag=\"_log_pose\")\n\n\n ### register camera handler\n\n def callback_camera_drag_rotate(sender, app_data):\n\n if not dpg.is_item_focused(\"_primary_window\"):\n return\n\n dx = app_data[1]\n dy = app_data[2]\n\n self.cam.orbit(dx, dy)\n self.need_update = True\n\n if self.debug:\n dpg.set_value(\"_log_pose\", str(self.cam.pose))\n\n\n def callback_camera_wheel_scale(sender, app_data):\n\n if not dpg.is_item_focused(\"_primary_window\"):\n return\n\n delta = app_data\n\n self.cam.scale(delta)\n self.need_update = True\n\n if self.debug:\n dpg.set_value(\"_log_pose\", str(self.cam.pose))\n\n\n def callback_camera_drag_pan(sender, app_data):\n\n if not dpg.is_item_focused(\"_primary_window\"):\n return\n\n dx = app_data[1]\n dy = app_data[2]\n\n self.cam.pan(dx, dy)\n self.need_update = True\n\n if self.debug:\n dpg.set_value(\"_log_pose\", str(self.cam.pose))\n\n\n with dpg.handler_registry():\n dpg.add_mouse_drag_handler(button=dpg.mvMouseButton_Left, callback=callback_camera_drag_rotate)\n dpg.add_mouse_wheel_handler(callback=callback_camera_wheel_scale)\n dpg.add_mouse_drag_handler(button=dpg.mvMouseButton_Middle, callback=callback_camera_drag_pan)\n\n \n dpg.create_viewport(title='torch-ngp', width=self.W, height=self.H, resizable=False)\n \n # TODO: seems dearpygui doesn't support resizing texture...\n # def callback_resize(sender, app_data):\n # self.W = app_data[0]\n # self.H = app_data[1]\n # # how to reload texture ???\n\n # dpg.set_viewport_resize_callback(callback_resize)\n\n ### global theme\n with dpg.theme() as theme_no_padding:\n with dpg.theme_component(dpg.mvAll):\n # set all padding to 0 to avoid scroll bar\n dpg.add_theme_style(dpg.mvStyleVar_WindowPadding, 0, 0, category=dpg.mvThemeCat_Core)\n dpg.add_theme_style(dpg.mvStyleVar_FramePadding, 0, 0, category=dpg.mvThemeCat_Core)\n dpg.add_theme_style(dpg.mvStyleVar_CellPadding, 0, 0, category=dpg.mvThemeCat_Core)\n \n dpg.bind_item_theme(\"_primary_window\", theme_no_padding)\n\n dpg.setup_dearpygui()\n\n #dpg.show_metrics()\n\n dpg.show_viewport()\n\n\n def render(self):\n\n while dpg.is_dearpygui_running():\n # update texture every frame\n if self.training:\n self.train_step()\n self.test_step()\n dpg.render_dearpygui_frame()" ]
[ [ "torch.cuda.synchronize", "numpy.radians", "scipy.spatial.transform.Rotation.from_rotvec", "scipy.spatial.transform.Rotation.from_quat", "numpy.eye", "torch.cuda.Event", "torch.tensor", "torch.no_grad", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.5", "1.3", "1.2", "1.4" ], "tensorflow": [] } ]
brandonwillard/symbolic-pymc
[ "84e8d612c714f502f8d188c1766498f4ff7beecf" ]
[ "symbolic_pymc/theano/ops.py" ]
[ "import numpy as np\nimport theano\nimport theano.tensor as tt\n\nfrom collections.abc import Iterable, ByteString\nfrom warnings import warn\nfrom copy import copy\n\nfrom theano.tensor.raw_random import RandomStateType\n\n\ndef param_supp_shape_fn(ndim_supp, ndims_params, dist_params, rep_param_idx=0, param_shapes=None):\n \"\"\"Infer dimensions for a random variable.\n\n This is a function that derives a random variable's support\n shape/dimensions from one of its parameters.\n\n XXX: It's not always possible to determine a random variable's support\n shape from its parameters, so this function has fundamentally limited\n applicability.\n\n XXX: This function is not expected to handle `ndim_supp = 0` (i.e.\n scalars), since that is already definitively handled in the `Op` that\n calls this.\n\n TODO: Consider using `theano.compile.ops.shape_i` alongside `ShapeFeature`.\n\n Parameters\n ----------\n ndim_supp: int\n Total number of dimensions in the support (assumedly > 0).\n ndims_params: list of int\n Number of dimensions for each distribution parameter.\n dist_params: list of `theano.gof.graph.Variable`\n The distribution parameters.\n param_shapes: list of `theano.compile.ops.Shape` (optional)\n Symbolic shapes for each distribution parameter.\n Providing this value prevents us from reproducing the requisite\n `theano.compile.ops.Shape` object (e.g. when it's already available to\n the caller).\n rep_param_idx: int (optional)\n The index of the distribution parameter to use as a reference\n In other words, a parameter in `dist_param` with a shape corresponding\n to the support's shape.\n The default is the first parameter (i.e. the value 0).\n\n Results\n -------\n out: a tuple representing the support shape for a distribution with the\n given `dist_params`.\n\n \"\"\"\n # XXX: Gotta be careful slicing Theano variables, the `Subtensor` Op isn't\n # handled by `tensor.get_scalar_constant_value`!\n # E.g.\n # test_val = tt.as_tensor_variable([[1], [4]])\n # tt.get_scalar_constant_value(test_val.shape[-1]) # works\n # tt.get_scalar_constant_value(test_val.shape[0]) # doesn't\n # tt.get_scalar_constant_value(test_val.shape[:-1]) # doesn't\n if param_shapes is not None:\n ref_param = param_shapes[rep_param_idx]\n return (ref_param[-ndim_supp],)\n else:\n ref_param = dist_params[rep_param_idx]\n if ref_param.ndim < ndim_supp:\n raise ValueError(\n (\n \"Reference parameter does not match the \"\n f\"expected dimensions; {ref_param} has less than {ndim_supp} dim(s).\"\n )\n )\n return (ref_param.shape[-ndim_supp],)\n\n\nclass RandomVariable(tt.gof.Op):\n \"\"\"An `Op` that produces a sample from a random variable.\n\n This is essentially `RandomFunction`, except that it removes the\n `outtype` dependency and handles shape dimension information more\n directly.\n\n \"\"\"\n\n __props__ = (\"name\", \"dtype\", \"ndim_supp\", \"inplace\", \"ndims_params\")\n default_output = 1\n nondeterministic = True\n\n def __init__(\n self,\n name,\n dtype,\n ndim_supp,\n ndims_params,\n rng_fn,\n *args,\n supp_shape_fn=param_supp_shape_fn,\n inplace=False,\n **kwargs,\n ):\n \"\"\"Create a random variable `Op`.\n\n Parameters\n ----------\n name: str\n The `Op`'s display name.\n dtype: Theano dtype\n The underlying dtype.\n ndim_supp: int\n Dimension of the support. 
This value is used to infer the exact\n shape of the support and independent terms from ``dist_params``.\n ndims_params: tuple (int)\n Number of dimensions of each parameter in ``dist_params``.\n rng_fn: function or str\n The non-symbolic random variate sampling function.\n Can be the string name of a method provided by\n `numpy.random.RandomState`.\n supp_shape_fn: callable (optional)\n Function used to determine the exact shape of the distribution's\n support.\n\n It must take arguments ndim_supp, ndims_params, dist_params\n (i.e. an collection of the distribution parameters) and an\n optional param_shapes (i.e. tuples containing the size of each\n dimension for each distribution parameter).\n\n Defaults to `param_supp_shape_fn`.\n inplace: boolean (optional)\n Determine whether or not the underlying rng state is updated\n in-place or not (i.e. copied).\n\n \"\"\"\n super().__init__(*args, **kwargs)\n\n self.name = name\n self.ndim_supp = ndim_supp\n self.dtype = dtype\n self.supp_shape_fn = supp_shape_fn\n self.inplace = inplace\n\n if not isinstance(ndims_params, Iterable):\n raise ValueError(\"Parameter ndims_params must be iterable.\")\n\n self.ndims_params = tuple(ndims_params)\n\n if isinstance(rng_fn, (str, ByteString)):\n self.rng_fn = getattr(np.random.RandomState, rng_fn)\n else:\n self.rng_fn = rng_fn\n\n def __str__(self):\n return \"{}_rv\".format(self.name)\n\n def _infer_shape(self, size, dist_params, param_shapes=None):\n \"\"\"Compute shapes and broadcasts values.\n\n Inspired by `tt.add.get_output_info`.\n\n \"\"\"\n\n param_shapes = param_shapes or [p.shape for p in dist_params]\n\n def slice_ind_dims(p, ps, n):\n shape = tuple(ps)\n\n if n == 0:\n return (p, shape, p.broadcastable)\n\n ind_slice = (np.s_[:],) * (p.ndim - n) + (0,) * n\n return (p[ind_slice], shape[:-n], p.broadcastable[:-n])\n\n # These are versions of our actual parameters with the expected\n # dimensions removed so that only the independent variate dimensions\n # are left.\n params_ind_slice = tuple(\n slice_ind_dims(p, ps, n)\n for p, ps, n in zip(dist_params, param_shapes, self.ndims_params)\n )\n\n if len(params_ind_slice) == 1:\n ind_param, ind_shape, ind_bcast = params_ind_slice[0]\n ndim_ind = len(ind_shape)\n shape_ind = ind_shape\n elif len(params_ind_slice) > 1:\n # When there are multiple parameters with different dimensions\n # *and* independent dimensions, the independent dimensions should\n # broadcast together. We simply add those independent dimension\n # slices and let `tt.add` work out the broadcasting logic.\n p_slices, p_shapes, p_bcasts = zip(*params_ind_slice)\n (shape_ind,) = tt.add.infer_shape(tt.add(*p_slices).owner, p_shapes)\n ndim_ind = len(shape_ind)\n\n size_len = tt.get_vector_length(size)\n\n if self.ndim_supp == 0:\n shape_supp = tuple()\n\n # In the scalar case, `size` corresponds to the entire result's\n # shape. 
This implies the following:\n # shape_ind == size[:ndim_ind]\n # TODO: Do we wan to constraint/check symbolically?\n\n shape_reps = tuple(size)\n\n if ndim_ind > 0:\n shape_reps = shape_reps[:-ndim_ind]\n\n ndim_reps = len(shape_reps)\n else:\n shape_supp = self.supp_shape_fn(\n self.ndim_supp, self.ndims_params, dist_params, param_shapes=param_shapes\n )\n\n ndim_reps = size_len\n shape_reps = size\n\n ndim_shape = self.ndim_supp + ndim_ind + ndim_reps\n\n if ndim_shape == 0:\n shape = tt.constant([], dtype=\"int64\")\n else:\n shape = tuple(shape_reps) + tuple(shape_ind) + tuple(shape_supp)\n\n # if shape is None:\n # raise tt.ShapeError()\n\n return shape\n\n def compute_bcast(self, dist_params, size):\n \"\"\"Compute the broadcast array for this distribution's `TensorType`.\n\n Parameters\n ----------\n dist_params: list\n Distribution parameters.\n size: int or Iterable (optional)\n Numpy-like size of the output (i.e. replications).\n\n \"\"\"\n shape = self._infer_shape(size, dist_params)\n\n # Let's try to do a better job than `_infer_ndim_bcast` when\n # dimension sizes are symbolic.\n bcast = []\n for s in shape:\n s_owner = getattr(s, \"owner\", None)\n try:\n if (\n s_owner\n and isinstance(s_owner.op, tt.Subtensor)\n and s_owner.inputs[0].owner is not None\n ):\n # Handle a special case in which\n # `tensor.get_scalar_constant_value` doesn't really work.\n s_x, s_idx = s_owner.inputs\n s_idx = tt.get_scalar_constant_value(s_idx)\n if isinstance(s_x.owner.op, tt.Shape):\n (x_obj,) = s_x.owner.inputs\n s_val = x_obj.type.broadcastable[s_idx]\n else:\n # TODO: Could go for an existing broadcastable here,\n # too, no?\n s_val = False\n else:\n s_val = tt.get_scalar_constant_value(s)\n except tt.NotScalarConstantError:\n s_val = False\n\n bcast += [s_val == 1]\n return bcast\n\n def infer_shape(self, node, input_shapes):\n size = node.inputs[-2]\n dist_params = tuple(node.inputs[:-2])\n shape = self._infer_shape(size, dist_params, param_shapes=input_shapes[:-2])\n\n return [None, [s for s in shape]]\n\n def make_node(self, *dist_params, size=None, rng=None, name=None):\n \"\"\"Create a random variable node.\n\n XXX: Unnamed/non-keyword arguments are considered distribution\n parameters! If you want to set `size`, `rng`, and/or `name`, use their\n keywords.\n\n Parameters\n ----------\n dist_params: list\n Distribution parameters.\n size: int or Iterable (optional)\n Numpy-like size of the output (i.e. replications).\n rng: RandomState (optional)\n Existing Theano `RandomState` object to be used. 
Creates a\n new one, if `None`.\n name: str (optional)\n Label for the resulting node.\n\n Results\n -------\n out: `Apply`\n A node with inputs `dist_args + (size, in_rng, name)` and outputs\n `(out_rng, sample_tensorvar)`.\n\n \"\"\"\n if size is None:\n size = tt.constant([], dtype=\"int64\")\n elif isinstance(size, int):\n size = tt.as_tensor_variable([size], ndim=1)\n elif not isinstance(size, Iterable):\n raise ValueError(\"Parameter size must be None, int, or an iterable with ints.\")\n else:\n size = tt.as_tensor_variable(size, ndim=1)\n\n assert size.dtype in tt.int_dtypes\n\n dist_params = tuple(tt.as_tensor_variable(p) for p in dist_params)\n\n if rng is None:\n rng = theano.shared(np.random.RandomState())\n elif not isinstance(rng.type, RandomStateType):\n warn(\"The type of rng should be an instance of RandomStateType\")\n\n bcast = self.compute_bcast(dist_params, size)\n\n # dtype = tt.scal.upcast(self.dtype, *[p.dtype for p in dist_params])\n\n outtype = tt.TensorType(dtype=self.dtype, broadcastable=bcast)\n out_var = outtype(name=name)\n inputs = dist_params + (size, rng)\n outputs = (rng.type(), out_var)\n\n return theano.gof.Apply(self, inputs, outputs)\n\n def perform(self, node, inputs, outputs):\n \"\"\"Draw samples using Numpy/SciPy.\"\"\"\n rng_out, smpl_out = outputs\n\n args = list(inputs)\n rng = args.pop()\n size = args.pop()\n\n assert isinstance(rng, np.random.RandomState), (type(rng), rng)\n\n rng_out[0] = rng\n\n # The symbolic output variable corresponding to value produced here.\n out_var = node.outputs[1]\n\n # If `size == []`, that means no size is enforced, and NumPy is\n # trusted to draw the appropriate number of samples, NumPy uses\n # `size=None` to represent that. Otherwise, NumPy expects a tuple.\n if np.size(size) == 0:\n size = None\n else:\n size = tuple(size)\n\n # Draw from `rng` if `self.inplace` is `True`, and from a copy of `rng`\n # otherwise.\n if not self.inplace:\n rng = copy(rng)\n\n smpl_val = self.rng_fn(rng, *(args + [size]))\n\n if not isinstance(smpl_val, np.ndarray) or str(smpl_val.dtype) != out_var.type.dtype:\n smpl_val = theano._asarray(smpl_val, dtype=out_var.type.dtype)\n\n # When `size` is `None`, NumPy has a tendency to unexpectedly\n # return a scalar instead of a higher-dimension array containing\n # only one element. This value should be reshaped\n # TODO: Really? Why shouldn't the output correctly correspond to\n # the returned NumPy value? Sounds more like a mis-specification of\n # the symbolic output variable.\n if size is None and smpl_val.ndim == 0 and out_var.ndim > 0:\n smpl_val = smpl_val.reshape([1] * out_var.ndim)\n\n smpl_out[0] = smpl_val\n\n def grad(self, inputs, outputs):\n return [\n theano.gradient.grad_undefined(\n self, k, inp, \"No gradient defined through raw random numbers op\"\n )\n for k, inp in enumerate(inputs)\n ]\n\n def R_op(self, inputs, eval_points):\n return [None for i in eval_points]\n" ]
[ [ "numpy.size", "numpy.random.RandomState" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
johli/scrambler
[ "608f6b50efc1cb222d8df8a9f0231a4a9c1a9c1a" ]
[ "scrambler/models/scrambler_models.py" ]
[ "import keras\nfrom keras.models import Sequential, Model, load_model\n\nfrom keras.layers import Dense, Dropout, Activation, Flatten, Input, Lambda\nfrom keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, Conv1D, MaxPooling1D, LSTM, ConvLSTM2D, GRU, CuDNNLSTM, CuDNNGRU, BatchNormalization, LocallyConnected2D, Permute, TimeDistributed, Bidirectional\nfrom keras.layers import Concatenate, Reshape, Softmax, Conv2DTranspose, Embedding, Multiply\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping, Callback\nfrom keras import regularizers\nfrom keras import backend as K\nfrom keras.utils.generic_utils import Progbar\nfrom keras.layers.merge import _Merge\nimport keras.losses\n\nfrom functools import partial\n\nfrom collections import defaultdict\n\nimport tensorflow as tf\nfrom tensorflow.python.framework import ops\n\nimport isolearn.keras as iso\n\nimport numpy as np\n\nimport tensorflow as tf\nimport logging\nlogging.getLogger('tensorflow').setLevel(logging.ERROR)\n\nimport os\nimport pickle\nimport numpy as np\nimport pandas as pd\n\nimport scipy.sparse as sp\nimport scipy.io as spio\n\nimport matplotlib.pyplot as plt\n\nfrom keras.backend.tensorflow_backend import set_session\n\nfrom scipy.signal import gaussian\n\ndef contain_tf_gpu_mem_usage() :\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n set_session(sess)\n\ncontain_tf_gpu_mem_usage()\n\nclass EpochVariableCallback(Callback) :\n \n def __init__(self, my_variable, my_func) :\n self.my_variable = my_variable \n self.my_func = my_func\n \n def on_epoch_begin(self, epoch, logs={}) :\n K.set_value(self.my_variable, self.my_func(K.get_value(self.my_variable), epoch))\n\nfrom tensorflow.python.framework import ops\n\n\nfrom keras.layers import Layer, InputSpec\nfrom keras import initializers, regularizers, constraints\n\nclass InstanceNormalization(Layer):\n def __init__(self, axes=(1, 2), trainable=True, **kwargs):\n super(InstanceNormalization, self).__init__(**kwargs)\n self.axes = axes\n self.trainable = trainable\n def build(self, input_shape):\n self.beta = self.add_weight(name='beta',shape=(input_shape[-1],),\n initializer='zeros',trainable=self.trainable)\n self.gamma = self.add_weight(name='gamma',shape=(input_shape[-1],),\n initializer='ones',trainable=self.trainable)\n def call(self, inputs):\n mean, variance = tf.nn.moments(inputs, self.axes, keep_dims=True)\n return tf.nn.batch_normalization(inputs, mean, variance, self.beta, self.gamma, 1e-6)\n\n#Stochastic Binarized Neuron helper functions (Tensorflow)\n#ST Estimator code adopted from https://r2rt.com/beyond-binary-ternary-and-one-hot-neurons.html\n#See Github https://github.com/spitis/\n\ndef st_sampled_softmax(logits):\n with ops.name_scope(\"STSampledSoftmax\") as namescope :\n nt_probs = tf.nn.softmax(logits)\n onehot_dim = logits.get_shape().as_list()[1]\n sampled_onehot = tf.one_hot(tf.squeeze(tf.multinomial(logits, 1), 1), onehot_dim, 1.0, 0.0)\n with tf.get_default_graph().gradient_override_map({'Ceil': 'Identity', 'Mul': 'STMul'}):\n return tf.ceil(sampled_onehot * nt_probs)\n\ndef st_hardmax_softmax(logits):\n with ops.name_scope(\"STHardmaxSoftmax\") as namescope :\n nt_probs = tf.nn.softmax(logits)\n onehot_dim = logits.get_shape().as_list()[1]\n sampled_onehot = tf.one_hot(tf.argmax(nt_probs, 1), onehot_dim, 1.0, 0.0)\n with tf.get_default_graph().gradient_override_map({'Ceil': 'Identity', 'Mul': 'STMul'}):\n return tf.ceil(sampled_onehot * nt_probs)\n\[email protected](\"STMul\")\ndef 
st_mul(op, grad):\n return [grad, grad]\n\n#Gumbel Distribution Sampler\ndef gumbel_softmax(logits, temperature=0.5) :\n gumbel_dist = tf.contrib.distributions.RelaxedOneHotCategorical(temperature, logits=logits)\n batch_dim = logits.get_shape().as_list()[0]\n onehot_dim = logits.get_shape().as_list()[1]\n return gumbel_dist.sample()\n\n#PWM Masking and Sampling helper functions\n\ndef mask_pwm(inputs) :\n pwm, onehot_template, onehot_mask = inputs\n\n return pwm * onehot_mask + onehot_template\n\ndef sample_pwm_st(pwm_logits, n_channels=4, temperature=None) :\n n_examples = K.shape(pwm_logits)[0]\n input_size_x = K.shape(pwm_logits)[1]\n input_size_y = K.shape(pwm_logits)[2]\n\n flat_pwm = K.reshape(pwm_logits, (n_examples * input_size_x * input_size_y, n_channels))\n sampled_pwm = st_sampled_softmax(flat_pwm)\n\n return K.reshape(sampled_pwm, (n_examples, input_size_x, input_size_y, n_channels))\n\ndef sample_pwm_gumbel(pwm_logits, n_channels=4, temperature=0.5) :\n n_examples = K.shape(pwm_logits)[0]\n input_size_x = K.shape(pwm_logits)[1]\n input_size_y = K.shape(pwm_logits)[2]\n\n flat_pwm = K.reshape(pwm_logits, (n_examples * input_size_x * input_size_y, n_channels))\n sampled_pwm = gumbel_softmax(flat_pwm, temperature=temperature)\n\n return K.reshape(sampled_pwm, (n_examples, input_size_x, input_size_y, n_channels))\n\n#Generator helper functions\ndef initialize_templates(model, template_matrices, background_matrices, model_prefix='') :\n\n n_channels = template_matrices[0].shape[-1]\n \n embedding_templates = []\n embedding_masks = []\n embedding_backgrounds = []\n\n for k in range(len(template_matrices)) :\n onehot_template = template_matrices[k]\n onehot_template_log = np.zeros(onehot_template.shape)\n\n for i in range(onehot_template.shape[0]) :\n for j in range(onehot_template.shape[1]) :\n if np.sum(onehot_template[i, j, :]) >= 1. :\n channel_ix = np.argmax(onehot_template[i, j, :])\n onehot_template_log[i, j, :] = -4.0\n onehot_template_log[i, j, channel_ix] = 10.0\n\n onehot_mask = np.zeros(onehot_template.shape)\n for i in range(onehot_template.shape[0]) :\n for j in range(onehot_template.shape[1]) :\n if np.sum(onehot_template[i, j, :]) <= 0. 
:\n onehot_mask[i, j, :] = 1.0\n\n embedding_templates.append(onehot_template_log.reshape(1, -1))\n embedding_masks.append(onehot_mask.reshape(1, -1))\n embedding_backgrounds.append(background_matrices[k].reshape(1, -1))\n\n embedding_templates = np.concatenate(embedding_templates, axis=0)\n embedding_masks = np.concatenate(embedding_masks, axis=0)\n embedding_backgrounds = np.concatenate(embedding_backgrounds, axis=0)\n\n model.get_layer(model_prefix + 'template_dense').set_weights([embedding_templates])\n model.get_layer(model_prefix + 'template_dense').trainable = False\n\n model.get_layer(model_prefix + 'mask_dense').set_weights([embedding_masks])\n model.get_layer(model_prefix + 'mask_dense').trainable = False\n \n model.get_layer(model_prefix + 'background_dense').set_weights([embedding_backgrounds])\n model.get_layer(model_prefix + 'background_dense').trainable = False\n\n#Generator construction function\ndef build_sampler(batch_size, input_size_x, input_size_y, n_classes=1, n_samples=1, sample_mode='st', n_channels=4, gumbel_temp=0.5, model_prefix='') :\n\n #Initialize Reshape layer\n reshape_layer = Reshape((input_size_x, input_size_y, n_channels))\n \n #Initialize background matrix\n onehot_background_dense = Embedding(n_classes, input_size_x * input_size_y * n_channels, embeddings_initializer='zeros', name=model_prefix + 'background_dense')\n\n #Initialize template and mask matrices\n onehot_template_dense = Embedding(n_classes, input_size_x * input_size_y * n_channels, embeddings_initializer='zeros', name=model_prefix + 'template_dense')\n onehot_mask_dense = Embedding(n_classes, input_size_x * input_size_y * n_channels, embeddings_initializer='ones', name=model_prefix + 'mask_dense')\n\n #Initialize Templating and Masking Lambda layer\n masking_layer = Lambda(mask_pwm, output_shape = (input_size_x, input_size_y, n_channels), name=model_prefix + 'masking_layer')\n background_layer = Lambda(lambda x: x[0] + x[1], name=model_prefix + 'background_layer')\n \n #Initialize PWM normalization layer\n pwm_layer = Softmax(axis=-1, name=model_prefix + 'pwm')\n \n #Initialize sampling layers\n sample_func = None\n if sample_mode == 'st' :\n sample_func = sample_pwm_st\n elif sample_mode == 'gumbel' :\n sample_func = sample_pwm_gumbel\n \n upsampling_layer = Lambda(lambda x: K.tile(x, [n_samples, 1, 1, 1]), name=model_prefix + 'upsampling_layer')\n sampling_layer = Lambda(lambda x: sample_func(x, n_channels=n_channels, temperature=gumbel_temp), name=model_prefix + 'pwm_sampler')\n permute_layer = Lambda(lambda x: K.permute_dimensions(K.reshape(x, (n_samples, batch_size, input_size_x, input_size_y, n_channels)), (1, 0, 2, 3, 4)), name=model_prefix + 'permute_layer')\n \n def _sampler_func(class_input, raw_logits) :\n \n #Get Template and Mask\n onehot_background = reshape_layer(onehot_background_dense(class_input))\n onehot_template = reshape_layer(onehot_template_dense(class_input))\n onehot_mask = reshape_layer(onehot_mask_dense(class_input))\n \n #Add Template and Multiply Mask\n pwm_logits = masking_layer([background_layer([raw_logits, onehot_background]), onehot_template, onehot_mask])\n \n #Compute PWM (Nucleotide-wise Softmax)\n pwm = pwm_layer(pwm_logits)\n \n #Tile each PWM to sample from and create sample axis\n pwm_logits_upsampled = upsampling_layer(pwm_logits)\n sampled_pwm = sampling_layer(pwm_logits_upsampled)\n sampled_pwm = permute_layer(sampled_pwm)\n\n sampled_mask = permute_layer(upsampling_layer(onehot_mask))\n \n return pwm_logits, pwm, sampled_pwm, onehot_mask, 
sampled_mask\n \n return _sampler_func\n\n#Scrambler network definition\n\ndef make_resblock(n_channels=64, window_size=8, dilation_rate=1, group_ix=0, layer_ix=0, drop_rate=0.0, norm_mode='instance') :\n\n #Initialize res block layers\n batch_norm_0 = lambda x: x\n if norm_mode == 'instance' :\n batch_norm_0 = InstanceNormalization(name='scrambler_resblock_' + str(group_ix) + '_' + str(layer_ix) + '_norm_0')\n elif norm_mode == 'batch' :\n batch_norm_0 = BatchNormalization(name='scrambler_resblock_' + str(group_ix) + '_' + str(layer_ix) + '_norm_0')\n\n relu_0 = Lambda(lambda x: K.relu(x, alpha=0.0))\n\n conv_0 = Conv2D(n_channels, window_size, dilation_rate=dilation_rate, strides=(1, 1), padding='same', activation='linear', kernel_initializer='glorot_normal', name='scrambler_resblock_' + str(group_ix) + '_' + str(layer_ix) + '_conv_0')\n\n batch_norm_1 = lambda x: x\n if norm_mode == 'instance' :\n batch_norm_1 = InstanceNormalization(name='scrambler_resblock_' + str(group_ix) + '_' + str(layer_ix) + '_norm_1')\n elif norm_mode == 'batch' :\n batch_norm_1 = BatchNormalization(name='scrambler_resblock_' + str(group_ix) + '_' + str(layer_ix) + '_norm_1')\n\n relu_1 = Lambda(lambda x: K.relu(x, alpha=0.0))\n\n conv_1 = Conv2D(n_channels, window_size, dilation_rate=dilation_rate, strides=(1, 1), padding='same', activation='linear', kernel_initializer='glorot_normal', name='scrambler_resblock_' + str(group_ix) + '_' + str(layer_ix) + '_conv_1')\n\n skip_1 = Lambda(lambda x: x[0] + x[1], name='scrambler_resblock_' + str(group_ix) + '_' + str(layer_ix) + '_skip_1')\n\n drop_1 = None\n if drop_rate > 0.0 :\n drop_1 = Dropout(drop_rate)\n \n #Execute res block\n def _resblock_func(input_tensor) :\n batch_norm_0_out = batch_norm_0(input_tensor)\n relu_0_out = relu_0(batch_norm_0_out)\n conv_0_out = conv_0(relu_0_out)\n\n batch_norm_1_out = batch_norm_1(conv_0_out)\n relu_1_out = relu_1(batch_norm_1_out)\n \n if drop_rate > 0.0 :\n conv_1_out = drop_1(conv_1(relu_1_out))\n else :\n conv_1_out = conv_1(relu_1_out)\n\n skip_1_out = skip_1([conv_1_out, input_tensor])\n \n return skip_1_out\n\n return _resblock_func\n\ndef mask_dropout_multi_scale(mask, n_spatial_dims=1, drop_scales=[1, 2, 4, 7], min_drop_rate=0.0, max_drop_rate=0.5) :\n \n rates = K.random_uniform(shape=(K.shape(mask)[0], 1, 1, 1), minval=min_drop_rate, maxval=max_drop_rate)\n \n scale_logits = K.random_uniform(shape=(K.shape(mask)[0], len(drop_scales), 1, 1, 1), minval=-5., maxval=5.)\n scale_probs = K.softmax(scale_logits, axis=1)\n \n ret_mask = mask\n for drop_scale_ix, drop_scale in enumerate(drop_scales) :\n ret_mask = mask_dropout(ret_mask, rates * scale_probs[:, drop_scale_ix, ...], drop_scale=drop_scale, n_spatial_dims=n_spatial_dims)\n \n return K.switch(K.learning_phase(), ret_mask, mask)\n\ndef mask_dropout(mask, drop_rates, drop_scale=1, n_spatial_dims=1) :\n \n random_tensor_downsampled = K.random_uniform(shape=(\n K.shape(mask)[0],\n 1 if n_spatial_dims == 1 else K.cast(K.shape(mask)[1] / drop_scale, dtype=tf.int32),\n K.cast(K.shape(mask)[2] / drop_scale, dtype=tf.int32),\n K.shape(mask)[3]\n ), minval=0.0, maxval=1.0)\n \n keep_mask_downsampled = random_tensor_downsampled >= drop_rates\n \n keep_mask = K.repeat_elements(keep_mask_downsampled, rep=drop_scale, axis=2)\n if n_spatial_dims > 1 :\n keep_mask = K.repeat_elements(keep_mask, rep=drop_scale, axis=1)\n \n ret_mask = mask * K.cast(keep_mask, dtype=tf.float32)\n \n return ret_mask\n\ndef mask_dropout_single_scale(mask, n_spatial_dims=1, drop_scale=1, 
min_drop_rate=0.0, max_drop_rate=0.5) :\n \n rates = K.random_uniform(shape=(K.shape(mask)[0], 1, 1, 1), minval=min_drop_rate, maxval=max_drop_rate)\n \n random_tensor_downsampled = K.random_uniform(shape=(\n K.shape(mask)[0],\n 1 if n_spatial_dims == 1 else K.cast(K.shape(mask)[1] / drop_scale, dtype=tf.int32),\n K.cast(K.shape(mask)[2] / drop_scale, dtype=tf.int32),\n K.shape(mask)[3]\n ), minval=0.0, maxval=1.0)\n \n keep_mask_downsampled = random_tensor_downsampled >= rates\n \n keep_mask = K.repeat_elements(keep_mask_downsampled, rep=drop_scale, axis=2)\n if n_spatial_dims > 1 :\n keep_mask = K.repeat_elements(keep_mask, rep=drop_scale, axis=1)\n \n ret_mask = mask * K.cast(keep_mask, dtype=tf.float32)\n \n return K.switch(K.learning_phase(), ret_mask, mask)\n\ndef load_scrambler_network(input_size_x, input_size_y, scrambler_mode='inclusion', n_out_channels=4, n_spatial_dims=1, n_groups=1, n_resblocks_per_group=4, n_channels=32, window_size=8, mask_smoothing=False, smooth_window_size=None, dilation_rates=[1], drop_rate=0.0, norm_mode='instance', mask_dropout=False, mask_drop_scales=[1, 5], mask_min_drop_rate=0.0, mask_max_drop_rate=0.5, use_label_input=False) :\n\n conv_0 = Conv2D(n_channels, (1, 1), strides=(1, 1), padding='same', activation='linear', kernel_initializer='glorot_normal', name='scrambler_conv_0')\n \n label_concat = None\n if use_label_input :\n label_concat = Lambda(lambda x: K.concatenate([x[0], K.tile(K.expand_dims(K.expand_dims(x[1], axis=-1), axis=-1), (1, K.shape(x[0])[1], K.shape(x[0])[2], 1))], axis=-1))\n \n mask_drop = None\n mask_concat = None\n mask_multiply = None\n if mask_dropout :\n if len(mask_drop_scales) <= 1 :\n mask_drop = Lambda(lambda x: mask_dropout_single_scale(x, drop_scale=mask_drop_scales[0], min_drop_rate=mask_min_drop_rate, max_drop_rate=mask_max_drop_rate), output_shape=(1, input_size_y, 1) if n_spatial_dims == 1 else (input_size_x, input_size_y, 1), name='scrambler_mask_drop')\n else :\n mask_drop = Lambda(lambda x: mask_dropout_multi_scale(x, drop_scales=mask_drop_scales, min_drop_rate=mask_min_drop_rate, max_drop_rate=mask_max_drop_rate), output_shape=(1, input_size_y, 1) if n_spatial_dims == 1 else (input_size_x, input_size_y, 1), name='scrambler_mask_drop')\n \n mask_concat = Concatenate(axis=-1)\n mask_multiply = Lambda(lambda x: x[0] * x[1])\n \n skip_convs = []\n resblock_groups = []\n for group_ix in range(n_groups) :\n \n skip_convs.append(Conv2D(n_channels, (1, 1), strides=(1, 1), padding='same', activation='linear', kernel_initializer='glorot_normal', name='scrambler_skip_conv_' + str(group_ix)))\n \n resblocks = []\n for layer_ix in range(n_resblocks_per_group) :\n resblocks.append(make_resblock(n_channels=n_channels, window_size=(1, window_size) if n_spatial_dims == 1 else (window_size, window_size), dilation_rate=dilation_rates[group_ix], group_ix=group_ix, layer_ix=layer_ix, drop_rate=drop_rate, norm_mode=norm_mode))\n \n resblock_groups.append(resblocks)\n\n last_block_conv = Conv2D(n_channels, (1, 1), strides=(1, 1), padding='same', activation='linear', kernel_initializer='glorot_normal', name='scrambler_last_block_conv')\n \n skip_add = Lambda(lambda x: x[0] + x[1], name='scrambler_skip_add')\n \n final_conv = Conv2D(1, (1, 1), strides=(1, 1), padding='same', activation='softplus', kernel_initializer='glorot_normal', name='scrambler_final_conv')\n \n smooth_conv = None\n if mask_smoothing :\n smooth_conv = Conv2D(1, (1, smooth_window_size) if n_spatial_dims == 1 else (smooth_window_size, smooth_window_size), strides=(1, 
1), use_bias=False, padding='same', activation='linear', kernel_initializer='ones', name='scrambler_smooth_conv')\n \n onehot_to_logits = Lambda(lambda x: 2. * x - 1., name='scrambler_onehot_to_logits')\n \n scale_logits = Lambda(lambda x: x[1] * K.tile(x[0], (1, 1, 1, n_out_channels)), name='scrambler_logit_scale')\n if scrambler_mode == 'occlusion' :\n scale_logits = Lambda(lambda x: x[1] / K.maximum(K.tile(x[0], (1, 1, 1, n_out_channels)), K.epsilon()), name='scrambler_logit_scale')\n \n def _scrambler_func(example_input, mask_input=None, label_input=None) :\n \n total_input = example_input\n if use_label_input :\n total_input = label_concat([total_input, label_input])\n if mask_dropout :\n mask_dropped = mask_drop(mask_input)\n total_input = mask_concat([total_input, mask_dropped])\n \n conv_0_out = conv_0(total_input)\n\n #Connect group of res blocks\n output_tensor = conv_0_out\n\n #Res block group execution\n skip_conv_outs = []\n for group_ix in range(n_groups) :\n skip_conv_out = skip_convs[group_ix](output_tensor)\n skip_conv_outs.append(skip_conv_out)\n\n for layer_ix in range(n_resblocks_per_group) :\n output_tensor = resblock_groups[group_ix][layer_ix](output_tensor)\n \n #Last res block extr conv\n last_block_conv_out = last_block_conv(output_tensor)\n\n skip_add_out = last_block_conv_out\n for group_ix in range(n_groups) :\n skip_add_out = skip_add([skip_add_out, skip_conv_outs[group_ix]])\n\n #Final conv out\n final_conv_out = final_conv(skip_add_out)\n \n if mask_dropout :\n final_conv_out = mask_multiply([final_conv_out, mask_dropped])\n \n if mask_smoothing :\n final_conv_out = smooth_conv(final_conv_out)\n \n #Scale logits by importance scores\n scaled_logits = scale_logits([final_conv_out, onehot_to_logits(example_input)])\n \n return scaled_logits, final_conv_out\n\n return _scrambler_func\n\ndef load_finetuning_model(batch_size, input_size_x, input_size_y, scrambler_mode='inclusion', n_out_channels=4, n_spatial_dims=1, mask_smoothing=False, smooth_window_size=None, norm_mode='instance', max_score_clip=4.) :\n\n #seed_input = Lambda(lambda x: K.zeros((K.shape(x)[0], 1), dtype=tf.int32))\n seed_input = Lambda(lambda x: K.constant(np.arange(batch_size), dtype=tf.int32))\n \n mask_dense = Embedding(batch_size, input_size_x * input_size_y, embeddings_initializer='glorot_normal', name='ft_scrambler_mask_dense')\n \n mask_reshape = Reshape((input_size_x, input_size_y, 1))\n \n mask_conv, mask_norm = None, None\n if norm_mode is not None and norm_mode == 'conv' :\n mask_conv = Conv2D(1, (1, 1), strides=(1, 1), padding='same', activation='linear', kernel_initializer='glorot_normal', name='ft_scrambler_mask_conv')\n elif norm_mode is not None and norm_mode == 'instance' :\n mask_norm = InstanceNormalization(name='ft_scrambler_mask_norm')\n \n mask_act = Activation('softplus')\n \n smooth_conv = None\n if mask_smoothing :\n smooth_conv = Conv2D(1, (1, smooth_window_size) if n_spatial_dims == 1 else (smooth_window_size, smooth_window_size), strides=(1, 1), use_bias=False, padding='same', activation='linear', kernel_initializer='ones', name='ft_scrambler_smooth_conv')\n \n onehot_to_logits = Lambda(lambda x: 2. 
* x - 1., name='ft_scrambler_onehot_to_logits')\n \n scale_logits = Lambda(lambda x: x[1] * K.tile(x[0], (1, 1, 1, n_out_channels)), name='ft_scrambler_logit_scale')\n if scrambler_mode == 'occlusion' :\n scale_logits = Lambda(lambda x: x[1] / K.maximum(K.tile(x[0], (1, 1, 1, n_out_channels)), K.epsilon()), name='ft_scrambler_logit_scale')\n \n clip_scores = Lambda(lambda x, max_score_clip=max_score_clip: K.relu(K.clip(x[1], 0., max_score_clip) - x[0]), name='ft_scrambler_clip_scores')\n \n drop_multiply = Lambda(lambda x: x[0] * x[1], name='ft_scrambler_drop_multiply')\n \n def _scrambler_func(sequence_input, drop_input, pretrained_scores) :\n\n mask_in = mask_reshape(mask_dense(seed_input(sequence_input)))\n \n #Final conv out\n if norm_mode is not None and norm_mode == 'conv' : \n mask_out = mask_conv(mask_in)\n elif norm_mode is not None and norm_mode == 'instance' :\n mask_out = mask_norm(mask_in)\n #mask_out = mask_norm(mask_in, training=True)\n else :\n mask_out = mask_in\n \n mask_act_out = mask_act(mask_out)\n \n scores_out = drop_multiply([clip_scores([mask_act_out, pretrained_scores]), drop_input])\n \n if mask_smoothing :\n scores_out = smooth_conv(scores_out)\n \n #Scale inputs by importance scores\n scaled_inputs = scale_logits([scores_out, onehot_to_logits(sequence_input)])\n \n return scaled_inputs, scores_out\n\n return _scrambler_func\n\ndef load_optimization_model(batch_size, input_size_x, input_size_y, scrambler_mode='inclusion', n_out_channels=4, n_spatial_dims=1, mask_smoothing=False, smooth_window_size=None, norm_mode='instance') :\n\n #seed_input = Lambda(lambda x: K.zeros((K.shape(x)[0], 1), dtype=tf.int32))\n seed_input = Lambda(lambda x: K.constant(np.arange(batch_size), dtype=tf.int32))\n \n mask_dense = Embedding(batch_size, input_size_x * input_size_y, embeddings_initializer='glorot_normal', name='ot_scrambler_mask_dense')\n \n mask_reshape = Reshape((input_size_x, input_size_y, 1))\n \n mask_conv, mask_norm = None, None\n if norm_mode is not None and norm_mode == 'conv' :\n mask_conv = Conv2D(1, (1, 1), strides=(1, 1), padding='same', activation='linear', kernel_initializer='glorot_normal', name='ot_scrambler_mask_conv')\n elif norm_mode is not None and norm_mode == 'instance' :\n mask_norm = InstanceNormalization(name='ot_scrambler_mask_norm')\n \n mask_act = Activation('softplus')\n \n smooth_conv = None\n if mask_smoothing :\n smooth_conv = Conv2D(1, (1, smooth_window_size) if n_spatial_dims == 1 else (smooth_window_size, smooth_window_size), strides=(1, 1), use_bias=False, padding='same', activation='linear', kernel_initializer='ones', name='ot_scrambler_smooth_conv')\n \n onehot_to_logits = Lambda(lambda x: 2. 
* x - 1., name='ot_scrambler_onehot_to_logits')\n \n scale_logits = Lambda(lambda x: x[1] * K.tile(x[0], (1, 1, 1, n_out_channels)), name='ot_scrambler_logit_scale')\n if scrambler_mode == 'occlusion' :\n scale_logits = Lambda(lambda x: x[1] / K.maximum(K.tile(x[0], (1, 1, 1, n_out_channels)), K.epsilon()), name='ot_scrambler_logit_scale')\n \n drop_multiply = Lambda(lambda x: x[0] * x[1], name='ot_scrambler_drop_multiply')\n \n def _scrambler_func(sequence_input, drop_input) :\n\n mask_in = mask_reshape(mask_dense(seed_input(sequence_input)))\n \n #Final conv out\n if norm_mode is not None and norm_mode == 'conv' : \n mask_out = mask_conv(mask_in)\n elif norm_mode is not None and norm_mode == 'instance' :\n mask_out = mask_norm(mask_in)\n #mask_out = mask_norm(mask_in, training=True)\n else :\n mask_out = mask_in\n \n mask_act_out = mask_act(mask_out)\n \n scores_out = drop_multiply([mask_act_out, drop_input])\n \n if mask_smoothing :\n scores_out = smooth_conv(scores_out)\n \n #Scale inputs by importance scores\n scaled_inputs = scale_logits([scores_out, onehot_to_logits(sequence_input)])\n \n return scaled_inputs, scores_out\n\n return _scrambler_func\n\n#Keras loss functions\n\ndef get_mse() :\n \n def _mse(y_true, y_pred) :\n return K.mean((y_true[..., 0] - y_pred[..., 0])**2, axis=-1)\n \n return _mse\n\ndef get_linear_max_nll() :\n \n def _mse(y_true, y_pred) :\n return -K.mean(y_pred[..., 0], axis=-1)\n \n return _mse\n\ndef get_linear_min_nll() :\n \n def _mse(y_true, y_pred) :\n return K.mean(y_pred[..., 0], axis=-1)\n \n return _mse\n\ndef get_softmax_kl_divergence() :\n\n def _softmax_kl_divergence(y_true, y_pred) :\n\n y_true = K.clip(y_true, K.epsilon(), 1.0 - K.epsilon())\n y_pred = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon())\n\n return K.mean(K.sum(y_true * K.log(y_true / y_pred), axis=-1), axis=-1)\n \n return _softmax_kl_divergence\n\ndef get_sigmoid_kl_divergence() :\n\n def _kl_divergence(y_true, y_pred) :\n\n y_true = K.clip(y_true[..., 0], K.epsilon(), 1.0 - K.epsilon())\n y_pred = K.clip(y_pred[..., 0], K.epsilon(), 1.0 - K.epsilon())\n\n return K.mean(y_true * K.log(y_true / y_pred) + (1.0 - y_true) * K.log((1.0 - y_true) / (1.0 - y_pred)), axis=-1)\n \n return _kl_divergence\n\ndef get_symmetric_sigmoid_kl_divergence() :\n\n def _kl_divergence(y_true, y_pred) :\n\n y_pred = K.clip(y_pred[..., 0], K.epsilon(), 1.0 - K.epsilon())\n y_true = K.clip(y_true[..., 0], K.epsilon(), 1.0 - K.epsilon())\n \n left_mean_kl = K.mean(y_true * K.log(y_true / y_pred) + (1.0 - y_true) * K.log((1.0 - y_true) / (1.0 - y_pred)), axis=-1)\n right_mean_kl = K.mean(y_pred * K.log(y_pred / y_true) + (1.0 - y_pred) * K.log((1.0 - y_pred) / (1.0 - y_true)), axis=-1)\n\n return left_mean_kl + right_mean_kl\n \n return _kl_divergence\n\ndef get_sigmoid_max_nll() :\n\n def _max_nll(y_pred) :\n\n y_pred = K.clip(y_pred[..., 0], K.epsilon(), 1.0)\n \n return K.mean(-K.log(y_pred), axis=-1)\n \n return _max_nll\n\ndef get_sigmoid_min_nll() :\n\n def _min_nll(y_pred) :\n\n y_pred = K.clip(y_pred[..., 0], 0.0, 1.0 - K.epsilon())\n \n return K.mean(-K.log(1.0 - y_pred), axis=-1)\n \n return _min_nll\n\ndef get_margin_entropy_ame_masked(x_start, x_end, y_start, y_end, max_bits=1.0) :\n \n def _margin_entropy_ame_masked(pwm, pwm_mask, pwm_background) :\n conservation = pwm[:, x_start:x_end, y_start:y_end, :] * K.log(K.clip(pwm[:, x_start:x_end, y_start:y_end, :], K.epsilon(), 1. 
- K.epsilon()) / pwm_background[:, x_start:x_end, y_start:y_end, :]) / K.log(2.0)\n conservation = K.sum(conservation, axis=-1)\n \n mask = K.max(pwm_mask[:, x_start:x_end, y_start:y_end, :], axis=-1)\n n_unmasked = K.sum(mask, axis=(1, 2))\n \n mean_conservation = K.sum(conservation * mask, axis=(1, 2)) / n_unmasked\n\n margin_conservation = K.switch(mean_conservation > K.constant(max_bits, shape=(1,)), mean_conservation - K.constant(max_bits, shape=(1,)), K.zeros_like(mean_conservation))\n \n return margin_conservation\n \n return _margin_entropy_ame_masked\n\ndef get_target_entropy_sme_masked(x_start, x_end, y_start, y_end, target_bits=1.0) :\n \n def _target_entropy_sme_masked(pwm, pwm_mask, pwm_background) :\n conservation = pwm[:, x_start:x_end, y_start:y_end, :] * K.log(K.clip(pwm[:, x_start:x_end, y_start:y_end, :], K.epsilon(), 1. - K.epsilon()) / pwm_background[:, x_start:x_end, y_start:y_end, :]) / K.log(2.0)\n conservation = K.sum(conservation, axis=-1)\n \n mask = K.max(pwm_mask[:, x_start:x_end, y_start:y_end, :], axis=-1)\n n_unmasked = K.sum(mask, axis=(1, 2))\n \n mean_conservation = K.sum(conservation * mask, axis=(1, 2)) / n_unmasked\n\n return (mean_conservation - target_bits)**2\n \n return _target_entropy_sme_masked\n\ndef get_weighted_loss(loss_coeff=1.) :\n \n def _min_pred(y_true, y_pred) :\n return loss_coeff * y_pred\n \n return _min_pred\n\nclass ScramblerMonitor(Callback):\n def __init__(self, scrambler_model, loss_model, loss_tensors, input_tensors, n_inputs=1, track_mode='batch', batch_freq_dict=None, batch_size=32) :\n \n self.scrambler_model = scrambler_model\n self.loss_model = loss_model\n self.track_mode = track_mode\n self.batch_freq_dict = batch_freq_dict\n self.batch_size = batch_size\n \n self.loss_tensors = loss_tensors\n self.input_tensors = input_tensors\n self.n_inputs = n_inputs\n \n self.batch_history = []\n self.epoch_history = []\n self.nll_loss_history = []\n self.entropy_loss_history = []\n \n self.scores_history = []\n self.pwm_history = []\n\n self.n_epochs = 0\n self.n_batches = 0\n \n self.batch_freq = 10\n if self.batch_freq_dict is not None and 0 in self.batch_freq_dict :\n self.batch_freq = self.batch_freq_dict[0]\n\n nll_loss, entropy_loss, scores, pwms = self._predict_vals()\n\n #Track metrics\n self.batch_history.append(self.n_batches)\n self.epoch_history.append(self.n_epochs)\n self.scores_history.append(scores)\n self.pwm_history.append(pwms)\n self.nll_loss_history.append(nll_loss)\n self.entropy_loss_history.append(entropy_loss)\n \n def _predict_vals(self) :\n \n nll_loss, entropy_loss = self.loss_model.predict(x=self.loss_tensors, batch_size=self.batch_size)\n pred_bundle = self.scrambler_model.predict(x=self.input_tensors, batch_size=self.batch_size)\n \n pwms = []\n scores = []\n for input_ix in range(self.n_inputs) :\n pwm = pred_bundle[self.n_inputs + input_ix]\n score = pred_bundle[3 * self.n_inputs + input_ix]\n \n pwms.append(pwm)\n scores.append(score)\n \n return nll_loss, entropy_loss, scores, pwms\n \n def on_batch_end(self, batch, logs={}) :\n self.n_batches += 1\n \n #if batch == 0 and self.batch_freq_dict is not None and self.n_epochs in self.batch_freq_dict :\n # self.batch_freq = self.batch_freq_dict[self.n_epochs]\n if self.batch_freq_dict is not None and self.n_batches in self.batch_freq_dict :\n self.batch_freq = self.batch_freq_dict[self.n_batches]\n \n if self.track_mode == 'batch' and batch % self.batch_freq == 0 :\n nll_loss, entropy_loss, scores, pwms = self._predict_vals()\n\n #Track metrics\n 
self.batch_history.append(self.n_batches)\n self.epoch_history.append(self.n_epochs)\n self.scores_history.append(scores)\n self.pwm_history.append(pwms)\n self.nll_loss_history.append(nll_loss)\n self.entropy_loss_history.append(entropy_loss)\n\n def on_epoch_end(self, epoch, logs={}) :\n self.n_epochs += 1\n\n if self.track_mode == 'epoch' :\n nll_loss, entropy_loss, scores, pwms = self._predict_vals()\n\n #Track metrics\n self.epoch_history.append(self.n_epochs)\n self.scores_history.append(scores)\n self.pwm_history.append(pwms)\n self.nll_loss_history.append(nll_loss)\n self.entropy_loss_history.append(entropy_loss)\n\nclass LossHistory(keras.callbacks.Callback) :\n \n def __init__(self, loss_names=['nll', 'entropy']) :\n self.loss_names = loss_names\n self.loss_dict = {\n loss_name : []\n for loss_name in loss_names\n }\n\n def on_batch_end(self, batch, logs={}) :\n for loss_name in self.loss_names :\n self.loss_dict[loss_name].append(logs.get(loss_name + '_loss'))\n\ndef initialize_backgrounds(model, background_matrices, model_prefix='') :\n\n flat_background_matrices = []\n\n for k in range(len(background_matrices)) :\n flat_background_matrices.append(background_matrices[k].reshape(1, -1))\n\n flat_background_matrices = np.concatenate(flat_background_matrices, axis=0)\n\n model.get_layer(model_prefix + 'x_mean_dense').set_weights([flat_background_matrices])\n model.get_layer(model_prefix + 'x_mean_dense').trainable = False\n\nclass Scrambler :\n \n def __init__(self, n_inputs=1, n_classes=1, multi_input_mode='siamese', scrambler_mode='inclusion', input_size_x=1, input_size_y=100, n_out_channels=4, input_templates=None, input_backgrounds=None, batch_size=32, n_samples=32, sample_mode='st', zeropad_input=False, mask_dropout=False, network_config={'n_groups' : 1, 'n_resblocks_per_group' : 4, 'n_channels' : 32, 'window_size' : 8, 'dilation_rates' : [1], 'drop_rate' : 0.25, 'norm_mode' : 'instance', 'mask_smoothing' : True, 'mask_smoothing_window_size' : 7, 'mask_smoothing_std' : 1.5, 'mask_drop_scales' : [1, 5], 'mask_min_drop_rate' : 0.0, 'mask_max_drop_rate' : 0.5, 'label_input' : False}) :\n \n self.n_inputs = n_inputs\n self.n_classes = n_classes\n self.multi_input_mode = multi_input_mode\n self.scrambler_mode = scrambler_mode\n self.input_size_x = input_size_x if input_size_x is not None else 1\n self.input_size_y = input_size_y\n self.n_out_channels = n_out_channels\n self.batch_size = batch_size\n self.n_samples = n_samples\n self.sample_mode = sample_mode\n self.zeropad_input = zeropad_input\n \n self.n_groups = network_config['n_groups']\n self.n_resblocks_per_group = network_config['n_resblocks_per_group']\n self.n_channels = network_config['n_channels']\n self.window_size = network_config['window_size']\n self.dilation_rates = network_config['dilation_rates']\n self.drop_rate = network_config['drop_rate']\n self.norm_mode = network_config['norm_mode']\n \n self.mask_smoothing = network_config['mask_smoothing']\n self.mask_smoothing_window_size = network_config['mask_smoothing_window_size']\n self.mask_smoothing_std = network_config['mask_smoothing_std']\n \n self.mask_dropout = mask_dropout\n self.mask_drop_scales = network_config['mask_drop_scales']\n self.mask_min_drop_rate = network_config['mask_min_drop_rate']\n self.mask_max_drop_rate = network_config['mask_max_drop_rate']\n \n self.label_input = network_config['label_input']\n \n self.input_templates = input_templates\n if self.input_templates is None :\n self.input_templates = [np.zeros((input_size_x, input_size_y, 
n_channels))]\n \n self.input_backgrounds = input_backgrounds\n if self.input_backgrounds is None :\n self.input_backgrounds = [np.ones((input_size_x, input_size_y, n_channels)) * (1. / float(n_channels))]\n \n self.input_backgrounds_log = [np.log(input_background) for input_background in input_backgrounds]\n \n mask_smoothing_conv_weight = gaussian(self.mask_smoothing_window_size, self.mask_smoothing_std)\n \n #Load scrambler\n scrambler = load_scrambler_network(\n self.input_size_x,\n self.input_size_y,\n scrambler_mode=self.scrambler_mode,\n n_out_channels=self.n_out_channels,\n n_spatial_dims=1 if self.input_size_x == 1 else 2,\n n_groups=self.n_groups,\n n_resblocks_per_group=self.n_resblocks_per_group,\n n_channels=self.n_channels,\n window_size=self.window_size,\n mask_smoothing=self.mask_smoothing,\n smooth_window_size=self.mask_smoothing_window_size,\n dilation_rates=self.dilation_rates,\n drop_rate=self.drop_rate,\n norm_mode=self.norm_mode,\n mask_dropout=self.mask_dropout,\n mask_drop_scales=self.mask_drop_scales,\n mask_min_drop_rate=self.mask_min_drop_rate,\n mask_max_drop_rate=self.mask_max_drop_rate,\n use_label_input=self.label_input\n )\n \n self.scrambler = scrambler\n\n #Load sampler\n sampler = build_sampler(self.batch_size, self.input_size_x, self.input_size_y, n_classes=len(self.input_templates), n_samples=self.n_samples, sample_mode=self.sample_mode, n_channels=self.n_out_channels)\n \n self.sampler = sampler\n \n #Build scrambler model\n scrambler_classes = []\n scrambler_inputs = []\n scrambler_drops = []\n for input_ix in range(self.n_inputs) :\n scrambler_classes.append(Input(shape=(1,), name='scrambler_group_' + str(input_ix)))\n scrambler_inputs.append(Input(shape=(self.input_size_x, self.input_size_y, self.n_out_channels), name='scrambler_input_' + str(input_ix)))\n if self.mask_dropout :\n scrambler_drops.append(Input(shape=(self.input_size_x, self.input_size_y, 1), name='scrambler_drop_' + str(input_ix)))\n else :\n scrambler_drops.append(None)\n \n scrambler_label = None\n if self.label_input :\n scrambler_label = Input(shape=(self.n_classes,), name='scrambler_label')\n \n scrambled_logits = []\n importance_scores = []\n pwm_logits = []\n pwms = []\n sampled_pwms = []\n if self.multi_input_mode == 'siamese' or self.n_inputs == 1 :\n \n for input_ix in range(self.n_inputs) :\n \n scrambled_logit, importance_score = scrambler(scrambler_inputs[input_ix], scrambler_drops[input_ix], scrambler_label)\n\n scrambled_logits.append(scrambled_logit)\n importance_scores.append(importance_score)\n else :\n \n scrambler_input_concat = Lambda(lambda x: K.concatenate(x, axis=2))\n scrambler_drop_concat = Lambda(lambda x: K.concatenate(x, axis=2))\n \n scrambler_input = scrambler_input_concat(scrambler_inputs)\n scrambler_drop = scrambler_input_concat(scrambler_drops) if self.mask_dropout else None\n\n scrambled_logit, importance_score = scrambler(scrambler_input, scrambler_drop, scrambler_label)\n\n scrambler_logit_split = Lambda(lambda x: [x[:, :, k*input_size_y:(k+1)*input_size_y, :] for k in range(self.n_inputs)])\n scrambler_score_split = Lambda(lambda x: [x[:, :, k*input_size_y:(k+1)*input_size_y, :] for k in range(self.n_inputs)])\n \n scrambled_logits = scrambler_logit_split(scrambled_logit)\n importance_scores = scrambler_score_split(importance_score)\n\n for input_ix in range(self.n_inputs) :\n \n pwm_logit, pwm, sampled_pwm, _, sampled_mask = sampler(scrambler_classes[input_ix], scrambled_logits[input_ix])\n\n if zeropad_input :\n zeropad_layer = Lambda(lambda 
x: x[0] * x[1], name='zeropad_' + str(input_ix))\n sampled_pwm = zeropad_layer([sampled_pwm, sampled_mask])\n\n pwm_logits.append(pwm_logit)\n pwms.append(pwm)\n sampled_pwms.append(sampled_pwm)\n \n scrambler_model = Model(\n scrambler_classes + scrambler_inputs + (scrambler_drops if scrambler_drops[0] is not None else []) + ([scrambler_label] if scrambler_label is not None else []),\n pwm_logits + pwms + sampled_pwms + importance_scores\n )\n\n #Initialize Templates and Masks\n initialize_templates(scrambler_model, self.input_templates, self.input_backgrounds_log)\n\n #Freeze gaussian smoothing kernel\n if self.mask_smoothing :\n scrambler_model.get_layer(\"scrambler_smooth_conv\").set_weights([\n np.reshape(np.array(mask_smoothing_conv_weight), (1, self.mask_smoothing_window_size, 1, 1) if self.input_size_x == 1 else (self.mask_smoothing_window_size, self.mask_smoothing_window_size, 1, 1))\n ])\n scrambler_model.get_layer(\"scrambler_smooth_conv\").trainable = False\n\n scrambler_model.compile(\n optimizer=keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999),\n loss='mean_squared_error'\n )\n \n self.scrambler_model = scrambler_model\n \n def load_model(self, model_path) :\n #Load model\n \n self.scrambler_model.load_weights(model_path, by_name=True)\n print('Loaded scrambler model from %s ' % (model_path))\n \n def save_model(self, model_path) :\n #Save model and weights\n \n self.scrambler_model.save(model_path)\n print('Saved scrambler model at %s ' % (model_path))\n\n def get_model(self) :\n return self.scrambler_model\n\n def interpret(self, x, y=None, drop=None, group=None) :\n \n if not isinstance(x, list) :\n x = [x]\n \n signal_is_1d = False\n if len(x[0].shape) == 3 :\n signal_is_1d = True\n \n for i in range(len(x)) :\n x[i] = x[i][:, None, ...]\n \n if group is None :\n group = [np.zeros((x[0].shape[0], 1)) for input_ix in range(self.n_inputs)]\n \n if not isinstance(group, list) :\n group = [group]\n \n if drop is None :\n drop = [\n np.ones((x[0].shape[0], x[0].shape[1], x[0].shape[2], 1)) for input_ix in range(self.n_inputs)\n ] if self.mask_dropout else []\n \n if not isinstance(drop, list) :\n drop = [drop]\n \n label = []\n if y is not None :\n label = [y] if self.label_input else []\n \n input_tensors = group + x + drop + label\n \n #Pad data\n n_pad = self.batch_size - x[0].shape[0] % self.batch_size\n if n_pad == self.batch_size :\n n_pad = 0\n \n if n_pad > 0 :\n input_tensors = [\n np.concatenate([input_tensor, np.zeros(tuple([n_pad] + list(input_tensor.shape)[1:]))], axis=0)\n for input_tensor in input_tensors\n ]\n\n pred_bundle = self.scrambler_model.predict(x=input_tensors, batch_size=self.batch_size, verbose=True)\n \n if n_pad > 0 :\n pred_bundle = [\n pred_bundle_member[:-n_pad, ...] 
for pred_bundle_member in pred_bundle\n ]\n \n pwms = []\n samples = []\n scores = []\n for input_ix in range(self.n_inputs) :\n pwm = pred_bundle[self.n_inputs + input_ix]\n sample = pred_bundle[2 * self.n_inputs + input_ix]\n score = pred_bundle[3 * self.n_inputs + input_ix]\n \n if signal_is_1d :\n pwms.append(pwm[:, 0, ...])\n samples.append(sample[:, :, 0, ...])\n scores.append(score[:, 0, ...])\n else:\n pwms.append(pwm)\n samples.append(sample)\n scores.append(score)\n \n if len(pwms) <= 1 :\n return pwms[0], samples[0], scores[0]\n \n return pwms, samples, scores\n \n def train(self, predictor, x_train, y_train, x_test, y_test, n_epochs, extra_input_train=None, extra_input_test=None, group_train=None, group_test=None, monitor_test_indices=None, monitor_batch_freq_dict={0 : 1, 1 : 5, 5 : 10}, adam_lr=0.0001, adam_beta_1=0.5, adam_beta_2=0.9, nll_mode='reconstruction', predictor_task='classification', custom_loss_func=None, reference='predictor', entropy_mode='target', entropy_bits=0., entropy_weight=1.) :\n \n if not isinstance(x_train, list) :\n x_train = [x_train]\n x_test = [x_test]\n \n if group_train is None :\n group_train = [np.zeros((x_train[0].shape[0], 1)) for input_ix in range(self.n_inputs)]\n group_test = [np.zeros((x_test[0].shape[0], 1)) for input_ix in range(self.n_inputs)]\n \n if not isinstance(group_train, list) :\n group_train = [group_train]\n group_test = [group_test]\n \n if extra_input_train is None :\n extra_input_train = []\n extra_input_test = []\n \n if not isinstance(extra_input_train, list) :\n extra_input_train = [extra_input_train]\n extra_input_test = [extra_input_test]\n \n n_trim_train = x_train[0].shape[0] % self.batch_size\n n_trim_test = x_test[0].shape[0] % self.batch_size\n \n if n_trim_train > 0 :\n print(\"(Trimming size of training data to \" + str(int(x_train[0].shape[0] - n_trim_train)) + \" examples).\")\n \n for i in range(len(x_train)) :\n x_train[i] = x_train[i][:-n_trim_train]\n \n for i in range(len(group_train)) :\n group_train[i] = group_train[i][:-n_trim_train]\n \n for i in range(len(extra_input_train)) :\n extra_input_train[i] = extra_input_train[i][:-n_trim_train]\n \n y_train = y_train[:-n_trim_train]\n \n if n_trim_test > 0 :\n print(\"(Trimming size of test data to \" + str(int(x_test[0].shape[0] - n_trim_test)) + \" examples).\")\n \n for i in range(len(x_test)) :\n x_test[i] = x_test[i][:-n_trim_test]\n \n for i in range(len(group_test)) :\n group_test[i] = group_test[i][:-n_trim_test]\n \n for i in range(len(extra_input_test)) :\n extra_input_test[i] = extra_input_test[i][:-n_trim_test]\n \n y_test = y_test[:-n_trim_test]\n \n if monitor_test_indices is not None and len(monitor_test_indices) % self.batch_size > 0 :\n monitor_test_indices = monitor_test_indices[:-len(monitor_test_indices) % self.batch_size]\n if len(monitor_test_indices) <= 0 :\n monitor_test_indices = None\n \n signal_is_1d = False\n if len(x_train[0].shape) == 3 :\n signal_is_1d = True\n \n for i in range(len(x_train)) :\n x_train[i] = x_train[i][:, None, ...]\n x_test[i] = x_test[i][:, None, ...]\n \n #Freeze predictor\n predictor.trainable = False\n predictor.compile(optimizer=keras.optimizers.SGD(lr=0.1), loss='mean_squared_error')\n \n #Build loss model pipeline\n\n #Define model inputs\n scrambler_classes = []\n scrambler_inputs = []\n scrambler_drops = []\n scrambler_extra_inputs = []\n for input_ix in range(self.n_inputs) :\n scrambler_classes.append(Input(shape=(1,), name='t_scrambler_group_' + str(input_ix)))\n 
scrambler_inputs.append(Input(shape=(self.input_size_x, self.input_size_y, self.n_out_channels), name='t_scrambler_input_' + str(input_ix)))\n if self.mask_dropout :\n scrambler_drops.append(Input(shape=(self.input_size_x, self.input_size_y, 1), name='t_scrambler_drop_' + str(input_ix)))\n else :\n scrambler_drops.append(None)\n \n scrambler_label = None\n if self.label_input or reference == 'label' :\n scrambler_label = Input(shape=(self.n_classes,), name='t_scrambler_label')\n \n for extra_in_ix, extra_in in enumerate(extra_input_train) :\n scrambler_extra_inputs.append(Input(shape=tuple(list(extra_in.shape)[1:]), name='t_scrambler_extra_' + str(extra_in_ix)))\n \n scrambled_logits = []\n importance_scores = []\n pwm_logits = []\n pwms = []\n sampled_pwms = []\n if self.multi_input_mode == 'siamese' or self.n_inputs == 1 :\n \n for input_ix in range(self.n_inputs) :\n \n scrambled_logit, importance_score = self.scrambler(scrambler_inputs[input_ix], scrambler_drops[input_ix], scrambler_label)\n\n scrambled_logits.append(scrambled_logit)\n importance_scores.append(importance_score)\n else :\n \n scrambler_input_concat = Lambda(lambda x: K.concatenate(x, axis=2))\n scrambler_drop_concat = Lambda(lambda x: K.concatenate(x, axis=2))\n \n scrambler_input = scrambler_input_concat(scrambler_inputs)\n scrambler_drop = scrambler_input_concat(scrambler_drops) if self.mask_dropout else None\n\n scrambled_logit, importance_score = self.scrambler(scrambler_input, scrambler_drop, scrambler_label)\n\n scrambler_logit_split = Lambda(lambda x: [x[:, :, k*self.input_size_y:(k+1)*self.input_size_y, :] for k in range(self.n_inputs)])\n scrambler_score_split = Lambda(lambda x: [x[:, :, k*self.input_size_y:(k+1)*self.input_size_y, :] for k in range(self.n_inputs)])\n \n scrambled_logits = scrambler_logit_split(scrambled_logit)\n importance_scores = scrambler_score_split(importance_score)\n\n deflated_sampled_pwms = []\n pwm_masks = []\n sampled_masks = []\n for input_ix in range(self.n_inputs) :\n \n pwm_logit, pwm, sampled_pwm, pwm_mask, sampled_mask = self.sampler(scrambler_classes[input_ix], scrambled_logits[input_ix])\n\n if self.zeropad_input :\n zeropad_layer = Lambda(lambda x: x[0] * x[1], name='t_zeropad_' + str(input_ix))\n sampled_pwm = zeropad_layer([sampled_pwm, sampled_mask])\n\n pwm_logits.append(pwm_logit)\n pwms.append(pwm)\n sampled_pwms.append(sampled_pwm)\n pwm_masks.append(pwm_mask)\n sampled_masks.append(sampled_mask)\n\n #Define layer to deflate sample axis\n deflate_scrambled_sample = Lambda(lambda x: K.reshape(x, (self.batch_size * self.n_samples, self.input_size_x, self.input_size_y, self.n_out_channels)), name='t_deflate_scrambled_sample_' + str(input_ix))\n\n #Deflate sample axis\n deflated_sampled_pwm = deflate_scrambled_sample(sampled_pwm)\n deflated_sampled_pwms.append(deflated_sampled_pwm)\n\n #Make reference prediction on non-scrambled input sequence\n switch_to_1d_non_scrambled = Lambda(lambda x, signal_is_1d=signal_is_1d: x[:, 0, ...] if signal_is_1d else x, name='t_switch_to_1d_non_scrambled')\n scrambler_inputs_to_pred = [switch_to_1d_non_scrambled(scrambler_input) for scrambler_input in scrambler_inputs]\n y_pred_non_scrambled_deflated = predictor(scrambler_inputs_to_pred + scrambler_extra_inputs) if reference == 'predictor' else scrambler_label\n\n #Make prediction on scrambled sequence samples\n switch_to_1d_scrambled = Lambda(lambda x, signal_is_1d=signal_is_1d: x[:, 0, ...] 
if signal_is_1d else x, name='t_switch_to_1d_scrambled')\n deflated_sampled_pwms_to_pred = [switch_to_1d_scrambled(deflated_sampled_pwm) for deflated_sampled_pwm in deflated_sampled_pwms]\n scrambler_extra_inputs_repeated = []\n for extra_in_ix, scrambler_extra_inp in enumerate(scrambler_extra_inputs) :\n repeat_scrambler_extra_input = Lambda(lambda x: K.repeat_elements(x, self.n_samples, axis=0), name='repeat_scrambler_extra_input_' + str(extra_in_ix))\n scrambler_extra_inputs_repeated.append(repeat_scrambler_extra_input(scrambler_extra_inp))\n y_pred_scrambled_deflated = predictor(deflated_sampled_pwms_to_pred + scrambler_extra_inputs_repeated)\n\n #Define layer to inflate sample axis\n inflate_non_scrambled_prediction = Lambda(lambda x: K.tile(K.expand_dims(x, axis=1), (1, self.n_samples, 1)), name='t_inflate_non_scrambled_prediction')\n inflate_scrambled_prediction = Lambda(lambda x: K.reshape(x, (self.batch_size, self.n_samples, self.n_classes)), name='t_inflate_scrambled_prediction')\n\n #Inflate sample axis\n y_pred_non_scrambled = inflate_non_scrambled_prediction(y_pred_non_scrambled_deflated)\n y_pred_scrambled = inflate_scrambled_prediction(y_pred_scrambled_deflated)\n \n #Define background matrix embeddings\n seq_reshape_layer = Reshape((self.input_size_x, self.input_size_y, self.n_out_channels))\n\n x_mean_dense = Embedding(len(self.input_templates), self.input_size_x * self.input_size_y * self.n_out_channels, embeddings_initializer='zeros', name='x_mean_dense')\n \n x_means = [seq_reshape_layer(x_mean_dense(scrambler_class)) for scrambler_class in scrambler_classes]\n\n scrambler_mode_coeff = 1.\n if self.scrambler_mode == 'occlusion' and reference == 'predictor' :\n scrambler_mode_coeff = -1.\n elif self.scrambler_mode == 'occlusion' and reference == 'label' :\n if self.n_classes <= 1 :\n y_pred_non_scrambled = Lambda(lambda x: 1. 
- x, name=\"t_invert_binary_label\")(y_pred_non_scrambled)\n \n #NLL cost\n nll_loss_func = None\n \n if custom_loss_func is not None :\n nll_loss_func = custom_loss_func\n scrambler_mode_coeff = 1.\n else :\n if nll_mode == 'reconstruction' and predictor_task == 'classification' :\n if self.n_classes > 1 :\n nll_loss_func = get_softmax_kl_divergence()\n else :\n nll_loss_func = get_sigmoid_kl_divergence()\n elif nll_mode == 'reconstruction' and predictor_task == 'classification_sym' :\n nll_loss_func = get_symmetric_sigmoid_kl_divergence()\n elif nll_mode == 'maximization' and predictor_task == 'classification' :\n nll_loss_func = get_sigmoid_max_nll()\n elif nll_mode == 'minimization' and predictor_task == 'classification' :\n nll_loss_func = get_sigmoid_min_nll()\n elif nll_mode == 'reconstruction' and predictor_task == 'regression' :\n nll_loss_func = get_mse()\n elif nll_mode == 'maximization' and predictor_task == 'regression' :\n nll_loss_func = get_linear_max_nll()\n elif nll_mode == 'minimization' and predictor_task == 'regression' :\n nll_loss_func = get_linear_min_nll()\n\n #Entropy cost\n entropy_loss_func = None\n if entropy_mode == 'target' :\n entropy_loss_func = get_target_entropy_sme_masked(x_start=0, x_end=self.input_size_x, y_start=0, y_end=self.input_size_y, target_bits=entropy_bits)\n elif entropy_mode == 'maximization' :\n entropy_loss_func = get_margin_entropy_ame_masked(x_start=0, x_end=self.input_size_x, y_start=0, y_end=self.input_size_y, max_bits=entropy_bits)\n\n #Execute NLL cost\n nll_loss = Lambda(lambda x: scrambler_mode_coeff * nll_loss_func(x[0], x[1]), name='nll')([\n y_pred_non_scrambled,\n y_pred_scrambled\n ])\n\n #Execute entropy cost\n entropy_losses = []\n for input_ix in range(self.n_inputs) :\n entropy_loss = Lambda(lambda x: K.expand_dims(entropy_loss_func(x[0], x[1], x[2]), axis=-1), name='entropy_' + str(input_ix))([\n pwms[input_ix],\n pwm_masks[input_ix],\n x_means[input_ix]\n ])\n entropy_losses.append(entropy_loss)\n \n entropy_loss = None\n if len(entropy_losses) > 1 :\n entropy_loss = Lambda(lambda x: K.mean(x, axis=-1), name='entropy')(Concatenate(axis=-1)(entropy_losses))\n else :\n entropy_loss = Lambda(lambda x: K.mean(x[0], axis=-1), name='entropy')(entropy_losses)\n \n loss_model = Model(\n scrambler_classes + scrambler_inputs + (scrambler_drops if scrambler_drops[0] is not None else []) + ([scrambler_label] if scrambler_label is not None else []) + scrambler_extra_inputs,\n [nll_loss, entropy_loss]\n )\n\n #Initialize Templates and Masks\n initialize_templates(loss_model, self.input_templates, self.input_backgrounds_log)\n\n #Initialize Sequence Length Parameters\n initialize_backgrounds(loss_model, self.input_backgrounds)\n\n loss_model.compile(\n optimizer=keras.optimizers.Adam(lr=adam_lr, beta_1=adam_beta_1, beta_2=adam_beta_2),\n loss={\n 'nll' : get_weighted_loss(loss_coeff=1.0),\n 'entropy' : get_weighted_loss(loss_coeff=entropy_weight)\n }\n )\n \n self.loss_model = loss_model\n\n #Execute training procedure\n callbacks = []\n \n dummy_train = np.zeros((x_train[0].shape[0], 1))\n dummy_test = np.zeros((x_test[0].shape[0], 1))\n \n drop_train = [\n np.ones((x_train[0].shape[0], x_train[0].shape[1], x_train[0].shape[2], 1)) for input_ix in range(self.n_inputs)\n ] if self.mask_dropout else []\n drop_test = [\n np.ones((x_test[0].shape[0], x_test[0].shape[1], x_test[0].shape[2], 1)) for input_ix in range(self.n_inputs)\n ] if self.mask_dropout else []\n \n label_train = [y_train] if (self.label_input or reference == 'label') 
else []\n label_test = [y_test] if (self.label_input or reference == 'label') else []\n \n monitor = None\n if monitor_test_indices is not None :\n \n group_track = [g[monitor_test_indices] for g in group_test]\n x_track = [x[monitor_test_indices] for x in x_test]\n drop_track = [d[monitor_test_indices] for d in drop_test]\n label_track = [l[monitor_test_indices] for l in label_test]\n extra_input_track = [e[monitor_test_indices] for e in extra_input_test]\n \n monitor_loss_tensors = group_track + x_track + drop_track + label_track + extra_input_track\n monitor_tensors = group_track + x_track + drop_track + (label_track if self.label_input else [])\n \n monitor = ScramblerMonitor(self.scrambler_model, self.loss_model, monitor_loss_tensors, monitor_tensors, n_inputs=self.n_inputs, track_mode='batch', batch_freq_dict=monitor_batch_freq_dict, batch_size=self.batch_size)\n callbacks.append(monitor)\n\n train_history = loss_model.fit(\n group_train + x_train + drop_train + label_train + extra_input_train,\n [dummy_train, dummy_train],\n shuffle=True,\n epochs=n_epochs,\n batch_size=self.batch_size,\n validation_data=(\n group_test + x_test + drop_test + label_test + extra_input_test,\n [dummy_test, dummy_test]\n ),\n callbacks=callbacks\n )\n \n train_history = train_history.history\n \n if monitor is not None :\n train_history['monitor_batches'] = monitor.batch_history\n train_history['monitor_epochs'] = monitor.epoch_history\n train_history['monitor_importance_scores'] = monitor.scores_history\n train_history['monitor_pwms'] = monitor.pwm_history\n train_history['monitor_nll_losses'] = monitor.nll_loss_history\n train_history['monitor_entropy_losses'] = monitor.entropy_loss_history\n \n return train_history\n \n def finetune(self, predictor, x, y, batch_size, n_iters, drop=None, extra_input=None, group=None, norm_mode='instance', max_score_clip=4., adam_lr=0.01, adam_beta_1=0.5, adam_beta_2=0.9, nll_mode='reconstruction', predictor_task='classification', custom_loss_func=None, reference='predictor', entropy_mode='target', entropy_bits=0., entropy_weight=1.) :\n \n #Collect pre-trained importance scores\n print(\"Generating pre-trained scores...\")\n _, _, pretrained_scores = self.interpret(x, y=y, drop=drop, group=group)\n \n #Load finetuner\n finetuner = load_finetuning_model(\n batch_size,\n self.input_size_x,\n self.input_size_y,\n scrambler_mode=self.scrambler_mode,\n n_out_channels=self.n_out_channels,\n n_spatial_dims=1 if self.input_size_x == 1 else 2,\n mask_smoothing=self.mask_smoothing,\n smooth_window_size=self.mask_smoothing_window_size,\n norm_mode=norm_mode,\n max_score_clip=max_score_clip\n )\n \n return self._optimize(predictor, finetuner, [pretrained_scores], x, y, batch_size, n_iters, 'finetune', drop=drop, extra_input=extra_input, group=group, adam_lr=adam_lr, adam_beta_1=adam_beta_1, adam_beta_2=adam_beta_2, nll_mode=nll_mode, predictor_task=predictor_task, custom_loss_func=custom_loss_func, reference=reference, entropy_mode=entropy_mode, entropy_bits=entropy_bits, entropy_weight=entropy_weight)\n \n def optimize(self, predictor, x, y, batch_size, n_iters, drop=None, extra_input=None, group=None, norm_mode='instance', adam_lr=0.01, adam_beta_1=0.5, adam_beta_2=0.9, nll_mode='reconstruction', predictor_task='classification', custom_loss_func=None, reference='predictor', entropy_mode='target', entropy_bits=0., entropy_weight=1.) 
:\n \n #Load optimizer\n optimizer = load_optimization_model(\n batch_size,\n self.input_size_x,\n self.input_size_y,\n scrambler_mode=self.scrambler_mode,\n n_out_channels=self.n_out_channels,\n n_spatial_dims=1 if self.input_size_x == 1 else 2,\n mask_smoothing=self.mask_smoothing,\n smooth_window_size=self.mask_smoothing_window_size,\n norm_mode=norm_mode\n )\n \n return self._optimize(predictor, optimizer, [], x, y, batch_size, n_iters, 'optimize', drop=drop, extra_input=extra_input, group=group, adam_lr=adam_lr, adam_beta_1=adam_beta_1, adam_beta_2=adam_beta_2, nll_mode=nll_mode, predictor_task=predictor_task, custom_loss_func=custom_loss_func, reference=reference, entropy_mode=entropy_mode, entropy_bits=entropy_bits, entropy_weight=entropy_weight)\n \n def _optimize(self, predictor, finetuner, pretrained_scores, x, y, batch_size, n_iters, opt_mode, drop=None, extra_input=None, group=None, adam_lr=0.01, adam_beta_1=0.5, adam_beta_2=0.9, nll_mode='reconstruction', predictor_task='classification', custom_loss_func=None, reference='predictor', entropy_mode='target', entropy_bits=0., entropy_weight=1.) :\n \n if not isinstance(x, list) :\n x = [x]\n \n signal_is_1d = False\n if len(x[0].shape) == 3 :\n signal_is_1d = True\n \n for i in range(len(x)) :\n x[i] = x[i][:, None, ...]\n \n if group is None :\n group = [np.zeros((x[0].shape[0], 1)) for input_ix in range(self.n_inputs)]\n \n if not isinstance(group, list) :\n group = [group]\n \n if extra_input is None :\n extra_input = []\n \n if not isinstance(extra_input, list) :\n extra_input = [extra_input]\n \n if drop is None :\n drop = [\n np.ones((x[0].shape[0], x[0].shape[1], x[0].shape[2], 1)) for input_ix in range(self.n_inputs)\n ]\n \n if not isinstance(drop, list) :\n drop = [drop]\n \n #Freeze predictor\n predictor.trainable = False\n predictor.compile(optimizer=keras.optimizers.SGD(lr=0.1), loss='mean_squared_error')\n \n #Build finetuning model\n mask_smoothing_conv_weight = gaussian(self.mask_smoothing_window_size, self.mask_smoothing_std)\n \n #self.finetuner = finetuner\n\n #Load sampler\n finetuning_sampler = build_sampler(batch_size, self.input_size_x, self.input_size_y, n_classes=len(self.input_templates), n_samples=self.n_samples, sample_mode=self.sample_mode, n_channels=self.n_out_channels, model_prefix='ft_')\n \n #self.finetuning_sampler = finetuning_sampler\n \n #Build scrambler model\n scrambler_classes = []\n scrambler_inputs = []\n scrambler_drops = []\n scrambler_pretrained_scores = []\n for input_ix in range(self.n_inputs) :\n scrambler_classes.append(Input(batch_shape=(batch_size, 1), name='f_scrambler_group_' + str(input_ix)))\n scrambler_inputs.append(Input(batch_shape=(batch_size, self.input_size_x, self.input_size_y, self.n_out_channels), name='f_scrambler_input_' + str(input_ix)))\n scrambler_drops.append(Input(batch_shape=(batch_size, self.input_size_x, self.input_size_y, 1), name='f_scrambler_drop_' + str(input_ix)))\n if opt_mode == 'finetune' :\n scrambler_pretrained_scores.append(Input(batch_shape=(batch_size, self.input_size_x, self.input_size_y, 1), name='f_scrambler_pretrained_scores_' + str(input_ix)))\n \n scrambled_logits = []\n importance_scores = []\n pwm_logits = []\n pwms = []\n sampled_pwms = []\n if self.multi_input_mode == 'siamese' or self.n_inputs == 1 :\n \n for input_ix in range(self.n_inputs) :\n \n scrambled_logit, importance_score = None, None\n if opt_mode == 'finetune' :\n scrambled_logit, importance_score = finetuner(scrambler_inputs[input_ix], scrambler_drops[input_ix], 
scrambler_pretrained_scores[input_ix])\n else :\n scrambled_logit, importance_score = finetuner(scrambler_inputs[input_ix], scrambler_drops[input_ix])\n \n scrambled_logits.append(scrambled_logit)\n importance_scores.append(importance_score)\n else :\n \n scrambler_input_concat = Lambda(lambda x: K.concatenate(x, axis=2))\n scrambler_drop_concat = Lambda(lambda x: K.concatenate(x, axis=2))\n scrambler_pretrained_score_concat = Lambda(lambda x: K.concatenate(x, axis=2))\n \n scrambler_input = scrambler_input_concat(scrambler_inputs)\n scrambler_drop = scrambler_input_concat(scrambler_drops) if self.mask_dropout else None\n scrambler_pretrained_score = scrambler_pretrained_score_concat(scrambler_pretrained_scores) if opt_mode == 'finetune' else None\n \n scrambled_logit, importance_score = None, None\n if opt_mode == 'finetune' :\n scrambled_logit, importance_score = finetuner(scrambler_input, scrambler_drop, scrambler_pretrained_score)\n else :\n scrambled_logit, importance_score = finetuner(scrambler_input, scrambler_drop)\n \n scrambler_logit_split = Lambda(lambda x: [x[:, :, k*input_size_y:(k+1)*input_size_y, :] for k in range(self.n_inputs)])\n scrambler_score_split = Lambda(lambda x: [x[:, :, k*input_size_y:(k+1)*input_size_y, :] for k in range(self.n_inputs)])\n \n scrambled_logits = scrambler_logit_split(scrambled_logit)\n importance_scores = scrambler_score_split(importance_score)\n\n for input_ix in range(self.n_inputs) :\n \n pwm_logit, pwm, sampled_pwm, _, sampled_mask = finetuning_sampler(scrambler_classes[input_ix], scrambled_logits[input_ix])\n\n if self.zeropad_input :\n zeropad_layer = Lambda(lambda x: x[0] * x[1], name='f_zeropad_' + str(input_ix))\n sampled_pwm = zeropad_layer([sampled_pwm, sampled_mask])\n\n pwm_logits.append(pwm_logit)\n pwms.append(pwm)\n sampled_pwms.append(sampled_pwm)\n \n finetuning_model = Model(\n scrambler_classes + scrambler_inputs + scrambler_drops + scrambler_pretrained_scores,\n pwm_logits + pwms + sampled_pwms + importance_scores\n )\n\n #Initialize Templates and Masks\n initialize_templates(finetuning_model, self.input_templates, self.input_backgrounds_log, model_prefix='ft_')\n\n #Freeze gaussian smoothing kernel\n if self.mask_smoothing :\n finetuning_model.get_layer(\"ft_scrambler_smooth_conv\").set_weights([\n np.reshape(np.array(mask_smoothing_conv_weight), (1, self.mask_smoothing_window_size, 1, 1) if input_size_x == 1 else (self.mask_smoothing_window_size, self.mask_smoothing_window_size, 1, 1))\n ])\n finetuning_model.get_layer(\"ft_scrambler_smooth_conv\").trainable = False\n\n finetuning_model.compile(\n optimizer=keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999),\n loss='mean_squared_error'\n )\n \n #self.finetuning_model = finetuning_model\n \n #Build loss model pipeline\n \n #Define model inputs\n scrambler_classes = []\n scrambler_inputs = []\n scrambler_drops = []\n scrambler_pretrained_scores = []\n scrambler_extra_inputs = []\n for input_ix in range(self.n_inputs) :\n scrambler_classes.append(Input(batch_shape=(batch_size, 1), name='ft_scrambler_group_' + str(input_ix)))\n scrambler_inputs.append(Input(batch_shape=(batch_size, self.input_size_x, self.input_size_y, self.n_out_channels), name='ft_scrambler_input_' + str(input_ix)))\n scrambler_drops.append(Input(batch_shape=(batch_size, self.input_size_x, self.input_size_y, 1), name='ft_scrambler_drop_' + str(input_ix)))\n if opt_mode == 'finetune' :\n scrambler_pretrained_scores.append(Input(batch_shape=(batch_size, self.input_size_x, self.input_size_y, 1), 
name='ft_scrambler_pretrained_scores_' + str(input_ix)))\n \n scrambler_label = None\n if reference == 'label' :\n scrambler_label = Input(batch_shape=(batch_size, self.n_classes), name='ft_scrambler_label')\n \n for extra_in_ix, extra_in in enumerate(extra_input) :\n scrambler_extra_inputs.append(Input(batch_shape=tuple([batch_size] + list(extra_in.shape)[1:]), name='ft_scrambler_extra_' + str(extra_in_ix)))\n \n scrambled_logits = []\n importance_scores = []\n pwm_logits = []\n pwms = []\n sampled_pwms = []\n if self.multi_input_mode == 'siamese' or self.n_inputs == 1 :\n \n for input_ix in range(self.n_inputs) :\n \n scrambled_logit, importance_score = None, None\n if opt_mode == 'finetune' :\n scrambled_logit, importance_score = finetuner(scrambler_inputs[input_ix], scrambler_drops[input_ix], scrambler_pretrained_scores[input_ix])\n else :\n scrambled_logit, importance_score = finetuner(scrambler_inputs[input_ix], scrambler_drops[input_ix])\n \n scrambled_logits.append(scrambled_logit)\n importance_scores.append(importance_score)\n else :\n \n scrambler_input_concat = Lambda(lambda x: K.concatenate(x, axis=2))\n scrambler_drop_concat = Lambda(lambda x: K.concatenate(x, axis=2))\n scrambler_pretrained_score_concat = Lambda(lambda x: K.concatenate(x, axis=2))\n \n scrambler_input = scrambler_input_concat(scrambler_inputs)\n scrambler_drop = scrambler_input_concat(scrambler_drops) if self.mask_dropout else None\n scrambler_pretrained_score = scrambler_pretrained_score_concat(scrambler_pretrained_scores) if opt_mode == 'finetune' else None\n \n scrambled_logit, importance_score = None, None\n if opt_mode == 'finetune' :\n scrambled_logit, importance_score = finetuner(scrambler_input, scrambler_drop, scrambler_pretrained_score)\n else :\n scrambled_logit, importance_score = finetuner(scrambler_input, scrambler_drop)\n \n scrambler_logit_split = Lambda(lambda x: [x[:, :, k*input_size_y:(k+1)*input_size_y, :] for k in range(self.n_inputs)])\n scrambler_score_split = Lambda(lambda x: [x[:, :, k*input_size_y:(k+1)*input_size_y, :] for k in range(self.n_inputs)])\n \n scrambled_logits = scrambler_logit_split(scrambled_logit)\n importance_scores = scrambler_score_split(importance_score)\n\n deflated_sampled_pwms = []\n pwm_masks = []\n sampled_masks = []\n for input_ix in range(self.n_inputs) :\n \n pwm_logit, pwm, sampled_pwm, pwm_mask, sampled_mask = finetuning_sampler(scrambler_classes[input_ix], scrambled_logits[input_ix])\n\n if self.zeropad_input :\n zeropad_layer = Lambda(lambda x: x[0] * x[1], name='ft_zeropad_' + str(input_ix))\n sampled_pwm = zeropad_layer([sampled_pwm, sampled_mask])\n\n pwm_logits.append(pwm_logit)\n pwms.append(pwm)\n sampled_pwms.append(sampled_pwm)\n pwm_masks.append(pwm_mask)\n sampled_masks.append(sampled_mask)\n\n #Define layer to deflate sample axis\n deflate_scrambled_sample = Lambda(lambda x: K.reshape(x, (self.batch_size * self.n_samples, self.input_size_x, self.input_size_y, self.n_out_channels)), name='t_deflate_scrambled_sample_' + str(input_ix))\n\n #Deflate sample axis\n deflated_sampled_pwm = deflate_scrambled_sample(sampled_pwm)\n deflated_sampled_pwms.append(deflated_sampled_pwm)\n\n #Make reference prediction on non-scrambled input sequence\n switch_to_1d_non_scrambled = Lambda(lambda x, signal_is_1d=signal_is_1d: x[:, 0, ...] 
if signal_is_1d else x, name='t_switch_to_1d_non_scrambled')\n scrambler_inputs_to_pred = [switch_to_1d_non_scrambled(scrambler_input) for scrambler_input in scrambler_inputs]\n y_pred_non_scrambled_deflated = predictor(scrambler_inputs_to_pred + scrambler_extra_inputs) if reference == 'predictor' else scrambler_label\n\n #Make prediction on scrambled sequence samples\n switch_to_1d_scrambled = Lambda(lambda x, signal_is_1d=signal_is_1d: x[:, 0, ...] if signal_is_1d else x, name='t_switch_to_1d_scrambled')\n deflated_sampled_pwms_to_pred = [switch_to_1d_scrambled(deflated_sampled_pwm) for deflated_sampled_pwm in deflated_sampled_pwms]\n scrambler_extra_inputs_repeated = []\n for extra_in_ix, scrambler_extra_inp in enumerate(scrambler_extra_inputs) :\n repeat_scrambler_extra_input = Lambda(lambda x: K.repeat_elements(x, self.n_samples, axis=0), name='repeat_scrambler_extra_input_' + str(extra_in_ix))\n scrambler_extra_inputs_repeated.append(repeat_scrambler_extra_input(scrambler_extra_inp))\n y_pred_scrambled_deflated = predictor(deflated_sampled_pwms_to_pred + scrambler_extra_inputs_repeated)\n\n #Define layer to inflate sample axis\n inflate_non_scrambled_prediction = Lambda(lambda x: K.tile(K.expand_dims(x, axis=1), (1, self.n_samples, 1)), name='ft_inflate_non_scrambled_prediction')\n inflate_scrambled_prediction = Lambda(lambda x: K.reshape(x, (self.batch_size, self.n_samples, self.n_classes)), name='ft_inflate_scrambled_prediction')\n\n #Inflate sample axis\n y_pred_non_scrambled = inflate_non_scrambled_prediction(y_pred_non_scrambled_deflated)\n y_pred_scrambled = inflate_scrambled_prediction(y_pred_scrambled_deflated)\n \n #Define background matrix embeddings\n seq_reshape_layer = Reshape((self.input_size_x, self.input_size_y, self.n_out_channels))\n\n x_mean_dense = Embedding(len(self.input_templates), self.input_size_x * self.input_size_y * self.n_out_channels, embeddings_initializer='zeros', name='ft_x_mean_dense')\n \n x_means = [seq_reshape_layer(x_mean_dense(scrambler_class)) for scrambler_class in scrambler_classes]\n\n scrambler_mode_coeff = 1.\n if self.scrambler_mode == 'occlusion' and reference == 'predictor' :\n scrambler_mode_coeff = -1.\n elif self.scrambler_mode == 'occlusion' and reference == 'label' :\n if self.n_classes <= 1 :\n y_pred_non_scrambled = Lambda(lambda x: 1. 
- x, name=\"ft_invert_binary_label\")(y_pred_non_scrambled)\n \n #NLL cost\n nll_loss_func = None\n \n if custom_loss_func is not None :\n nll_loss_func = custom_loss_func\n scrambler_mode_coeff = 1.\n else :\n if nll_mode == 'reconstruction' and predictor_task == 'classification' :\n if self.n_classes > 1 :\n nll_loss_func = get_softmax_kl_divergence()\n else :\n nll_loss_func = get_sigmoid_kl_divergence()\n elif nll_mode == 'reconstruction' and predictor_task == 'classification_sym' :\n nll_loss_func = get_symmetric_sigmoid_kl_divergence()\n elif nll_mode == 'maximization' and predictor_task == 'classification' :\n nll_loss_func = get_sigmoid_max_nll()\n elif nll_mode == 'minimization' and predictor_task == 'classification' :\n nll_loss_func = get_sigmoid_min_nll()\n elif nll_mode == 'reconstruction' and predictor_task == 'regression' :\n nll_loss_func = get_mse()\n elif nll_mode == 'maximization' and predictor_task == 'regression' :\n nll_loss_func = get_linear_max_nll()\n elif nll_mode == 'minimization' and predictor_task == 'regression' :\n nll_loss_func = get_linear_min_nll()\n\n #Entropy cost\n entropy_loss_func = None\n if entropy_mode == 'target' :\n entropy_loss_func = get_target_entropy_sme_masked(x_start=0, x_end=self.input_size_x, y_start=0, y_end=self.input_size_y, target_bits=entropy_bits)\n elif entropy_mode == 'maximization' :\n entropy_loss_func = get_margin_entropy_ame_masked(x_start=0, x_end=self.input_size_x, y_start=0, y_end=self.input_size_y, max_bits=entropy_bits)\n\n #Execute NLL cost\n \n nll_loss = Lambda(lambda x: K.reshape(K.sum(scrambler_mode_coeff * nll_loss_func(x[0], x[1]), axis=0), (1,)), name='ft_nll')([\n y_pred_non_scrambled,\n y_pred_scrambled\n ])\n\n #Execute entropy cost\n entropy_losses = []\n for input_ix in range(self.n_inputs) :\n entropy_loss = Lambda(lambda x: K.expand_dims(entropy_loss_func(x[0], x[1], x[2]), axis=-1), name='ft_entropy_' + str(input_ix))([\n pwms[input_ix],\n pwm_masks[input_ix],\n x_means[input_ix]\n ])\n entropy_losses.append(entropy_loss)\n \n entropy_loss = None\n if len(entropy_losses) > 1 :\n entropy_loss = Lambda(lambda x: K.reshape(K.sum(K.mean(x, axis=-1), axis=0), (1,)), name='ft_entropy')(Concatenate(axis=-1)(entropy_losses))\n else :\n entropy_loss = Lambda(lambda x: K.reshape(K.sum(K.mean(x, axis=-1), axis=0), (1,)), name='ft_entropy')(entropy_losses[0])\n \n finetuning_loss_model = Model(\n scrambler_classes + scrambler_inputs + scrambler_drops + scrambler_pretrained_scores + ([scrambler_label] if scrambler_label is not None else []) + scrambler_extra_inputs,\n [nll_loss, entropy_loss]\n )\n\n #Initialize Templates and Masks\n initialize_templates(finetuning_loss_model, self.input_templates, self.input_backgrounds_log, model_prefix='ft_')\n\n #Initialize Sequence Length Parameters\n initialize_backgrounds(finetuning_loss_model, self.input_backgrounds, model_prefix='ft_')\n \n opt = keras.optimizers.Adam(lr=adam_lr, beta_1=adam_beta_1, beta_2=adam_beta_2)\n \n finetuning_loss_model.compile(\n optimizer=opt,\n loss={\n 'ft_nll' : get_weighted_loss(loss_coeff=1.0),\n 'ft_entropy' : get_weighted_loss(loss_coeff=entropy_weight)\n }\n )\n \n #self.finetuning_loss_model = finetuning_loss_model\n\n #(Re-)Initialize scrambler mask\n def _reset_generator(scrambler_model, verbose=False) :\n session = K.get_session()\n for layer in scrambler_model.layers :\n if 'scrambler' in layer.name :\n for v in layer.__dict__:\n v_arg = getattr(layer, v)\n if hasattr(v_arg,'initializer'):\n initializer_method = getattr(v_arg, 
'initializer')\n initializer_method.run(session=session)\n if verbose :\n print('reinitializing layer {}.{}'.format(layer.name, v))\n\n #(Re-)Initialize Optimizer\n def _reset_optimizer(opt, verbose=False) :\n session = K.get_session()\n for v in opt.__dict__:\n v_arg = getattr(opt, v)\n if hasattr(v_arg,'initializer'):\n initializer_method = getattr(v_arg, 'initializer')\n initializer_method.run(session=session)\n if verbose :\n print('reinitializing optimizer parameter {}'.format(v))\n\n #Reset mask\n _reset_generator(finetuning_model, verbose=False)\n _reset_generator(finetuning_loss_model, verbose=False)\n _reset_optimizer(opt, verbose=False)\n \n #Execute training procedure\n dummy = np.zeros((batch_size, 1))\n \n label = [y] if reference == 'label' else []\n \n input_tensors = group + x + drop + pretrained_scores + label + extra_input\n \n #Pad data\n n_pad = batch_size - x[0].shape[0] % batch_size\n if n_pad == batch_size :\n n_pad = 0\n \n if n_pad > 0 :\n input_tensors = [\n np.concatenate([input_tensor, np.zeros(tuple([n_pad] + list(input_tensor.shape)[1:]))], axis=0)\n for input_tensor in input_tensors\n ]\n \n n_batches = input_tensors[0].shape[0] // batch_size\n \n train_histories = []\n pwms = [[] for i in range(self.n_inputs)]\n samples = [[] for i in range(self.n_inputs)]\n scores = [[] for i in range(self.n_inputs)]\n for batch_ix in range(n_batches) :\n\n if batch_ix % 20 == 0 :\n print(\"Finetuning batch \" + str(batch_ix) + \"...\")\n \n input_tensors_batch = [\n input_tensor[batch_ix*batch_size:(batch_ix+1)*batch_size] for input_tensor in input_tensors\n ]\n\n train_history = LossHistory(loss_names=['ft_nll', 'ft_entropy'])\n\n # train the autoscrambler\n _ = finetuning_loss_model.fit(\n input_tensors_batch,\n [dummy, dummy],\n epochs=1,\n steps_per_epoch=n_iters,\n #batch_size=batch_size,\n callbacks=[train_history]\n )\n\n pred_bundle = finetuning_model.predict(x=input_tensors_batch, batch_size=batch_size, verbose=False)\n \n if n_pad > 0 :\n pred_bundle = [\n pred_bundle_member[:-n_pad, ...] for pred_bundle_member in pred_bundle\n ]\n \n temp_pwms = []\n temp_samples = []\n temp_scores = []\n for input_ix in range(self.n_inputs) :\n temp_pwm = pred_bundle[self.n_inputs + input_ix]\n temp_sample = pred_bundle[2 * self.n_inputs + input_ix]\n temp_score = pred_bundle[3 * self.n_inputs + input_ix]\n\n temp_pwms.append(temp_pwm)\n temp_samples.append(temp_sample)\n temp_scores.append(temp_score)\n \n for i in range(self.n_inputs) :\n pwms[i].append(temp_pwms[i])\n samples[i].append(temp_samples[i])\n scores[i].append(temp_scores[i])\n \n train_histories.append(train_history.loss_dict)\n\n #Reset mask\n _reset_generator(finetuning_model)\n _reset_generator(finetuning_loss_model)\n _reset_optimizer(opt)\n \n for i in range(self.n_inputs) :\n if signal_is_1d :\n pwms[i] = np.concatenate(pwms[i], axis=0)[:, 0, ...]\n samples[i] = np.concatenate(samples[i], axis=0)[:, :, 0, ...]\n scores[i] = np.concatenate(scores[i], axis=0)[:, 0, ...]\n else :\n pwms[i] = np.concatenate(pwms[i], axis=0)\n samples[i] = np.concatenate(samples[i], axis=0)\n scores[i] = np.concatenate(scores[i], axis=0)\n \n if len(pwms) <= 1 :\n return pwms[0], samples[0], scores[0], train_histories\n \n return pwms, samples, scores, train_histories\n" ]
[ [ "numpy.concatenate", "tensorflow.get_default_graph", "tensorflow.python.framework.ops.RegisterGradient", "numpy.arange", "tensorflow.nn.moments", "tensorflow.ConfigProto", "numpy.argmax", "tensorflow.Session", "tensorflow.ceil", "tensorflow.argmax", "numpy.zeros", "tensorflow.contrib.distributions.RelaxedOneHotCategorical", "numpy.log", "tensorflow.nn.batch_normalization", "numpy.array", "numpy.sum", "tensorflow.nn.softmax", "numpy.ones", "tensorflow.python.framework.ops.name_scope", "tensorflow.multinomial", "scipy.signal.gaussian" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.14", "0.15", "1.0", "0.19", "0.18", "0.12", "0.10", "0.17", "0.16" ], "tensorflow": [ "1.10" ] } ]
atlan-antillia/EfficientDet-Japanese-RoadSigns
[ "401c37c19972e5571e1f189943f01b16bc202274" ]
[ "TFRecordInspector.py" ]
[ "#******************************************************************************\n#\n# Copyright (c) 2021 Antillia.com TOSHIYUKI ARAI. ALL RIGHTS RESERVED.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n#******************************************************************************\n\n\"\"\"\n\nThis is based on view_tfrecords_tf2.py for tensorflow 2 in https://github.com/EricThomson/tfrecord-view,\nwhich is modified by antillia.com \n\nThe tf2 version is based in https://github.com/jschw/tfrecord-view/blob/master/tfrecord_view_gui.py\n\nview_records.py:\nConsume and display data from a tfrecord file: pulls image and bounding boxes for display\nso you can make sure things look reasonabloe, e.g., after augmentation.\n\nPart of tensorflow-view repo: https://github.com/EricThomson/tfrecord-view\n\n\"\"\"\n\n# TFRecordInspector.py\n#\n# 2021/11/05 sarah-antillia\n# 2021/11/08 Added the following method\n# def save_objects_count(self, objects_count):\n\n# This is for tensorflow 2\n\nimport os\nimport sys\nfrom pprint import pprint\nimport cv2\n\nfrom PIL import Image, ImageDraw, ImageFont\nimport numpy as np\nimport tensorflow as tf\nimport warnings\nimport traceback\nfrom LabelMapReader import LabelMapReader\n\n\nclass TFRecordInspector:\n\n def __init__(self, tfrecord_filepath, label_map_filepath, output_dir, with_annotation=True):\n self.tfrecord_filepath = tfrecord_filepath\n self.label_map_filepath = label_map_filepath\n self.output_dir = output_dir\n self.with_annotation = with_annotation\n \n self.class_labels = {}\n reader = LabelMapReader()\n self.class_labels, self.classes, = reader.read(self.label_map_filepath)\n print(\"---- class_labels {}\".format(self.class_labels))\n\n\n def parse_record(self, data_record):\n \n example = None\n try:\n feature = {\n 'image/encoded': tf.io.FixedLenFeature([], tf.string),\n 'image/object/class/label': tf.io.VarLenFeature(tf.int64),\n 'image/object/bbox/xmin': tf.io.VarLenFeature(tf.float32),\n 'image/object/bbox/ymin': tf.io.VarLenFeature(tf.float32),\n 'image/object/bbox/xmax': tf.io.VarLenFeature(tf.float32),\n 'image/object/bbox/ymax': tf.io.VarLenFeature(tf.float32),\n 'image/filename': tf.io.FixedLenFeature([], tf.string)\n }\n example = tf.io.parse_single_example(data_record, feature)\n \n except:\n # 2021/11/18\n # if 'image/filename' were missing.\n feature = {\n 'image/encoded': tf.io.FixedLenFeature([], tf.string),\n 'image/object/class/label': tf.io.VarLenFeature(tf.int64),\n 'image/object/bbox/xmin': tf.io.VarLenFeature(tf.float32),\n 'image/object/bbox/ymin': tf.io.VarLenFeature(tf.float32),\n 'image/object/bbox/xmax': tf.io.VarLenFeature(tf.float32),\n 'image/object/bbox/ymax': tf.io.VarLenFeature(tf.float32),\n #'image/filename': tf.io.FixedLenFeature([], tf.string)\n }\n example = tf.io.parse_single_example(data_record, feature)\n\n #traceback.print_exc()\n \n return example\n\n # 2021/11/08 \n def save_objects_count(self, objects_count):\n objects_count_dir = os.path.join(self.output_dir, \"objects_count\")\n if 
os.path.exists(objects_count_dir) == False:\n os.makedirs(objects_count_dir)\n objects_count_path = os.path.join(objects_count_dir, \"objects_count.csv\")\n \n NL = \"\\n\"\n SEP = \",\"\n \n with open(objects_count_path, mode='w') as s:\n \n objects_count = sorted(objects_count.items(), key=lambda x:x[0])\n # This returns a list\n print(objects_count)\n \n keys = \"\"\n values = \"\" \n for (key,value) in objects_count:\n keys = keys + str(key) + SEP\n values = values + str(value) + SEP\n\n s.write(keys + NL)\n s.write(values + NL)\n print(\"=== Saved objects_count file {}\".format(objects_count_path))\n \n \n def extract_images(self):\n dataset = tf.data.TFRecordDataset([self.tfrecord_filepath])\n \n record_iterator = iter(dataset)\n num_records = dataset.reduce(np.int64(0), lambda x, _: x + 1).numpy()\n print(\"----- Num records {}\".format(num_records))\n \n objects_count = {}\n\n for im_ind in range(num_records):\n #Parse and process example\n parsed_example = self.parse_record(record_iterator.get_next())\n encoded_image = parsed_example['image/encoded']\n image_np = tf.image.decode_image(encoded_image, channels=3).numpy()\n filename = \"\"\n try:\n filename = parsed_example['image/filename']\n filename = \"{}\".format(filename)\n filename = filename.strip('b').strip(\"'\")\n #print(\"=== filename {}\".format(filename))\n except:\n #If 'image/filename' were missing. \n filename = str(im_ind) + \".jpg\"\n traceback.print_exc()\n\n labels = tf.sparse.to_dense( parsed_example['image/object/class/label'], default_value=0).numpy()\n x1norm = tf.sparse.to_dense( parsed_example['image/object/bbox/xmin'], default_value=0).numpy()\n x2norm = tf.sparse.to_dense( parsed_example['image/object/bbox/xmax'], default_value=0).numpy()\n y1norm = tf.sparse.to_dense( parsed_example['image/object/bbox/ymin'], default_value=0).numpy()\n y2norm = tf.sparse.to_dense( parsed_example['image/object/bbox/ymax'], default_value=0).numpy()\n num_bboxes = len(labels)\n\n height, width = image_np[:, :, 1].shape\n image_copy = image_np.copy()\n image_rgb = cv2.cvtColor(image_copy, cv2.COLOR_BGR2RGB)\n cv_rgb = image_rgb[:, :, ::-1]\n pil_image = Image.fromarray(cv_rgb)\n #print(\" --- class_labels {}\".format(self.class_labels))\n draw = ImageDraw.Draw(pil_image)\n pil_font = ImageFont.truetype(\"arial.ttf\", 18)\n\n if num_bboxes > 0:\n x1 = np.int64(x1norm * width)\n \n x2 = np.int64(x2norm * width)\n y1 = np.int64(y1norm * height)\n y2 = np.int64(y2norm * height)\n for bbox_ind in range(num_bboxes):\n bbox = (x1[bbox_ind], y1[bbox_ind], x2[bbox_ind], y2[bbox_ind])\n #print(\"--- labels {}\".format(labels))\n category = labels[bbox_ind] # -1\n #print(\"--- bbox_ind {}\".format(bbox_ind))\n \n #print(\"--- category {}\".format(category))\n label_name = self.class_labels[category]\n print(\"--- category_id {} label_name {}\".format(category, label_name))\n \n # Store the number of objects into objects_count dict.\n if label_name not in objects_count:\n objects_count[label_name] = 1\n else:\n count = int(objects_count[label_name]) +1 \n objects_count.update({label_name: count})\n \n if self.with_annotation:\n label_position = (bbox[0] + 5, bbox[1] + 5)\n \n draw.rectangle([(bbox[0], bbox[1]), (bbox[2], bbox[3])], \n outline=(255, 0, 0),width=2)\n \n draw.text(xy = label_position, text = label_name, \n font = pil_font, fill = (255,0,0,0) )\n \n output_image_file = os.path.join(output_images_dir, filename)\n print(\"=== Saved image file {}\".format(output_image_file))\n pil_image.save(output_image_file)\n \n 
self.save_objects_count(objects_count)\n \n \n#\n# python TFRecordInspector.py ./tfrecord/sample.tfrecord ./tfrecord/label_map.pbtxt ./output [True/False]\n# python TFRecordInspector.py ./tfrecord/valid/valid.tfrecord ./label_map.pbtxt ./output/valid [True/False]\n\n#\nif __name__ == '__main__':\n #tf.disable_eager_execution()\n tfrecord_file = \"\"\n label_map_pbtxt = \"\"\n output_images_dir = \"\"\n with_annotation = True\n \n try:\n if len(sys.argv) <4:\n raise Exception(\"Usage: python TFRecordInsepector.py ./tfrecord/sample.tfrecord ./tfrecord/label_map.pbtxt ./output\")\n if len(sys.argv) >=4:\n tfrecord_file = sys.argv[1]\n label_map_pbtxt = sys.argv[2]\n output_images_dir = sys.argv[3]\n if len(sys.argv) == 5:\n with_annotation = eval(sys.argv[4])\n\n if not os.path.exists(tfrecord_file):\n raise Exception(\" Not found \" + tfrecord_file)\n\n if not os.path.exists(label_map_pbtxt):\n raise Exception(\" Not found \" + label_map_pbtxt)\n\n if not os.path.exists(output_images_dir):\n os.makedirs(output_images_dir)\n print(\"=== tfrecord_file {}\".format(tfrecord_file))\n print(\"=== label_map_pbtxt {}\".format(label_map_pbtxt))\n print(\"=== output_images_dir {}\".format(output_images_dir))\n print(\"=== with_annotation {}\".format(with_annotation))\n\n inspector = TFRecordInspector(tfrecord_file, \n label_map_pbtxt, \n output_images_dir, \n with_annotation=with_annotation)\n inspector.extract_images()\n \n except Exception as ex:\n traceback.print_exc()\n" ]
[ [ "tensorflow.sparse.to_dense", "tensorflow.data.TFRecordDataset", "tensorflow.io.parse_single_example", "tensorflow.io.VarLenFeature", "tensorflow.io.FixedLenFeature", "numpy.int64", "tensorflow.image.decode_image" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
lucaskup/PorosityAnalisys
[ "e2c443b886a440b67a80ecde510365697064a4ca" ]
[ "code/compare_models.py" ]
[ "from sklearn.model_selection import cross_validate\n\nimport pandas as pd\nimport numpy as np\n\nfrom scipy.stats import t\nfrom sklearn.model_selection import RepeatedKFold\n\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.linear_model import Ridge\nfrom sklearn.linear_model import Lasso\nfrom sklearn.linear_model import ElasticNet\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom sklearn.svm import SVR\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.neural_network import MLPRegressor\n\nfrom sklearn.preprocessing import MinMaxScaler\n\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nmpl.rcParams['figure.dpi'] = 400\n\nRESIDUALS_FILE_NAME = '../results/modelComparison/residualsUnder10Rep10foldCV.csv'\n\n\ndef corrected_std(differences, n_train, n_test):\n \"\"\"Corrects standard deviation using Nadeau and Bengio's approach.\n\n Parameters\n ----------\n differences : ndarray of shape (n_samples, 1)\n Vector containing the differences in the score metrics of two models.\n n_train : int\n Number of samples in the training set.\n n_test : int\n Number of samples in the testing set.\n\n Returns\n -------\n corrected_std : int\n Variance-corrected standard deviation of the set of differences.\n \"\"\"\n n = n_train + n_test\n corrected_var = (\n np.var(differences, ddof=1) * ((1 / n) + (n_test / n_train))\n )\n corrected_std = np.sqrt(corrected_var)\n return corrected_std\n\n\ndef compute_corrected_ttest(differences, df, n_train, n_test):\n \"\"\"Computes right-tailed paired t-test with corrected variance.\n\n Parameters\n ----------\n differences : array-like of shape (n_samples, 1)\n Vector containing the differences in the score metrics of two models.\n df : int\n Degrees of freedom.\n n_train : int\n Number of samples in the training set.\n n_test : int\n Number of samples in the testing set.\n\n Returns\n -------\n t_stat : float\n Variance-corrected t-statistic.\n p_val : float\n Variance-corrected p-value.\n \"\"\"\n mean = np.mean(differences)\n std = corrected_std(differences, n_train, n_test)\n t_stat = mean / std\n p_val = t.sf(np.abs(t_stat), df) # right-tailed t-test\n return t_stat, p_val\n\n\ndef execute_experiment():\n linear = LinearRegression()\n ridge = Ridge(alpha=0.1, max_iter=100)\n lasso = Lasso(alpha=0.00025, max_iter=1000)\n elasticNet = ElasticNet(alpha=0.00025, l1_ratio=1, max_iter=1000)\n knn = KNeighborsRegressor(n_neighbors=2, metric='minkowski')\n svr = SVR(gamma=5, C=10, epsilon=0.01, kernel='rbf')\n forest = RandomForestRegressor(n_estimators=500, criterion='mae')\n mlp = MLPRegressor(max_iter=3000, hidden_layer_sizes=(20, 15, 15, 10),\n activation='relu', alpha=0.001,\n learning_rate='adaptive', learning_rate_init=0.001,\n batch_size=3, solver='adam')\n\n results = list(map(lambda model: cross_validate(model,\n X,\n y=Y,\n cv=cv,\n scoring='neg_mean_squared_error'),\n [linear, ridge, lasso, elasticNet, knn, forest, svr, mlp]))\n boxplot_data = list(map(lambda x: x['test_score'], results))\n\n plt.boxplot(boxplot_data)\n plt.show()\n\n linear_results, ridge_results, lasso_results, \\\n elasticnet_results, knn_results, forest_results, \\\n svr_results, mlp_results = boxplot_data\n\n residuals_df = pd.DataFrame({'Linear': linear_results,\n 'Lasso': lasso_results,\n 'Ridge': ridge_results,\n 'ElasticNet': elasticnet_results,\n 'KNN': knn_results,\n 'RF': forest_results,\n 'SVR': svr_results,\n 'MLP': mlp_results})\n\n residuals_df.to_csv(RESIDUALS_FILE_NAME,\n sep=';', decimal='.')\n\n\n# Import the data\ncv 
= RepeatedKFold(\n n_splits=10, n_repeats=10, random_state=0\n)\n\ndataset = pd.read_csv('../results/featureSelection/featureSelectedData.csv',\n index_col=0)\n\nX = dataset.values[:, :-1].astype(np.float64)\nY = dataset['Porosity (%)'].values.astype(np.float64)\n\nmmY = MinMaxScaler()\nY = mmY.fit_transform(Y.reshape(-1, 1)).ravel()\n\n# execute_experiment()\n\nresidual = pd.read_csv(\n RESIDUALS_FILE_NAME, sep=';', decimal='.')\nmodel_1_scores = residual['SVR'].values # scores of the best model\n# scores of the second-best model\nmodel_2_scores = residual['ElasticNet'].values\n\ndifferences = model_1_scores - model_2_scores\n\nn = 10 # number of test sets\ndf = n - 1\nn_train = len(list(cv.split(X, Y))[0][0])\nn_test = len(list(cv.split(X, Y))[0][1])\n\nt_stat, p_val = compute_corrected_ttest(differences, df, n_train, n_test)\nprint(f\"Corrected t-value: {t_stat:.3f}\\n\"\n f\"Corrected p-value: {p_val:.3f}\")\n\nt_stat_uncorrected = (\n np.mean(differences) / np.sqrt(np.var(differences, ddof=1) / n)\n)\np_val_uncorrected = t.sf(np.abs(t_stat_uncorrected), df)\n\nprint(f\"Uncorrected t-value: {t_stat_uncorrected:.3f}\\n\"\n f\"Uncorrected p-value: {p_val_uncorrected:.3f}\")\n" ]
[ [ "sklearn.ensemble.RandomForestRegressor", "numpy.sqrt", "sklearn.linear_model.ElasticNet", "pandas.DataFrame", "numpy.mean", "numpy.var", "sklearn.preprocessing.MinMaxScaler", "pandas.read_csv", "sklearn.linear_model.Lasso", "sklearn.svm.SVR", "sklearn.linear_model.Ridge", "matplotlib.pyplot.show", "matplotlib.pyplot.boxplot", "sklearn.model_selection.RepeatedKFold", "numpy.abs", "sklearn.neighbors.KNeighborsRegressor", "sklearn.linear_model.LinearRegression", "sklearn.model_selection.cross_validate", "sklearn.neural_network.MLPRegressor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
sameelab/mutprediction-with-shape
[ "8ca44a25d57903564f1e187d03880ba8a6babe37" ]
[ "TFBS_analysis/script_TFBS_mutrates.py" ]
[ "# python3\n\n# Libraries\nimport os\nimport sys\nimport re\nimport numpy as np\nimport pandas as pd\nfrom collections import Counter\nfrom Bio import SeqIO, motifs\nfrom Bio.Seq import Seq\nfrom scipy.stats import pearsonr, spearmanr, kstest, entropy\n\n\n# Import filenames list\nfile_shape, file_muts, file_logo, filename_out = sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4]\n\n\n# Sequence logo and conservation score\nTF_logo = pd.read_csv(file_logo, sep=\" \", header=None, skiprows=[0])\nTF_logo.pop(0)\n#TF_conserve = (2 * np.max(TF_logo, axis=1) - np.sum(TF_logo, axis=1)).values\nTF_conserve = entropy(TF_logo, qk=np.full(np.shape(TF_logo), fill_value=0.25), axis=1)\n\n# Define TF length\nlen_tf = len(TF_conserve)\n\n# TFBS shape distribution\nDF_pos_shape = pd.read_csv(file_shape)\n# TFBS mutation ref and alt distribution\nDF_pos_muts = pd.read_csv(file_muts, sep=\"\\t\", index_col=None, header=None)\nDF_pos_muts.columns = [\"chr\", \"start\", \"end\", \"mut\", \"MAF\", \"pos\", \"kmer_xtend\", \"kmer\"]\n# 5-mer reference DF\nDF_strucval_5mersheet = pd.read_csv(\"ref_5mers_structure.csv\", index_col=0)\n\ntemp_altks = [0] * len(DF_pos_muts)\ntemp_alt7 = [0] * len(DF_pos_muts)\nfor i in range(len(temp_altks)):\n temp_kmer, temp_7mer = DF_pos_muts['kmer'][i].upper(), DF_pos_muts['kmer_xtend'][i].upper()\n temp_alt = DF_pos_muts['mut'][i].split(\">\")[1]\n temp_altks[i] = temp_kmer[0:2] + temp_alt + temp_kmer[3:5]\n temp_alt7[i] = temp_7mer[0:3] + temp_alt + temp_7mer[4:7]\nDF_pos_muts['kmer_alt'] = temp_altks\nDF_pos_muts['kmer_alt_xtend'] = temp_alt7\n\n\nDF_pos_muts.index = [item.upper() for item in DF_pos_muts['kmer'].values]\nDF_pos_muts_ref = DF_pos_muts.join(DF_strucval_5mersheet, how=\"left\")\nDF_pos_muts_ref.sort_values(by=[\"pos\", \"kmer\"], inplace=True)\nDF_pos_muts.index = DF_pos_muts['kmer_alt']\nDF_pos_muts_alt = DF_pos_muts.join(DF_strucval_5mersheet, how=\"left\")\nDF_pos_muts_alt.sort_values(by=[\"pos\", \"kmer\"], inplace=True)\n\n\ntemp_counter = Counter(DF_pos_muts_ref['pos'])\nfor i in range(len_tf):\n if i not in temp_counter.keys():\n temp_counter[i] = 0\nDF_observed_mut = pd.DataFrame([temp_counter]).transpose()\nDF_observed_mut.sort_index(inplace=True)\nDF_observed_mut = DF_observed_mut / len(DF_pos_shape)\n\ntemp_arr = DF_observed_mut.values.flatten()\ntemp_stat = np.max(temp_arr) / np.min(temp_arr[np.nonzero(temp_arr)])\nprint(file_shape, temp_stat)\n\nwith open(filename_out, \"a+\") as f:\n f.write(file_shape + \"\\t\" + str(temp_stat))\n" ]
[ [ "pandas.read_csv", "numpy.nonzero", "pandas.DataFrame", "numpy.max", "numpy.shape" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
srishag/Machine-Learning-For-Trading
[ "34ea564c9f1645d5cf1ebf1e4948ada0cfd957b7" ]
[ "Lesson 4/23_access_array_elements.py" ]
[ "\"\"\"Accessing NumPy arrays.\"\"\"\nimport numpy as np\n\n\ndef test_run():\n a = np.random.rand(5, 4)\n print(\"Array:\\n\", a)\n\n # Accessing element at position (3, 2)\n element = a[3, 2]\n print(element)\n\n # Elements in defined range\n print(a[0, 1:3])\n\n # Top left corner of array\n print(a[0:2, 0:2])\n\n print(a[:, 0:3:2]) # will select columns 0, 2 for every row\n\n\nif __name__ == \"__main__\":\n test_run()\n" ]
[ [ "numpy.random.rand" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yangjiang001/pdia-1
[ "8dcd0960bf2e540f11cd37ccb92cbac1bbd51368" ]
[ "pdia/qc/dropStudentsWithRepeatedBlock.py" ]
[ "import pandas as pd\n\nfrom pdia.utils.createUniqueRunID import createUniqueRunID\nfrom pdia.qc.dropStudents import dropStudents\n\n\ndef dropStudentsWithRepeatedBlock(df,\n saveDroppedAs=None,\n studentId='BookletNumber',\n blockId=\"BlockCode\",\n runId=\"blockRunID\",\n verbose=True):\n \"\"\"\n Drop students with repeated blocks.\n\n We keep track of whether the same blocks have been run multiple times. This could happen when, for example,\n a student started a block, got interrupted, and did another block, and asked by the admin to go back to a block.\n\n But more likely, some intervening \"blockID\" get inserted into a contiguous blockID, creating the illusion of\n a block being run multiple times. The latter case is often salvageable.\n\n So when possible, investigate such cases and come up with a fix.\n\n :param df: input data frame with data from multiple students\n :param saveDroppedAs: optionally saving the dropped data to a csv or pickle file. Remember to specify .csv or .pickle\n :param studentId: name of the column containing the student ID info; default ot \"BookletNumber\"\n :param runId: name of the column containing the run counter of blocknames; default to \"blockRunID\"\n :param verbose: default to True\n :return: a data frame with students having any of these events dropped.\n\n \"\"\"\n\n # error checks\n assert (isinstance(df, pd.DataFrame))\n for v in [studentId, blockId]:\n assert (v in df.columns)\n\n if verbose:\n print(\"\\ndropStudentsWithRepeatedBlock:\")\n\n # compute the blockRunID, and keep only the first line per Student by blockRunID\n t2 = df.groupby([studentId]) \\\n .apply(lambda x: createUniqueRunID(x, var=blockId, runId=runId)) \\\n .groupby([studentId, runId]).first() \\\n .reset_index()\n # find the # of unique BlockCode != the total number of block runs\n idx = t2.groupby([studentId])[blockId].nunique() < t2.groupby([studentId])[runId].max()\n # find the studentID: make sure it's a Pandas Series\n studentsToDrop = pd.Series(idx[idx == True].index)\n\n if verbose:\n print(\"dropStudentsWithRepeatedBlock:\")\n\n return dropStudents(df, studentsToDrop, saveDroppedAs, studentId, verbose)\n" ]
[ [ "pandas.Series" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
DhruvaBansal00/MultimodalGNN-VQA
[ "658da6f7f18883fb1302cd13c0b5603de836e199" ]
[ "mgn/models/seq2seq.py" ]
[ "import torch\nimport torch.nn as nn\n\n\nclass Seq2seq(nn.Module):\n \"\"\"Seq2seq model module\n To do: add docstring to methods\n \"\"\"\n \n def __init__(self, encoder, decoder):\n super(Seq2seq, self).__init__()\n self.encoder = encoder\n self.decoder = decoder\n\n def forward(self, x, y, g_embd, input_lengths=None):\n encoder_outputs, encoder_hidden = self.encoder(x, g_embd, input_lengths)\n decoder_outputs, decoder_hidden = self.decoder(y, encoder_outputs, encoder_hidden)\n return decoder_outputs\n\n def sample_output(self, x, g_embd, input_lengths=None):\n encoder_outputs, encoder_hidden = self.encoder(x, g_embd, input_lengths)\n output_symbols, _ = self.decoder.forward_sample(encoder_outputs, encoder_hidden)\n return torch.stack(output_symbols).transpose(0,1)\n\n def reinforce_forward(self, x, g_embd, input_lengths=None):\n encoder_outputs, encoder_hidden = self.encoder(x, g_embd, input_lengths)\n self.output_symbols, self.output_logprobs = self.decoder.forward_sample(encoder_outputs, encoder_hidden, reinforce_sample=True)\n return torch.stack(self.output_symbols).transpose(0,1)\n\n def reinforce_backward(self, reward, entropy_factor=0.0):\n assert self.output_logprobs is not None and self.output_symbols is not None, 'must call reinforce_forward first'\n losses = []\n grad_output = []\n for i, symbol in enumerate(self.output_symbols):\n if len(self.output_symbols[0].shape) == 1: # one-dim index values\n logprob_pred_symbol = torch.index_select(self.output_logprobs[i], 1, symbol)\n logprob_pred = torch.diag(logprob_pred_symbol).sum()\n\n probs_i = torch.exp(self.output_logprobs[i])\n plogp = self.output_logprobs[i] * probs_i\n entropy_offset = entropy_factor * plogp.sum()\n\n loss = - logprob_pred*reward + entropy_offset\n print(f\"i = {i}: loss = - {logprob_pred} * {reward} + {entropy_offset} = {loss}\")\n else:\n loss = - self.output_logprobs[i]*reward\n losses.append(loss.sum())\n grad_output.append(None)\n torch.autograd.backward(losses, grad_output, retain_graph=True)" ]
[ [ "torch.autograd.backward", "torch.exp", "torch.diag", "torch.stack", "torch.index_select" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]