import unittest
from functools import reduce

import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np

from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu


@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class FcTest(hu.HypothesisTestCase):

    def _run_fc_fwd_bwd(self, device_option, axis_args, X, W, b, dY):
        # Run FC forward and FCGradient backward on one device and
        # return the forward output and the weight/bias gradients.
        op_fwd = core.CreateOperator(
            'FC', ['X', 'W', 'b'], ['Y'],
            device_option=device_option, **axis_args
        )
        op_bwd = core.CreateOperator(
            'FCGradient', ['X', 'W', 'dY'], ['dW', 'db'],
            device_option=device_option, **axis_args
        )
        workspace.FeedBlob('X', X, device_option)
        workspace.FeedBlob('W', W, device_option)
        workspace.FeedBlob('b', b, device_option)
        workspace.RunOperatorOnce(op_fwd)
        Y = workspace.FetchBlob('Y')
        workspace.FeedBlob('dY', dY, device_option)
        workspace.RunOperatorOnce(op_bwd)
        return Y, workspace.FetchBlob('dW'), workspace.FetchBlob('db')

    def _assert_allclose(self, ref, test):
        # Flatten both arrays and fail loudly, printing the mismatch,
        # if they differ beyond the tolerance.
        ref = ref.flatten()
        test = test.flatten()
        if not np.allclose(ref, test, atol=0.01, rtol=0.01):
            print(test)
            print(ref)
            print(np.max(np.abs(test - ref)))
            self.assertTrue(False)

    @given(n=st.integers(1, 5),
           m=st.integers(1, 5),
           k=st.integers(1, 5),
           **mu.gcs)
    @settings(deadline=1000)
    def test_fc_2_dims(self, n, m, k, gc, dc):
        X = np.random.rand(m, k).astype(np.float32) - 0.5
        W = np.random.rand(n, k).astype(np.float32) - 0.5
        b = np.random.rand(n).astype(np.float32) - 0.5

        op = core.CreateOperator('FC', ['X', 'W', 'b'], ['Y'])

        self.assertDeviceChecks(dc, op, [X, W, b], [0])
        for i in range(3):
            self.assertGradientChecks(gc, op, [X, W, b], i, [0])

    @given(n=st.integers(1, 5),
           m=st.integers(1, 5),
           c=st.integers(1, 5),
           h=st.integers(1, 5),
           w=st.integers(1, 5),
           axis=st.integers(1, 3),
           **mu.gcs)
    def test_fc_with_axis(self, n, m, c, h, w, axis, gc, dc):
        X = np.random.rand(n, c, h, w).astype(np.float32) - 0.5
        # Dimensions at and after `axis` are collapsed into the inner
        # size k, dimensions before it into the outer (batch) size nn.
        k = reduce(lambda x, y: x * y, [n, c, h, w][axis - 4:])
        nn = reduce(lambda x, y: x * y, [n, c, h, w][:axis])
        W = np.random.rand(m, k).astype(np.float32) - 0.5
        b = np.random.rand(m).astype(np.float32) - 0.5
        dY = np.random.rand(nn, m).astype(np.float32) - 0.5

        workspace.ResetWorkspace()
        Y0, dW0, db0 = self._run_fc_fwd_bwd(dc[0], {'axis': axis},
                                            X, W, b, dY)
        workspace.SwitchWorkspace("_device_check_", True)
        Y1, dW1, db1 = self._run_fc_fwd_bwd(dc[1], {'axis': axis},
                                            X, W, b, dY)

        self._assert_allclose(Y0, Y1)
        self._assert_allclose(dW0, dW1)
        self._assert_allclose(db0, db1)

    @given(n=st.integers(1, 5),
           o=st.integers(1, 5),
           i=st.integers(1, 5),
           h=st.integers(1, 5),
           w=st.integers(1, 5),
           axis_w=st.integers(1, 3),
           **mu.gcs)
    @settings(deadline=1000)
    def test_fc_with_axis_w(self, n, o, i, h, w, axis_w, gc, dc):
        W = np.random.rand(o, i, h, w).astype(np.float32) - 0.5
        # Dimensions at and after `axis_w` are collapsed into the inner
        # size k, dimensions before it into the output size m.
        k = reduce(lambda x, y: x * y, [o, i, h, w][axis_w - 4:])
        m = reduce(lambda x, y: x * y, [o, i, h, w][:axis_w])
        X = np.random.rand(n, k).astype(np.float32) - 0.5
        b = np.random.rand(m).astype(np.float32) - 0.5
        dY = np.random.rand(n, m).astype(np.float32) - 0.5

        workspace.ResetWorkspace()
        Y0, dW0, db0 = self._run_fc_fwd_bwd(dc[0], {'axis_w': axis_w},
                                            X, W, b, dY)
        workspace.SwitchWorkspace("_device_check_", True)
        Y1, dW1, db1 = self._run_fc_fwd_bwd(dc[1], {'axis_w': axis_w},
                                            X, W, b, dY)

        self._assert_allclose(Y0, Y1)
        self._assert_allclose(dW0, dW1)
        self._assert_allclose(db0, db1)


if __name__ == "__main__":
    unittest.main()
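
# A numpy-only sketch (illustrative, not the operator implementation) of the
# shape logic the tests above rely on: FC with `axis` treats an input of
# shape (n, c, h, w) as a 2D matrix, collapsing leading dims into the batch
# size nn and trailing dims into the inner size k, then computes
# Y = X.dot(W.T) + b.
#
#     import numpy as np
#     n, c, h, w, m, axis = 2, 3, 4, 5, 6, 2
#     X = np.random.rand(n, c, h, w).astype(np.float32)
#     nn, k = n * c, h * w                    # axis=2: batch = n*c, inner = h*w
#     W = np.random.rand(m, k).astype(np.float32)
#     b = np.random.rand(m).astype(np.float32)
#     Y = X.reshape(nn, k).dot(W.T) + b       # shape (nn, m), matching FC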
# coding: utf-8
# /*##########################################################################
# Copyright (C) 2016-2017 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ############################################################################*/
"""This module provides a h5py-like API to access SpecFile data.

API description
===============

Specfile data structure exposed by this API::

  /
      1.1/
          title = "…"
          start_time = "…"
          instrument/
              specfile/
                  file_header = "…"
                  scan_header = "…"
              positioners/
                  motor_name = value
                  …
              mca_0/
                  data = …
                  calibration = …
                  channels = …
                  preset_time = …
                  elapsed_time = …
                  live_time = …
              mca_1/
                  …
              …
          measurement/
              colname0 = …
              colname1 = …
              …
              mca_0/
                  data -> /1.1/instrument/mca_0/data
                  info -> /1.1/instrument/mca_0/
              …
      2.1/
          …

``file_header`` and ``scan_header`` are the raw headers as they appear in the
original file, as a string of lines separated by newline (``\\n``) characters.

The title is the content of the ``#S`` scan header line without the leading
``#S`` (e.g. ``"1 ascan ss1vo -4.55687 -0.556875 40 0.2"``).

The start time is converted to ISO8601 format (``"2016-02-23T22:49:05Z"``),
if the original date format is standard.

Numeric datasets are stored in *float32* format, except for scalar integers
which are stored as *int64*.

Motor positions (e.g. ``/1.1/instrument/positioners/motor_name``) can be
1D numpy arrays if they are measured as scan data, or else scalars as defined
on ``#P`` scan header lines. A simple test is done to check if the motor name
is also a data column header defined in the ``#L`` scan header line.

Scan data (e.g. ``/1.1/measurement/colname0``) is accessed by column, the
dataset name ``colname0`` being the column label as defined in the ``#L``
scan header line.

MCA data is exposed as a 2D numpy array containing all spectra for a given
analyser. The number of analysers is calculated as the number of MCA spectra
per scan data line. Demultiplexing is then performed to assign the correct
spectra to a given analyser.

MCA calibration is an array of 3 scalars, from the ``#@CALIB`` header line.
It is identical for all MCA analysers, as there can be only one ``#@CALIB``
line per scan.

MCA channels is an array containing all channel numbers. This information is
computed from the ``#@CHANN`` scan header line (if present), or computed from
the shape of the first spectrum in a scan
(``[0, …, len(first_spectrum) - 1]``).
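
The layout above maps directly to item access paths. A minimal sketch
(the file name ``test.dat`` and motor name ``ss1vo`` are illustrative)::

    from silx.io.spech5 import SpecH5

    sfh5 = SpecH5("test.dat")
    motor_position = sfh5["/1.1/instrument/positioners/ss1vo"]
    calibration = sfh5["/1.1/instrument/mca_0/calibration"]  # 3 scalars
    spectra = sfh5["/1.1/instrument/mca_0/data"]             # one row per spectrum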

Accessing data
==============

Data and groups are accessed in :mod:`h5py` fashion::

    from silx.io.spech5 import SpecH5

    # Open a SpecFile
    sfh5 = SpecH5("test.dat")

    # using SpecH5 as a regular group to access scans
    scan1group = sfh5["1.1"]
    instrument_group = scan1group["instrument"]

    # alternative: full path access
    measurement_group = sfh5["/1.1/measurement"]

    # accessing a scan data column by name as a 1D numpy array
    data_array = measurement_group["Pslit HGap"]

    # accessing all mca-spectra for one MCA device
    mca_0_spectra = measurement_group["mca_0/data"]

:class:`SpecH5` and :class:`SpecH5Group` provide a :meth:`SpecH5Group.keys`
method::

    >>> sfh5.keys()
    ['96.1', '97.1', '98.1']
    >>> sfh5['96.1'].keys()
    ['title', 'start_time', 'instrument', 'measurement']

They can also be treated as iterators:

.. code-block:: python

    for scan_group in SpecH5("test.dat"):
        dataset_names = [item.name for item in scan_group["measurement"]
                         if isinstance(item, SpecH5Dataset)]
        print("Found data columns in scan " + scan_group.name)
        print(", ".join(dataset_names))

You can test for existence of data or groups::

    >>> "/1.1/measurement/Pslit HGap" in sfh5
    True
    >>> "positioners" in sfh5["/2.1/instrument"]
    True
    >>> "spam" in sfh5["1.1"]
    False

Strings are stored encoded as ``numpy.string_``, as recommended by
`the h5py documentation <http://docs.h5py.org/en/latest/strings.html>`_.
This ensures maximum compatibility with third party software libraries,
when saving a :class:`SpecH5` to an HDF5 file using :mod:`silx.io.spectoh5`.

The type ``numpy.string_`` is a byte-string format. The consequence of this
is that you should decode strings before using them in **Python 3**::

    >>> from silx.io.spech5 import SpecH5
    >>> sfh5 = SpecH5("31oct98.dat")
    >>> sfh5["/68.1/title"]
    b'68 ascan tx3 -28.5 -24.5 20 0.5'
    >>> sfh5["/68.1/title"].decode()
    '68 ascan tx3 -28.5 -24.5 20 0.5'

Classes
=======

- :class:`SpecH5`
- :class:`SpecH5Group`
- :class:`SpecH5Dataset`
- :class:`SpecH5LinkToGroup`
- :class:`SpecH5LinkToDataset`
"""
import logging
import numpy
import posixpath
import re
import sys

from .specfile import SpecFile

__authors__ = ["<NAME>", "<NAME>"]
__license__ = "MIT"
__date__ = "06/02/2017"

logging.basicConfig()
logger1 = logging.getLogger(__name__)

try:
    import h5py
except ImportError:
    h5py = None
    logger1.debug("Module h5py optional.", exc_info=True)

string_types = (basestring,) if sys.version_info[0] == 2 else (str,)  # noqa

# Static subitems: all groups and datasets that are present in any
# scan (excludes list of scans, data columns, list of mca devices,
# optional mca headers)
static_items = {
    "scan": [u"title", u"start_time", u"instrument", u"measurement"],
    "scan/instrument": [u"specfile", u"positioners"],
    "scan/instrument/specfile": [u"file_header", u"scan_header"],
    "scan/measurement/mca": [u"data", u"info"],
    "scan/instrument/mca": [u"data", u"calibration", u"channels"],
}

# Patterns for group keys
root_pattern = re.compile(r"/$")
scan_pattern = re.compile(r"/[0-9]+\.[0-9]+/?$")
instrument_pattern = re.compile(r"/[0-9]+\.[0-9]+/instrument/?$")
specfile_group_pattern = re.compile(r"/[0-9]+\.[0-9]+/instrument/specfile/?$")
positioners_group_pattern = re.compile(r"/[0-9]+\.[0-9]+/instrument/positioners/?$")
measurement_group_pattern = re.compile(r"/[0-9]+\.[0-9]+/measurement/?$")
measurement_mca_group_pattern = re.compile(r"/[0-9]+\.[0-9]+/measurement/mca_[0-9]+/?$")
instrument_mca_group_pattern = re.compile(r"/[0-9]+\.[0-9]+/instrument/mca_[0-9]+/?$")

# Link to group
measurement_mca_info_pattern = re.compile(r"/[0-9]+\.[0-9]+/measurement/mca_([0-9]+)/info/?$")

# Patterns for dataset keys
header_pattern = re.compile(r"/[0-9]+\.[0-9]+/header$")
title_pattern = re.compile(r"/[0-9]+\.[0-9]+/title$")
start_time_pattern = re.compile(r"/[0-9]+\.[0-9]+/start_time$")
file_header_data_pattern = re.compile(r"/[0-9]+\.[0-9]+/instrument/specfile/file_header$")
scan_header_data_pattern = re.compile(r"/[0-9]+\.[0-9]+/instrument/specfile/scan_header$")
positioners_data_pattern = re.compile(r"/[0-9]+\.[0-9]+/instrument/positioners/([^/]+)$")
measurement_data_pattern = re.compile(r"/[0-9]+\.[0-9]+/measurement/([^/]+)$")
instrument_mca_data_pattern = re.compile(r"/[0-9]+\.[0-9]+/instrument/mca_([0-9]+)/data$")
instrument_mca_calib_pattern = re.compile(r"/[0-9]+\.[0-9]+/instrument/mca_([0-9]+)/calibration$")
instrument_mca_chann_pattern = re.compile(r"/[0-9]+\.[0-9]+/instrument/mca_([0-9]+)/channels$")
instrument_mca_preset_t_pattern = re.compile(r"/[0-9]+\.[0-9]+/instrument/mca_[0-9]+/preset_time$")
instrument_mca_elapsed_t_pattern = re.compile(r"/[0-9]+\.[0-9]+/instrument/mca_[0-9]+/elapsed_time$")
instrument_mca_live_t_pattern = re.compile(r"/[0-9]+\.[0-9]+/instrument/mca_[0-9]+/live_time$")

# Links to dataset
measurement_mca_data_pattern = re.compile(r"/[0-9]+\.[0-9]+/measurement/mca_([0-9]+)/data$")
# info/ + calibration, channel, preset_time, live_time, elapsed_time (not data)
measurement_mca_info_dataset_pattern = re.compile(r"/[0-9]+\.[0-9]+/measurement/mca_([0-9]+)/info/([^d/][^/]+)$")
# info/data
measurement_mca_info_data_pattern = re.compile(r"/[0-9]+\.[0-9]+/measurement/mca_([0-9]+)/info/data$")


def _bulk_match(string_, list_of_patterns):
    """Check whether a string matches any regular expression pattern in a
    list."""
    for pattern in list_of_patterns:
        if pattern.match(string_):
            return True
    return False


def is_group(name):
    """Check if ``name`` matches a valid group name pattern in a
    :class:`SpecH5`.

    :param name: Full name of member
    :type name: str

    For example:

    - ``is_group("/123.456/instrument/")`` returns ``True``.
    - ``is_group("spam")`` returns ``False`` because :literal:`\"spam\"`
      is not at all a valid group name.
    - ``is_group("/1.2/instrument/positioners/xyz")`` returns ``False``
      because this key would point to a motor position, which is a dataset
      and not a group.
    """
    group_patterns = (
        root_pattern, scan_pattern, instrument_pattern,
        specfile_group_pattern, positioners_group_pattern,
        measurement_group_pattern, measurement_mca_group_pattern,
        instrument_mca_group_pattern
    )
    return _bulk_match(name, group_patterns)


def is_dataset(name):
    """Check if ``name`` matches a valid dataset name pattern in a
    :class:`SpecH5`.

    :param name: Full name of member
    :type name: str

    For example:

    - ``is_dataset("/1.2/instrument/positioners/xyz")`` returns ``True``
      because this name could be the key to the dataset recording motor
      positions for motor ``xyz`` in scan ``1.2``.
    - ``is_dataset("/123.456/instrument/")`` returns ``False`` because this
      name points to a group.
    - ``is_dataset("spam")`` returns ``False`` because :literal:`\"spam\"`
      is not at all a valid dataset name.
""" # /1.1/measurement/mca_0 could be interpreted as a data column # with label "mca_0" if measurement_mca_group_pattern.match(name): return False data_patterns = ( header_pattern, title_pattern, start_time_pattern, file_header_data_pattern, scan_header_data_pattern, positioners_data_pattern, measurement_data_pattern, instrument_mca_data_pattern, instrument_mca_calib_pattern, instrument_mca_chann_pattern, instrument_mca_preset_t_pattern, instrument_mca_elapsed_t_pattern, instrument_mca_live_t_pattern ) return _bulk_match(name, data_patterns) def is_link_to_group(name): """Check if ``name`` is a valid link to a group in a :class:`SpecH5`. Return ``True`` or ``False`` :param name: Full name of member :type name: str """ # so far we only have one type of link to a group if measurement_mca_info_pattern.match(name): return True return False def is_link_to_dataset(name): """Check if ``name`` is a valid link to a dataset in a :class:`SpecH5`. Return ``True`` or ``False`` :param name: Full name of member :type name: str """ list_of_link_patterns = ( measurement_mca_data_pattern, measurement_mca_info_dataset_pattern, measurement_mca_info_data_pattern ) return _bulk_match(name, list_of_link_patterns) def _get_attrs_dict(name): """Return attributes dictionary corresponding to the group or dataset pointed to by name. :param name: Full name/path to data or group :return: attributes dictionary """ # Associate group and dataset patterns to their attributes pattern_attrs = { root_pattern: {"NX_class": "NXroot", }, scan_pattern: {"NX_class": "NXentry", }, title_pattern: {}, start_time_pattern: {}, instrument_pattern: {"NX_class": "NXinstrument", }, specfile_group_pattern: {"NX_class": "NXcollection", }, file_header_data_pattern: {}, scan_header_data_pattern: {}, positioners_group_pattern: {"NX_class": "NXcollection", }, positioners_data_pattern: {}, instrument_mca_group_pattern: {"NX_class": "NXdetector", }, instrument_mca_data_pattern: {"interpretation": "spectrum", }, instrument_mca_calib_pattern: {}, instrument_mca_chann_pattern: {}, instrument_mca_preset_t_pattern: {}, instrument_mca_elapsed_t_pattern: {}, instrument_mca_live_t_pattern: {}, measurement_group_pattern: {"NX_class": "NXcollection", }, measurement_data_pattern: {}, measurement_mca_group_pattern: {}, measurement_mca_data_pattern: {"interpretation": "spectrum", }, measurement_mca_info_pattern: {"NX_class": "NXdetector", }, measurement_mca_info_dataset_pattern: {}, measurement_mca_info_data_pattern: {"interpretation": "spectrum"}, } for pattern in pattern_attrs: if pattern.match(name): return pattern_attrs[pattern] logger1.warning("%s not a known pattern, assigning empty dict to attrs", name) return {} def _get_scan_key_in_name(item_name): """ :param item_name: Name of a group or dataset :return: Scan identification key (e.g. 
``"1.1"``) :rtype: str on None """ scan_match = re.match(r"/([0-9]+\.[0-9]+)", item_name) if not scan_match: return None return scan_match.group(1) def _get_mca_index_in_name(item_name): """ :param item_name: Name of a group or dataset :return: MCA analyser index, ``None`` if item name does not reference a mca dataset :rtype: int or None """ mca_match = re.match(r"/.*/mca_([0-9]+)[^0-9]*", item_name) if not mca_match: return None return int(mca_match.group(1)) def _get_motor_in_name(item_name): """ :param item_name: Name of a group or dataset :return: Motor name or ``None`` :rtype: str on None """ motor_match = positioners_data_pattern.match(item_name) if not motor_match: return None return motor_match.group(1) def _get_data_column_label_in_name(item_name): """ :param item_name: Name of a group or dataset :return: Data column label or ``None`` :rtype: str on None """ # /1.1/measurement/mca_0 should not be interpreted as the label of a # data column (let's hope no-one ever uses mca_0 as a label) if measurement_mca_group_pattern.match(item_name): return None data_column_match = measurement_data_pattern.match(item_name) if not data_column_match: return None return data_column_match.group(1) def _get_number_of_mca_analysers(scan): """ :param SpecFile sf: :class:`SpecFile` instance :param str scan_key: Scan identification key (e.g. ``1.1``) """ number_of_MCA_spectra = len(scan.mca) # Scan.data is transposed number_of_data_lines = scan.data.shape[1] if not number_of_data_lines == 0: # Number of MCA spectra must be a multiple of number of data lines assert number_of_MCA_spectra % number_of_data_lines == 0 return number_of_MCA_spectra // number_of_data_lines elif number_of_MCA_spectra: # Case of a scan without data lines, only MCA. # Our only option is to assume that the number of analysers # is the number of #@CHANN lines return len(scan.mca.channels) else: return 0 def _mca_analyser_in_scan(sf, scan_key, mca_analyser_index): """ :param sf: :class:`SpecFile` instance :param scan_key: Scan identification key (e.g. ``1.1``) :param mca_analyser_index: 0-based index of MCA analyser :return: ``True`` if MCA analyser exists in Scan, else ``False`` :raise: ``KeyError`` if scan_key not found in SpecFile :raise: ``AssertionError`` if number of MCA spectra is not a multiple of the number of data lines """ if scan_key not in sf: raise KeyError("Scan key %s " % scan_key + "does not exist in SpecFile %s" % sf.filename) number_of_analysers = _get_number_of_mca_analysers(sf[scan_key]) return 0 <= mca_analyser_index < number_of_analysers def _motor_in_scan(sf, scan_key, motor_name): """ :param sf: :class:`SpecFile` instance :param scan_key: Scan identification key (e.g. ``1.1``) :param motor_name: Name of motor as defined in file header lines :return: ``True`` if motor exists in scan, else ``False`` :raise: ``KeyError`` if scan_key not found in SpecFile """ if scan_key not in sf: raise KeyError("Scan key %s " % scan_key + "does not exist in SpecFile %s" % sf.filename) return motor_name in sf[scan_key].motor_names def _column_label_in_scan(sf, scan_key, column_label): """ :param sf: :class:`SpecFile` instance :param scan_key: Scan identification key (e.g. 

def _parse_ctime(ctime_lines, analyser_index=0):
    """
    :param ctime_lines: e.g. ``@CTIME %f %f %f``, first word ``@CTIME``
        optional. When multiple CTIME lines are present in a scan header,
        this argument is a concatenation of them separated by a ``\\n``
        character.
    :param analyser_index: MCA device/analyser index, when multiple devices
        are in a scan.
    :return: (preset_time, live_time, elapsed_time)
    """
    ctime_lines = ctime_lines.lstrip("@CTIME ")
    ctimes_lines_list = ctime_lines.split("\n")
    if len(ctimes_lines_list) == 1:
        # single @CTIME line for all devices
        ctime_line = ctimes_lines_list[0]
    else:
        ctime_line = ctimes_lines_list[analyser_index]
    if not len(ctime_line.split()) == 3:
        raise ValueError("Incorrect format for @CTIME header line " +
                         '(expected "@CTIME %f %f %f").')
    return list(map(float, ctime_line.split()))
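
# Illustrative behaviour of _parse_ctime (the header line is hypothetical):
# a single "@CTIME" line applies to every analyser, and the three floats are
# returned as [preset_time, live_time, elapsed_time]:
#
#     >>> _parse_ctime("@CTIME 1.0 0.8 1.2")
#     [1.0, 0.8, 1.2]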

def spec_date_to_iso8601(date, zone=None):
    """Convert SpecFile date to Iso8601.

    :param date: Date (see supported formats below)
    :type date: str
    :param zone: Time zone as it appears in a ISO8601 date

    Supported formats:

    * ``DDD MMM dd hh:mm:ss YYYY``
    * ``DDD YYYY/MM/dd hh:mm:ss``

    where `DDD` is the abbreviated weekday, `MMM` is the month abbreviated
    name, `MM` is the month number (zero padded), `dd` is the day of the
    month (zero padded), `YYYY` is the year, `hh` the hour (zero padded),
    `mm` the minute (zero padded) and `ss` the second (zero padded).
    All names are expected to be in english.

    Examples::

        >>> spec_date_to_iso8601("Thu Feb 11 09:54:35 2016")
        '2016-02-11T09:54:35'

        >>> spec_date_to_iso8601("Sat 2015/03/14 03:53:50")
        '2015-03-14T03:53:50'
    """
    months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
              'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']

    days_rx = '(?P<day>' + '|'.join(days) + ')'
    months_rx = '(?P<month>' + '|'.join(months) + ')'
    year_rx = r'(?P<year>\d{4})'
    day_nb_rx = r'(?P<day_nb>[0-3]\d)'
    month_nb_rx = r'(?P<month_nb>[0-1]\d)'
    hh_rx = r'(?P<hh>[0-2]\d)'
    mm_rx = r'(?P<mm>[0-5]\d)'
    ss_rx = r'(?P<ss>[0-5]\d)'
    tz_rx = r'(?P<tz>[+-]\d\d:\d\d){0,1}'

    # date formats must have either month_nb (1..12) or month (Jan, Feb, ...)
    re_tpls = ['{days} {months} {day_nb} {hh}:{mm}:{ss}{tz} {year}',
               '{days} {year}/{month_nb}/{day_nb} {hh}:{mm}:{ss}{tz}']

    grp_d = None
    for rx in re_tpls:
        full_rx = rx.format(days=days_rx,
                            months=months_rx,
                            year=year_rx,
                            day_nb=day_nb_rx,
                            month_nb=month_nb_rx,
                            hh=hh_rx,
                            mm=mm_rx,
                            ss=ss_rx,
                            tz=tz_rx)
        m = re.match(full_rx, date)
        if m:
            grp_d = m.groupdict()
            break

    if not grp_d:
        raise ValueError('Date format not recognized : {0}'.format(date))

    year = grp_d['year']
    month = grp_d.get('month_nb')
    if not month:
        month = '{0:02d}'.format(months.index(grp_d.get('month')) + 1)
    day = grp_d['day_nb']

    tz = grp_d['tz']
    if not tz:
        tz = zone

    time = '{0}:{1}:{2}'.format(grp_d['hh'], grp_d['mm'], grp_d['ss'])
    full_date = '{0}-{1}-{2}T{3}{4}'.format(year, month, day, time,
                                            tz if tz else '')
    return full_date


def _fixed_length_strings(strings, length=0):
    """Return list of fixed length strings, left-justified and right-padded
    with spaces.

    :param strings: List of variable length strings
    :param length: Length of strings in returned list, defaults to the
        maximum length in the original list if set to 0.
    :type length: int or None
    """
    if length == 0 and strings:
        length = max(len(s) for s in strings)
    return [s.ljust(length) for s in strings]


class SpecH5Dataset(object):
    """Emulate :class:`h5py.Dataset` for a SpecFile object.

    A :class:`SpecH5Dataset` instance is basically a proxy for the numpy
    array :attr:`value` attribute, with additional attributes for
    compatibility with *h5py* datasets.

    :param value: Actual dataset value
    :param name: Dataset full name (posix path format, starting with ``/``)
    :type name: str
    :param file_: Parent :class:`SpecH5`
    :param parent: Parent :class:`SpecH5Group` which contains this dataset
    """
    def __init__(self, value, name, file_, parent):
        object.__init__(self)

        self.value = None
        """Actual dataset, can be a *numpy array*, a *numpy.string_*,
        a *numpy.int_* or a *numpy.float32*

        All operations applied to an instance of the class use this."""

        # get proper value types, to inherit from numpy
        # attributes (dtype, shape, size)
        if isinstance(value, string_types):
            # use bytes for maximum compatibility
            # (see http://docs.h5py.org/en/latest/strings.html)
            self.value = numpy.string_(value)
        elif isinstance(value, float):
            # use 32 bits for float scalars
            self.value = numpy.float32(value)
        elif isinstance(value, int):
            self.value = numpy.int_(value)
        else:
            # Enforce numpy array
            array = numpy.array(value)
            data_kind = array.dtype.kind

            if data_kind in ["S", "U"]:
                self.value = numpy.asarray(array, dtype=numpy.string_)
            elif data_kind in ["f"]:
                self.value = numpy.asarray(array, dtype=numpy.float32)
            else:
                self.value = array

        # numpy array attributes (more attributes handled in __getattribute__)
        self.shape = self.value.shape
        """Dataset shape, as a tuple with the length of each dimension
        of the dataset."""

        self.dtype = self.value.dtype
        """Dataset dtype"""

        self.size = self.value.size
        """Dataset size (number of elements)"""

        # h5py dataset specific attributes
        self.name = name
        """Dataset name (posix path format, starting with ``/``)"""

        self.parent = parent
        """Parent :class:`SpecH5Group` object which contains this dataset"""

        self.file = file_
        """Parent :class:`SpecH5` object"""

        self.attrs = _get_attrs_dict(name)
        """Attributes dictionary"""

        self.compression = None
        """Compression attribute as provided by h5py.Dataset"""

        self.compression_opts = None
        """Compression options attribute as provided by h5py.Dataset"""

        self.chunks = None

    @property
    def h5py_class(self):
        """Return the h5py class which is mimicked by this class:
        :class:`h5py.Dataset`.
        Accessing this attribute when :mod:`h5py` is not installed raises
        an ``ImportError``.
        """
        if h5py is None:
            raise ImportError("Cannot return h5py.Dataset class, " +
                              "unable to import h5py module")
        return h5py.Dataset

    def __getattribute__(self, item):
        if item in ["value", "name", "parent", "file", "attrs",
                    "shape", "dtype", "size", "h5py_class",
                    "chunks", "compression", "compression_opts",
                    "target"]:
            return object.__getattribute__(self, item)

        if hasattr(self.value, item):
            return getattr(self.value, item)

        raise AttributeError("SpecH5Dataset has no attribute %s" % item)

    def __len__(self):
        return len(self.value)

    def __getitem__(self, item):
        if not isinstance(self.value, numpy.ndarray):
            if item == Ellipsis:
                return numpy.array(self.value)
            elif item == tuple():
                return self.value
            else:
                raise ValueError("Scalar can only be reached with an "
                                 "ellipsis or an empty tuple")
        return self.value.__getitem__(item)

    def __getslice__(self, i, j):
        # deprecated but still in use for python 2.7
        return self.__getitem__(slice(i, j, None))

    def __iter__(self):
        return self.value.__iter__()

    def __dir__(self):
        attrs = set(dir(self.value) +
                    ["value", "name", "parent", "file",
                     "attrs", "shape", "dtype", "size",
                     "h5py_class", "chunks", "compression",
                     "compression_opts"])
        return sorted(attrs)

    # casting
    def __repr__(self):
        return '<SpecH5Dataset "%s": shape %s, type "%s">' % \
               (self.name, self.shape, self.dtype.str)

    def __float__(self):
        return float(self.value)

    def __int__(self):
        return int(self.value)

    def __str__(self):
        basename = self.name.split("/")[-1]
        return '<SPEC dataset "%s": shape %s, type "%s">' % \
               (basename, self.shape, self.dtype.str)

    def __bool__(self):
        if self.value:
            return True
        return False

    def __nonzero__(self):  # python 2
        return self.__bool__()

    def __array__(self, dtype=None):
        if dtype is None:
            return numpy.array(self.value)
        else:
            return numpy.array(self.value, dtype=dtype)
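
# Minimal usage sketch: the proxy forwards numpy attributes, so a dataset
# mostly behaves like its underlying value. The file name "test.dat" and
# scan key "1.1" are hypothetical:
#
#     sfh5 = SpecH5("test.dat")
#     title = sfh5["/1.1/title"]    # SpecH5Dataset wrapping a numpy.string_
#     title.shape, title.dtype      # numpy attributes via __getattribute__
#     numpy.array(title)            # plain ndarray via __array__
#     title[()]                     # scalar value via __getitem__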