Dataset schema (per-example fields, with observed value ranges):

    field                       type            range
    -------------------------   -------------   -----------
    repository_name             stringlengths   5 - 67
    func_path_in_repository     stringlengths   4 - 234
    func_name                   stringlengths   0 - 314
    whole_func_string           stringlengths   52 - 3.87M
    language                    stringclasses   6 values
    func_code_string            stringlengths   39 - 1.84M
    func_code_tokens            listlengths     15 - 672k
    func_documentation_string   stringlengths   1 - 47.2k
    func_documentation_tokens   listlengths     1 - 3.92k
    split_name                  stringclasses   1 value
    func_code_url               stringlengths   85 - 339
SoftwareDefinedBuildings/XBOS
python/xbos/services/pundat.py
DataClient.tags
def tags(self, where, archiver="", timeout=DEFAULT_TIMEOUT):
    """
    Retrieves tags for all streams matching the given WHERE clause

    Arguments:
    [where]: the where clause (e.g. 'path like "keti"', 'SourceName = "TED Main"')
    [archiver]: if specified, this is the archiver to use. Else, it will run on
        the first archiver passed into the constructor for the client
    [timeout]: time in seconds to wait for a response from the archiver
    """
    return self.query("select * where {0}".format(where),
                      archiver, timeout).get('metadata', {})
python
train
https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/python/xbos/services/pundat.py#L129-L139
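A minimal usage sketch for this method; the DataClient constructor and its archiver argument are assumptions (the constructor is not shown in this row), and an archiver must be reachable for the call to return:

```python
from xbos.services.pundat import DataClient

# hypothetical constructor arguments: the real signature is not shown
# in this row, so treat the archiver list as an assumption
client = DataClient(archivers=["ucberkeley"])

# metadata for all streams whose path contains "keti", using the
# docstring's own example WHERE clause
metadata = client.tags('path like "keti"')
print(metadata)
```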
SoftwareDefinedBuildings/XBOS
python/xbos/services/pundat.py
DataClient.tags_uuids
def tags_uuids(self, uuids, archiver="", timeout=DEFAULT_TIMEOUT):
    """
    Retrieves tags for all streams with the provided UUIDs

    Arguments:
    [uuids]: list of UUIDs
    [archiver]: if specified, this is the archiver to use. Else, it will run on
        the first archiver passed into the constructor for the client
    [timeout]: time in seconds to wait for a response from the archiver
    """
    if not isinstance(uuids, list):
        uuids = [uuids]
    where = " or ".join(['uuid = "{0}"'.format(uuid) for uuid in uuids])
    return self.query("select * where {0}".format(where),
                      archiver, timeout).get('metadata', {})
python
train
https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/python/xbos/services/pundat.py#L141-L154
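tags_uuids() accepts either a single UUID string or a list, since it wraps a bare value in a list before building the WHERE clause; continuing the client sketch above with a made-up UUID:

```python
# the UUID below is illustrative only
metadata = client.tags_uuids("395005af-a42c-587f-9c46-860f3490d2f4")
```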
SoftwareDefinedBuildings/XBOS
python/xbos/services/pundat.py
DataClient.data
def data(self, where, start, end, archiver="", timeout=DEFAULT_TIMEOUT):
    """
    With the given WHERE clause, retrieves all RAW data between the 2 given
    timestamps

    Arguments:
    [where]: the where clause (e.g. 'path like "keti"', 'SourceName = "TED Main"')
    [start, end]: time references:
    [archiver]: if specified, this is the archiver to use. Else, it will run on
        the first archiver passed into the constructor for the client
    [timeout]: time in seconds to wait for a response from the archiver
    """
    return self.query(
        "select data in ({0}, {1}) where {2}".format(start, end, where),
        archiver, timeout).get('timeseries', {})
python
train
https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/python/xbos/services/pundat.py#L156-L167
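data() returns the 'timeseries' part of the response rather than 'metadata'; continuing the client sketch, and assuming the archiver accepts relative time references such as "now -1h" (the start/end format is not documented in this row):

```python
# raw points for the last hour; the "now -1h"/"now" syntax is an
# assumption about the archiver's time-reference grammar
timeseries = client.data('path like "keti"', "now -1h", "now")
```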
SoftwareDefinedBuildings/XBOS
python/xbos/services/pundat.py
DataClient.data_uuids
def data_uuids(self, uuids, start, end, archiver="", timeout=DEFAULT_TIMEOUT):
    """
    With the given list of UUIDs, retrieves all RAW data between the 2 given
    timestamps

    Arguments:
    [uuids]: list of UUIDs
    [start, end]: time references:
    [archiver]: if specified, this is the archiver to use. Else, it will run on
        the first archiver passed into the constructor for the client
    [timeout]: time in seconds to wait for a response from the archiver
    """
    if not isinstance(uuids, list):
        uuids = [uuids]
    where = " or ".join(['uuid = "{0}"'.format(uuid) for uuid in uuids])
    return self.query(
        "select data in ({0}, {1}) where {2}".format(start, end, where),
        archiver, timeout).get('timeseries', {})
python
train
https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/python/xbos/services/pundat.py#L169-L183
SoftwareDefinedBuildings/XBOS
python/xbos/services/pundat.py
DataClient.stats
def stats(self, where, start, end, pw, archiver="", timeout=DEFAULT_TIMEOUT):
    """
    With the given WHERE clause, retrieves all statistical data between the
    2 given timestamps, using the given pointwidth

    Arguments:
    [where]: the where clause (e.g. 'path like "keti"', 'SourceName = "TED Main"')
    [start, end]: time references:
    [pw]: pointwidth (window size of 2^pw nanoseconds)
    [archiver]: if specified, this is the archiver to use. Else, it will run on
        the first archiver passed into the constructor for the client
    [timeout]: time in seconds to wait for a response from the archiver
    """
    return self.query(
        "select statistical({3}) data in ({0}, {1}) where {2}".format(
            start, end, where, pw),
        archiver, timeout).get('timeseries', {})
python
train
https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/python/xbos/services/pundat.py#L185-L197
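The pw argument selects statistical windows 2**pw nanoseconds wide, so a target window in seconds maps to pw = round(log2(seconds * 1e9)); a small hypothetical helper (not part of the client) to illustrate:

```python
import math

def pointwidth_for(seconds):
    # statistical windows are 2**pw nanoseconds wide; pick the closest pw
    return round(math.log2(seconds * 1e9))

print(pointwidth_for(1))   # 30 -> 2**30 ns, about 1.07 s windows
print(pointwidth_for(60))  # 36 -> 2**36 ns, about 68.7 s windows
```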
SoftwareDefinedBuildings/XBOS
python/xbos/services/pundat.py
DataClient.window
def window(self, where, start, end, width, archiver="", timeout=DEFAULT_TIMEOUT):
    """
    With the given WHERE clause, retrieves all statistical data between the
    2 given timestamps, using the given window size

    Arguments:
    [where]: the where clause (e.g. 'path like "keti"', 'SourceName = "TED Main"')
    [start, end]: time references:
    [width]: a time expression for the window size, e.g. "5s", "365d"
    [archiver]: if specified, this is the archiver to use. Else, it will run on
        the first archiver passed into the constructor for the client
    [timeout]: time in seconds to wait for a response from the archiver
    """
    return self.query(
        "select window({3}) data in ({0}, {1}) where {2}".format(
            start, end, where, width),
        archiver, timeout).get('timeseries', {})
python
train
https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/python/xbos/services/pundat.py#L216-L228
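window() takes a duration string instead of a pointwidth, which is easier to reason about than powers of two; continuing the client sketch with the docstring's "5s" example:

```python
# 5-second aggregation windows over the last hour; time-reference
# syntax remains an assumption, as above
ts = client.window('path like "keti"', "now -1h", "now", "5s")
```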
Danielhiversen/flux_led
flux_led/__main__.py
WifiLedBulb.brightness
def brightness(self):
    """Return current brightness 0-255.

    For warm white return current led level. For RGB
    calculate the HSV and return the 'value'.
    """
    if self.mode == "ww":
        return int(self.raw_state[9])
    else:
        _, _, v = colorsys.rgb_to_hsv(*self.getRgb())
        return v
python
train
https://github.com/Danielhiversen/flux_led/blob/13e87e06ff7589356c83e084a6be768ad1290557/flux_led/__main__.py#L544-L554
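colorsys.rgb_to_hsv scales its value channel to the inputs, so with 0-255 RGB components v already comes back on the 0-255 scale, which is why brightness() can return it directly:

```python
import colorsys

# h and s are returned on 0-1, but v equals max(r, g, b), so 0-255
# inputs yield a 0-255 value
_, _, v = colorsys.rgb_to_hsv(255, 128, 0)
print(v)  # 255
```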
kyrus/python-junit-xml
junit_xml/__init__.py
decode
def decode(var, encoding):
    """
    If not already unicode, decode it.
    """
    if PY2:
        if isinstance(var, unicode):
            ret = var
        elif isinstance(var, str):
            if encoding:
                ret = var.decode(encoding)
            else:
                ret = unicode(var)
        else:
            ret = unicode(var)
    else:
        ret = str(var)
    return ret
python
train
https://github.com/kyrus/python-junit-xml/blob/9bb2675bf0058742da04285dcdcf8781eee03db0/junit_xml/__init__.py#L57-L73
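A quick check of the Python 3 path of decode() above (PY2 is stubbed here; the module normally derives it from the running interpreter):

```python
PY2 = False  # stub for illustration

print(decode(3.14, None))   # '3.14'
print(decode(b"ok", None))  # "b'ok'": on the Python 3 path bytes are not
                            # decoded, just passed through str()
```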
esheldon/fitsio
fitsio/util.py
cfitsio_version
def cfitsio_version(asfloat=False):
    """
    Return the cfitsio version as a string.
    """
    # use string version to avoid roundoffs
    ver = '%0.3f' % _fitsio_wrap.cfitsio_version()
    if asfloat:
        return float(ver)
    else:
        return ver
python
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/util.py#L19-L28
esheldon/fitsio
fitsio/util.py
is_little_endian
def is_little_endian(array):
    """
    Return True if array is little endian, False otherwise.

    Parameters
    ----------
    array: numpy array
        A numerical python array.

    Returns
    -------
    Truth value:
        True for little-endian

    Notes
    -----
    Strings are neither big nor little endian.  The input must be a simple
    numpy array, not an array with fields.
    """
    if numpy.little_endian:
        machine_little = True
    else:
        machine_little = False

    byteorder = array.dtype.base.byteorder
    return (byteorder == '<') or (machine_little and byteorder == '=')
python
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/util.py#L73-L98
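numpy reports a dtype's byte order as '<', '>', '=' (native) or '|' (not applicable), and it normalizes a native-order spec to '='; a quick check against is_little_endian() above:

```python
import numpy

a_native = numpy.zeros(3, dtype='i4')
a_big = numpy.zeros(3, dtype='>i4')

print(a_native.dtype.byteorder)    # '='
print(a_big.dtype.byteorder)       # '>'
print(is_little_endian(a_native))  # True on little-endian hardware
print(is_little_endian(a_big))     # False
```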
esheldon/fitsio
fitsio/util.py
array_to_native
def array_to_native(array, inplace=False):
    """
    Convert an array to the native byte order.

    NOTE: the inplace keyword argument is not currently used.
    """
    if numpy.little_endian:
        machine_little = True
    else:
        machine_little = False

    data_little = False
    if array.dtype.names is None:
        if array.dtype.base.byteorder == '|':
            # strings and 1 byte integers
            return array

        data_little = is_little_endian(array)
    else:
        # assume all are same byte order: we only need to find one with
        # little endian
        for fname in array.dtype.names:
            if is_little_endian(array[fname]):
                data_little = True
                break

    if ((machine_little and not data_little)
            or (not machine_little and data_little)):
        output = array.byteswap(inplace)
    else:
        output = array

    return output
python
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/util.py#L101-L134
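Note that byteswap() reorders the bytes without relabeling the dtype, so the returned array prints differently from the input; a small demonstration with the functions above in scope:

```python
import numpy

big = numpy.arange(3, dtype='>i4')
native = array_to_native(big)

print(big)     # [0 1 2]
print(native)  # [0 16777216 33554432] on little-endian hardware: the
               # bytes are in machine order but the dtype still says '>i4'
```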
esheldon/fitsio
fitsio/util.py
mks
def mks(val):
    """
    make sure the value is a string, paying mind to python3 vs 2
    """
    if sys.version_info > (3, 0, 0):
        if isinstance(val, bytes):
            sval = str(val, 'utf-8')
        else:
            sval = str(val)
    else:
        sval = str(val)

    return sval
python
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/util.py#L143-L155
esheldon/fitsio
fitsio/hdu/table.py
_extract_vararray_max
def _extract_vararray_max(tform):
    """
    Extract number from PX(number)
    """
    first = tform.find('(')
    last = tform.rfind(')')

    if first == -1 or last == -1:
        # no max length specified
        return -1

    maxnum = int(tform[first+1:last])
    return maxnum
python
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L2004-L2016
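TFORM values for variable-length columns look like '1PD(100)', where the parenthesized number is the maximum element count; without parentheses the function reports no maximum:

```python
print(_extract_vararray_max('1PD(100)'))  # 100
print(_extract_vararray_max('1PD'))       # -1: no max length specified
```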
esheldon/fitsio
fitsio/hdu/table.py
_get_col_dimstr
def _get_col_dimstr(tdim, is_string=False):
    """
    not for variable length
    """
    dimstr = ''
    if tdim is None:
        dimstr = 'array[bad TDIM]'
    else:
        if is_string:
            if len(tdim) > 1:
                dimstr = [str(d) for d in tdim[1:]]
        else:
            if len(tdim) > 1 or tdim[0] > 1:
                dimstr = [str(d) for d in tdim]

        if dimstr != '':
            dimstr = ','.join(dimstr)
            dimstr = 'array[%s]' % dimstr

    return dimstr
python
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L2019-L2037
esheldon/fitsio
fitsio/hdu/table.py
_npy2fits
def _npy2fits(d, table_type='binary', write_bitcols=False):
    """
    d is the full element from the descr
    """
    npy_dtype = d[1][1:]
    if npy_dtype[0] == 'S' or npy_dtype[0] == 'U':
        name, form, dim = _npy_string2fits(d, table_type=table_type)
    else:
        name, form, dim = _npy_num2fits(
            d, table_type=table_type, write_bitcols=write_bitcols)

    return name, form, dim
python
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L2106-L2117
esheldon/fitsio
fitsio/hdu/table.py
_npy_num2fits
def _npy_num2fits(d, table_type='binary', write_bitcols=False):
    """
    d is the full element from the descr

    For vector,array columns the form is the total counts
    followed by the code.

    For array columns with dimension greater than 1, the dim is set to
        (dim1, dim2, ...)
    So it is treated like an extra dimension
    """
    dim = None

    name = d[0]

    npy_dtype = d[1][1:]
    if npy_dtype[0] == 'S' or npy_dtype[0] == 'U':
        raise ValueError("got S or U type: use _npy_string2fits")

    if npy_dtype not in _table_npy2fits_form:
        raise ValueError("unsupported type '%s'" % npy_dtype)

    if table_type == 'binary':
        form = _table_npy2fits_form[npy_dtype]
    else:
        form = _table_npy2fits_form_ascii[npy_dtype]

    # now the dimensions
    if len(d) > 2:
        if table_type == 'ascii':
            raise ValueError(
                "Ascii table columns must be scalar, got %s" % str(d))

        if write_bitcols and npy_dtype == 'b1':
            # multi-dimensional boolean
            form = 'X'

        # Note, depending on numpy version, even 1-d can be a tuple
        if isinstance(d[2], tuple):
            count = reduce(lambda x, y: x*y, d[2])
            form = '%d%s' % (count, form)

            if len(d[2]) > 1:
                # this is multi-dimensional array column.  the form
                # should be total elements followed by A
                dim = list(reversed(d[2]))
                dim = [str(e) for e in dim]
                dim = '(' + ','.join(dim)+')'
        else:
            # this is a vector (1d array) column
            count = d[2]
            form = '%d%s' % (count, form)

    return name, form, dim
python
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L2120-L2175
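A few illustrative calls, assuming the module-level _table_npy2fits_form mapping (not shown in this row) follows the standard FITS convention where 4-byte floats map to 'E':

```python
# expected results under that assumption
print(_npy_num2fits(('x', '<f4')))          # ('x', 'E', None)
print(_npy_num2fits(('v', '<f4', 3)))       # ('v', '3E', None)
print(_npy_num2fits(('m', '<f4', (2, 3))))  # ('m', '6E', '(3,2)')
```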
esheldon/fitsio
fitsio/hdu/table.py
_npy_string2fits
def _npy_string2fits(d, table_type='binary'):
    """
    d is the full element from the descr

    form for strings is the total number of bytes followed by A.  Thus
    for vector or array columns it is the size of the string times the
    total number of elements in the array.

    Then the dim is set to
        (sizeofeachstring, dim1, dim2, ...)
    So it is treated like an extra dimension
    """
    dim = None

    name = d[0]

    npy_dtype = d[1][1:]
    if npy_dtype[0] != 'S' and npy_dtype[0] != 'U':
        raise ValueError("expected S or U type, got %s" % npy_dtype[0])

    # get the size of each string
    string_size_str = npy_dtype[1:]
    string_size = int(string_size_str)

    if string_size <= 0:
        raise ValueError('string sizes must be > 0, '
                         'got %s for field %s' % (npy_dtype, name))

    # now the dimensions
    if len(d) == 2:
        if table_type == 'ascii':
            form = 'A'+string_size_str
        else:
            form = string_size_str+'A'
    else:
        if table_type == 'ascii':
            raise ValueError(
                "Ascii table columns must be scalar, got %s" % str(d))
        if isinstance(d[2], tuple):
            # this is an array column.  the form
            # should be total elements followed by A
            # count = 1
            # count = [count*el for el in d[2]]
            count = reduce(lambda x, y: x*y, d[2])
            count = string_size*count
            form = '%dA' % count

            # will have to do tests to see if this is the right order
            dim = list(reversed(d[2]))
            # dim = d[2]
            dim = [string_size_str] + [str(e) for e in dim]
            dim = '(' + ','.join(dim)+')'
        else:
            # this is a vector (1d array) column
            count = string_size*d[2]
            form = '%dA' % count

            # will have to do tests to see if this is the right order
            dim = [string_size_str, str(d[2])]
            dim = '(' + ','.join(dim)+')'

    return name, form, dim
python
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L2178-L2241
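String columns fold the per-string byte size into both the repeat count and the TDIM, so a 3-vector of length-5 strings becomes 15 bytes with TDIM '(5,3)':

```python
print(_npy_string2fits(('s', '<U5')))     # ('s', '5A', None)
print(_npy_string2fits(('s', '<U5', 3)))  # ('s', '15A', '(5,3)')
```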
esheldon/fitsio
fitsio/hdu/table.py
TableHDU.get_colname
def get_colname(self, colnum):
    """
    Get the name associated with the given column number

    parameters
    ----------
    colnum: integer
        The number for the column, zero offset
    """
    if colnum < 0 or colnum > (len(self._colnames)-1):
        raise ValueError(
            "colnum out of range [0,%s-1]" % len(self._colnames))
    return self._colnames[colnum]
python
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L84-L96
esheldon/fitsio
fitsio/hdu/table.py
TableHDU.write
def write(self, data, **keys):
    """
    Write data into this HDU

    parameters
    ----------
    data: ndarray or list of ndarray
        A numerical python array.  Should be an ordinary array for image
        HDUs, should have fields for tables.  To write an ordinary array
        to a column in a table HDU, use write_column.  If data already
        exists in this HDU, it will be overwritten.  See the append()
        method to append new rows to a table HDU.
    firstrow: integer, optional
        At which row you should begin writing to tables.  Be sure you
        know what you are doing!  For appending see the append() method.
        Default 0.
    columns: list, optional
        If data is a list of arrays, you must send columns as a list
        of names or column numbers.  You can also send names=
    names: list, optional
        same as columns=
    """
    slow = keys.get('slow', False)

    isrec = False
    if isinstance(data, (list, dict)):
        if isinstance(data, list):
            data_list = data
            columns_all = keys.get('columns', None)
            if columns_all is None:
                columns_all = keys.get('names', None)
                if columns_all is None:
                    raise ValueError(
                        "you must send columns with a list of arrays")
        else:
            columns_all = list(data.keys())
            data_list = [data[n] for n in columns_all]

        colnums_all = [self._extract_colnum(c) for c in columns_all]
        names = [self.get_colname(c) for c in colnums_all]

        isobj = numpy.zeros(len(data_list), dtype=numpy.bool)
        for i in xrange(len(data_list)):
            isobj[i] = is_object(data_list[i])
    else:
        if data.dtype.fields is None:
            raise ValueError("You are writing to a table, so I expected "
                             "an array with fields as input. If you want "
                             "to write a simple array, you should use "
                             "write_column to write to a single column, "
                             "or instead write to an image hdu")

        if data.shape == ():
            raise ValueError("cannot write data with shape ()")

        isrec = True
        names = data.dtype.names
        # only write object types (variable-length columns) after
        # writing the main table
        isobj = fields_are_object(data)

        data_list = []
        colnums_all = []
        for i, name in enumerate(names):
            colnum = self._extract_colnum(name)
            data_list.append(data[name])
            colnums_all.append(colnum)

    if slow:
        for i, name in enumerate(names):
            if not isobj[i]:
                self.write_column(name, data_list[i], **keys)
    else:
        nonobj_colnums = []
        nonobj_arrays = []
        for i in xrange(len(data_list)):
            if not isobj[i]:
                nonobj_colnums.append(colnums_all[i])
                if isrec:
                    # this still leaves possibility of f-order sub-arrays..
                    colref = array_to_native(data_list[i], inplace=False)
                else:
                    colref = array_to_native_c(data_list[i], inplace=False)

                if IS_PY3 and colref.dtype.char == 'U':
                    # for python3, we convert unicode to ascii
                    # this will error if the character is not in ascii
                    colref = colref.astype('S', copy=False)

                nonobj_arrays.append(colref)

        for tcolnum, tdata in zip(nonobj_colnums, nonobj_arrays):
            self._verify_column_data(tcolnum, tdata)

        if len(nonobj_arrays) > 0:
            firstrow = keys.get('firstrow', 0)
            self._FITS.write_columns(
                self._ext+1, nonobj_colnums, nonobj_arrays,
                firstrow=firstrow+1, write_bitcols=self.write_bitcols)

    # writing the object arrays always occurs the same way
    # need to make sure this works for array fields
    for i, name in enumerate(names):
        if isobj[i]:
            self.write_var_column(name, data_list[i], **keys)

    self._update_info()
python
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L128-L240
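In normal use this method is reached through fitsio's convenience API rather than called directly; a minimal sketch that writes a small table (file name and fields are illustrative):

```python
import numpy
import fitsio

data = numpy.zeros(4, dtype=[('id', 'i8'), ('flux', 'f8'), ('name', 'S10')])
data['id'] = numpy.arange(4)
data['flux'] = 0.5
data['name'] = 'obj'

# creates the table HDU and routes through TableHDU.write
fitsio.write('example.fits', data, clobber=True)
```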
esheldon/fitsio
fitsio/hdu/table.py
TableHDU.write_column
def write_column(self, column, data, **keys):
    """
    Write data to a column in this HDU

    This HDU must be a table HDU.

    parameters
    ----------
    column: scalar string/integer
        The column in which to write.  Can be the name or number (0 offset)
    data: ndarray
        Numerical python array to write.  This should match the shape of
        the column.  You are probably better using fits.write_table() to
        be sure.
    firstrow: integer, optional
        At which row you should begin writing.  Be sure you know what you
        are doing!  For appending see the append() method.  Default 0.
    """
    firstrow = keys.get('firstrow', 0)

    colnum = self._extract_colnum(column)

    # need it to be contiguous and native byte order.  For now, make a
    # copy.  but we may be able to avoid this with some care.
    if not data.flags['C_CONTIGUOUS']:
        # this always makes a copy
        data_send = numpy.ascontiguousarray(data)
        # this is a copy, we can make sure it is native
        # and modify in place if needed
        array_to_native(data_send, inplace=True)
    else:
        # we can avoid the copy with a try-finally block and
        # some logic
        data_send = array_to_native(data, inplace=False)

    if IS_PY3 and data_send.dtype.char == 'U':
        # for python3, we convert unicode to ascii
        # this will error if the character is not in ascii
        data_send = data_send.astype('S', copy=False)

    self._verify_column_data(colnum, data_send)

    self._FITS.write_column(
        self._ext+1, colnum+1, data_send,
        firstrow=firstrow+1, write_bitcols=self.write_bitcols)
    del data_send
    self._update_info()
python
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L242-L290
esheldon/fitsio
fitsio/hdu/table.py
TableHDU._verify_column_data
def _verify_column_data(self, colnum, data):
    """
    verify the input data is of the correct type and shape
    """
    this_dt = data.dtype.descr[0]

    if len(data.shape) > 2:
        this_shape = data.shape[1:]
    elif len(data.shape) == 2 and data.shape[1] > 1:
        this_shape = data.shape[1:]
    else:
        this_shape = ()

    this_npy_type = this_dt[1][1:]

    npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
    info = self._info['colinfo'][colnum]

    if npy_type[0] in ['>', '<', '|']:
        npy_type = npy_type[1:]

    col_name = info['name']
    col_tdim = info['tdim']

    col_shape = _tdim2shape(
        col_tdim, col_name, is_string=(npy_type[0] == 'S'))

    if col_shape is None:
        if this_shape == ():
            this_shape = None

    if col_shape is not None and not isinstance(col_shape, tuple):
        col_shape = (col_shape,)

    """
    print('column name:', col_name)
    print(data.shape)
    print('col tdim', info['tdim'])
    print('column dtype:', npy_type)
    print('input dtype:', this_npy_type)
    print('column shape:', col_shape)
    print('input shape:', this_shape)
    print()
    """

    # this mismatch is OK
    if npy_type == 'i1' and this_npy_type == 'b1':
        this_npy_type = 'i1'

    if isinstance(self, AsciiTableHDU):
        # we don't enforce types exact for ascii
        if npy_type == 'i8' and this_npy_type in ['i2', 'i4']:
            this_npy_type = 'i8'
        elif npy_type == 'f8' and this_npy_type == 'f4':
            this_npy_type = 'f8'

    if this_npy_type != npy_type:
        raise ValueError(
            "bad input data for column '%s': "
            "expected '%s', got '%s'" % (
                col_name, npy_type, this_npy_type))

    if this_shape != col_shape:
        raise ValueError(
            "bad input shape for column '%s': "
            "expected '%s', got '%s'" % (col_name, col_shape, this_shape))
python
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L292-L356
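The check above is easiest to see from the public write path. A minimal sketch, assuming a file 'data.fits' whose first extension holds a scalar float64 column named 'x' (all names illustrative), and assuming write_column routes through this verification:

import numpy
import fitsio

with fitsio.FITS('data.fits', 'rw') as fits:
    bad = numpy.zeros(10, dtype='i4')   # wrong dtype for an 'f8' column
    try:
        fits[1].write_column('x', bad)
    except ValueError as err:
        # e.g. "bad input data for column 'x': expected 'f8', got 'i4'"
        print(err)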
esheldon/fitsio
fitsio/hdu/table.py
TableHDU.write_var_column
def write_var_column(self, column, data, firstrow=0, **keys):
    """
    Write data to a variable-length column in this HDU

    This HDU must be a table HDU.

    parameters
    ----------
    column: scalar string/integer
        The column in which to write.  Can be the name or number
        (0 offset)
    data: ndarray
        Numerical python array to write.  This must be an object
        array.
    firstrow: integer, optional
        At which row you should begin writing.  Be sure you know what
        you are doing!  For appending see the append() method.
        Default 0.
    """
    if not is_object(data):
        raise ValueError("Only object fields can be written to "
                         "variable-length arrays")

    colnum = self._extract_colnum(column)

    self._FITS.write_var_column(self._ext+1, colnum+1, data,
                                firstrow=firstrow+1)
    self._update_info()
python
def write_var_column(self, column, data, firstrow=0, **keys):
    if not is_object(data):
        raise ValueError("Only object fields can be written to "
                         "variable-length arrays")

    colnum = self._extract_colnum(column)

    self._FITS.write_var_column(self._ext+1, colnum+1, data,
                                firstrow=firstrow+1)
    self._update_info()
[ "def", "write_var_column", "(", "self", ",", "column", ",", "data", ",", "firstrow", "=", "0", ",", "*", "*", "keys", ")", ":", "if", "not", "is_object", "(", "data", ")", ":", "raise", "ValueError", "(", "\"Only object fields can be written to \"", "\"variable-length arrays\"", ")", "colnum", "=", "self", ".", "_extract_colnum", "(", "column", ")", "self", ".", "_FITS", ".", "write_var_column", "(", "self", ".", "_ext", "+", "1", ",", "colnum", "+", "1", ",", "data", ",", "firstrow", "=", "firstrow", "+", "1", ")", "self", ".", "_update_info", "(", ")" ]
Write data to a variable-length column in this HDU

This HDU must be a table HDU.

parameters
----------
column: scalar string/integer
    The column in which to write.  Can be the name or number (0 offset)
data: ndarray
    Numerical python array to write.  This must be an object array.
firstrow: integer, optional
    At which row you should begin writing.  Be sure you know what you
    are doing!  For appending see the append() method.  Default 0.
[ "Write", "data", "to", "a", "variable", "-", "length", "column", "in", "this", "HDU" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L358-L382
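A short usage sketch for write_var_column. The file and column names are illustrative, and it assumes (as fitsio supports) that writing an object array creates a variable-length column that can later be rewritten:

import numpy
import fitsio

rows = numpy.empty(3, dtype=object)
rows[0] = numpy.arange(2, dtype='i4')
rows[1] = numpy.arange(5, dtype='i4')
rows[2] = numpy.arange(1, dtype='i4')

data = numpy.zeros(3, dtype=[('arr', 'O')])
data['arr'] = rows

with fitsio.FITS('vltest.fits', 'rw', clobber=True) as fits:
    fits.write(data)                        # creates var-length column 'arr'
    fits[-1].write_var_column('arr', rows)  # rewrite starting at row 0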
esheldon/fitsio
fitsio/hdu/table.py
TableHDU.insert_column
def insert_column(self, name, data, colnum=None):
    """
    Insert a new column.

    parameters
    ----------
    name: string
        The column name
    data:
        The data to write into the new column.
    colnum: int, optional
        The column number for the new column, zero-offset.  Default
        is to add the new column after the existing ones.

    Notes
    -----
    This method is used un-modified by ascii tables as well.
    """
    if name in self._colnames:
        raise ValueError("column '%s' already exists" % name)

    if IS_PY3 and data.dtype.char == 'U':
        # fast dtype conversion using an empty array
        # we could hack at the actual text description, but using
        # the numpy API is probably safer
        # this also avoids doing a dtype conversion on every array
        # element, which could be expensive
        descr = numpy.empty(1).astype(data.dtype).astype('S').dtype.descr
    else:
        descr = data.dtype.descr

    if len(descr) > 1:
        raise ValueError("you can only insert a single column, "
                         "requested: %s" % descr)

    this_descr = descr[0]
    this_descr = [name, this_descr[1]]
    if len(data.shape) > 1:
        this_descr += [data.shape[1:]]
    this_descr = tuple(this_descr)

    name, fmt, dims = _npy2fits(
        this_descr, table_type=self._table_type_str)
    if dims is not None:
        dims = [dims]

    if colnum is None:
        new_colnum = len(self._info['colinfo']) + 1
    else:
        new_colnum = colnum+1

    self._FITS.insert_col(self._ext+1, new_colnum, name, fmt, tdim=dims)
    self._update_info()

    self.write_column(name, data)
python
def insert_column(self, name, data, colnum=None):
    if name in self._colnames:
        raise ValueError("column '%s' already exists" % name)

    if IS_PY3 and data.dtype.char == 'U':
        descr = numpy.empty(1).astype(data.dtype).astype('S').dtype.descr
    else:
        descr = data.dtype.descr

    if len(descr) > 1:
        raise ValueError("you can only insert a single column, "
                         "requested: %s" % descr)

    this_descr = descr[0]
    this_descr = [name, this_descr[1]]
    if len(data.shape) > 1:
        this_descr += [data.shape[1:]]
    this_descr = tuple(this_descr)

    name, fmt, dims = _npy2fits(
        this_descr, table_type=self._table_type_str)
    if dims is not None:
        dims = [dims]

    if colnum is None:
        new_colnum = len(self._info['colinfo']) + 1
    else:
        new_colnum = colnum+1

    self._FITS.insert_col(self._ext+1, new_colnum, name, fmt, tdim=dims)
    self._update_info()

    self.write_column(name, data)
[ "def", "insert_column", "(", "self", ",", "name", ",", "data", ",", "colnum", "=", "None", ")", ":", "if", "name", "in", "self", ".", "_colnames", ":", "raise", "ValueError", "(", "\"column '%s' already exists\"", "%", "name", ")", "if", "IS_PY3", "and", "data", ".", "dtype", ".", "char", "==", "'U'", ":", "# fast dtype conversion using an empty array", "# we could hack at the actual text description, but using", "# the numpy API is probably safer", "# this also avoids doing a dtype conversion on every array", "# element which could b expensive", "descr", "=", "numpy", ".", "empty", "(", "1", ")", ".", "astype", "(", "data", ".", "dtype", ")", ".", "astype", "(", "'S'", ")", ".", "dtype", ".", "descr", "else", ":", "descr", "=", "data", ".", "dtype", ".", "descr", "if", "len", "(", "descr", ")", ">", "1", ":", "raise", "ValueError", "(", "\"you can only insert a single column, \"", "\"requested: %s\"", "%", "descr", ")", "this_descr", "=", "descr", "[", "0", "]", "this_descr", "=", "[", "name", ",", "this_descr", "[", "1", "]", "]", "if", "len", "(", "data", ".", "shape", ")", ">", "1", ":", "this_descr", "+=", "[", "data", ".", "shape", "[", "1", ":", "]", "]", "this_descr", "=", "tuple", "(", "this_descr", ")", "name", ",", "fmt", ",", "dims", "=", "_npy2fits", "(", "this_descr", ",", "table_type", "=", "self", ".", "_table_type_str", ")", "if", "dims", "is", "not", "None", ":", "dims", "=", "[", "dims", "]", "if", "colnum", "is", "None", ":", "new_colnum", "=", "len", "(", "self", ".", "_info", "[", "'colinfo'", "]", ")", "+", "1", "else", ":", "new_colnum", "=", "colnum", "+", "1", "self", ".", "_FITS", ".", "insert_col", "(", "self", ".", "_ext", "+", "1", ",", "new_colnum", ",", "name", ",", "fmt", ",", "tdim", "=", "dims", ")", "self", ".", "_update_info", "(", ")", "self", ".", "write_column", "(", "name", ",", "data", ")" ]
Insert a new column.

parameters
----------
name: string
    The column name
data:
    The data to write into the new column.
colnum: int, optional
    The column number for the new column, zero-offset.  Default is to
    add the new column after the existing ones.

Notes
-----
This method is used un-modified by ascii tables as well.
[ "Insert", "a", "new", "column", "." ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L384-L439
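A minimal sketch of insert_column with illustrative names: build a small table, then add a new 'weight' column after the existing ones:

import numpy
import fitsio

data = numpy.zeros(5, dtype=[('x', 'f8')])
with fitsio.FITS('cols.fits', 'rw', clobber=True) as fits:
    fits.write(data)
    nrows = fits[-1].get_nrows()
    fits[-1].insert_column('weight', numpy.ones(nrows, dtype='f8'))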
esheldon/fitsio
fitsio/hdu/table.py
TableHDU.append
def append(self, data, **keys):
    """
    Append new rows to a table HDU

    parameters
    ----------
    data: ndarray or list of arrays
        A numerical python array with fields (recarray) or a list of
        arrays.  Should have the same fields as the existing table.
        If only a subset of the table columns are present, the other
        columns are filled with zeros.
    columns: list, optional
        if a list of arrays is sent, also send the column names or
        column numbers
    """
    firstrow = self._info['nrows']
    keys['firstrow'] = firstrow
    self.write(data, **keys)
python
def append(self, data, **keys):
    firstrow = self._info['nrows']
    keys['firstrow'] = firstrow
    self.write(data, **keys)
[ "def", "append", "(", "self", ",", "data", ",", "*", "*", "keys", ")", ":", "firstrow", "=", "self", ".", "_info", "[", "'nrows'", "]", "keys", "[", "'firstrow'", "]", "=", "firstrow", "self", ".", "write", "(", "data", ",", "*", "*", "keys", ")" ]
Append new rows to a table HDU

parameters
----------
data: ndarray or list of arrays
    A numerical python array with fields (recarray) or a list of
    arrays.  Should have the same fields as the existing table.  If
    only a subset of the table columns are present, the other columns
    are filled with zeros.
columns: list, optional
    if a list of arrays is sent, also send the column names or column
    numbers
[ "Append", "new", "rows", "to", "a", "table", "HDU" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L441-L462
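A minimal append sketch (file name illustrative): rows with the same fields are added at the end of the table:

import numpy
import fitsio

data = numpy.zeros(4, dtype=[('x', 'f8'), ('y', 'i4')])
with fitsio.FITS('appended.fits', 'rw', clobber=True) as fits:
    fits.write(data)
    fits[-1].append(data)   # table now holds 8 rows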
esheldon/fitsio
fitsio/hdu/table.py
TableHDU.delete_rows
def delete_rows(self, rows):
    """
    Delete rows from the table

    parameters
    ----------
    rows: sequence or slice
        The exact rows to delete as a sequence, or a slice.

    examples
    --------
        # delete a range of rows
        with fitsio.FITS(fname,'rw') as fits:
            fits['mytable'].delete_rows(slice(3,20))

        # delete specific rows
        with fitsio.FITS(fname,'rw') as fits:
            rows2delete = [3,88,76]
            fits['mytable'].delete_rows(rows2delete)
    """
    if rows is None:
        return

    # extract and convert to 1-offset for C routine
    if isinstance(rows, slice):
        rows = self._process_slice(rows)
        if rows.step is not None and rows.step != 1:
            rows = numpy.arange(
                rows.start+1,
                rows.stop+1,
                rows.step,
            )
        else:
            # rows must be 1-offset
            rows = slice(rows.start+1, rows.stop+1)
    else:
        rows = self._extract_rows(rows)
        # rows must be 1-offset
        rows += 1

    if isinstance(rows, slice):
        self._FITS.delete_row_range(self._ext+1, rows.start, rows.stop)
    else:
        if rows.size == 0:
            return

        self._FITS.delete_rows(self._ext+1, rows)

    self._update_info()
python
def delete_rows(self, rows):
    if rows is None:
        return

    if isinstance(rows, slice):
        rows = self._process_slice(rows)
        if rows.step is not None and rows.step != 1:
            rows = numpy.arange(
                rows.start+1,
                rows.stop+1,
                rows.step,
            )
        else:
            rows = slice(rows.start+1, rows.stop+1)
    else:
        rows = self._extract_rows(rows)
        rows += 1

    if isinstance(rows, slice):
        self._FITS.delete_row_range(self._ext+1, rows.start, rows.stop)
    else:
        if rows.size == 0:
            return

        self._FITS.delete_rows(self._ext+1, rows)

    self._update_info()
[ "def", "delete_rows", "(", "self", ",", "rows", ")", ":", "if", "rows", "is", "None", ":", "return", "# extract and convert to 1-offset for C routine", "if", "isinstance", "(", "rows", ",", "slice", ")", ":", "rows", "=", "self", ".", "_process_slice", "(", "rows", ")", "if", "rows", ".", "step", "is", "not", "None", "and", "rows", ".", "step", "!=", "1", ":", "rows", "=", "numpy", ".", "arange", "(", "rows", ".", "start", "+", "1", ",", "rows", ".", "stop", "+", "1", ",", "rows", ".", "step", ",", ")", "else", ":", "# rows must be 1-offset", "rows", "=", "slice", "(", "rows", ".", "start", "+", "1", ",", "rows", ".", "stop", "+", "1", ")", "else", ":", "rows", "=", "self", ".", "_extract_rows", "(", "rows", ")", "# rows must be 1-offset", "rows", "+=", "1", "if", "isinstance", "(", "rows", ",", "slice", ")", ":", "self", ".", "_FITS", ".", "delete_row_range", "(", "self", ".", "_ext", "+", "1", ",", "rows", ".", "start", ",", "rows", ".", "stop", ")", "else", ":", "if", "rows", ".", "size", "==", "0", ":", "return", "self", ".", "_FITS", ".", "delete_rows", "(", "self", ".", "_ext", "+", "1", ",", "rows", ")", "self", ".", "_update_info", "(", ")" ]
Delete rows from the table

parameters
----------
rows: sequence or slice
    The exact rows to delete as a sequence, or a slice.

examples
--------
    # delete a range of rows
    with fitsio.FITS(fname,'rw') as fits:
        fits['mytable'].delete_rows(slice(3,20))

    # delete specific rows
    with fitsio.FITS(fname,'rw') as fits:
        rows2delete = [3,88,76]
        fits['mytable'].delete_rows(rows2delete)
[ "Delete", "rows", "from", "the", "table" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L464-L513
esheldon/fitsio
fitsio/hdu/table.py
TableHDU.resize
def resize(self, nrows, front=False):
    """
    Resize the table to the given size, removing or adding rows as
    necessary.  Note if expanding the table at the end, it is more
    efficient to use the append function than resizing and then
    writing.

    New added rows are zeroed, except for 'i1', 'u2' and 'u4' data
    types which get -128, 32768, 2147483648 respectively

    parameters
    ----------
    nrows: int
        new size of table
    front: bool, optional
        If True, add or remove rows from the front.  Default
        is False
    """
    nrows_current = self.get_nrows()
    if nrows == nrows_current:
        return

    if nrows < nrows_current:
        rowdiff = nrows_current - nrows
        if front:
            # delete from the front
            start = 0
            stop = rowdiff
        else:
            # delete from the back
            start = nrows
            stop = nrows_current

        self.delete_rows(slice(start, stop))
    else:
        rowdiff = nrows - nrows_current
        if front:
            # in this case zero is what we want, since the code inserts
            firstrow = 0
        else:
            firstrow = nrows_current
        self._FITS.insert_rows(self._ext+1, firstrow, rowdiff)

    self._update_info()
python
def resize(self, nrows, front=False):
    nrows_current = self.get_nrows()
    if nrows == nrows_current:
        return

    if nrows < nrows_current:
        rowdiff = nrows_current - nrows
        if front:
            start = 0
            stop = rowdiff
        else:
            start = nrows
            stop = nrows_current

        self.delete_rows(slice(start, stop))
    else:
        rowdiff = nrows - nrows_current
        if front:
            firstrow = 0
        else:
            firstrow = nrows_current
        self._FITS.insert_rows(self._ext+1, firstrow, rowdiff)

    self._update_info()
[ "def", "resize", "(", "self", ",", "nrows", ",", "front", "=", "False", ")", ":", "nrows_current", "=", "self", ".", "get_nrows", "(", ")", "if", "nrows", "==", "nrows_current", ":", "return", "if", "nrows", "<", "nrows_current", ":", "rowdiff", "=", "nrows_current", "-", "nrows", "if", "front", ":", "# delete from the front", "start", "=", "0", "stop", "=", "rowdiff", "else", ":", "# delete from the back", "start", "=", "nrows", "stop", "=", "nrows_current", "self", ".", "delete_rows", "(", "slice", "(", "start", ",", "stop", ")", ")", "else", ":", "rowdiff", "=", "nrows", "-", "nrows_current", "if", "front", ":", "# in this case zero is what we want, since the code inserts", "firstrow", "=", "0", "else", ":", "firstrow", "=", "nrows_current", "self", ".", "_FITS", ".", "insert_rows", "(", "self", ".", "_ext", "+", "1", ",", "firstrow", ",", "rowdiff", ")", "self", ".", "_update_info", "(", ")" ]
Resize the table to the given size, removing or adding rows as
necessary.  Note if expanding the table at the end, it is more
efficient to use the append function than resizing and then writing.

New added rows are zeroed, except for 'i1', 'u2' and 'u4' data types
which get -128, 32768, 2147483648 respectively

parameters
----------
nrows: int
    new size of table
front: bool, optional
    If True, add or remove rows from the front.  Default is False
[ "Resize", "the", "table", "to", "the", "given", "size", "removing", "or", "adding", "rows", "as", "necessary", ".", "Note", "if", "expanding", "the", "table", "at", "the", "end", "it", "is", "more", "efficient", "to", "use", "the", "append", "function", "than", "resizing", "and", "then", "writing", "." ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L515-L560
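A minimal resize sketch (file name illustrative): grow a 4-row table to 10 rows, with the new rows zeroed as described above, then shrink it back by deleting from the front:

import numpy
import fitsio

data = numpy.zeros(4, dtype=[('x', 'f8')])
with fitsio.FITS('resized.fits', 'rw', clobber=True) as fits:
    fits.write(data)
    fits[-1].resize(10)
    fits[-1].resize(4, front=True)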
esheldon/fitsio
fitsio/hdu/table.py
TableHDU.read
def read(self, **keys):
    """
    read data from this HDU

    By default, all data are read.

    Send columns= and rows= to select subsets of the data.
    Table data are read into a recarray; use read_column() to get a
    single column as an ordinary array.

    You can alternatively use slice notation
        fits=fitsio.FITS(filename)
        fits[ext][:]
        fits[ext][2:5]
        fits[ext][200:235:2]
        fits[ext][rows]
        fits[ext][cols][rows]

    parameters
    ----------
    columns: optional
        An optional set of columns to read from table HDUs.  Default
        is to read all.  Can be string or number.  If a sequence, a
        recarray is always returned.  If a scalar, an ordinary array
        is returned.
    rows: optional
        An optional list of rows to read from table HDUs.  Default is
        to read all.
    vstorage: string, optional
        Over-ride the default method to store variable length columns.
        Can be 'fixed' or 'object'.  See docs on fitsio.FITS for
        details.
    """
    columns = keys.get('columns', None)
    rows = keys.get('rows', None)

    if columns is not None:
        if 'columns' in keys:
            del keys['columns']
        data = self.read_columns(columns, **keys)
    elif rows is not None:
        if 'rows' in keys:
            del keys['rows']
        data = self.read_rows(rows, **keys)
    else:
        data = self._read_all(**keys)

    return data
python
def read(self, **keys):
    columns = keys.get('columns', None)
    rows = keys.get('rows', None)

    if columns is not None:
        if 'columns' in keys:
            del keys['columns']
        data = self.read_columns(columns, **keys)
    elif rows is not None:
        if 'rows' in keys:
            del keys['rows']
        data = self.read_rows(rows, **keys)
    else:
        data = self._read_all(**keys)

    return data
[ "def", "read", "(", "self", ",", "*", "*", "keys", ")", ":", "columns", "=", "keys", ".", "get", "(", "'columns'", ",", "None", ")", "rows", "=", "keys", ".", "get", "(", "'rows'", ",", "None", ")", "if", "columns", "is", "not", "None", ":", "if", "'columns'", "in", "keys", ":", "del", "keys", "[", "'columns'", "]", "data", "=", "self", ".", "read_columns", "(", "columns", ",", "*", "*", "keys", ")", "elif", "rows", "is", "not", "None", ":", "if", "'rows'", "in", "keys", ":", "del", "keys", "[", "'rows'", "]", "data", "=", "self", ".", "read_rows", "(", "rows", ",", "*", "*", "keys", ")", "else", ":", "data", "=", "self", ".", "_read_all", "(", "*", "*", "keys", ")", "return", "data" ]
read data from this HDU

By default, all data are read.

Send columns= and rows= to select subsets of the data.  Table data are
read into a recarray; use read_column() to get a single column as an
ordinary array.

You can alternatively use slice notation
    fits=fitsio.FITS(filename)
    fits[ext][:]
    fits[ext][2:5]
    fits[ext][200:235:2]
    fits[ext][rows]
    fits[ext][cols][rows]

parameters
----------
columns: optional
    An optional set of columns to read from table HDUs.  Default is to
    read all.  Can be string or number.  If a sequence, a recarray is
    always returned.  If a scalar, an ordinary array is returned.
rows: optional
    An optional list of rows to read from table HDUs.  Default is to
    read all.
vstorage: string, optional
    Over-ride the default method to store variable length columns.
    Can be 'fixed' or 'object'.  See docs on fitsio.FITS for details.
[ "read", "data", "from", "this", "HDU" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L562-L606
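A few read patterns as a sketch, assuming 'data.fits' holds a table in extension 1 with columns 'x' and 'y' (all names illustrative):

import fitsio

with fitsio.FITS('data.fits') as fits:
    everything = fits[1].read()
    subset = fits[1].read(columns=['x', 'y'], rows=[0, 5, 7])
    sliced = fits[1][2:5]        # slice notation from the docs above
    one_col = fits[1]['x'][:]    # single column as an ordinary array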
esheldon/fitsio
fitsio/hdu/table.py
TableHDU._read_all
def _read_all(self, **keys):
    """
    Read all data in the HDU.

    parameters
    ----------
    vstorage: string, optional
        Over-ride the default method to store variable length columns.
        Can be 'fixed' or 'object'.  See docs on fitsio.FITS for
        details.
    lower: bool, optional
        If True, force all column names to lower case in output.  Will
        over-ride the lower= keyword from construction.
    upper: bool, optional
        If True, force all column names to upper case in output.  Will
        over-ride the lower= keyword from construction.
    """
    dtype, offsets, isvar = self.get_rec_dtype(**keys)

    w, = numpy.where(isvar == True)  # noqa
    has_tbit = self._check_tbit()

    if w.size > 0:
        vstorage = keys.get('vstorage', self._vstorage)
        colnums = self._extract_colnums()
        rows = None
        array = self._read_rec_with_var(colnums, rows, dtype,
                                        offsets, isvar, vstorage)
    elif has_tbit:
        # drop down to read_columns since we can't stuff into a
        # contiguous array
        colnums = self._extract_colnums()
        array = self.read_columns(colnums, **keys)
    else:
        firstrow = 1  # noqa - not used?
        nrows = self._info['nrows']
        array = numpy.zeros(nrows, dtype=dtype)

        self._FITS.read_as_rec(self._ext+1, 1, nrows, array)

        array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array)

        for colnum, name in enumerate(array.dtype.names):
            self._rescale_and_convert_field_inplace(
                array,
                name,
                self._info['colinfo'][colnum]['tscale'],
                self._info['colinfo'][colnum]['tzero'])

    lower = keys.get('lower', False)
    upper = keys.get('upper', False)
    if self.lower or lower:
        _names_to_lower_if_recarray(array)
    elif self.upper or upper:
        _names_to_upper_if_recarray(array)

    self._maybe_trim_strings(array, **keys)

    return array
python
def _read_all(self, **keys):
    dtype, offsets, isvar = self.get_rec_dtype(**keys)

    w, = numpy.where(isvar == True)
    has_tbit = self._check_tbit()

    if w.size > 0:
        vstorage = keys.get('vstorage', self._vstorage)
        colnums = self._extract_colnums()
        rows = None
        array = self._read_rec_with_var(colnums, rows, dtype,
                                        offsets, isvar, vstorage)
    elif has_tbit:
        colnums = self._extract_colnums()
        array = self.read_columns(colnums, **keys)
    else:
        firstrow = 1
        nrows = self._info['nrows']
        array = numpy.zeros(nrows, dtype=dtype)

        self._FITS.read_as_rec(self._ext+1, 1, nrows, array)

        array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array)

        for colnum, name in enumerate(array.dtype.names):
            self._rescale_and_convert_field_inplace(
                array,
                name,
                self._info['colinfo'][colnum]['tscale'],
                self._info['colinfo'][colnum]['tzero'])

    lower = keys.get('lower', False)
    upper = keys.get('upper', False)
    if self.lower or lower:
        _names_to_lower_if_recarray(array)
    elif self.upper or upper:
        _names_to_upper_if_recarray(array)

    self._maybe_trim_strings(array, **keys)

    return array
[ "def", "_read_all", "(", "self", ",", "*", "*", "keys", ")", ":", "dtype", ",", "offsets", ",", "isvar", "=", "self", ".", "get_rec_dtype", "(", "*", "*", "keys", ")", "w", ",", "=", "numpy", ".", "where", "(", "isvar", "==", "True", ")", "# noqa", "has_tbit", "=", "self", ".", "_check_tbit", "(", ")", "if", "w", ".", "size", ">", "0", ":", "vstorage", "=", "keys", ".", "get", "(", "'vstorage'", ",", "self", ".", "_vstorage", ")", "colnums", "=", "self", ".", "_extract_colnums", "(", ")", "rows", "=", "None", "array", "=", "self", ".", "_read_rec_with_var", "(", "colnums", ",", "rows", ",", "dtype", ",", "offsets", ",", "isvar", ",", "vstorage", ")", "elif", "has_tbit", ":", "# drop down to read_columns since we can't stuff into a", "# contiguous array", "colnums", "=", "self", ".", "_extract_colnums", "(", ")", "array", "=", "self", ".", "read_columns", "(", "colnums", ",", "*", "*", "keys", ")", "else", ":", "firstrow", "=", "1", "# noqa - not used?", "nrows", "=", "self", ".", "_info", "[", "'nrows'", "]", "array", "=", "numpy", ".", "zeros", "(", "nrows", ",", "dtype", "=", "dtype", ")", "self", ".", "_FITS", ".", "read_as_rec", "(", "self", ".", "_ext", "+", "1", ",", "1", ",", "nrows", ",", "array", ")", "array", "=", "self", ".", "_maybe_decode_fits_ascii_strings_to_unicode_py3", "(", "array", ")", "for", "colnum", ",", "name", "in", "enumerate", "(", "array", ".", "dtype", ".", "names", ")", ":", "self", ".", "_rescale_and_convert_field_inplace", "(", "array", ",", "name", ",", "self", ".", "_info", "[", "'colinfo'", "]", "[", "colnum", "]", "[", "'tscale'", "]", ",", "self", ".", "_info", "[", "'colinfo'", "]", "[", "colnum", "]", "[", "'tzero'", "]", ")", "lower", "=", "keys", ".", "get", "(", "'lower'", ",", "False", ")", "upper", "=", "keys", ".", "get", "(", "'upper'", ",", "False", ")", "if", "self", ".", "lower", "or", "lower", ":", "_names_to_lower_if_recarray", "(", "array", ")", "elif", "self", ".", "upper", "or", "upper", ":", "_names_to_upper_if_recarray", "(", "array", ")", "self", ".", "_maybe_trim_strings", "(", "array", ",", "*", "*", "keys", ")", "return", "array" ]
Read all data in the HDU.

parameters
----------
vstorage: string, optional
    Over-ride the default method to store variable length columns.
    Can be 'fixed' or 'object'.  See docs on fitsio.FITS for details.
lower: bool, optional
    If True, force all column names to lower case in output.  Will
    over-ride the lower= keyword from construction.
upper: bool, optional
    If True, force all column names to upper case in output.  Will
    over-ride the lower= keyword from construction.
[ "Read", "all", "data", "in", "the", "HDU", "." ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L608-L665
esheldon/fitsio
fitsio/hdu/table.py
TableHDU.read_column
def read_column(self, col, **keys):
    """
    Read the specified column

    Alternatively, you can use slice notation
        fits=fitsio.FITS(filename)
        fits[ext][colname][:]
        fits[ext][colname][2:5]
        fits[ext][colname][200:235:2]
        fits[ext][colname][rows]

    Note, if reading multiple columns, it is more efficient to use
    read(columns=) or slice notation with a list of column names.

    parameters
    ----------
    col: string/int, required
        The column name or number.
    rows: optional
        An optional set of row numbers to read.
    vstorage: string, optional
        Over-ride the default method to store variable length columns.
        Can be 'fixed' or 'object'.  See docs on fitsio.FITS for
        details.
    """
    res = self.read_columns([col], **keys)
    colname = res.dtype.names[0]
    data = res[colname]

    self._maybe_trim_strings(data, **keys)
    return data
python
def read_column(self, col, **keys):
    res = self.read_columns([col], **keys)
    colname = res.dtype.names[0]
    data = res[colname]

    self._maybe_trim_strings(data, **keys)
    return data
[ "def", "read_column", "(", "self", ",", "col", ",", "*", "*", "keys", ")", ":", "res", "=", "self", ".", "read_columns", "(", "[", "col", "]", ",", "*", "*", "keys", ")", "colname", "=", "res", ".", "dtype", ".", "names", "[", "0", "]", "data", "=", "res", "[", "colname", "]", "self", ".", "_maybe_trim_strings", "(", "data", ",", "*", "*", "keys", ")", "return", "data" ]
Read the specified column

Alternatively, you can use slice notation
    fits=fitsio.FITS(filename)
    fits[ext][colname][:]
    fits[ext][colname][2:5]
    fits[ext][colname][200:235:2]
    fits[ext][colname][rows]

Note, if reading multiple columns, it is more efficient to use
read(columns=) or slice notation with a list of column names.

parameters
----------
col: string/int, required
    The column name or number.
rows: optional
    An optional set of row numbers to read.
vstorage: string, optional
    Over-ride the default method to store variable length columns.
    Can be 'fixed' or 'object'.  See docs on fitsio.FITS for details.
[ "Read", "the", "specified", "column" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L667-L697
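A read_column sketch under the same assumptions ('data.fits' with a column 'x' in extension 1; names illustrative):

import fitsio

with fitsio.FITS('data.fits') as fits:
    x_all = fits[1].read_column('x')
    x_some = fits[1].read_column('x', rows=[1, 3, 8])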
esheldon/fitsio
fitsio/hdu/table.py
TableHDU.read_rows
def read_rows(self, rows, **keys):
    """
    Read the specified rows.

    parameters
    ----------
    rows: list,array
        A list or array of row indices.
    vstorage: string, optional
        Over-ride the default method to store variable length columns.
        Can be 'fixed' or 'object'.  See docs on fitsio.FITS for
        details.
    lower: bool, optional
        If True, force all column names to lower case in output.  Will
        over-ride the lower= keyword from construction.
    upper: bool, optional
        If True, force all column names to upper case in output.  Will
        over-ride the lower= keyword from construction.
    """
    if rows is None:
        # we actually want all rows!
        return self._read_all()

    if self._info['hdutype'] == ASCII_TBL:
        keys['rows'] = rows
        return self.read(**keys)

    rows = self._extract_rows(rows)
    dtype, offsets, isvar = self.get_rec_dtype(**keys)

    w, = numpy.where(isvar == True)  # noqa
    if w.size > 0:
        vstorage = keys.get('vstorage', self._vstorage)
        colnums = self._extract_colnums()
        return self._read_rec_with_var(
            colnums, rows, dtype, offsets, isvar, vstorage)
    else:
        array = numpy.zeros(rows.size, dtype=dtype)
        self._FITS.read_rows_as_rec(self._ext+1, array, rows)

        array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array)

        for colnum, name in enumerate(array.dtype.names):
            self._rescale_and_convert_field_inplace(
                array,
                name,
                self._info['colinfo'][colnum]['tscale'],
                self._info['colinfo'][colnum]['tzero'])

    lower = keys.get('lower', False)
    upper = keys.get('upper', False)
    if self.lower or lower:
        _names_to_lower_if_recarray(array)
    elif self.upper or upper:
        _names_to_upper_if_recarray(array)

    self._maybe_trim_strings(array, **keys)

    return array
python
def read_rows(self, rows, **keys):
    if rows is None:
        return self._read_all()

    if self._info['hdutype'] == ASCII_TBL:
        keys['rows'] = rows
        return self.read(**keys)

    rows = self._extract_rows(rows)
    dtype, offsets, isvar = self.get_rec_dtype(**keys)

    w, = numpy.where(isvar == True)
    if w.size > 0:
        vstorage = keys.get('vstorage', self._vstorage)
        colnums = self._extract_colnums()
        return self._read_rec_with_var(
            colnums, rows, dtype, offsets, isvar, vstorage)
    else:
        array = numpy.zeros(rows.size, dtype=dtype)
        self._FITS.read_rows_as_rec(self._ext+1, array, rows)

        array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array)

        for colnum, name in enumerate(array.dtype.names):
            self._rescale_and_convert_field_inplace(
                array,
                name,
                self._info['colinfo'][colnum]['tscale'],
                self._info['colinfo'][colnum]['tzero'])

    lower = keys.get('lower', False)
    upper = keys.get('upper', False)
    if self.lower or lower:
        _names_to_lower_if_recarray(array)
    elif self.upper or upper:
        _names_to_upper_if_recarray(array)

    self._maybe_trim_strings(array, **keys)

    return array
[ "def", "read_rows", "(", "self", ",", "rows", ",", "*", "*", "keys", ")", ":", "if", "rows", "is", "None", ":", "# we actually want all rows!", "return", "self", ".", "_read_all", "(", ")", "if", "self", ".", "_info", "[", "'hdutype'", "]", "==", "ASCII_TBL", ":", "keys", "[", "'rows'", "]", "=", "rows", "return", "self", ".", "read", "(", "*", "*", "keys", ")", "rows", "=", "self", ".", "_extract_rows", "(", "rows", ")", "dtype", ",", "offsets", ",", "isvar", "=", "self", ".", "get_rec_dtype", "(", "*", "*", "keys", ")", "w", ",", "=", "numpy", ".", "where", "(", "isvar", "==", "True", ")", "# noqa", "if", "w", ".", "size", ">", "0", ":", "vstorage", "=", "keys", ".", "get", "(", "'vstorage'", ",", "self", ".", "_vstorage", ")", "colnums", "=", "self", ".", "_extract_colnums", "(", ")", "return", "self", ".", "_read_rec_with_var", "(", "colnums", ",", "rows", ",", "dtype", ",", "offsets", ",", "isvar", ",", "vstorage", ")", "else", ":", "array", "=", "numpy", ".", "zeros", "(", "rows", ".", "size", ",", "dtype", "=", "dtype", ")", "self", ".", "_FITS", ".", "read_rows_as_rec", "(", "self", ".", "_ext", "+", "1", ",", "array", ",", "rows", ")", "array", "=", "self", ".", "_maybe_decode_fits_ascii_strings_to_unicode_py3", "(", "array", ")", "for", "colnum", ",", "name", "in", "enumerate", "(", "array", ".", "dtype", ".", "names", ")", ":", "self", ".", "_rescale_and_convert_field_inplace", "(", "array", ",", "name", ",", "self", ".", "_info", "[", "'colinfo'", "]", "[", "colnum", "]", "[", "'tscale'", "]", ",", "self", ".", "_info", "[", "'colinfo'", "]", "[", "colnum", "]", "[", "'tzero'", "]", ")", "lower", "=", "keys", ".", "get", "(", "'lower'", ",", "False", ")", "upper", "=", "keys", ".", "get", "(", "'upper'", ",", "False", ")", "if", "self", ".", "lower", "or", "lower", ":", "_names_to_lower_if_recarray", "(", "array", ")", "elif", "self", ".", "upper", "or", "upper", ":", "_names_to_upper_if_recarray", "(", "array", ")", "self", ".", "_maybe_trim_strings", "(", "array", ",", "*", "*", "keys", ")", "return", "array" ]
Read the specified rows.

parameters
----------
rows: list,array
    A list or array of row indices.
vstorage: string, optional
    Over-ride the default method to store variable length columns.
    Can be 'fixed' or 'object'.  See docs on fitsio.FITS for details.
lower: bool, optional
    If True, force all column names to lower case in output.  Will
    over-ride the lower= keyword from construction.
upper: bool, optional
    If True, force all column names to upper case in output.  Will
    over-ride the lower= keyword from construction.
[ "Read", "the", "specified", "rows", "." ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L699-L756
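A read_rows sketch (file name illustrative); the result is a recarray holding just the requested rows:

import fitsio

with fitsio.FITS('data.fits') as fits:
    recs = fits[1].read_rows([0, 10, 20])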
esheldon/fitsio
fitsio/hdu/table.py
TableHDU.read_columns
def read_columns(self, columns, **keys):
    """
    read a subset of columns from this binary table HDU

    By default, all rows are read.  Send rows= to select subsets of
    the data.  Table data are read into a recarray for multiple
    columns, plain array for a single column.

    parameters
    ----------
    columns: list/array
        An optional set of columns to read from table HDUs.  Can be
        string or number.  If a sequence, a recarray is always
        returned.  If a scalar, an ordinary array is returned.
    rows: list/array, optional
        An optional list of rows to read from table HDUs.  Default is
        to read all.
    vstorage: string, optional
        Over-ride the default method to store variable length columns.
        Can be 'fixed' or 'object'.  See docs on fitsio.FITS for
        details.
    lower: bool, optional
        If True, force all column names to lower case in output.  Will
        over-ride the lower= keyword from construction.
    upper: bool, optional
        If True, force all column names to upper case in output.  Will
        over-ride the lower= keyword from construction.
    """
    if self._info['hdutype'] == ASCII_TBL:
        keys['columns'] = columns
        return self.read(**keys)

    rows = keys.get('rows', None)

    # if columns is None, returns all.  Guaranteed to be unique and sorted
    colnums = self._extract_colnums(columns)
    if isinstance(colnums, int):
        # scalar sent, don't read as a recarray
        return self.read_column(columns, **keys)

    # if rows is None still returns None, and is correctly interpreted
    # by the reader to mean all
    rows = self._extract_rows(rows)

    # this is the full dtype for all columns
    dtype, offsets, isvar = self.get_rec_dtype(colnums=colnums, **keys)

    w, = numpy.where(isvar == True)  # noqa
    if w.size > 0:
        vstorage = keys.get('vstorage', self._vstorage)
        array = self._read_rec_with_var(
            colnums, rows, dtype, offsets, isvar, vstorage)
    else:
        if rows is None:
            nrows = self._info['nrows']
        else:
            nrows = rows.size
        array = numpy.zeros(nrows, dtype=dtype)

        colnumsp = colnums[:].copy()
        colnumsp[:] += 1
        self._FITS.read_columns_as_rec(self._ext+1, colnumsp, array, rows)

        array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array)

        for i in xrange(colnums.size):
            colnum = int(colnums[i])
            name = array.dtype.names[i]
            self._rescale_and_convert_field_inplace(
                array,
                name,
                self._info['colinfo'][colnum]['tscale'],
                self._info['colinfo'][colnum]['tzero'])

    if (self._check_tbit(colnums=colnums)):
        array = self._fix_tbit_dtype(array, colnums)

    lower = keys.get('lower', False)
    upper = keys.get('upper', False)
    if self.lower or lower:
        _names_to_lower_if_recarray(array)
    elif self.upper or upper:
        _names_to_upper_if_recarray(array)

    self._maybe_trim_strings(array, **keys)

    return array
python
def read_columns(self, columns, **keys):
    if self._info['hdutype'] == ASCII_TBL:
        keys['columns'] = columns
        return self.read(**keys)

    rows = keys.get('rows', None)

    colnums = self._extract_colnums(columns)
    if isinstance(colnums, int):
        return self.read_column(columns, **keys)

    rows = self._extract_rows(rows)

    dtype, offsets, isvar = self.get_rec_dtype(colnums=colnums, **keys)

    w, = numpy.where(isvar == True)
    if w.size > 0:
        vstorage = keys.get('vstorage', self._vstorage)
        array = self._read_rec_with_var(
            colnums, rows, dtype, offsets, isvar, vstorage)
    else:
        if rows is None:
            nrows = self._info['nrows']
        else:
            nrows = rows.size
        array = numpy.zeros(nrows, dtype=dtype)

        colnumsp = colnums[:].copy()
        colnumsp[:] += 1
        self._FITS.read_columns_as_rec(self._ext+1, colnumsp, array, rows)

        array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array)

        for i in xrange(colnums.size):
            colnum = int(colnums[i])
            name = array.dtype.names[i]
            self._rescale_and_convert_field_inplace(
                array,
                name,
                self._info['colinfo'][colnum]['tscale'],
                self._info['colinfo'][colnum]['tzero'])

    if (self._check_tbit(colnums=colnums)):
        array = self._fix_tbit_dtype(array, colnums)

    lower = keys.get('lower', False)
    upper = keys.get('upper', False)
    if self.lower or lower:
        _names_to_lower_if_recarray(array)
    elif self.upper or upper:
        _names_to_upper_if_recarray(array)

    self._maybe_trim_strings(array, **keys)

    return array
[ "def", "read_columns", "(", "self", ",", "columns", ",", "*", "*", "keys", ")", ":", "if", "self", ".", "_info", "[", "'hdutype'", "]", "==", "ASCII_TBL", ":", "keys", "[", "'columns'", "]", "=", "columns", "return", "self", ".", "read", "(", "*", "*", "keys", ")", "rows", "=", "keys", ".", "get", "(", "'rows'", ",", "None", ")", "# if columns is None, returns all. Guaranteed to be unique and sorted", "colnums", "=", "self", ".", "_extract_colnums", "(", "columns", ")", "if", "isinstance", "(", "colnums", ",", "int", ")", ":", "# scalar sent, don't read as a recarray", "return", "self", ".", "read_column", "(", "columns", ",", "*", "*", "keys", ")", "# if rows is None still returns None, and is correctly interpreted", "# by the reader to mean all", "rows", "=", "self", ".", "_extract_rows", "(", "rows", ")", "# this is the full dtype for all columns", "dtype", ",", "offsets", ",", "isvar", "=", "self", ".", "get_rec_dtype", "(", "colnums", "=", "colnums", ",", "*", "*", "keys", ")", "w", ",", "=", "numpy", ".", "where", "(", "isvar", "==", "True", ")", "# noqa", "if", "w", ".", "size", ">", "0", ":", "vstorage", "=", "keys", ".", "get", "(", "'vstorage'", ",", "self", ".", "_vstorage", ")", "array", "=", "self", ".", "_read_rec_with_var", "(", "colnums", ",", "rows", ",", "dtype", ",", "offsets", ",", "isvar", ",", "vstorage", ")", "else", ":", "if", "rows", "is", "None", ":", "nrows", "=", "self", ".", "_info", "[", "'nrows'", "]", "else", ":", "nrows", "=", "rows", ".", "size", "array", "=", "numpy", ".", "zeros", "(", "nrows", ",", "dtype", "=", "dtype", ")", "colnumsp", "=", "colnums", "[", ":", "]", ".", "copy", "(", ")", "colnumsp", "[", ":", "]", "+=", "1", "self", ".", "_FITS", ".", "read_columns_as_rec", "(", "self", ".", "_ext", "+", "1", ",", "colnumsp", ",", "array", ",", "rows", ")", "array", "=", "self", ".", "_maybe_decode_fits_ascii_strings_to_unicode_py3", "(", "array", ")", "for", "i", "in", "xrange", "(", "colnums", ".", "size", ")", ":", "colnum", "=", "int", "(", "colnums", "[", "i", "]", ")", "name", "=", "array", ".", "dtype", ".", "names", "[", "i", "]", "self", ".", "_rescale_and_convert_field_inplace", "(", "array", ",", "name", ",", "self", ".", "_info", "[", "'colinfo'", "]", "[", "colnum", "]", "[", "'tscale'", "]", ",", "self", ".", "_info", "[", "'colinfo'", "]", "[", "colnum", "]", "[", "'tzero'", "]", ")", "if", "(", "self", ".", "_check_tbit", "(", "colnums", "=", "colnums", ")", ")", ":", "array", "=", "self", ".", "_fix_tbit_dtype", "(", "array", ",", "colnums", ")", "lower", "=", "keys", ".", "get", "(", "'lower'", ",", "False", ")", "upper", "=", "keys", ".", "get", "(", "'upper'", ",", "False", ")", "if", "self", ".", "lower", "or", "lower", ":", "_names_to_lower_if_recarray", "(", "array", ")", "elif", "self", ".", "upper", "or", "upper", ":", "_names_to_upper_if_recarray", "(", "array", ")", "self", ".", "_maybe_trim_strings", "(", "array", ",", "*", "*", "keys", ")", "return", "array" ]
read a subset of columns from this binary table HDU

By default, all rows are read.  Send rows= to select subsets of the
data.  Table data are read into a recarray for multiple columns, plain
array for a single column.

parameters
----------
columns: list/array
    An optional set of columns to read from table HDUs.  Can be string
    or number.  If a sequence, a recarray is always returned.  If a
    scalar, an ordinary array is returned.
rows: list/array, optional
    An optional list of rows to read from table HDUs.  Default is to
    read all.
vstorage: string, optional
    Over-ride the default method to store variable length columns.
    Can be 'fixed' or 'object'.  See docs on fitsio.FITS for details.
lower: bool, optional
    If True, force all column names to lower case in output.  Will
    over-ride the lower= keyword from construction.
upper: bool, optional
    If True, force all column names to upper case in output.  Will
    over-ride the lower= keyword from construction.
[ "read", "a", "subset", "of", "columns", "from", "this", "binary", "table", "HDU" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L758-L845
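A read_columns sketch under the same illustrative assumptions; a sequence of names always yields a recarray, while a scalar name falls through to read_column and yields an ordinary array:

import fitsio

with fitsio.FITS('data.fits') as fits:
    rec = fits[1].read_columns(['x', 'y'], rows=[0, 2, 4])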
esheldon/fitsio
fitsio/hdu/table.py
TableHDU.read_slice
def read_slice(self, firstrow, lastrow, step=1, **keys):
    """
    Read the specified row slice from a table.

    Read all rows between firstrow and lastrow (non-inclusive, as per
    python slice notation).  Note you must use slice notation for
    images, e.g. f[ext][20:30, 40:50]

    parameters
    ----------
    firstrow: integer
        The first row to read
    lastrow: integer
        The last row to read, non-inclusive.  This follows the python
        list slice convention that one does not include the last
        element.
    step: integer, optional
        Step between rows, default 1.  e.g., if step is 2, skip every
        other row.
    vstorage: string, optional
        Over-ride the default method to store variable length columns.
        Can be 'fixed' or 'object'.  See docs on fitsio.FITS for
        details.
    lower: bool, optional
        If True, force all column names to lower case in output.  Will
        over-ride the lower= keyword from construction.
    upper: bool, optional
        If True, force all column names to upper case in output.  Will
        over-ride the lower= keyword from construction.
    """
    if self._info['hdutype'] == ASCII_TBL:
        rows = numpy.arange(firstrow, lastrow, step, dtype='i8')
        keys['rows'] = rows
        return self.read_ascii(**keys)

    step = keys.get('step', 1)
    if self._info['hdutype'] == IMAGE_HDU:
        raise ValueError("slices currently only supported for tables")

    maxrow = self._info['nrows']
    if firstrow < 0 or lastrow > maxrow:
        raise ValueError(
            "slice must specify a sub-range of [%d,%d]" % (0, maxrow))

    dtype, offsets, isvar = self.get_rec_dtype(**keys)

    w, = numpy.where(isvar == True)  # noqa
    if w.size > 0:
        vstorage = keys.get('vstorage', self._vstorage)
        rows = numpy.arange(firstrow, lastrow, step, dtype='i8')
        colnums = self._extract_colnums()
        array = self._read_rec_with_var(
            colnums, rows, dtype, offsets, isvar, vstorage)
    else:
        if step != 1:
            rows = numpy.arange(firstrow, lastrow, step, dtype='i8')
            array = self.read(rows=rows)
        else:
            # no +1 because lastrow is non-inclusive
            nrows = lastrow - firstrow
            array = numpy.zeros(nrows, dtype=dtype)

            # only first needs to be +1.  This is because the c code
            # is inclusive
            self._FITS.read_as_rec(self._ext+1, firstrow+1, lastrow,
                                   array)

            array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(
                array)

            for colnum, name in enumerate(array.dtype.names):
                self._rescale_and_convert_field_inplace(
                    array,
                    name,
                    self._info['colinfo'][colnum]['tscale'],
                    self._info['colinfo'][colnum]['tzero'])

    lower = keys.get('lower', False)
    upper = keys.get('upper', False)
    if self.lower or lower:
        _names_to_lower_if_recarray(array)
    elif self.upper or upper:
        _names_to_upper_if_recarray(array)

    self._maybe_trim_strings(array, **keys)

    return array
python
def read_slice(self, firstrow, lastrow, step=1, **keys):
    if self._info['hdutype'] == ASCII_TBL:
        rows = numpy.arange(firstrow, lastrow, step, dtype='i8')
        keys['rows'] = rows
        return self.read_ascii(**keys)

    step = keys.get('step', 1)
    if self._info['hdutype'] == IMAGE_HDU:
        raise ValueError("slices currently only supported for tables")

    maxrow = self._info['nrows']
    if firstrow < 0 or lastrow > maxrow:
        raise ValueError(
            "slice must specify a sub-range of [%d,%d]" % (0, maxrow))

    dtype, offsets, isvar = self.get_rec_dtype(**keys)

    w, = numpy.where(isvar == True)
    if w.size > 0:
        vstorage = keys.get('vstorage', self._vstorage)
        rows = numpy.arange(firstrow, lastrow, step, dtype='i8')
        colnums = self._extract_colnums()
        array = self._read_rec_with_var(
            colnums, rows, dtype, offsets, isvar, vstorage)
    else:
        if step != 1:
            rows = numpy.arange(firstrow, lastrow, step, dtype='i8')
            array = self.read(rows=rows)
        else:
            nrows = lastrow - firstrow
            array = numpy.zeros(nrows, dtype=dtype)

            self._FITS.read_as_rec(self._ext+1, firstrow+1, lastrow,
                                   array)

            array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(
                array)

            for colnum, name in enumerate(array.dtype.names):
                self._rescale_and_convert_field_inplace(
                    array,
                    name,
                    self._info['colinfo'][colnum]['tscale'],
                    self._info['colinfo'][colnum]['tzero'])

    lower = keys.get('lower', False)
    upper = keys.get('upper', False)
    if self.lower or lower:
        _names_to_lower_if_recarray(array)
    elif self.upper or upper:
        _names_to_upper_if_recarray(array)

    self._maybe_trim_strings(array, **keys)

    return array
[ "def", "read_slice", "(", "self", ",", "firstrow", ",", "lastrow", ",", "step", "=", "1", ",", "*", "*", "keys", ")", ":", "if", "self", ".", "_info", "[", "'hdutype'", "]", "==", "ASCII_TBL", ":", "rows", "=", "numpy", ".", "arange", "(", "firstrow", ",", "lastrow", ",", "step", ",", "dtype", "=", "'i8'", ")", "keys", "[", "'rows'", "]", "=", "rows", "return", "self", ".", "read_ascii", "(", "*", "*", "keys", ")", "step", "=", "keys", ".", "get", "(", "'step'", ",", "1", ")", "if", "self", ".", "_info", "[", "'hdutype'", "]", "==", "IMAGE_HDU", ":", "raise", "ValueError", "(", "\"slices currently only supported for tables\"", ")", "maxrow", "=", "self", ".", "_info", "[", "'nrows'", "]", "if", "firstrow", "<", "0", "or", "lastrow", ">", "maxrow", ":", "raise", "ValueError", "(", "\"slice must specify a sub-range of [%d,%d]\"", "%", "(", "0", ",", "maxrow", ")", ")", "dtype", ",", "offsets", ",", "isvar", "=", "self", ".", "get_rec_dtype", "(", "*", "*", "keys", ")", "w", ",", "=", "numpy", ".", "where", "(", "isvar", "==", "True", ")", "# noqa", "if", "w", ".", "size", ">", "0", ":", "vstorage", "=", "keys", ".", "get", "(", "'vstorage'", ",", "self", ".", "_vstorage", ")", "rows", "=", "numpy", ".", "arange", "(", "firstrow", ",", "lastrow", ",", "step", ",", "dtype", "=", "'i8'", ")", "colnums", "=", "self", ".", "_extract_colnums", "(", ")", "array", "=", "self", ".", "_read_rec_with_var", "(", "colnums", ",", "rows", ",", "dtype", ",", "offsets", ",", "isvar", ",", "vstorage", ")", "else", ":", "if", "step", "!=", "1", ":", "rows", "=", "numpy", ".", "arange", "(", "firstrow", ",", "lastrow", ",", "step", ",", "dtype", "=", "'i8'", ")", "array", "=", "self", ".", "read", "(", "rows", "=", "rows", ")", "else", ":", "# no +1 because lastrow is non-inclusive", "nrows", "=", "lastrow", "-", "firstrow", "array", "=", "numpy", ".", "zeros", "(", "nrows", ",", "dtype", "=", "dtype", ")", "# only first needs to be +1. This is becuase the c code is", "# inclusive", "self", ".", "_FITS", ".", "read_as_rec", "(", "self", ".", "_ext", "+", "1", ",", "firstrow", "+", "1", ",", "lastrow", ",", "array", ")", "array", "=", "self", ".", "_maybe_decode_fits_ascii_strings_to_unicode_py3", "(", "array", ")", "for", "colnum", ",", "name", "in", "enumerate", "(", "array", ".", "dtype", ".", "names", ")", ":", "self", ".", "_rescale_and_convert_field_inplace", "(", "array", ",", "name", ",", "self", ".", "_info", "[", "'colinfo'", "]", "[", "colnum", "]", "[", "'tscale'", "]", ",", "self", ".", "_info", "[", "'colinfo'", "]", "[", "colnum", "]", "[", "'tzero'", "]", ")", "lower", "=", "keys", ".", "get", "(", "'lower'", ",", "False", ")", "upper", "=", "keys", ".", "get", "(", "'upper'", ",", "False", ")", "if", "self", ".", "lower", "or", "lower", ":", "_names_to_lower_if_recarray", "(", "array", ")", "elif", "self", ".", "upper", "or", "upper", ":", "_names_to_upper_if_recarray", "(", "array", ")", "self", ".", "_maybe_trim_strings", "(", "array", ",", "*", "*", "keys", ")", "return", "array" ]
Read the specified row slice from a table.

Read all rows between firstrow and lastrow (non-inclusive, as per
python slice notation).  Note you must use slice notation for images,
e.g. f[ext][20:30, 40:50]

parameters
----------
firstrow: integer
    The first row to read
lastrow: integer
    The last row to read, non-inclusive.  This follows the python list
    slice convention that one does not include the last element.
step: integer, optional
    Step between rows, default 1.  e.g., if step is 2, skip every
    other row.
vstorage: string, optional
    Over-ride the default method to store variable length columns.
    Can be 'fixed' or 'object'.  See docs on fitsio.FITS for details.
lower: bool, optional
    If True, force all column names to lower case in output.  Will
    over-ride the lower= keyword from construction.
upper: bool, optional
    If True, force all column names to upper case in output.  Will
    over-ride the lower= keyword from construction.
[ "Read", "the", "specified", "row", "slice", "from", "a", "table", "." ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L847-L931
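A read_slice sketch (file name illustrative): rows 10..19 with every second row, equivalent to the slice notation fits[1][10:20:2]:

import fitsio

with fitsio.FITS('data.fits') as fits:
    chunk = fits[1].read_slice(10, 20, step=2)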
esheldon/fitsio
fitsio/hdu/table.py
TableHDU.get_rec_dtype
def get_rec_dtype(self, **keys):
    """
    Get the dtype for the specified columns

    parameters
    ----------
    colnums: integer array
        The column numbers, 0 offset
    vstorage: string, optional
        See docs in read_columns
    """
    colnums = keys.get('colnums', None)
    vstorage = keys.get('vstorage', self._vstorage)

    if colnums is None:
        colnums = self._extract_colnums()

    descr = []
    isvararray = numpy.zeros(len(colnums), dtype=numpy.bool)
    for i, colnum in enumerate(colnums):
        dt, isvar = self.get_rec_column_descr(colnum, vstorage)
        descr.append(dt)
        isvararray[i] = isvar
    dtype = numpy.dtype(descr)

    offsets = numpy.zeros(len(colnums), dtype='i8')
    for i, n in enumerate(dtype.names):
        offsets[i] = dtype.fields[n][1]
    return dtype, offsets, isvararray
python
def get_rec_dtype(self, **keys):
    colnums = keys.get('colnums', None)
    vstorage = keys.get('vstorage', self._vstorage)

    if colnums is None:
        colnums = self._extract_colnums()

    descr = []
    isvararray = numpy.zeros(len(colnums), dtype=numpy.bool)
    for i, colnum in enumerate(colnums):
        dt, isvar = self.get_rec_column_descr(colnum, vstorage)
        descr.append(dt)
        isvararray[i] = isvar
    dtype = numpy.dtype(descr)

    offsets = numpy.zeros(len(colnums), dtype='i8')
    for i, n in enumerate(dtype.names):
        offsets[i] = dtype.fields[n][1]
    return dtype, offsets, isvararray
[ "def", "get_rec_dtype", "(", "self", ",", "*", "*", "keys", ")", ":", "colnums", "=", "keys", ".", "get", "(", "'colnums'", ",", "None", ")", "vstorage", "=", "keys", ".", "get", "(", "'vstorage'", ",", "self", ".", "_vstorage", ")", "if", "colnums", "is", "None", ":", "colnums", "=", "self", ".", "_extract_colnums", "(", ")", "descr", "=", "[", "]", "isvararray", "=", "numpy", ".", "zeros", "(", "len", "(", "colnums", ")", ",", "dtype", "=", "numpy", ".", "bool", ")", "for", "i", ",", "colnum", "in", "enumerate", "(", "colnums", ")", ":", "dt", ",", "isvar", "=", "self", ".", "get_rec_column_descr", "(", "colnum", ",", "vstorage", ")", "descr", ".", "append", "(", "dt", ")", "isvararray", "[", "i", "]", "=", "isvar", "dtype", "=", "numpy", ".", "dtype", "(", "descr", ")", "offsets", "=", "numpy", ".", "zeros", "(", "len", "(", "colnums", ")", ",", "dtype", "=", "'i8'", ")", "for", "i", ",", "n", "in", "enumerate", "(", "dtype", ".", "names", ")", ":", "offsets", "[", "i", "]", "=", "dtype", ".", "fields", "[", "n", "]", "[", "1", "]", "return", "dtype", ",", "offsets", ",", "isvararray" ]
Get the dtype for the specified columns

parameters
----------
colnums: integer array
    The column numbers, 0 offset
vstorage: string, optional
    See docs in read_columns
[ "Get", "the", "dtype", "for", "the", "specified", "columns" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L933-L961
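A get_rec_dtype sketch (file name illustrative), inspecting the record dtype the readers build:

import fitsio

with fitsio.FITS('data.fits') as fits:
    dtype, offsets, isvar = fits[1].get_rec_dtype()
    print(dtype)    # numpy dtype covering all columns
    print(isvar)    # boolean flags for variable-length columns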
esheldon/fitsio
fitsio/hdu/table.py
TableHDU._check_tbit
def _check_tbit(self, **keys):
    """
    Check if one of the columns is a TBIT column

    parameters
    ----------
    colnums: integer array, optional
    """
    colnums = keys.get('colnums', None)

    if colnums is None:
        colnums = self._extract_colnums()

    has_tbit = False
    for i, colnum in enumerate(colnums):
        npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
        if (istbit):
            has_tbit = True
            break

    return has_tbit
python
def _check_tbit(self, **keys):
    colnums = keys.get('colnums', None)

    if colnums is None:
        colnums = self._extract_colnums()

    has_tbit = False
    for i, colnum in enumerate(colnums):
        npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
        if (istbit):
            has_tbit = True
            break

    return has_tbit
[ "def", "_check_tbit", "(", "self", ",", "*", "*", "keys", ")", ":", "colnums", "=", "keys", ".", "get", "(", "'colnums'", ",", "None", ")", "if", "colnums", "is", "None", ":", "colnums", "=", "self", ".", "_extract_colnums", "(", ")", "has_tbit", "=", "False", "for", "i", ",", "colnum", "in", "enumerate", "(", "colnums", ")", ":", "npy_type", ",", "isvar", ",", "istbit", "=", "self", ".", "_get_tbl_numpy_dtype", "(", "colnum", ")", "if", "(", "istbit", ")", ":", "has_tbit", "=", "True", "break", "return", "has_tbit" ]
Check if one of the columns is a TBIT column

parameters
----------
colnums: integer array, optional
[ "Check", "if", "one", "of", "the", "columns", "is", "a", "TBIT", "column" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L963-L983
esheldon/fitsio
fitsio/hdu/table.py
TableHDU._fix_tbit_dtype
def _fix_tbit_dtype(self, array, colnums):
    """
    If necessary, patch up the TBIT to convert to bool array

    parameters
    ----------
    array: record array
    colnums: column numbers for lookup
    """
    descr = array.dtype.descr
    for i, colnum in enumerate(colnums):
        npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
        if (istbit):
            coldescr = list(descr[i])
            coldescr[1] = '?'
            descr[i] = tuple(coldescr)

    return array.view(descr)
python
def _fix_tbit_dtype(self, array, colnums):
    descr = array.dtype.descr
    for i, colnum in enumerate(colnums):
        npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
        if (istbit):
            coldescr = list(descr[i])
            coldescr[1] = '?'
            descr[i] = tuple(coldescr)

    return array.view(descr)
[ "def", "_fix_tbit_dtype", "(", "self", ",", "array", ",", "colnums", ")", ":", "descr", "=", "array", ".", "dtype", ".", "descr", "for", "i", ",", "colnum", "in", "enumerate", "(", "colnums", ")", ":", "npy_type", ",", "isvar", ",", "istbit", "=", "self", ".", "_get_tbl_numpy_dtype", "(", "colnum", ")", "if", "(", "istbit", ")", ":", "coldescr", "=", "list", "(", "descr", "[", "i", "]", ")", "coldescr", "[", "1", "]", "=", "'?'", "descr", "[", "i", "]", "=", "tuple", "(", "coldescr", ")", "return", "array", ".", "view", "(", "descr", ")" ]
If necessary, patch up the TBIT to convert to bool array

parameters
----------
array: record array
colnums: column numbers for lookup
[ "If", "necessary", "patch", "up", "the", "TBIT", "to", "convert", "to", "bool", "array" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L985-L1002
esheldon/fitsio
fitsio/hdu/table.py
TableHDU._get_simple_dtype_and_shape
def _get_simple_dtype_and_shape(self, colnum, rows=None):
    """
    When reading a single column, we want the basic data
    type and the shape of the array.

    for scalar columns, shape is just nrows, otherwise
    it is (nrows, dim1, dim2)

    Note if rows= is sent and only a single row is requested,
    the shape will be (dim1, dim2)
    """

    # basic datatype
    npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
    info = self._info['colinfo'][colnum]
    name = info['name']

    if rows is None:
        nrows = self._info['nrows']
    else:
        nrows = rows.size

    shape = None
    tdim = info['tdim']

    shape = _tdim2shape(tdim, name, is_string=(npy_type[0] == 'S'))
    if shape is not None:
        if nrows > 1:
            if not isinstance(shape, tuple):
                # vector
                shape = (nrows, shape)
            else:
                # multi-dimensional
                shape = tuple([nrows] + list(shape))
    else:
        # scalar
        shape = nrows
    return npy_type, shape
python
def _get_simple_dtype_and_shape(self, colnum, rows=None):
    npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
    info = self._info['colinfo'][colnum]
    name = info['name']

    if rows is None:
        nrows = self._info['nrows']
    else:
        nrows = rows.size

    shape = None
    tdim = info['tdim']

    shape = _tdim2shape(tdim, name, is_string=(npy_type[0] == 'S'))
    if shape is not None:
        if nrows > 1:
            if not isinstance(shape, tuple):
                shape = (nrows, shape)
            else:
                shape = tuple([nrows] + list(shape))
    else:
        shape = nrows
    return npy_type, shape
[ "def", "_get_simple_dtype_and_shape", "(", "self", ",", "colnum", ",", "rows", "=", "None", ")", ":", "# basic datatype", "npy_type", ",", "isvar", ",", "istbit", "=", "self", ".", "_get_tbl_numpy_dtype", "(", "colnum", ")", "info", "=", "self", ".", "_info", "[", "'colinfo'", "]", "[", "colnum", "]", "name", "=", "info", "[", "'name'", "]", "if", "rows", "is", "None", ":", "nrows", "=", "self", ".", "_info", "[", "'nrows'", "]", "else", ":", "nrows", "=", "rows", ".", "size", "shape", "=", "None", "tdim", "=", "info", "[", "'tdim'", "]", "shape", "=", "_tdim2shape", "(", "tdim", ",", "name", ",", "is_string", "=", "(", "npy_type", "[", "0", "]", "==", "'S'", ")", ")", "if", "shape", "is", "not", "None", ":", "if", "nrows", ">", "1", ":", "if", "not", "isinstance", "(", "shape", ",", "tuple", ")", ":", "# vector", "shape", "=", "(", "nrows", ",", "shape", ")", "else", ":", "# multi-dimensional", "shape", "=", "tuple", "(", "[", "nrows", "]", "+", "list", "(", "shape", ")", ")", "else", ":", "# scalar", "shape", "=", "nrows", "return", "npy_type", ",", "shape" ]
When reading a single column, we want the basic data type and the
shape of the array.

for scalar columns, shape is just nrows, otherwise it is
(nrows, dim1, dim2)

Note if rows= is sent and only a single row is requested, the shape
will be (dim1, dim2)
[ "When", "reading", "a", "single", "column", "we", "want", "the", "basic", "data", "type", "and", "the", "shape", "of", "the", "array", "." ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1004-L1041
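The shape logic above reduces to prepending nrows to the TDIM-derived cell shape; a hedged standalone sketch (simple_shape is an illustrative helper, and tdim_shape stands in for what _tdim2shape would return):

def simple_shape(nrows, tdim_shape=None):
    # tdim_shape: None for scalar columns, an int for vectors,
    # a tuple for multi-dimensional cells
    if tdim_shape is None:
        return nrows                               # scalar column
    if nrows > 1:
        if not isinstance(tdim_shape, tuple):
            return (nrows, tdim_shape)             # vector column
        return tuple([nrows] + list(tdim_shape))   # multi-dimensional
    return tdim_shape                              # single row keeps the cell shape

print(simple_shape(10))          # 10
print(simple_shape(10, 3))       # (10, 3)
print(simple_shape(10, (2, 4)))  # (10, 2, 4)
print(simple_shape(1, (2, 4)))   # (2, 4)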
esheldon/fitsio
fitsio/hdu/table.py
TableHDU.get_rec_column_descr
def get_rec_column_descr(self, colnum, vstorage): """ Get a descriptor entry for the specified column. parameters ---------- colnum: integer The column number, 0 offset vstorage: string See docs in read_columns """ npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum) name = self._info['colinfo'][colnum]['name'] if isvar: if vstorage == 'object': descr = (name, 'O') else: tform = self._info['colinfo'][colnum]['tform'] max_size = _extract_vararray_max(tform) if max_size <= 0: name = self._info['colinfo'][colnum]['name'] mess = 'Will read as an object field' if max_size < 0: mess = "Column '%s': No maximum size: '%s'. %s" mess = mess % (name, tform, mess) warnings.warn(mess, FITSRuntimeWarning) else: mess = "Column '%s': Max size is zero: '%s'. %s" mess = mess % (name, tform, mess) warnings.warn(mess, FITSRuntimeWarning) # we are forced to read this as an object array return self.get_rec_column_descr(colnum, 'object') if npy_type[0] == 'S': # variable length string columns cannot # themselves be arrays I don't think npy_type = 'S%d' % max_size descr = (name, npy_type) elif npy_type[0] == 'U': # variable length string columns cannot # themselves be arrays I don't think npy_type = 'U%d' % max_size descr = (name, npy_type) else: descr = (name, npy_type, max_size) else: tdim = self._info['colinfo'][colnum]['tdim'] shape = _tdim2shape( tdim, name, is_string=(npy_type[0] == 'S' or npy_type[0] == 'U')) if shape is not None: descr = (name, npy_type, shape) else: descr = (name, npy_type) return descr, isvar
python
def get_rec_column_descr(self, colnum, vstorage): npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum) name = self._info['colinfo'][colnum]['name'] if isvar: if vstorage == 'object': descr = (name, 'O') else: tform = self._info['colinfo'][colnum]['tform'] max_size = _extract_vararray_max(tform) if max_size <= 0: name = self._info['colinfo'][colnum]['name'] mess = 'Will read as an object field' if max_size < 0: mess = "Column '%s': No maximum size: '%s'. %s" mess = mess % (name, tform, mess) warnings.warn(mess, FITSRuntimeWarning) else: mess = "Column '%s': Max size is zero: '%s'. %s" mess = mess % (name, tform, mess) warnings.warn(mess, FITSRuntimeWarning) return self.get_rec_column_descr(colnum, 'object') if npy_type[0] == 'S': npy_type = 'S%d' % max_size descr = (name, npy_type) elif npy_type[0] == 'U': npy_type = 'U%d' % max_size descr = (name, npy_type) else: descr = (name, npy_type, max_size) else: tdim = self._info['colinfo'][colnum]['tdim'] shape = _tdim2shape( tdim, name, is_string=(npy_type[0] == 'S' or npy_type[0] == 'U')) if shape is not None: descr = (name, npy_type, shape) else: descr = (name, npy_type) return descr, isvar
[ "def", "get_rec_column_descr", "(", "self", ",", "colnum", ",", "vstorage", ")", ":", "npy_type", ",", "isvar", ",", "istbit", "=", "self", ".", "_get_tbl_numpy_dtype", "(", "colnum", ")", "name", "=", "self", ".", "_info", "[", "'colinfo'", "]", "[", "colnum", "]", "[", "'name'", "]", "if", "isvar", ":", "if", "vstorage", "==", "'object'", ":", "descr", "=", "(", "name", ",", "'O'", ")", "else", ":", "tform", "=", "self", ".", "_info", "[", "'colinfo'", "]", "[", "colnum", "]", "[", "'tform'", "]", "max_size", "=", "_extract_vararray_max", "(", "tform", ")", "if", "max_size", "<=", "0", ":", "name", "=", "self", ".", "_info", "[", "'colinfo'", "]", "[", "colnum", "]", "[", "'name'", "]", "mess", "=", "'Will read as an object field'", "if", "max_size", "<", "0", ":", "mess", "=", "\"Column '%s': No maximum size: '%s'. %s\"", "mess", "=", "mess", "%", "(", "name", ",", "tform", ",", "mess", ")", "warnings", ".", "warn", "(", "mess", ",", "FITSRuntimeWarning", ")", "else", ":", "mess", "=", "\"Column '%s': Max size is zero: '%s'. %s\"", "mess", "=", "mess", "%", "(", "name", ",", "tform", ",", "mess", ")", "warnings", ".", "warn", "(", "mess", ",", "FITSRuntimeWarning", ")", "# we are forced to read this as an object array", "return", "self", ".", "get_rec_column_descr", "(", "colnum", ",", "'object'", ")", "if", "npy_type", "[", "0", "]", "==", "'S'", ":", "# variable length string columns cannot", "# themselves be arrays I don't think", "npy_type", "=", "'S%d'", "%", "max_size", "descr", "=", "(", "name", ",", "npy_type", ")", "elif", "npy_type", "[", "0", "]", "==", "'U'", ":", "# variable length string columns cannot", "# themselves be arrays I don't think", "npy_type", "=", "'U%d'", "%", "max_size", "descr", "=", "(", "name", ",", "npy_type", ")", "else", ":", "descr", "=", "(", "name", ",", "npy_type", ",", "max_size", ")", "else", ":", "tdim", "=", "self", ".", "_info", "[", "'colinfo'", "]", "[", "colnum", "]", "[", "'tdim'", "]", "shape", "=", "_tdim2shape", "(", "tdim", ",", "name", ",", "is_string", "=", "(", "npy_type", "[", "0", "]", "==", "'S'", "or", "npy_type", "[", "0", "]", "==", "'U'", ")", ")", "if", "shape", "is", "not", "None", ":", "descr", "=", "(", "name", ",", "npy_type", ",", "shape", ")", "else", ":", "descr", "=", "(", "name", ",", "npy_type", ")", "return", "descr", ",", "isvar" ]
Get a descriptor entry for the specified column. parameters ---------- colnum: integer The column number, 0 offset vstorage: string See docs in read_columns
[ "Get", "a", "descriptor", "entry", "for", "the", "specified", "column", "." ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1043-L1100
esheldon/fitsio
fitsio/hdu/table.py
TableHDU._read_rec_with_var
def _read_rec_with_var( self, colnums, rows, dtype, offsets, isvar, vstorage): """ Read columns from a table into a rec array, including variable length columns. This is special because, for efficiency, it involves reading from the main table as normal but skipping the columns in the array that are variable. Then reading the variable length columns, with accounting for strides appropriately. row and column numbers should be checked before calling this function """ colnumsp = colnums+1 if rows is None: nrows = self._info['nrows'] else: nrows = rows.size array = numpy.zeros(nrows, dtype=dtype) # read from the main table first wnotvar, = numpy.where(isvar == False) # noqa if wnotvar.size > 0: # this will be contiguous (not true for slices) thesecol = colnumsp[wnotvar] theseoff = offsets[wnotvar] self._FITS.read_columns_as_rec_byoffset(self._ext+1, thesecol, theseoff, array, rows) for i in xrange(thesecol.size): name = array.dtype.names[wnotvar[i]] colnum = thesecol[i]-1 self._rescale_and_convert_field_inplace( array, name, self._info['colinfo'][colnum]['tscale'], self._info['colinfo'][colnum]['tzero']) array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array) # now read the variable length arrays we may be able to speed this up # by storing directly instead of reading first into a list wvar, = numpy.where(isvar == True) # noqa if wvar.size > 0: # this will be contiguous (not true for slices) thesecol = colnumsp[wvar] for i in xrange(thesecol.size): colnump = thesecol[i] name = array.dtype.names[wvar[i]] dlist = self._FITS.read_var_column_as_list( self._ext+1, colnump, rows) if (isinstance(dlist[0], str) or (IS_PY3 and isinstance(dlist[0], bytes))): is_string = True else: is_string = False if array[name].dtype.descr[0][1][1] == 'O': # storing in object array # get references to each, no copy made for irow, item in enumerate(dlist): if IS_PY3 and isinstance(item, bytes): item = item.decode('ascii') array[name][irow] = item else: for irow, item in enumerate(dlist): if IS_PY3 and isinstance(item, bytes): item = item.decode('ascii') if is_string: array[name][irow] = item else: ncopy = len(item) if IS_PY3: ts = array[name].dtype.descr[0][1][1] if ts != 'S' and ts != 'U': array[name][irow][0:ncopy] = item[:] else: array[name][irow] = item else: array[name][irow][0:ncopy] = item[:] return array
python
def _read_rec_with_var( self, colnums, rows, dtype, offsets, isvar, vstorage): colnumsp = colnums+1 if rows is None: nrows = self._info['nrows'] else: nrows = rows.size array = numpy.zeros(nrows, dtype=dtype) wnotvar, = numpy.where(isvar == False) if wnotvar.size > 0: thesecol = colnumsp[wnotvar] theseoff = offsets[wnotvar] self._FITS.read_columns_as_rec_byoffset(self._ext+1, thesecol, theseoff, array, rows) for i in xrange(thesecol.size): name = array.dtype.names[wnotvar[i]] colnum = thesecol[i]-1 self._rescale_and_convert_field_inplace( array, name, self._info['colinfo'][colnum]['tscale'], self._info['colinfo'][colnum]['tzero']) array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array) wvar, = numpy.where(isvar == True) if wvar.size > 0: thesecol = colnumsp[wvar] for i in xrange(thesecol.size): colnump = thesecol[i] name = array.dtype.names[wvar[i]] dlist = self._FITS.read_var_column_as_list( self._ext+1, colnump, rows) if (isinstance(dlist[0], str) or (IS_PY3 and isinstance(dlist[0], bytes))): is_string = True else: is_string = False if array[name].dtype.descr[0][1][1] == 'O': for irow, item in enumerate(dlist): if IS_PY3 and isinstance(item, bytes): item = item.decode('ascii') array[name][irow] = item else: for irow, item in enumerate(dlist): if IS_PY3 and isinstance(item, bytes): item = item.decode('ascii') if is_string: array[name][irow] = item else: ncopy = len(item) if IS_PY3: ts = array[name].dtype.descr[0][1][1] if ts != 'S' and ts != 'U': array[name][irow][0:ncopy] = item[:] else: array[name][irow] = item else: array[name][irow][0:ncopy] = item[:] return array
[ "def", "_read_rec_with_var", "(", "self", ",", "colnums", ",", "rows", ",", "dtype", ",", "offsets", ",", "isvar", ",", "vstorage", ")", ":", "colnumsp", "=", "colnums", "+", "1", "if", "rows", "is", "None", ":", "nrows", "=", "self", ".", "_info", "[", "'nrows'", "]", "else", ":", "nrows", "=", "rows", ".", "size", "array", "=", "numpy", ".", "zeros", "(", "nrows", ",", "dtype", "=", "dtype", ")", "# read from the main table first", "wnotvar", ",", "=", "numpy", ".", "where", "(", "isvar", "==", "False", ")", "# noqa", "if", "wnotvar", ".", "size", ">", "0", ":", "# this will be contiguous (not true for slices)", "thesecol", "=", "colnumsp", "[", "wnotvar", "]", "theseoff", "=", "offsets", "[", "wnotvar", "]", "self", ".", "_FITS", ".", "read_columns_as_rec_byoffset", "(", "self", ".", "_ext", "+", "1", ",", "thesecol", ",", "theseoff", ",", "array", ",", "rows", ")", "for", "i", "in", "xrange", "(", "thesecol", ".", "size", ")", ":", "name", "=", "array", ".", "dtype", ".", "names", "[", "wnotvar", "[", "i", "]", "]", "colnum", "=", "thesecol", "[", "i", "]", "-", "1", "self", ".", "_rescale_and_convert_field_inplace", "(", "array", ",", "name", ",", "self", ".", "_info", "[", "'colinfo'", "]", "[", "colnum", "]", "[", "'tscale'", "]", ",", "self", ".", "_info", "[", "'colinfo'", "]", "[", "colnum", "]", "[", "'tzero'", "]", ")", "array", "=", "self", ".", "_maybe_decode_fits_ascii_strings_to_unicode_py3", "(", "array", ")", "# now read the variable length arrays we may be able to speed this up", "# by storing directly instead of reading first into a list", "wvar", ",", "=", "numpy", ".", "where", "(", "isvar", "==", "True", ")", "# noqa", "if", "wvar", ".", "size", ">", "0", ":", "# this will be contiguous (not true for slices)", "thesecol", "=", "colnumsp", "[", "wvar", "]", "for", "i", "in", "xrange", "(", "thesecol", ".", "size", ")", ":", "colnump", "=", "thesecol", "[", "i", "]", "name", "=", "array", ".", "dtype", ".", "names", "[", "wvar", "[", "i", "]", "]", "dlist", "=", "self", ".", "_FITS", ".", "read_var_column_as_list", "(", "self", ".", "_ext", "+", "1", ",", "colnump", ",", "rows", ")", "if", "(", "isinstance", "(", "dlist", "[", "0", "]", ",", "str", ")", "or", "(", "IS_PY3", "and", "isinstance", "(", "dlist", "[", "0", "]", ",", "bytes", ")", ")", ")", ":", "is_string", "=", "True", "else", ":", "is_string", "=", "False", "if", "array", "[", "name", "]", ".", "dtype", ".", "descr", "[", "0", "]", "[", "1", "]", "[", "1", "]", "==", "'O'", ":", "# storing in object array", "# get references to each, no copy made", "for", "irow", ",", "item", "in", "enumerate", "(", "dlist", ")", ":", "if", "IS_PY3", "and", "isinstance", "(", "item", ",", "bytes", ")", ":", "item", "=", "item", ".", "decode", "(", "'ascii'", ")", "array", "[", "name", "]", "[", "irow", "]", "=", "item", "else", ":", "for", "irow", ",", "item", "in", "enumerate", "(", "dlist", ")", ":", "if", "IS_PY3", "and", "isinstance", "(", "item", ",", "bytes", ")", ":", "item", "=", "item", ".", "decode", "(", "'ascii'", ")", "if", "is_string", ":", "array", "[", "name", "]", "[", "irow", "]", "=", "item", "else", ":", "ncopy", "=", "len", "(", "item", ")", "if", "IS_PY3", ":", "ts", "=", "array", "[", "name", "]", ".", "dtype", ".", "descr", "[", "0", "]", "[", "1", "]", "[", "1", "]", "if", "ts", "!=", "'S'", "and", "ts", "!=", "'U'", ":", "array", "[", "name", "]", "[", "irow", "]", "[", "0", ":", "ncopy", "]", "=", "item", "[", ":", "]", "else", ":", "array", "[", "name", "]", "[", "irow", "]", "=", "item", 
"else", ":", "array", "[", "name", "]", "[", "irow", "]", "[", "0", ":", "ncopy", "]", "=", "item", "[", ":", "]", "return", "array" ]
Read columns from a table into a rec array, including variable length columns. This is special because, for efficiency, it involves reading from the main table as normal but skipping the columns in the array that are variable. Then reading the variable length columns, with accounting for strides appropriately. row and column numbers should be checked before calling this function
[ "Read", "columns", "from", "a", "table", "into", "a", "rec", "array", "including", "variable", "length", "columns", ".", "This", "is", "special", "because", "for", "efficiency", "it", "involves", "reading", "from", "the", "main", "table", "as", "normal", "but", "skipping", "the", "columns", "in", "the", "array", "that", "are", "variable", ".", "Then", "reading", "the", "variable", "length", "columns", "with", "accounting", "for", "strides", "appropriately", "." ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1102-L1188
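The two storage strategies for variable-length cells can be shown in isolation; a sketch assuming a maximum length of 5 in place of the TFORM-derived maximum:

import numpy as np

dlist = [np.arange(2), np.arange(5), np.arange(3)]   # rows of varying length

# 'object' storage keeps a reference to each row's data, no copy made.
obj = np.zeros(3, dtype=[('vcol', 'O')])
for irow, item in enumerate(dlist):
    obj['vcol'][irow] = item

# 'fixed' storage copies into a padded field; unused slots stay zero.
fixed = np.zeros(3, dtype=[('vcol', 'i8', 5)])
for irow, item in enumerate(dlist):
    fixed['vcol'][irow][0:len(item)] = item[:]

print(obj['vcol'][0], fixed['vcol'][0])   # -> [0 1] [0 1 0 0 0]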
esheldon/fitsio
fitsio/hdu/table.py
TableHDU._extract_rows
def _extract_rows(self, rows): """ Extract an array of rows from an input scalar or sequence """ if rows is not None: rows = numpy.array(rows, ndmin=1, copy=False, dtype='i8') # returns unique, sorted rows = numpy.unique(rows) maxrow = self._info['nrows']-1 if rows[0] < 0 or rows[-1] > maxrow: raise ValueError("rows must be in [%d,%d]" % (0, maxrow)) return rows
python
def _extract_rows(self, rows): if rows is not None: rows = numpy.array(rows, ndmin=1, copy=False, dtype='i8') rows = numpy.unique(rows) maxrow = self._info['nrows']-1 if rows[0] < 0 or rows[-1] > maxrow: raise ValueError("rows must be in [%d,%d]" % (0, maxrow)) return rows
[ "def", "_extract_rows", "(", "self", ",", "rows", ")", ":", "if", "rows", "is", "not", "None", ":", "rows", "=", "numpy", ".", "array", "(", "rows", ",", "ndmin", "=", "1", ",", "copy", "=", "False", ",", "dtype", "=", "'i8'", ")", "# returns unique, sorted", "rows", "=", "numpy", ".", "unique", "(", "rows", ")", "maxrow", "=", "self", ".", "_info", "[", "'nrows'", "]", "-", "1", "if", "rows", "[", "0", "]", "<", "0", "or", "rows", "[", "-", "1", "]", ">", "maxrow", ":", "raise", "ValueError", "(", "\"rows must be in [%d,%d]\"", "%", "(", "0", ",", "maxrow", ")", ")", "return", "rows" ]
Extract an array of rows from an input scalar or sequence
[ "Extract", "an", "array", "of", "rows", "from", "an", "input", "scalar", "or", "sequence" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1190-L1202
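The normalization is easy to check in isolation; a sketch with nrows passed in instead of read from self._info:

import numpy as np

nrows = 100
# Any scalar or sequence becomes a unique, sorted i8 array ...
rows = np.unique(np.array([7, 3, 7, 0], ndmin=1, dtype='i8'))
# ... and is then range-checked against the table length.
if rows[0] < 0 or rows[-1] > nrows - 1:
    raise ValueError("rows must be in [%d,%d]" % (0, nrows - 1))
print(rows)   # -> [0 3 7]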
esheldon/fitsio
fitsio/hdu/table.py
TableHDU._process_slice
def _process_slice(self, arg): """ process the input slice for use calling the C code """ start = arg.start stop = arg.stop step = arg.step nrows = self._info['nrows'] if step is None: step = 1 if start is None: start = 0 if stop is None: stop = nrows if start < 0: start = nrows + start if start < 0: raise IndexError("Index out of bounds") if stop < 0: stop = nrows + start + 1 if stop < start: # will return an empty struct stop = start if stop > nrows: stop = nrows return slice(start, stop, step)
python
def _process_slice(self, arg): start = arg.start stop = arg.stop step = arg.step nrows = self._info['nrows'] if step is None: step = 1 if start is None: start = 0 if stop is None: stop = nrows if start < 0: start = nrows + start if start < 0: raise IndexError("Index out of bounds") if stop < 0: stop = nrows + start + 1 if stop < start: stop = start if stop > nrows: stop = nrows return slice(start, stop, step)
[ "def", "_process_slice", "(", "self", ",", "arg", ")", ":", "start", "=", "arg", ".", "start", "stop", "=", "arg", ".", "stop", "step", "=", "arg", ".", "step", "nrows", "=", "self", ".", "_info", "[", "'nrows'", "]", "if", "step", "is", "None", ":", "step", "=", "1", "if", "start", "is", "None", ":", "start", "=", "0", "if", "stop", "is", "None", ":", "stop", "=", "nrows", "if", "start", "<", "0", ":", "start", "=", "nrows", "+", "start", "if", "start", "<", "0", ":", "raise", "IndexError", "(", "\"Index out of bounds\"", ")", "if", "stop", "<", "0", ":", "stop", "=", "nrows", "+", "start", "+", "1", "if", "stop", "<", "start", ":", "# will return an empty struct", "stop", "=", "start", "if", "stop", ">", "nrows", ":", "stop", "=", "nrows", "return", "slice", "(", "start", ",", "stop", ",", "step", ")" ]
process the input slice for use calling the C code
[ "process", "the", "input", "slice", "for", "use", "calling", "the", "C", "code" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1204-L1234
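A standalone version of the same normalization, with nrows as a parameter (the helper name is illustrative); it mirrors the original's rules, including the negative-stop arithmetic and the clamp to an empty slice rather than an error:

def process_slice(arg, nrows):
    start = 0 if arg.start is None else arg.start
    stop = nrows if arg.stop is None else arg.stop
    step = 1 if arg.step is None else arg.step
    if start < 0:
        start = nrows + start
        if start < 0:
            raise IndexError("Index out of bounds")
    if stop < 0:
        stop = nrows + start + 1   # negative stop is resolved after start
    if stop < start:
        stop = start               # empty result instead of an error
    if stop > nrows:
        stop = nrows
    return slice(start, stop, step)

print(process_slice(slice(-3, None), 10))   # -> slice(7, 10, 1)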
esheldon/fitsio
fitsio/hdu/table.py
TableHDU._slice2rows
def _slice2rows(self, start, stop, step=None): """ Convert a slice to an explicit array of rows """ nrows = self._info['nrows'] if start is None: start = 0 if stop is None: stop = nrows if step is None: step = 1 tstart = self._fix_range(start) tstop = self._fix_range(stop) if tstart == 0 and tstop == nrows: # this is faster: if all fields are also requested, then a # single fread will be done return None if stop < start: raise ValueError("start is greater than stop in slice") return numpy.arange(tstart, tstop, step, dtype='i8')
python
def _slice2rows(self, start, stop, step=None): nrows = self._info['nrows'] if start is None: start = 0 if stop is None: stop = nrows if step is None: step = 1 tstart = self._fix_range(start) tstop = self._fix_range(stop) if tstart == 0 and tstop == nrows: return None if stop < start: raise ValueError("start is greater than stop in slice") return numpy.arange(tstart, tstop, step, dtype='i8')
[ "def", "_slice2rows", "(", "self", ",", "start", ",", "stop", ",", "step", "=", "None", ")", ":", "nrows", "=", "self", ".", "_info", "[", "'nrows'", "]", "if", "start", "is", "None", ":", "start", "=", "0", "if", "stop", "is", "None", ":", "stop", "=", "nrows", "if", "step", "is", "None", ":", "step", "=", "1", "tstart", "=", "self", ".", "_fix_range", "(", "start", ")", "tstop", "=", "self", ".", "_fix_range", "(", "stop", ")", "if", "tstart", "==", "0", "and", "tstop", "==", "nrows", ":", "# this is faster: if all fields are also requested, then a", "# single fread will be done", "return", "None", "if", "stop", "<", "start", ":", "raise", "ValueError", "(", "\"start is greater than stop in slice\"", ")", "return", "numpy", ".", "arange", "(", "tstart", ",", "tstop", ",", "step", ",", "dtype", "=", "'i8'", ")" ]
Convert a slice to an explicit array of rows
[ "Convert", "a", "slice", "to", "an", "explicit", "array", "of", "rows" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1236-L1256
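The full-range short-circuit is the interesting part: returning None lets the caller fall back to a single contiguous read. A sketch with nrows passed in (the _fix_range clamping is omitted for brevity):

import numpy as np

def slice2rows(start, stop, nrows, step=None):
    start = 0 if start is None else start
    stop = nrows if stop is None else stop
    step = 1 if step is None else step
    if start == 0 and stop == nrows:
        return None   # full table: no explicit row list needed
    if stop < start:
        raise ValueError("start is greater than stop in slice")
    return np.arange(start, stop, step, dtype='i8')

print(slice2rows(2, 8, 10, step=2))   # -> [2 4 6]
print(slice2rows(None, None, 10))     # -> None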
esheldon/fitsio
fitsio/hdu/table.py
TableHDU._fix_range
def _fix_range(self, num, isslice=True): """ Ensure the input is within range. If isslice=False, treat the input as a single element rather than a slice endpoint """ nrows = self._info['nrows'] if isslice: # include the end if num < 0: num = nrows + (1+num) elif num > nrows: num = nrows else: # single element if num < 0: num = nrows + num elif num > (nrows-1): num = nrows-1 return num
python
def _fix_range(self, num, isslice=True): nrows = self._info['nrows'] if isslice: if num < 0: num = nrows + (1+num) elif num > nrows: num = nrows else: if num < 0: num = nrows + num elif num > (nrows-1): num = nrows-1 return num
[ "def", "_fix_range", "(", "self", ",", "num", ",", "isslice", "=", "True", ")", ":", "nrows", "=", "self", ".", "_info", "[", "'nrows'", "]", "if", "isslice", ":", "# include the end", "if", "num", "<", "0", ":", "num", "=", "nrows", "+", "(", "1", "+", "num", ")", "elif", "num", ">", "nrows", ":", "num", "=", "nrows", "else", ":", "# single element", "if", "num", "<", "0", ":", "num", "=", "nrows", "+", "num", "elif", "num", ">", "(", "nrows", "-", "1", ")", ":", "num", "=", "nrows", "-", "1", "return", "num" ]
Ensure the input is within range. If isslice=False, treat the input as a single element rather than a slice endpoint
[ "Ensure", "the", "input", "is", "within", "range", "." ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1258-L1279
esheldon/fitsio
fitsio/hdu/table.py
TableHDU._rescale_and_convert_field_inplace
def _rescale_and_convert_field_inplace(self, array, name, scale, zero): """ Apply fits scalings. Also, convert bool to proper numpy boolean values """ self._rescale_array(array[name], scale, zero) if array[name].dtype == numpy.bool: array[name] = self._convert_bool_array(array[name]) return array
python
def _rescale_and_convert_field_inplace(self, array, name, scale, zero): self._rescale_array(array[name], scale, zero) if array[name].dtype == numpy.bool: array[name] = self._convert_bool_array(array[name]) return array
[ "def", "_rescale_and_convert_field_inplace", "(", "self", ",", "array", ",", "name", ",", "scale", ",", "zero", ")", ":", "self", ".", "_rescale_array", "(", "array", "[", "name", "]", ",", "scale", ",", "zero", ")", "if", "array", "[", "name", "]", ".", "dtype", "==", "numpy", ".", "bool", ":", "array", "[", "name", "]", "=", "self", ".", "_convert_bool_array", "(", "array", "[", "name", "]", ")", "return", "array" ]
Apply fits scalings. Also, convert bool to proper numpy boolean values
[ "Apply", "fits", "scalings", ".", "Also", "convert", "bool", "to", "proper", "numpy", "boolean", "values" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1281-L1289
esheldon/fitsio
fitsio/hdu/table.py
TableHDU._rescale_array
def _rescale_array(self, array, scale, zero): """ Scale the input array """ if scale != 1.0: sval = numpy.array(scale, dtype=array.dtype) array *= sval if zero != 0.0: zval = numpy.array(zero, dtype=array.dtype) array += zval
python
def _rescale_array(self, array, scale, zero): if scale != 1.0: sval = numpy.array(scale, dtype=array.dtype) array *= sval if zero != 0.0: zval = numpy.array(zero, dtype=array.dtype) array += zval
[ "def", "_rescale_array", "(", "self", ",", "array", ",", "scale", ",", "zero", ")", ":", "if", "scale", "!=", "1.0", ":", "sval", "=", "numpy", ".", "array", "(", "scale", ",", "dtype", "=", "array", ".", "dtype", ")", "array", "*=", "sval", "if", "zero", "!=", "0.0", ":", "zval", "=", "numpy", ".", "array", "(", "zero", ",", "dtype", "=", "array", ".", "dtype", ")", "array", "+=", "zval" ]
Scale the input array
[ "Scale", "the", "input", "array" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1302-L1311
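This is the TSCALn/TZEROn application, physical = raw*scale + zero; casting scale and zero to the array's own dtype first keeps *= and += operating in place. A runnable sketch:

import numpy as np

def rescale_array(array, scale, zero):
    if scale != 1.0:
        array *= np.array(scale, dtype=array.dtype)
    if zero != 0.0:
        array += np.array(zero, dtype=array.dtype)

a = np.array([0.0, 1.0, 2.0])
rescale_array(a, 2.0, 10.0)
print(a)   # -> [10. 12. 14.]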
esheldon/fitsio
fitsio/hdu/table.py
TableHDU._maybe_trim_strings
def _maybe_trim_strings(self, array, **keys): """ if requested, trim trailing white space from all string fields in the input array """ trim_strings = keys.get('trim_strings', False) if self.trim_strings or trim_strings: _trim_strings(array)
python
def _maybe_trim_strings(self, array, **keys): trim_strings = keys.get('trim_strings', False) if self.trim_strings or trim_strings: _trim_strings(array)
[ "def", "_maybe_trim_strings", "(", "self", ",", "array", ",", "*", "*", "keys", ")", ":", "trim_strings", "=", "keys", ".", "get", "(", "'trim_strings'", ",", "False", ")", "if", "self", ".", "trim_strings", "or", "trim_strings", ":", "_trim_strings", "(", "array", ")" ]
if requested, trim trailing white space from all string fields in the input array
[ "if", "requested", "trim", "trailing", "white", "space", "from", "all", "string", "fields", "in", "the", "input", "array" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1313-L1320
esheldon/fitsio
fitsio/hdu/table.py
TableHDU._convert_bool_array
def _convert_bool_array(self, array): """ cfitsio reads as characters 'T' and 'F' -- convert to real boolean If input is a fits bool, convert to numpy boolean """ output = (array.view(numpy.int8) == ord('T')).astype(numpy.bool) return output
python
def _convert_bool_array(self, array): output = (array.view(numpy.int8) == ord('T')).astype(numpy.bool) return output
[ "def", "_convert_bool_array", "(", "self", ",", "array", ")", ":", "output", "=", "(", "array", ".", "view", "(", "numpy", ".", "int8", ")", "==", "ord", "(", "'T'", ")", ")", ".", "astype", "(", "numpy", ".", "bool", ")", "return", "output" ]
cfitsio reads as characters 'T' and 'F' -- convert to real boolean If input is a fits bool, convert to numpy boolean
[ "cfitsio", "reads", "as", "characters", "T", "and", "F", "--", "convert", "to", "real", "boolean", "If", "input", "is", "a", "fits", "bool", "convert", "to", "numpy", "boolean" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1344-L1351
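The conversion can be demonstrated without a FITS file; note this sketch uses the builtin bool, because the numpy.bool alias seen in the code above was removed in recent numpy releases:

import numpy as np

# cfitsio hands logical columns back as the characters 'T'/'F'.
raw = np.array([b'T', b'F', b'T'], dtype='S1')
out = (raw.view(np.int8) == ord('T')).astype(bool)
print(out)   # -> [ True False  True]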
esheldon/fitsio
fitsio/hdu/table.py
TableHDU._get_tbl_numpy_dtype
def _get_tbl_numpy_dtype(self, colnum, include_endianness=True): """ Get numpy type for the input column """ table_type = self._info['hdutype'] table_type_string = _hdu_type_map[table_type] try: ftype = self._info['colinfo'][colnum]['eqtype'] if table_type == ASCII_TBL: npy_type = _table_fits2npy_ascii[abs(ftype)] else: npy_type = _table_fits2npy[abs(ftype)] except KeyError: raise KeyError("unsupported %s fits data " "type: %d" % (table_type_string, ftype)) istbit = False if (ftype == 1): istbit = True isvar = False if ftype < 0: isvar = True if include_endianness: # if binary we will read the big endian bytes directly, # if ascii we read into native byte order if table_type == ASCII_TBL: addstr = '' else: addstr = '>' if npy_type not in ['u1', 'i1', 'S', 'U']: npy_type = addstr+npy_type if npy_type == 'S': width = self._info['colinfo'][colnum]['width'] npy_type = 'S%d' % width elif npy_type == 'U': width = self._info['colinfo'][colnum]['width'] npy_type = 'U%d' % width return npy_type, isvar, istbit
python
def _get_tbl_numpy_dtype(self, colnum, include_endianness=True): table_type = self._info['hdutype'] table_type_string = _hdu_type_map[table_type] try: ftype = self._info['colinfo'][colnum]['eqtype'] if table_type == ASCII_TBL: npy_type = _table_fits2npy_ascii[abs(ftype)] else: npy_type = _table_fits2npy[abs(ftype)] except KeyError: raise KeyError("unsupported %s fits data " "type: %d" % (table_type_string, ftype)) istbit = False if (ftype == 1): istbit = True isvar = False if ftype < 0: isvar = True if include_endianness: if table_type == ASCII_TBL: addstr = '' else: addstr = '>' if npy_type not in ['u1', 'i1', 'S', 'U']: npy_type = addstr+npy_type if npy_type == 'S': width = self._info['colinfo'][colnum]['width'] npy_type = 'S%d' % width elif npy_type == 'U': width = self._info['colinfo'][colnum]['width'] npy_type = 'U%d' % width return npy_type, isvar, istbit
[ "def", "_get_tbl_numpy_dtype", "(", "self", ",", "colnum", ",", "include_endianness", "=", "True", ")", ":", "table_type", "=", "self", ".", "_info", "[", "'hdutype'", "]", "table_type_string", "=", "_hdu_type_map", "[", "table_type", "]", "try", ":", "ftype", "=", "self", ".", "_info", "[", "'colinfo'", "]", "[", "colnum", "]", "[", "'eqtype'", "]", "if", "table_type", "==", "ASCII_TBL", ":", "npy_type", "=", "_table_fits2npy_ascii", "[", "abs", "(", "ftype", ")", "]", "else", ":", "npy_type", "=", "_table_fits2npy", "[", "abs", "(", "ftype", ")", "]", "except", "KeyError", ":", "raise", "KeyError", "(", "\"unsupported %s fits data \"", "\"type: %d\"", "%", "(", "table_type_string", ",", "ftype", ")", ")", "istbit", "=", "False", "if", "(", "ftype", "==", "1", ")", ":", "istbit", "=", "True", "isvar", "=", "False", "if", "ftype", "<", "0", ":", "isvar", "=", "True", "if", "include_endianness", ":", "# if binary we will read the big endian bytes directly,", "# if ascii we read into native byte order", "if", "table_type", "==", "ASCII_TBL", ":", "addstr", "=", "''", "else", ":", "addstr", "=", "'>'", "if", "npy_type", "not", "in", "[", "'u1'", ",", "'i1'", ",", "'S'", ",", "'U'", "]", ":", "npy_type", "=", "addstr", "+", "npy_type", "if", "npy_type", "==", "'S'", ":", "width", "=", "self", ".", "_info", "[", "'colinfo'", "]", "[", "colnum", "]", "[", "'width'", "]", "npy_type", "=", "'S%d'", "%", "width", "elif", "npy_type", "==", "'U'", ":", "width", "=", "self", ".", "_info", "[", "'colinfo'", "]", "[", "colnum", "]", "[", "'width'", "]", "npy_type", "=", "'U%d'", "%", "width", "return", "npy_type", ",", "isvar", ",", "istbit" ]
Get numpy type for the input column
[ "Get", "numpy", "type", "for", "the", "input", "column" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1353-L1393
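The endianness rule is the part worth illustrating: binary-table bytes are read big-endian, ascii tables in native order, and one-byte or string type codes carry no prefix. A sketch (the helper name is illustrative, and the inputs stand in for entries of the library's type-mapping tables):

def with_endianness(npy_type, is_ascii):
    addstr = '' if is_ascii else '>'
    if npy_type not in ['u1', 'i1', 'S', 'U']:
        npy_type = addstr + npy_type
    return npy_type

print(with_endianness('f8', is_ascii=False))   # -> '>f8'
print(with_endianness('u1', is_ascii=False))   # -> 'u1'
print(with_endianness('f8', is_ascii=True))    # -> 'f8'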
esheldon/fitsio
fitsio/hdu/table.py
TableHDU._process_args_as_rows_or_columns
def _process_args_as_rows_or_columns(self, arg, unpack=False): """ We must be able to interpret the args as as either a column name or row number, or sequences thereof. Numpy arrays and slices are also fine. Examples: 'field' 35 [35,55,86] ['f1',f2',...] Can also be tuples or arrays. """ flags = set() # if isinstance(arg, (tuple, list, numpy.ndarray)): # a sequence was entered if isstring(arg[0]): result = arg else: result = arg flags.add('isrows') elif isstring(arg): # a single string was entered result = arg elif isinstance(arg, slice): if unpack: flags.add('isrows') result = self._slice2rows(arg.start, arg.stop, arg.step) else: flags.add('isrows') flags.add('isslice') result = self._process_slice(arg) else: # a single object was entered. # Probably should apply some more checking on this result = arg flags.add('isrows') if numpy.ndim(arg) == 0: flags.add('isscalar') return result, flags
python
def _process_args_as_rows_or_columns(self, arg, unpack=False): flags = set() if isinstance(arg, (tuple, list, numpy.ndarray)): if isstring(arg[0]): result = arg else: result = arg flags.add('isrows') elif isstring(arg): result = arg elif isinstance(arg, slice): if unpack: flags.add('isrows') result = self._slice2rows(arg.start, arg.stop, arg.step) else: flags.add('isrows') flags.add('isslice') result = self._process_slice(arg) else: result = arg flags.add('isrows') if numpy.ndim(arg) == 0: flags.add('isscalar') return result, flags
[ "def", "_process_args_as_rows_or_columns", "(", "self", ",", "arg", ",", "unpack", "=", "False", ")", ":", "flags", "=", "set", "(", ")", "#", "if", "isinstance", "(", "arg", ",", "(", "tuple", ",", "list", ",", "numpy", ".", "ndarray", ")", ")", ":", "# a sequence was entered", "if", "isstring", "(", "arg", "[", "0", "]", ")", ":", "result", "=", "arg", "else", ":", "result", "=", "arg", "flags", ".", "add", "(", "'isrows'", ")", "elif", "isstring", "(", "arg", ")", ":", "# a single string was entered", "result", "=", "arg", "elif", "isinstance", "(", "arg", ",", "slice", ")", ":", "if", "unpack", ":", "flags", ".", "add", "(", "'isrows'", ")", "result", "=", "self", ".", "_slice2rows", "(", "arg", ".", "start", ",", "arg", ".", "stop", ",", "arg", ".", "step", ")", "else", ":", "flags", ".", "add", "(", "'isrows'", ")", "flags", ".", "add", "(", "'isslice'", ")", "result", "=", "self", ".", "_process_slice", "(", "arg", ")", "else", ":", "# a single object was entered.", "# Probably should apply some more checking on this", "result", "=", "arg", "flags", ".", "add", "(", "'isrows'", ")", "if", "numpy", ".", "ndim", "(", "arg", ")", "==", "0", ":", "flags", ".", "add", "(", "'isscalar'", ")", "return", "result", ",", "flags" ]
We must be able to interpret the args as as either a column name or row number, or sequences thereof. Numpy arrays and slices are also fine. Examples: 'field' 35 [35,55,86] ['f1',f2',...] Can also be tuples or arrays.
[ "We", "must", "be", "able", "to", "interpret", "the", "args", "as", "as", "either", "a", "column", "name", "or", "row", "number", "or", "sequences", "thereof", ".", "Numpy", "arrays", "and", "slices", "are", "also", "fine", "." ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1395-L1437
esheldon/fitsio
fitsio/hdu/table.py
TableHDU._extract_colnums
def _extract_colnums(self, columns=None): """ Extract an array of columns from the input """ if columns is None: return numpy.arange(self._ncol, dtype='i8') if not isinstance(columns, (tuple, list, numpy.ndarray)): # is a scalar return self._extract_colnum(columns) colnums = numpy.zeros(len(columns), dtype='i8') for i in xrange(colnums.size): colnums[i] = self._extract_colnum(columns[i]) # returns unique sorted colnums = numpy.unique(colnums) return colnums
python
def _extract_colnums(self, columns=None): if columns is None: return numpy.arange(self._ncol, dtype='i8') if not isinstance(columns, (tuple, list, numpy.ndarray)): return self._extract_colnum(columns) colnums = numpy.zeros(len(columns), dtype='i8') for i in xrange(colnums.size): colnums[i] = self._extract_colnum(columns[i]) colnums = numpy.unique(colnums) return colnums
[ "def", "_extract_colnums", "(", "self", ",", "columns", "=", "None", ")", ":", "if", "columns", "is", "None", ":", "return", "numpy", ".", "arange", "(", "self", ".", "_ncol", ",", "dtype", "=", "'i8'", ")", "if", "not", "isinstance", "(", "columns", ",", "(", "tuple", ",", "list", ",", "numpy", ".", "ndarray", ")", ")", ":", "# is a scalar", "return", "self", ".", "_extract_colnum", "(", "columns", ")", "colnums", "=", "numpy", ".", "zeros", "(", "len", "(", "columns", ")", ",", "dtype", "=", "'i8'", ")", "for", "i", "in", "xrange", "(", "colnums", ".", "size", ")", ":", "colnums", "[", "i", "]", "=", "self", ".", "_extract_colnum", "(", "columns", "[", "i", "]", ")", "# returns unique sorted", "colnums", "=", "numpy", ".", "unique", "(", "colnums", ")", "return", "colnums" ]
Extract an array of columns from the input
[ "Extract", "an", "array", "of", "columns", "from", "the", "input" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1494-L1511
esheldon/fitsio
fitsio/hdu/table.py
TableHDU._extract_colnum
def _extract_colnum(self, col): """ Get the column number for the input column """ if isinteger(col): colnum = col if (colnum < 0) or (colnum > (self._ncol-1)): raise ValueError( "column number should be in [%d,%d]" % (0, self._ncol-1)) else: colstr = mks(col) try: if self.case_sensitive: mess = "column name '%s' not found (case sensitive)" % col colnum = self._colnames.index(colstr) else: mess \ = "column name '%s' not found (case insensitive)" % col colnum = self._colnames_lower.index(colstr.lower()) except ValueError: raise ValueError(mess) return int(colnum)
python
def _extract_colnum(self, col): if isinteger(col): colnum = col if (colnum < 0) or (colnum > (self._ncol-1)): raise ValueError( "column number should be in [%d,%d]" % (0, self._ncol-1)) else: colstr = mks(col) try: if self.case_sensitive: mess = "column name '%s' not found (case sensitive)" % col colnum = self._colnames.index(colstr) else: mess \ = "column name '%s' not found (case insensitive)" % col colnum = self._colnames_lower.index(colstr.lower()) except ValueError: raise ValueError(mess) return int(colnum)
[ "def", "_extract_colnum", "(", "self", ",", "col", ")", ":", "if", "isinteger", "(", "col", ")", ":", "colnum", "=", "col", "if", "(", "colnum", "<", "0", ")", "or", "(", "colnum", ">", "(", "self", ".", "_ncol", "-", "1", ")", ")", ":", "raise", "ValueError", "(", "\"column number should be in [0,%d]\"", "%", "(", "0", ",", "self", ".", "_ncol", "-", "1", ")", ")", "else", ":", "colstr", "=", "mks", "(", "col", ")", "try", ":", "if", "self", ".", "case_sensitive", ":", "mess", "=", "\"column name '%s' not found (case sensitive)\"", "%", "col", "colnum", "=", "self", ".", "_colnames", ".", "index", "(", "colstr", ")", "else", ":", "mess", "=", "\"column name '%s' not found (case insensitive)\"", "%", "col", "colnum", "=", "self", ".", "_colnames_lower", ".", "index", "(", "colstr", ".", "lower", "(", ")", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "mess", ")", "return", "int", "(", "colnum", ")" ]
Get the column number for the input column
[ "Get", "the", "column", "number", "for", "the", "input", "column" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1513-L1535
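The lookup logic in isolation, with the column list passed in rather than held on the class (standalone helper, not the fitsio method):

def extract_colnum(col, colnames, case_sensitive=False):
    if isinstance(col, int):
        if col < 0 or col > len(colnames) - 1:
            raise ValueError(
                "column number should be in [%d,%d]" % (0, len(colnames) - 1))
        return col
    if case_sensitive:
        return colnames.index(col)
    return [c.lower() for c in colnames].index(col.lower())

print(extract_colnum('FLUX', ['id', 'flux']))   # -> 1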
esheldon/fitsio
fitsio/hdu/table.py
TableHDU._update_info
def _update_info(self): """ Call parent method and make sure this is in fact a table HDU. Set some convenience data. """ super(TableHDU, self)._update_info() if self._info['hdutype'] == IMAGE_HDU: mess = "Extension %s is not a Table HDU" % self.ext raise ValueError(mess) if 'colinfo' in self._info: self._colnames = [i['name'] for i in self._info['colinfo']] self._colnames_lower = [ i['name'].lower() for i in self._info['colinfo']] self._ncol = len(self._colnames)
python
def _update_info(self): super(TableHDU, self)._update_info() if self._info['hdutype'] == IMAGE_HDU: mess = "Extension %s is not a Table HDU" % self.ext raise ValueError(mess) if 'colinfo' in self._info: self._colnames = [i['name'] for i in self._info['colinfo']] self._colnames_lower = [ i['name'].lower() for i in self._info['colinfo']] self._ncol = len(self._colnames)
[ "def", "_update_info", "(", "self", ")", ":", "super", "(", "TableHDU", ",", "self", ")", ".", "_update_info", "(", ")", "if", "self", ".", "_info", "[", "'hdutype'", "]", "==", "IMAGE_HDU", ":", "mess", "=", "\"Extension %s is not a Table HDU\"", "%", "self", ".", "ext", "raise", "ValueError", "(", "mess", ")", "if", "'colinfo'", "in", "self", ".", "_info", ":", "self", ".", "_colnames", "=", "[", "i", "[", "'name'", "]", "for", "i", "in", "self", ".", "_info", "[", "'colinfo'", "]", "]", "self", ".", "_colnames_lower", "=", "[", "i", "[", "'name'", "]", ".", "lower", "(", ")", "for", "i", "in", "self", ".", "_info", "[", "'colinfo'", "]", "]", "self", ".", "_ncol", "=", "len", "(", "self", ".", "_colnames", ")" ]
Call parent method and make sure this is in fact a table HDU. Set some convenience data.
[ "Call", "parent", "method", "and", "make", "sure", "this", "is", "in", "fact", "a", "table", "HDU", ".", "Set", "some", "convenience", "data", "." ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1537-L1550
esheldon/fitsio
fitsio/hdu/table.py
TableHDU._get_next_buffered_row
def _get_next_buffered_row(self): """ Get the next row for iteration. """ if self._iter_row == self._iter_nrows: raise StopIteration if self._row_buffer_index >= self._iter_row_buffer: self._buffer_iter_rows(self._iter_row) data = self._row_buffer[self._row_buffer_index] self._iter_row += 1 self._row_buffer_index += 1 return data
python
def _get_next_buffered_row(self): if self._iter_row == self._iter_nrows: raise StopIteration if self._row_buffer_index >= self._iter_row_buffer: self._buffer_iter_rows(self._iter_row) data = self._row_buffer[self._row_buffer_index] self._iter_row += 1 self._row_buffer_index += 1 return data
[ "def", "_get_next_buffered_row", "(", "self", ")", ":", "if", "self", ".", "_iter_row", "==", "self", ".", "_iter_nrows", ":", "raise", "StopIteration", "if", "self", ".", "_row_buffer_index", ">=", "self", ".", "_iter_row_buffer", ":", "self", ".", "_buffer_iter_rows", "(", "self", ".", "_iter_row", ")", "data", "=", "self", ".", "_row_buffer", "[", "self", ".", "_row_buffer_index", "]", "self", ".", "_iter_row", "+=", "1", "self", ".", "_row_buffer_index", "+=", "1", "return", "data" ]
Get the next row for iteration.
[ "Get", "the", "next", "row", "for", "iteration", "." ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1640-L1653
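The refill-on-exhaustion pattern behind _get_next_buffered_row, reduced to a self-contained iterator over a plain list (class and attribute names are illustrative):

class BufferedRows:
    def __init__(self, data, buffer_size=3):
        self.data, self.n = data, len(data)
        self.buffer_size = buffer_size
        self.row = 0
        self.buf = []
        self.bufidx = buffer_size   # past the end, forcing a fill on first next()

    def __iter__(self):
        return self

    def __next__(self):
        if self.row == self.n:
            raise StopIteration
        if self.bufidx >= self.buffer_size:
            # refill: read the next chunk starting at the current row
            self.buf = self.data[self.row:self.row + self.buffer_size]
            self.bufidx = 0
        out = self.buf[self.bufidx]
        self.row += 1
        self.bufidx += 1
        return out

print(list(BufferedRows(list(range(7)))))   # -> [0, 1, 2, 3, 4, 5, 6]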
esheldon/fitsio
fitsio/hdu/table.py
TableHDU._buffer_iter_rows
def _buffer_iter_rows(self, start): """ Read in the buffer for iteration """ self._row_buffer = self[start:start+self._iter_row_buffer] # start back at the front of the buffer self._row_buffer_index = 0
python
def _buffer_iter_rows(self, start): self._row_buffer = self[start:start+self._iter_row_buffer] self._row_buffer_index = 0
[ "def", "_buffer_iter_rows", "(", "self", ",", "start", ")", ":", "self", ".", "_row_buffer", "=", "self", "[", "start", ":", "start", "+", "self", ".", "_iter_row_buffer", "]", "# start back at the front of the buffer", "self", ".", "_row_buffer_index", "=", "0" ]
Read in the buffer for iteration
[ "Read", "in", "the", "buffer", "for", "iteration" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1655-L1662
esheldon/fitsio
fitsio/hdu/table.py
AsciiTableHDU.read
def read(self, **keys): """ read a data from an ascii table HDU By default, all rows are read. Send rows= to select subsets of the data. Table data are read into a recarray for multiple columns, plain array for a single column. parameters ---------- columns: list/array An optional set of columns to read from table HDUs. Can be string or number. If a sequence, a recarray is always returned. If a scalar, an ordinary array is returned. rows: list/array, optional An optional list of rows to read from table HDUS. Default is to read all. vstorage: string, optional Over-ride the default method to store variable length columns. Can be 'fixed' or 'object'. See docs on fitsio.FITS for details. lower: bool, optional If True, force all columns names to lower case in output. Will over ride the lower= keyword from construction. upper: bool, optional If True, force all columns names to upper case in output. Will over ride the lower= keyword from construction. """ rows = keys.get('rows', None) columns = keys.get('columns', None) # if columns is None, returns all. Guaranteed to be unique and sorted colnums = self._extract_colnums(columns) if isinstance(colnums, int): # scalar sent, don't read as a recarray return self.read_column(columns, **keys) rows = self._extract_rows(rows) if rows is None: nrows = self._info['nrows'] else: nrows = rows.size # if rows is None still returns None, and is correctly interpreted # by the reader to mean all rows = self._extract_rows(rows) # this is the full dtype for all columns dtype, offsets, isvar = self.get_rec_dtype(colnums=colnums, **keys) array = numpy.zeros(nrows, dtype=dtype) # note reading into existing data wnotvar, = numpy.where(isvar == False) # noqa if wnotvar.size > 0: for i in wnotvar: colnum = colnums[i] name = array.dtype.names[i] a = array[name].copy() self._FITS.read_column(self._ext+1, colnum+1, a, rows) array[name] = a del a array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array) wvar, = numpy.where(isvar == True) # noqa if wvar.size > 0: for i in wvar: colnum = colnums[i] name = array.dtype.names[i] dlist = self._FITS.read_var_column_as_list( self._ext+1, colnum+1, rows) if (isinstance(dlist[0], str) or (IS_PY3 and isinstance(dlist[0], bytes))): is_string = True else: is_string = False if array[name].dtype.descr[0][1][1] == 'O': # storing in object array # get references to each, no copy made for irow, item in enumerate(dlist): if IS_PY3 and isinstance(item, bytes): item = item.decode('ascii') array[name][irow] = item else: for irow, item in enumerate(dlist): if IS_PY3 and isinstance(item, bytes): item = item.decode('ascii') if is_string: array[name][irow] = item else: ncopy = len(item) array[name][irow][0:ncopy] = item[:] lower = keys.get('lower', False) upper = keys.get('upper', False) if self.lower or lower: _names_to_lower_if_recarray(array) elif self.upper or upper: _names_to_upper_if_recarray(array) self._maybe_trim_strings(array, **keys) return array
python
def read(self, **keys): rows = keys.get('rows', None) columns = keys.get('columns', None) colnums = self._extract_colnums(columns) if isinstance(colnums, int): return self.read_column(columns, **keys) rows = self._extract_rows(rows) if rows is None: nrows = self._info['nrows'] else: nrows = rows.size rows = self._extract_rows(rows) dtype, offsets, isvar = self.get_rec_dtype(colnums=colnums, **keys) array = numpy.zeros(nrows, dtype=dtype) wnotvar, = numpy.where(isvar == False) if wnotvar.size > 0: for i in wnotvar: colnum = colnums[i] name = array.dtype.names[i] a = array[name].copy() self._FITS.read_column(self._ext+1, colnum+1, a, rows) array[name] = a del a array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array) wvar, = numpy.where(isvar == True) if wvar.size > 0: for i in wvar: colnum = colnums[i] name = array.dtype.names[i] dlist = self._FITS.read_var_column_as_list( self._ext+1, colnum+1, rows) if (isinstance(dlist[0], str) or (IS_PY3 and isinstance(dlist[0], bytes))): is_string = True else: is_string = False if array[name].dtype.descr[0][1][1] == 'O': for irow, item in enumerate(dlist): if IS_PY3 and isinstance(item, bytes): item = item.decode('ascii') array[name][irow] = item else: for irow, item in enumerate(dlist): if IS_PY3 and isinstance(item, bytes): item = item.decode('ascii') if is_string: array[name][irow] = item else: ncopy = len(item) array[name][irow][0:ncopy] = item[:] lower = keys.get('lower', False) upper = keys.get('upper', False) if self.lower or lower: _names_to_lower_if_recarray(array) elif self.upper or upper: _names_to_upper_if_recarray(array) self._maybe_trim_strings(array, **keys) return array
[ "def", "read", "(", "self", ",", "*", "*", "keys", ")", ":", "rows", "=", "keys", ".", "get", "(", "'rows'", ",", "None", ")", "columns", "=", "keys", ".", "get", "(", "'columns'", ",", "None", ")", "# if columns is None, returns all. Guaranteed to be unique and sorted", "colnums", "=", "self", ".", "_extract_colnums", "(", "columns", ")", "if", "isinstance", "(", "colnums", ",", "int", ")", ":", "# scalar sent, don't read as a recarray", "return", "self", ".", "read_column", "(", "columns", ",", "*", "*", "keys", ")", "rows", "=", "self", ".", "_extract_rows", "(", "rows", ")", "if", "rows", "is", "None", ":", "nrows", "=", "self", ".", "_info", "[", "'nrows'", "]", "else", ":", "nrows", "=", "rows", ".", "size", "# if rows is None still returns None, and is correctly interpreted", "# by the reader to mean all", "rows", "=", "self", ".", "_extract_rows", "(", "rows", ")", "# this is the full dtype for all columns", "dtype", ",", "offsets", ",", "isvar", "=", "self", ".", "get_rec_dtype", "(", "colnums", "=", "colnums", ",", "*", "*", "keys", ")", "array", "=", "numpy", ".", "zeros", "(", "nrows", ",", "dtype", "=", "dtype", ")", "# note reading into existing data", "wnotvar", ",", "=", "numpy", ".", "where", "(", "isvar", "==", "False", ")", "# noqa", "if", "wnotvar", ".", "size", ">", "0", ":", "for", "i", "in", "wnotvar", ":", "colnum", "=", "colnums", "[", "i", "]", "name", "=", "array", ".", "dtype", ".", "names", "[", "i", "]", "a", "=", "array", "[", "name", "]", ".", "copy", "(", ")", "self", ".", "_FITS", ".", "read_column", "(", "self", ".", "_ext", "+", "1", ",", "colnum", "+", "1", ",", "a", ",", "rows", ")", "array", "[", "name", "]", "=", "a", "del", "a", "array", "=", "self", ".", "_maybe_decode_fits_ascii_strings_to_unicode_py3", "(", "array", ")", "wvar", ",", "=", "numpy", ".", "where", "(", "isvar", "==", "True", ")", "# noqa", "if", "wvar", ".", "size", ">", "0", ":", "for", "i", "in", "wvar", ":", "colnum", "=", "colnums", "[", "i", "]", "name", "=", "array", ".", "dtype", ".", "names", "[", "i", "]", "dlist", "=", "self", ".", "_FITS", ".", "read_var_column_as_list", "(", "self", ".", "_ext", "+", "1", ",", "colnum", "+", "1", ",", "rows", ")", "if", "(", "isinstance", "(", "dlist", "[", "0", "]", ",", "str", ")", "or", "(", "IS_PY3", "and", "isinstance", "(", "dlist", "[", "0", "]", ",", "bytes", ")", ")", ")", ":", "is_string", "=", "True", "else", ":", "is_string", "=", "False", "if", "array", "[", "name", "]", ".", "dtype", ".", "descr", "[", "0", "]", "[", "1", "]", "[", "1", "]", "==", "'O'", ":", "# storing in object array", "# get references to each, no copy made", "for", "irow", ",", "item", "in", "enumerate", "(", "dlist", ")", ":", "if", "IS_PY3", "and", "isinstance", "(", "item", ",", "bytes", ")", ":", "item", "=", "item", ".", "decode", "(", "'ascii'", ")", "array", "[", "name", "]", "[", "irow", "]", "=", "item", "else", ":", "for", "irow", ",", "item", "in", "enumerate", "(", "dlist", ")", ":", "if", "IS_PY3", "and", "isinstance", "(", "item", ",", "bytes", ")", ":", "item", "=", "item", ".", "decode", "(", "'ascii'", ")", "if", "is_string", ":", "array", "[", "name", "]", "[", "irow", "]", "=", "item", "else", ":", "ncopy", "=", "len", "(", "item", ")", "array", "[", "name", "]", "[", "irow", "]", "[", "0", ":", "ncopy", "]", "=", "item", "[", ":", "]", "lower", "=", "keys", ".", "get", "(", "'lower'", ",", "False", ")", "upper", "=", "keys", ".", "get", "(", "'upper'", ",", "False", ")", "if", "self", ".", "lower", "or", "lower", ":", 
"_names_to_lower_if_recarray", "(", "array", ")", "elif", "self", ".", "upper", "or", "upper", ":", "_names_to_upper_if_recarray", "(", "array", ")", "self", ".", "_maybe_trim_strings", "(", "array", ",", "*", "*", "keys", ")", "return", "array" ]
read a data from an ascii table HDU By default, all rows are read. Send rows= to select subsets of the data. Table data are read into a recarray for multiple columns, plain array for a single column. parameters ---------- columns: list/array An optional set of columns to read from table HDUs. Can be string or number. If a sequence, a recarray is always returned. If a scalar, an ordinary array is returned. rows: list/array, optional An optional list of rows to read from table HDUS. Default is to read all. vstorage: string, optional Over-ride the default method to store variable length columns. Can be 'fixed' or 'object'. See docs on fitsio.FITS for details. lower: bool, optional If True, force all columns names to lower case in output. Will over ride the lower= keyword from construction. upper: bool, optional If True, force all columns names to upper case in output. Will over ride the lower= keyword from construction.
[ "read", "a", "data", "from", "an", "ascii", "table", "HDU" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1713-L1816
esheldon/fitsio
fitsio/hdu/table.py
TableColumnSubset.read
def read(self, **keys): """ Read the data from disk and return as a numpy array """ if self.is_scalar: data = self.fitshdu.read_column(self.columns, **keys) else: c = keys.get('columns', None) if c is None: keys['columns'] = self.columns data = self.fitshdu.read(**keys) return data
python
def read(self, **keys): if self.is_scalar: data = self.fitshdu.read_column(self.columns, **keys) else: c = keys.get('columns', None) if c is None: keys['columns'] = self.columns data = self.fitshdu.read(**keys) return data
[ "def", "read", "(", "self", ",", "*", "*", "keys", ")", ":", "if", "self", ".", "is_scalar", ":", "data", "=", "self", ".", "fitshdu", ".", "read_column", "(", "self", ".", "columns", ",", "*", "*", "keys", ")", "else", ":", "c", "=", "keys", ".", "get", "(", "'columns'", ",", "None", ")", "if", "c", "is", "None", ":", "keys", "[", "'columns'", "]", "=", "self", ".", "columns", "data", "=", "self", ".", "fitshdu", ".", "read", "(", "*", "*", "keys", ")", "return", "data" ]
Read the data from disk and return as a numpy array
[ "Read", "the", "data", "from", "disk", "and", "return", "as", "a", "numpy", "array" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1868-L1881
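Hedged usage of this subset path ('data.fits' and the column names are placeholders): a scalar name routes to read_column and yields a plain array, while a list routes to read(columns=...) and yields a recarray.

import fitsio

with fitsio.FITS('data.fits') as fits:
    flux = fits[1]['flux'].read()           # single column -> ordinary array
    pair = fits[1][['id', 'flux']].read()   # column list -> recarray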
esheldon/fitsio
fitsio/fitslib.py
read
def read(filename, ext=None, extver=None, **keys): """ Convenience function to read data from the specified FITS HDU By default, all data are read. For tables, send columns= and rows= to select subsets of the data. Table data are read into a recarray; use a FITS object and read_column() to get a single column as an ordinary array. For images, create a FITS object and use slice notation to read subsets. Under the hood, a FITS object is constructed and data are read using an associated FITSHDU object. parameters ---------- filename: string A filename. ext: number or string, optional The extension. Either the numerical extension from zero or a string extension name. If not sent, data is read from the first HDU that has data. extver: integer, optional FITS allows multiple extensions to have the same name (extname). These extensions can optionally specify an EXTVER version number in the header. Send extver= to select a particular version. If extver is not sent, the first one will be selected. If ext is an integer, the extver is ignored. columns: list or array, optional An optional set of columns to read from table HDUs. Default is to read all. Can be string or number. rows: optional An optional list of rows to read from table HDUS. Default is to read all. header: bool, optional If True, read the FITS header and return a tuple (data,header) Default is False. case_sensitive: bool, optional Match column names and extension names with case-sensitivity. Default is False. lower: bool, optional If True, force all columns names to lower case in output upper: bool, optional If True, force all columns names to upper case in output vstorage: string, optional Set the default method to store variable length columns. Can be 'fixed' or 'object'. See docs on fitsio.FITS for details. """ with FITS(filename, **keys) as fits: header = keys.pop('header', False) if ext is None: for i in xrange(len(fits)): if fits[i].has_data(): ext = i break if ext is None: raise IOError("No extensions have data") item = _make_item(ext, extver=extver) data = fits[item].read(**keys) if header: h = fits[item].read_header() return data, h else: return data
python
def read(filename, ext=None, extver=None, **keys): with FITS(filename, **keys) as fits: header = keys.pop('header', False) if ext is None: for i in xrange(len(fits)): if fits[i].has_data(): ext = i break if ext is None: raise IOError("No extensions have data") item = _make_item(ext, extver=extver) data = fits[item].read(**keys) if header: h = fits[item].read_header() return data, h else: return data
[ "def", "read", "(", "filename", ",", "ext", "=", "None", ",", "extver", "=", "None", ",", "*", "*", "keys", ")", ":", "with", "FITS", "(", "filename", ",", "*", "*", "keys", ")", "as", "fits", ":", "header", "=", "keys", ".", "pop", "(", "'header'", ",", "False", ")", "if", "ext", "is", "None", ":", "for", "i", "in", "xrange", "(", "len", "(", "fits", ")", ")", ":", "if", "fits", "[", "i", "]", ".", "has_data", "(", ")", ":", "ext", "=", "i", "break", "if", "ext", "is", "None", ":", "raise", "IOError", "(", "\"No extensions have data\"", ")", "item", "=", "_make_item", "(", "ext", ",", "extver", "=", "extver", ")", "data", "=", "fits", "[", "item", "]", ".", "read", "(", "*", "*", "keys", ")", "if", "header", ":", "h", "=", "fits", "[", "item", "]", ".", "read_header", "(", ")", "return", "data", ",", "h", "else", ":", "return", "data" ]
Convenience function to read data from the specified FITS HDU By default, all data are read. For tables, send columns= and rows= to select subsets of the data. Table data are read into a recarray; use a FITS object and read_column() to get a single column as an ordinary array. For images, create a FITS object and use slice notation to read subsets. Under the hood, a FITS object is constructed and data are read using an associated FITSHDU object. parameters ---------- filename: string A filename. ext: number or string, optional The extension. Either the numerical extension from zero or a string extension name. If not sent, data is read from the first HDU that has data. extver: integer, optional FITS allows multiple extensions to have the same name (extname). These extensions can optionally specify an EXTVER version number in the header. Send extver= to select a particular version. If extver is not sent, the first one will be selected. If ext is an integer, the extver is ignored. columns: list or array, optional An optional set of columns to read from table HDUs. Default is to read all. Can be string or number. rows: optional An optional list of rows to read from table HDUS. Default is to read all. header: bool, optional If True, read the FITS header and return a tuple (data,header) Default is False. case_sensitive: bool, optional Match column names and extension names with case-sensitivity. Default is False. lower: bool, optional If True, force all columns names to lower case in output upper: bool, optional If True, force all columns names to upper case in output vstorage: string, optional Set the default method to store variable length columns. Can be 'fixed' or 'object'. See docs on fitsio.FITS for details.
[ "Convenience", "function", "to", "read", "data", "from", "the", "specified", "FITS", "HDU" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L51-L117
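A minimal usage sketch for the read() convenience function documented in the record above; the file name 'catalog.fits' and the extension name 'OBJECTS' are illustrative placeholders, not values from this record.

import fitsio

data = fitsio.read('catalog.fits')                        # first HDU with data
sub = fitsio.read('catalog.fits', ext='OBJECTS',
                  columns=['ra', 'dec'], rows=[0, 5, 9])  # table subset
tbl, hdr = fitsio.read('catalog.fits', ext=1, header=True)  # data plus header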
esheldon/fitsio
fitsio/fitslib.py
read_header
def read_header(filename, ext=0, extver=None, case_sensitive=False, **keys): """ Convenience function to read the header from the specified FITS HDU The FITSHDR allows access to the values and comments by name and number. parameters ---------- filename: string A filename. ext: number or string, optional The extension. Either the numerical extension from zero or a string extension name. Default read primary header. extver: integer, optional FITS allows multiple extensions to have the same name (extname). These extensions can optionally specify an EXTVER version number in the header. Send extver= to select a particular version. If extver is not sent, the first one will be selected. If ext is an integer, the extver is ignored. case_sensitive: bool, optional Match extension names with case-sensitivity. Default is False. """ dont_create = 0 try: hdunum = ext+1 except TypeError: hdunum = None _fits = _fitsio_wrap.FITS(filename, READONLY, dont_create) if hdunum is None: extname = mks(ext) if extver is None: extver_num = 0 else: extver_num = extver if not case_sensitive: # the builtin movnam_hdu is not case sensitive hdunum = _fits.movnam_hdu(ANY_HDU, extname, extver_num) else: # for case sensitivity we'll need to run through # all the hdus found = False current_ext = 0 while True: hdunum = current_ext+1 try: hdu_type = _fits.movabs_hdu(hdunum) # noqa - not used name, vers = _fits.get_hdu_name_version(hdunum) if name == extname: if extver is None: # take the first match found = True break else: if extver_num == vers: found = True break except OSError: break current_ext += 1 if not found: raise IOError( 'hdu not found: %s (extver %s)' % (extname, extver)) return FITSHDR(_fits.read_header(hdunum))
python
def read_header(filename, ext=0, extver=None, case_sensitive=False, **keys): dont_create = 0 try: hdunum = ext+1 except TypeError: hdunum = None _fits = _fitsio_wrap.FITS(filename, READONLY, dont_create) if hdunum is None: extname = mks(ext) if extver is None: extver_num = 0 else: extver_num = extver if not case_sensitive: hdunum = _fits.movnam_hdu(ANY_HDU, extname, extver_num) else: found = False current_ext = 0 while True: hdunum = current_ext+1 try: hdu_type = _fits.movabs_hdu(hdunum) name, vers = _fits.get_hdu_name_version(hdunum) if name == extname: if extver is None: found = True break else: if extver_num == vers: found = True break except OSError: break current_ext += 1 if not found: raise IOError( 'hdu not found: %s (extver %s)' % (extname, extver)) return FITSHDR(_fits.read_header(hdunum))
[ "def", "read_header", "(", "filename", ",", "ext", "=", "0", ",", "extver", "=", "None", ",", "case_sensitive", "=", "False", ",", "*", "*", "keys", ")", ":", "dont_create", "=", "0", "try", ":", "hdunum", "=", "ext", "+", "1", "except", "TypeError", ":", "hdunum", "=", "None", "_fits", "=", "_fitsio_wrap", ".", "FITS", "(", "filename", ",", "READONLY", ",", "dont_create", ")", "if", "hdunum", "is", "None", ":", "extname", "=", "mks", "(", "ext", ")", "if", "extver", "is", "None", ":", "extver_num", "=", "0", "else", ":", "extver_num", "=", "extver", "if", "not", "case_sensitive", ":", "# the builtin movnam_hdu is not case sensitive", "hdunum", "=", "_fits", ".", "movnam_hdu", "(", "ANY_HDU", ",", "extname", ",", "extver_num", ")", "else", ":", "# for case sensitivity we'll need to run through", "# all the hdus", "found", "=", "False", "current_ext", "=", "0", "while", "True", ":", "hdunum", "=", "current_ext", "+", "1", "try", ":", "hdu_type", "=", "_fits", ".", "movabs_hdu", "(", "hdunum", ")", "# noqa - not used", "name", ",", "vers", "=", "_fits", ".", "get_hdu_name_version", "(", "hdunum", ")", "if", "name", "==", "extname", ":", "if", "extver", "is", "None", ":", "# take the first match", "found", "=", "True", "break", "else", ":", "if", "extver_num", "==", "vers", ":", "found", "=", "True", "break", "except", "OSError", ":", "break", "current_ext", "+=", "1", "if", "not", "found", ":", "raise", "IOError", "(", "'hdu not found: %s (extver %s)'", "%", "(", "extname", ",", "extver", ")", ")", "return", "FITSHDR", "(", "_fits", ".", "read_header", "(", "hdunum", ")", ")" ]
Convenience function to read the header from the specified FITS HDU

    The FITSHDR allows access to the values and comments by name and
    number.

    parameters
    ----------
    filename: string
        A filename.
    ext: number or string, optional
        The extension.  Either the numerical extension from zero
        or a string extension name.  Default is to read the primary header.
    extver: integer, optional
        FITS allows multiple extensions to have the same name (extname).
        These extensions can optionally specify an EXTVER version number in
        the header.  Send extver= to select a particular version.  If
        extver is not sent, the first one will be selected.  If ext is an
        integer, the extver is ignored.
    case_sensitive: bool, optional
        Match extension names with case-sensitivity.  Default is False.
[ "Convenience", "function", "to", "read", "the", "header", "from", "the", "specified", "FITS", "HDU" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L120-L190
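A short usage sketch for read_header(), covering the keyword combinations documented above; paths and extension names are placeholders.

import fitsio

primary = fitsio.read_header('image.fits')                    # ext=0 default
sci2 = fitsio.read_header('image.fits', ext='SCI', extver=2)  # named + version
print(primary['NAXIS'])                                       # FITSHDR lookup by name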
esheldon/fitsio
fitsio/fitslib.py
read_scamp_head
def read_scamp_head(fname, header=None):
    """
    read a SCAMP .head file as a fits header FITSHDR object

    parameters
    ----------
    fname: string
        The path to the SCAMP .head file

    header: FITSHDR, optional
        Optionally combine the header with the input one.  The input can
        be any object convertible to a FITSHDR object

    returns
    -------
    header: FITSHDR
        A fits header object of type FITSHDR
    """

    with open(fname) as fobj:
        lines = fobj.readlines()

    lines = [l.strip() for l in lines if l[0:3] != 'END']

    # if header is None an empty FITSHDR is created
    hdr = FITSHDR(header)

    for l in lines:
        hdr.add_record(l)

    return hdr
python
def read_scamp_head(fname, header=None): with open(fname) as fobj: lines = fobj.readlines() lines = [l.strip() for l in lines if l[0:3] != 'END'] hdr = FITSHDR(header) for l in lines: hdr.add_record(l) return hdr
[ "def", "read_scamp_head", "(", "fname", ",", "header", "=", "None", ")", ":", "with", "open", "(", "fname", ")", "as", "fobj", ":", "lines", "=", "fobj", ".", "readlines", "(", ")", "lines", "=", "[", "l", ".", "strip", "(", ")", "for", "l", "in", "lines", "if", "l", "[", "0", ":", "3", "]", "!=", "'END'", "]", "# if header is None an empty FITSHDR is created", "hdr", "=", "FITSHDR", "(", "header", ")", "for", "l", "in", "lines", ":", "hdr", ".", "add_record", "(", "l", ")", "return", "hdr" ]
read a SCAMP .head file as a fits header FITSHDR object

    parameters
    ----------
    fname: string
        The path to the SCAMP .head file

    header: FITSHDR, optional
        Optionally combine the header with the input one.  The input can
        be any object convertible to a FITSHDR object

    returns
    -------
    header: FITSHDR
        A fits header object of type FITSHDR
[ "read", "a", "SCAMP", ".", "head", "file", "as", "a", "fits", "header", "FITSHDR", "object" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L193-L223
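A sketch of the two documented calling patterns for read_scamp_head(); 'astrom.head' and 'image.fits' are placeholder paths.

import fitsio

hdr = fitsio.read_scamp_head('astrom.head')
# merge the .head keywords into an existing header:
base = fitsio.read_header('image.fits')
merged = fitsio.read_scamp_head('astrom.head', header=base)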
esheldon/fitsio
fitsio/fitslib.py
write
def write(filename, data, extname=None, extver=None, units=None, compress=None, table_type='binary', header=None, clobber=False, **keys): """ Convenience function to create a new HDU and write the data. Under the hood, a FITS object is constructed. If you want to append rows to an existing HDU, or modify data in an HDU, please construct a FITS object. parameters ---------- filename: string A filename. data: Either a normal n-dimensional array or a recarray. Images are written to a new IMAGE_HDU and recarrays are written to BINARY_TBl or ASCII_TBL hdus. extname: string, optional An optional name for the new header unit. extver: integer, optional FITS allows multiple extensions to have the same name (extname). These extensions can optionally specify an EXTVER version number in the header. Send extver= to set a particular version, which will be represented in the header with keyname EXTVER. The extver must be an integer > 0. If extver is not sent, the first one will be selected. If ext is an integer, the extver is ignored. compress: string, optional A string representing the compression algorithm for images, default None. Can be one of 'RICE' 'GZIP' 'GZIP_2' 'PLIO' (no unsigned or negative integers) 'HCOMPRESS' (case-insensitive) See the cfitsio manual for details. header: FITSHDR, list, dict, optional A set of header keys to write. The keys are written before the data is written to the table, preventing a resizing of the table area. Can be one of these: - FITSHDR object - list of dictionaries containing 'name','value' and optionally a 'comment' field; the order is preserved. - a dictionary of keyword-value pairs; no comments are written in this case, and the order is arbitrary. Note required keywords such as NAXIS, XTENSION, etc are cleaed out. clobber: bool, optional If True, overwrite any existing file. Default is to append a new extension on existing files. ignore_empty: bool, optional Default False. Unless set to True, only allow empty HDUs in the zero extension. table keywords -------------- These keywords are only active when writing tables. units: list A list of strings representing units for each column. table_type: string, optional Either 'binary' or 'ascii', default 'binary' Matching is case-insensitive write_bitcols: bool, optional Write boolean arrays in the FITS bitcols format, default False """ with FITS(filename, 'rw', clobber=clobber, **keys) as fits: fits.write(data, table_type=table_type, units=units, extname=extname, extver=extver, compress=compress, header=header, **keys)
python
def write(filename, data, extname=None, extver=None, units=None, compress=None, table_type='binary', header=None, clobber=False, **keys): with FITS(filename, 'rw', clobber=clobber, **keys) as fits: fits.write(data, table_type=table_type, units=units, extname=extname, extver=extver, compress=compress, header=header, **keys)
[ "def", "write", "(", "filename", ",", "data", ",", "extname", "=", "None", ",", "extver", "=", "None", ",", "units", "=", "None", ",", "compress", "=", "None", ",", "table_type", "=", "'binary'", ",", "header", "=", "None", ",", "clobber", "=", "False", ",", "*", "*", "keys", ")", ":", "with", "FITS", "(", "filename", ",", "'rw'", ",", "clobber", "=", "clobber", ",", "*", "*", "keys", ")", "as", "fits", ":", "fits", ".", "write", "(", "data", ",", "table_type", "=", "table_type", ",", "units", "=", "units", ",", "extname", "=", "extname", ",", "extver", "=", "extver", ",", "compress", "=", "compress", ",", "header", "=", "header", ",", "*", "*", "keys", ")" ]
Convenience function to create a new HDU and write the data.

    Under the hood, a FITS object is constructed.  If you want to append
    rows to an existing HDU, or modify data in an HDU, please construct a
    FITS object.

    parameters
    ----------
    filename: string
        A filename.
    data:
        Either a normal n-dimensional array or a recarray.  Images are
        written to a new IMAGE_HDU and recarrays are written to
        BINARY_TBL or ASCII_TBL HDUs.
    extname: string, optional
        An optional name for the new header unit.
    extver: integer, optional
        FITS allows multiple extensions to have the same name (extname).
        These extensions can optionally specify an EXTVER version number in
        the header.  Send extver= to set a particular version, which will
        be represented in the header with keyname EXTVER.  The extver must
        be an integer > 0.  If extver is not sent, the first one will be
        selected.  If ext is an integer, the extver is ignored.
    compress: string, optional
        A string representing the compression algorithm for images,
        default None.
        Can be one of
            'RICE'
            'GZIP'
            'GZIP_2'
            'PLIO' (no unsigned or negative integers)
            'HCOMPRESS'
        (case-insensitive) See the cfitsio manual for details.
    header: FITSHDR, list, dict, optional
        A set of header keys to write. The keys are written before the data
        is written to the table, preventing a resizing of the table area.

        Can be one of these:
            - FITSHDR object
            - list of dictionaries containing 'name','value' and optionally
              a 'comment' field; the order is preserved.
            - a dictionary of keyword-value pairs; no comments are written
              in this case, and the order is arbitrary.
        Note required keywords such as NAXIS, XTENSION, etc. are cleaned
        out.
    clobber: bool, optional
        If True, overwrite any existing file. Default is to append a new
        extension on existing files.
    ignore_empty: bool, optional
        Default False.  Unless set to True, only allow empty HDUs in the
        zero extension.

    table keywords
    --------------
    These keywords are only active when writing tables.

    units: list
        A list of strings representing units for each column.
    table_type: string, optional
        Either 'binary' or 'ascii', default 'binary'
        Matching is case-insensitive
    write_bitcols: bool, optional
        Write boolean arrays in the FITS bitcols format, default False
[ "Convenience", "function", "to", "create", "a", "new", "HDU", "and", "write", "the", "data", "." ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L236-L317
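A sketch showing the two data kinds write() dispatches on, per the docstring above; the output path and extension name are placeholders.

import numpy as np
import fitsio

img = np.arange(12, dtype='f4').reshape(3, 4)
fitsio.write('out.fits', img, clobber=True)        # new file, image HDU

rec = np.zeros(3, dtype=[('id', 'i8'), ('flux', 'f4')])
fitsio.write('out.fits', rec, extname='CATALOG')   # appends a table HDU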
esheldon/fitsio
fitsio/fitslib.py
array2tabledef
def array2tabledef(data, table_type='binary', write_bitcols=False): """ Similar to descr2tabledef but if there are object columns a type and max length will be extracted and used for the tabledef """ is_ascii = (table_type == 'ascii') if data.dtype.fields is None: raise ValueError("data must have fields") names = [] names_nocase = {} formats = [] dims = [] descr = data.dtype.descr for d in descr: # these have the form '<f4' or '|S25', etc. Extract the pure type npy_dtype = d[1][1:] if is_ascii: if npy_dtype in ['u1', 'i1']: raise ValueError( "1-byte integers are not supported for " "ascii tables: '%s'" % npy_dtype) if npy_dtype in ['u2']: raise ValueError( "unsigned 2-byte integers are not supported for " "ascii tables: '%s'" % npy_dtype) if npy_dtype[0] == 'O': # this will be a variable length column 1Pt(len) where t is the # type and len is max length. Each element must be convertible to # the same type as the first name = d[0] form, dim = npy_obj2fits(data, name) elif npy_dtype[0] == "V": continue else: name, form, dim = _npy2fits( d, table_type=table_type, write_bitcols=write_bitcols) if name == '': raise ValueError("field name is an empty string") """ if is_ascii: if dim is not None: raise ValueError("array columns are not supported for " "ascii tables") """ name_nocase = name.upper() if name_nocase in names_nocase: raise ValueError( "duplicate column name found: '%s'. Note " "FITS column names are not case sensitive" % name_nocase) names.append(name) names_nocase[name_nocase] = name_nocase formats.append(form) dims.append(dim) return names, formats, dims
python
def array2tabledef(data, table_type='binary', write_bitcols=False): is_ascii = (table_type == 'ascii') if data.dtype.fields is None: raise ValueError("data must have fields") names = [] names_nocase = {} formats = [] dims = [] descr = data.dtype.descr for d in descr: npy_dtype = d[1][1:] if is_ascii: if npy_dtype in ['u1', 'i1']: raise ValueError( "1-byte integers are not supported for " "ascii tables: '%s'" % npy_dtype) if npy_dtype in ['u2']: raise ValueError( "unsigned 2-byte integers are not supported for " "ascii tables: '%s'" % npy_dtype) if npy_dtype[0] == 'O': name = d[0] form, dim = npy_obj2fits(data, name) elif npy_dtype[0] == "V": continue else: name, form, dim = _npy2fits( d, table_type=table_type, write_bitcols=write_bitcols) if name == '': raise ValueError("field name is an empty string") name_nocase = name.upper() if name_nocase in names_nocase: raise ValueError( "duplicate column name found: '%s'. Note " "FITS column names are not case sensitive" % name_nocase) names.append(name) names_nocase[name_nocase] = name_nocase formats.append(form) dims.append(dim) return names, formats, dims
[ "def", "array2tabledef", "(", "data", ",", "table_type", "=", "'binary'", ",", "write_bitcols", "=", "False", ")", ":", "is_ascii", "=", "(", "table_type", "==", "'ascii'", ")", "if", "data", ".", "dtype", ".", "fields", "is", "None", ":", "raise", "ValueError", "(", "\"data must have fields\"", ")", "names", "=", "[", "]", "names_nocase", "=", "{", "}", "formats", "=", "[", "]", "dims", "=", "[", "]", "descr", "=", "data", ".", "dtype", ".", "descr", "for", "d", "in", "descr", ":", "# these have the form '<f4' or '|S25', etc. Extract the pure type", "npy_dtype", "=", "d", "[", "1", "]", "[", "1", ":", "]", "if", "is_ascii", ":", "if", "npy_dtype", "in", "[", "'u1'", ",", "'i1'", "]", ":", "raise", "ValueError", "(", "\"1-byte integers are not supported for \"", "\"ascii tables: '%s'\"", "%", "npy_dtype", ")", "if", "npy_dtype", "in", "[", "'u2'", "]", ":", "raise", "ValueError", "(", "\"unsigned 2-byte integers are not supported for \"", "\"ascii tables: '%s'\"", "%", "npy_dtype", ")", "if", "npy_dtype", "[", "0", "]", "==", "'O'", ":", "# this will be a variable length column 1Pt(len) where t is the", "# type and len is max length. Each element must be convertible to", "# the same type as the first", "name", "=", "d", "[", "0", "]", "form", ",", "dim", "=", "npy_obj2fits", "(", "data", ",", "name", ")", "elif", "npy_dtype", "[", "0", "]", "==", "\"V\"", ":", "continue", "else", ":", "name", ",", "form", ",", "dim", "=", "_npy2fits", "(", "d", ",", "table_type", "=", "table_type", ",", "write_bitcols", "=", "write_bitcols", ")", "if", "name", "==", "''", ":", "raise", "ValueError", "(", "\"field name is an empty string\"", ")", "\"\"\"\n if is_ascii:\n if dim is not None:\n raise ValueError(\"array columns are not supported for \"\n \"ascii tables\")\n \"\"\"", "name_nocase", "=", "name", ".", "upper", "(", ")", "if", "name_nocase", "in", "names_nocase", ":", "raise", "ValueError", "(", "\"duplicate column name found: '%s'. Note \"", "\"FITS column names are not case sensitive\"", "%", "name_nocase", ")", "names", ".", "append", "(", "name", ")", "names_nocase", "[", "name_nocase", "]", "=", "name_nocase", "formats", ".", "append", "(", "form", ")", "dims", ".", "append", "(", "dim", ")", "return", "names", ",", "formats", ",", "dims" ]
Similar to descr2tabledef, but if there are object columns a type
    and max length will be extracted and used for the tabledef
[ "Similar", "to", "descr2tabledef", "but", "if", "there", "are", "object", "columns", "a", "type", "and", "max", "length", "will", "be", "extracted", "and", "used", "for", "the", "tabledef" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L1237-L1298
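For intuition only, a simplified standalone sketch of the numpy-descriptor-to-TFORM mapping that array2tabledef performs for plain scalar columns. The _TFORM table and sketch_tabledef function are assumptions made for illustration; the library's real helper (_npy2fits) also handles strings, array columns, bit columns, and variable-length object columns.

import numpy as np

# illustrative subset of the standard FITS binary-table form codes
_TFORM = {'i2': 'I', 'i4': 'J', 'i8': 'K', 'f4': 'E', 'f8': 'D'}

def sketch_tabledef(data):
    # map each field's base numpy type code to a FITS form code
    names, forms = [], []
    for name in data.dtype.names:
        code = data.dtype[name].str[1:]   # e.g. '<f8' -> 'f8'
        names.append(name)
        forms.append(_TFORM[code])
    return names, forms

rec = np.zeros(2, dtype=[('id', 'i8'), ('flux', 'f4')])
print(sketch_tabledef(rec))   # (['id', 'flux'], ['K', 'E'])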
esheldon/fitsio
fitsio/fitslib.py
descr2tabledef
def descr2tabledef(descr, table_type='binary', write_bitcols=False): """ Create a FITS table def from the input numpy descriptor. parameters ---------- descr: list A numpy recarray type descriptor array.dtype.descr returns ------- names, formats, dims: tuple of lists These are the ttyp, tform and tdim header entries for each field. dim entries may be None """ names = [] formats = [] dims = [] for d in descr: """ npy_dtype = d[1][1:] if is_ascii and npy_dtype in ['u1','i1']: raise ValueError("1-byte integers are not supported for " "ascii tables") """ if d[1][1] == 'O': raise ValueError( 'cannot automatically declare a var column without ' 'some data to determine max len') name, form, dim = _npy2fits( d, table_type=table_type, write_bitcols=write_bitcols) if name == '': raise ValueError("field name is an empty string") """ if is_ascii: if dim is not None: raise ValueError("array columns are not supported " "for ascii tables") """ names.append(name) formats.append(form) dims.append(dim) return names, formats, dims
python
def descr2tabledef(descr, table_type='binary', write_bitcols=False): names = [] formats = [] dims = [] for d in descr: if d[1][1] == 'O': raise ValueError( 'cannot automatically declare a var column without ' 'some data to determine max len') name, form, dim = _npy2fits( d, table_type=table_type, write_bitcols=write_bitcols) if name == '': raise ValueError("field name is an empty string") names.append(name) formats.append(form) dims.append(dim) return names, formats, dims
[ "def", "descr2tabledef", "(", "descr", ",", "table_type", "=", "'binary'", ",", "write_bitcols", "=", "False", ")", ":", "names", "=", "[", "]", "formats", "=", "[", "]", "dims", "=", "[", "]", "for", "d", "in", "descr", ":", "\"\"\"\n npy_dtype = d[1][1:]\n if is_ascii and npy_dtype in ['u1','i1']:\n raise ValueError(\"1-byte integers are not supported for \"\n \"ascii tables\")\n \"\"\"", "if", "d", "[", "1", "]", "[", "1", "]", "==", "'O'", ":", "raise", "ValueError", "(", "'cannot automatically declare a var column without '", "'some data to determine max len'", ")", "name", ",", "form", ",", "dim", "=", "_npy2fits", "(", "d", ",", "table_type", "=", "table_type", ",", "write_bitcols", "=", "write_bitcols", ")", "if", "name", "==", "''", ":", "raise", "ValueError", "(", "\"field name is an empty string\"", ")", "\"\"\"\n if is_ascii:\n if dim is not None:\n raise ValueError(\"array columns are not supported \"\n \"for ascii tables\")\n \"\"\"", "names", ".", "append", "(", "name", ")", "formats", ".", "append", "(", "form", ")", "dims", ".", "append", "(", "dim", ")", "return", "names", ",", "formats", ",", "dims" ]
Create a FITS table def from the input numpy descriptor.

    parameters
    ----------
    descr: list
        A numpy recarray type descriptor  array.dtype.descr

    returns
    -------
    names, formats, dims: tuple of lists
        These are the TTYPE, TFORM and TDIM header entries
        for each field.  dim entries may be None
[ "Create", "a", "FITS", "table", "def", "from", "the", "input", "numpy", "descriptor", "." ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L1356-L1406
esheldon/fitsio
fitsio/fitslib.py
get_tile_dims
def get_tile_dims(tile_dims, imshape):
    """
    Just make sure the tile dims have the appropriate number of dimensions
    """

    if tile_dims is None:
        td = None
    else:
        td = numpy.array(tile_dims, dtype='i8')
        nd = len(imshape)
        if td.size != nd:
            msg = "expected tile_dims to have %d dims, got %d" % (td.size, nd)
            raise ValueError(msg)

    return td
python
def get_tile_dims(tile_dims, imshape): if tile_dims is None: td = None else: td = numpy.array(tile_dims, dtype='i8') nd = len(imshape) if td.size != nd: msg = "expected tile_dims to have %d dims, got %d" % (td.size, nd) raise ValueError(msg) return td
[ "def", "get_tile_dims", "(", "tile_dims", ",", "imshape", ")", ":", "if", "tile_dims", "is", "None", ":", "td", "=", "None", "else", ":", "td", "=", "numpy", ".", "array", "(", "tile_dims", ",", "dtype", "=", "'i8'", ")", "nd", "=", "len", "(", "imshape", ")", "if", "td", ".", "size", "!=", "nd", ":", "msg", "=", "\"expected tile_dims to have %d dims, got %d\"", "%", "(", "td", ".", "size", ",", "nd", ")", "raise", "ValueError", "(", "msg", ")", "return", "td" ]
Just make sure the tile dims have the appropriate number of dimensions
[ "Just", "make", "sure", "the", "tile", "dims", "has", "the", "appropriate", "number", "of", "dimensions" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L1452-L1466
esheldon/fitsio
fitsio/fitslib.py
_extract_table_type
def _extract_table_type(type): """ Get the numerical table type """ if isinstance(type, str): type = type.lower() if type[0:7] == 'binary': table_type = BINARY_TBL elif type[0:6] == 'ascii': table_type = ASCII_TBL else: raise ValueError( "table type string should begin with 'binary' or 'ascii' " "(case insensitive)") else: type = int(type) if type not in [BINARY_TBL, ASCII_TBL]: raise ValueError( "table type num should be BINARY_TBL (%d) or " "ASCII_TBL (%d)" % (BINARY_TBL, ASCII_TBL)) table_type = type return table_type
python
def _extract_table_type(type): if isinstance(type, str): type = type.lower() if type[0:7] == 'binary': table_type = BINARY_TBL elif type[0:6] == 'ascii': table_type = ASCII_TBL else: raise ValueError( "table type string should begin with 'binary' or 'ascii' " "(case insensitive)") else: type = int(type) if type not in [BINARY_TBL, ASCII_TBL]: raise ValueError( "table type num should be BINARY_TBL (%d) or " "ASCII_TBL (%d)" % (BINARY_TBL, ASCII_TBL)) table_type = type return table_type
[ "def", "_extract_table_type", "(", "type", ")", ":", "if", "isinstance", "(", "type", ",", "str", ")", ":", "type", "=", "type", ".", "lower", "(", ")", "if", "type", "[", "0", ":", "7", "]", "==", "'binary'", ":", "table_type", "=", "BINARY_TBL", "elif", "type", "[", "0", ":", "6", "]", "==", "'ascii'", ":", "table_type", "=", "ASCII_TBL", "else", ":", "raise", "ValueError", "(", "\"table type string should begin with 'binary' or 'ascii' \"", "\"(case insensitive)\"", ")", "else", ":", "type", "=", "int", "(", "type", ")", "if", "type", "not", "in", "[", "BINARY_TBL", ",", "ASCII_TBL", "]", ":", "raise", "ValueError", "(", "\"table type num should be BINARY_TBL (%d) or \"", "\"ASCII_TBL (%d)\"", "%", "(", "BINARY_TBL", ",", "ASCII_TBL", ")", ")", "table_type", "=", "type", "return", "table_type" ]
Get the numerical table type
[ "Get", "the", "numerical", "table", "type" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L1496-L1518
esheldon/fitsio
fitsio/fitslib.py
FITS.close
def close(self): """ Close the fits file and set relevant metadata to None """ if hasattr(self, '_FITS'): if self._FITS is not None: self._FITS.close() self._FITS = None self._filename = None self.mode = None self.charmode = None self.intmode = None self.hdu_list = None self.hdu_map = None
python
def close(self): if hasattr(self, '_FITS'): if self._FITS is not None: self._FITS.close() self._FITS = None self._filename = None self.mode = None self.charmode = None self.intmode = None self.hdu_list = None self.hdu_map = None
[ "def", "close", "(", "self", ")", ":", "if", "hasattr", "(", "self", ",", "'_FITS'", ")", ":", "if", "self", ".", "_FITS", "is", "not", "None", ":", "self", ".", "_FITS", ".", "close", "(", ")", "self", ".", "_FITS", "=", "None", "self", ".", "_filename", "=", "None", "self", ".", "mode", "=", "None", "self", ".", "charmode", "=", "None", "self", ".", "intmode", "=", "None", "self", ".", "hdu_list", "=", "None", "self", ".", "hdu_map", "=", "None" ]
Close the fits file and set relevant metadata to None
[ "Close", "the", "fits", "file", "and", "set", "relevant", "metadata", "to", "None" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L409-L422
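Because FITS supports the context-manager protocol (as the read() wrapper above already relies on), an explicit close() is only needed when managing the object lifetime by hand; 'data.fits' is a placeholder path.

import fitsio

with fitsio.FITS('data.fits') as fits:   # closed automatically on exit
    data = fits[0].read()

fits2 = fitsio.FITS('data.fits')         # manual lifetime
try:
    hdr = fits2[0].read_header()
finally:
    fits2.close()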
esheldon/fitsio
fitsio/fitslib.py
FITS.movnam_hdu
def movnam_hdu(self, extname, hdutype=ANY_HDU, extver=0): """ Move to the indicated HDU by name In general, it is not necessary to use this method explicitly. returns the one-offset extension number """ extname = mks(extname) hdu = self._FITS.movnam_hdu(hdutype, extname, extver) return hdu
python
def movnam_hdu(self, extname, hdutype=ANY_HDU, extver=0): extname = mks(extname) hdu = self._FITS.movnam_hdu(hdutype, extname, extver) return hdu
[ "def", "movnam_hdu", "(", "self", ",", "extname", ",", "hdutype", "=", "ANY_HDU", ",", "extver", "=", "0", ")", ":", "extname", "=", "mks", "(", "extname", ")", "hdu", "=", "self", ".", "_FITS", ".", "movnam_hdu", "(", "hdutype", ",", "extname", ",", "extver", ")", "return", "hdu" ]
Move to the indicated HDU by name In general, it is not necessary to use this method explicitly. returns the one-offset extension number
[ "Move", "to", "the", "indicated", "HDU", "by", "name" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L452-L462
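A sketch contrasting the usual name-based indexing with the explicit movnam_hdu() call described above; the path and extension name are placeholders.

import fitsio

with fitsio.FITS('multi.fits') as fits:
    data = fits['OBJECTS'].read()          # the usual, higher-level route
    hdunum = fits.movnam_hdu('OBJECTS')    # explicit move; one-offset number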
esheldon/fitsio
fitsio/fitslib.py
FITS.reopen
def reopen(self): """ close and reopen the fits file with the same mode """ self._FITS.close() del self._FITS self._FITS = _fitsio_wrap.FITS(self._filename, self.intmode, 0) self.update_hdu_list()
python
def reopen(self): self._FITS.close() del self._FITS self._FITS = _fitsio_wrap.FITS(self._filename, self.intmode, 0) self.update_hdu_list()
[ "def", "reopen", "(", "self", ")", ":", "self", ".", "_FITS", ".", "close", "(", ")", "del", "self", ".", "_FITS", "self", ".", "_FITS", "=", "_fitsio_wrap", ".", "FITS", "(", "self", ".", "_filename", ",", "self", ".", "intmode", ",", "0", ")", "self", ".", "update_hdu_list", "(", ")" ]
close and reopen the fits file with the same mode
[ "close", "and", "reopen", "the", "fits", "file", "with", "the", "same", "mode" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L464-L471
esheldon/fitsio
fitsio/fitslib.py
FITS.write
def write(self, data, units=None, extname=None, extver=None, compress=None, tile_dims=None, header=None, names=None, table_type='binary', write_bitcols=False, **keys): """ Write the data to a new HDU. This method is a wrapper. If this is an IMAGE_HDU, write_image is called, otherwise write_table is called. parameters ---------- data: ndarray An n-dimensional image or an array with fields. extname: string, optional An optional extension name. extver: integer, optional FITS allows multiple extensions to have the same name (extname). These extensions can optionally specify an EXTVER version number in the header. Send extver= to set a particular version, which will be represented in the header with keyname EXTVER. The extver must be an integer > 0. If extver is not sent, the first one will be selected. If ext is an integer, the extver is ignored. header: FITSHDR, list, dict, optional A set of header keys to write. Can be one of these: - FITSHDR object - list of dictionaries containing 'name','value' and optionally a 'comment' field; the order is preserved. - a dictionary of keyword-value pairs; no comments are written in this case, and the order is arbitrary. Note required keywords such as NAXIS, XTENSION, etc are cleaed out. Image-only keywords: compress: string, optional A string representing the compression algorithm for images, default None. Can be one of 'RICE' 'GZIP' 'GZIP_2' 'PLIO' (no unsigned or negative integers) 'HCOMPRESS' (case-insensitive) See the cfitsio manual for details. Table-only keywords: units: list/dec, optional: A list of strings with units for each column. table_type: string, optional Either 'binary' or 'ascii', default 'binary' Matching is case-insensitive write_bitcols: bool, optional Write boolean arrays in the FITS bitcols format, default False restrictions ------------ The File must be opened READWRITE """ isimage = False if data is None: isimage = True elif isinstance(data, numpy.ndarray): if data.dtype.fields == None: # noqa - probably should be is None isimage = True if isimage: self.write_image(data, extname=extname, extver=extver, compress=compress, tile_dims=tile_dims, header=header) else: self.write_table(data, units=units, extname=extname, extver=extver, header=header, names=names, table_type=table_type, write_bitcols=write_bitcols)
python
def write(self, data, units=None, extname=None, extver=None, compress=None, tile_dims=None, header=None, names=None, table_type='binary', write_bitcols=False, **keys): isimage = False if data is None: isimage = True elif isinstance(data, numpy.ndarray): if data.dtype.fields == None: isimage = True if isimage: self.write_image(data, extname=extname, extver=extver, compress=compress, tile_dims=tile_dims, header=header) else: self.write_table(data, units=units, extname=extname, extver=extver, header=header, names=names, table_type=table_type, write_bitcols=write_bitcols)
[ "def", "write", "(", "self", ",", "data", ",", "units", "=", "None", ",", "extname", "=", "None", ",", "extver", "=", "None", ",", "compress", "=", "None", ",", "tile_dims", "=", "None", ",", "header", "=", "None", ",", "names", "=", "None", ",", "table_type", "=", "'binary'", ",", "write_bitcols", "=", "False", ",", "*", "*", "keys", ")", ":", "isimage", "=", "False", "if", "data", "is", "None", ":", "isimage", "=", "True", "elif", "isinstance", "(", "data", ",", "numpy", ".", "ndarray", ")", ":", "if", "data", ".", "dtype", ".", "fields", "==", "None", ":", "# noqa - probably should be is None", "isimage", "=", "True", "if", "isimage", ":", "self", ".", "write_image", "(", "data", ",", "extname", "=", "extname", ",", "extver", "=", "extver", ",", "compress", "=", "compress", ",", "tile_dims", "=", "tile_dims", ",", "header", "=", "header", ")", "else", ":", "self", ".", "write_table", "(", "data", ",", "units", "=", "units", ",", "extname", "=", "extname", ",", "extver", "=", "extver", ",", "header", "=", "header", ",", "names", "=", "names", ",", "table_type", "=", "table_type", ",", "write_bitcols", "=", "write_bitcols", ")" ]
Write the data to a new HDU.

    This method is a wrapper.  If this is an IMAGE_HDU, write_image is
    called, otherwise write_table is called.

    parameters
    ----------
    data: ndarray
        An n-dimensional image or an array with fields.
    extname: string, optional
        An optional extension name.
    extver: integer, optional
        FITS allows multiple extensions to have the same name (extname).
        These extensions can optionally specify an EXTVER version number in
        the header.  Send extver= to set a particular version, which will
        be represented in the header with keyname EXTVER.  The extver must
        be an integer > 0.  If extver is not sent, the first one will be
        selected.  If ext is an integer, the extver is ignored.
    header: FITSHDR, list, dict, optional
        A set of header keys to write. Can be one of these:
            - FITSHDR object
            - list of dictionaries containing 'name','value' and optionally
              a 'comment' field; the order is preserved.
            - a dictionary of keyword-value pairs; no comments are written
              in this case, and the order is arbitrary.
        Note required keywords such as NAXIS, XTENSION, etc. are cleaned
        out.

    Image-only keywords:
        compress: string, optional
            A string representing the compression algorithm for images,
            default None.
            Can be one of
                'RICE'
                'GZIP'
                'GZIP_2'
                'PLIO' (no unsigned or negative integers)
                'HCOMPRESS'
            (case-insensitive) See the cfitsio manual for details.

    Table-only keywords:
        units: list, optional
            A list of strings with units for each column.
        table_type: string, optional
            Either 'binary' or 'ascii', default 'binary'
            Matching is case-insensitive
        write_bitcols: bool, optional
            Write boolean arrays in the FITS bitcols format, default False

    restrictions
    ------------
    The File must be opened READWRITE
[ "Write", "the", "data", "to", "a", "new", "HDU", "." ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L473-L549
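A sketch of the dispatch described above: a plain ndarray goes to write_image(), an array with fields goes to write_table(). Paths and names are placeholders.

import numpy as np
import fitsio

img = np.zeros((8, 8), dtype='f4')
tab = np.zeros(4, dtype=[('x', 'f8'), ('name', 'S10')])

with fitsio.FITS('both.fits', 'rw', clobber=True) as fits:
    fits.write(img)                   # no fields -> image HDU
    fits.write(tab, extname='TAB')    # fields -> binary table HDU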
esheldon/fitsio
fitsio/fitslib.py
FITS.write_image
def write_image(self, img, extname=None, extver=None, compress=None, tile_dims=None, header=None): """ Create a new image extension and write the data. parameters ---------- img: ndarray An n-dimensional image. extname: string, optional An optional extension name. extver: integer, optional FITS allows multiple extensions to have the same name (extname). These extensions can optionally specify an EXTVER version number in the header. Send extver= to set a particular version, which will be represented in the header with keyname EXTVER. The extver must be an integer > 0. If extver is not sent, the first one will be selected. If ext is an integer, the extver is ignored. compress: string, optional A string representing the compression algorithm for images, default None. Can be one of 'RICE' 'GZIP' 'GZIP_2' 'PLIO' (no unsigned or negative integers) 'HCOMPRESS' (case-insensitive) See the cfitsio manual for details. header: FITSHDR, list, dict, optional A set of header keys to write. Can be one of these: - FITSHDR object - list of dictionaries containing 'name','value' and optionally a 'comment' field; the order is preserved. - a dictionary of keyword-value pairs; no comments are written in this case, and the order is arbitrary. Note required keywords such as NAXIS, XTENSION, etc are cleaed out. restrictions ------------ The File must be opened READWRITE """ self.create_image_hdu(img, header=header, extname=extname, extver=extver, compress=compress, tile_dims=tile_dims) if header is not None: self[-1].write_keys(header) self[-1]._update_info()
python
def write_image(self, img, extname=None, extver=None, compress=None, tile_dims=None, header=None): self.create_image_hdu(img, header=header, extname=extname, extver=extver, compress=compress, tile_dims=tile_dims) if header is not None: self[-1].write_keys(header) self[-1]._update_info()
[ "def", "write_image", "(", "self", ",", "img", ",", "extname", "=", "None", ",", "extver", "=", "None", ",", "compress", "=", "None", ",", "tile_dims", "=", "None", ",", "header", "=", "None", ")", ":", "self", ".", "create_image_hdu", "(", "img", ",", "header", "=", "header", ",", "extname", "=", "extname", ",", "extver", "=", "extver", ",", "compress", "=", "compress", ",", "tile_dims", "=", "tile_dims", ")", "if", "header", "is", "not", "None", ":", "self", "[", "-", "1", "]", ".", "write_keys", "(", "header", ")", "self", "[", "-", "1", "]", ".", "_update_info", "(", ")" ]
Create a new image extension and write the data.

    parameters
    ----------
    img: ndarray
        An n-dimensional image.
    extname: string, optional
        An optional extension name.
    extver: integer, optional
        FITS allows multiple extensions to have the same name (extname).
        These extensions can optionally specify an EXTVER version number in
        the header.  Send extver= to set a particular version, which will
        be represented in the header with keyname EXTVER.  The extver must
        be an integer > 0.  If extver is not sent, the first one will be
        selected.  If ext is an integer, the extver is ignored.
    compress: string, optional
        A string representing the compression algorithm for images,
        default None.
        Can be one of
            'RICE'
            'GZIP'
            'GZIP_2'
            'PLIO' (no unsigned or negative integers)
            'HCOMPRESS'
        (case-insensitive) See the cfitsio manual for details.
    header: FITSHDR, list, dict, optional
        A set of header keys to write. Can be one of these:
            - FITSHDR object
            - list of dictionaries containing 'name','value' and optionally
              a 'comment' field; the order is preserved.
            - a dictionary of keyword-value pairs; no comments are written
              in this case, and the order is arbitrary.
        Note required keywords such as NAXIS, XTENSION, etc. are cleaned
        out.

    restrictions
    ------------
    The File must be opened READWRITE
[ "Create", "a", "new", "image", "extension", "and", "write", "the", "data", "." ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L551-L601
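A sketch of write_image() with one of the documented compression options; the path and the OBSERVER keyword are illustrative.

import numpy as np
import fitsio

img = np.random.normal(size=(64, 64)).astype('f4')
with fitsio.FITS('compressed.fits', 'rw', clobber=True) as fits:
    fits.write_image(img, extname='SCI', compress='RICE',
                     header={'OBSERVER': 'example'})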
esheldon/fitsio
fitsio/fitslib.py
FITS.create_image_hdu
def create_image_hdu(self,
                     img=None,
                     dims=None,
                     dtype=None,
                     extname=None,
                     extver=None,
                     compress=None,
                     tile_dims=None,
                     header=None):
    """
    Create a new, empty image HDU and reload the hdu list.  Either
    create from an input image or from input dims and dtype

        fits.create_image_hdu(image, ...)
        fits.create_image_hdu(dims=dims, dtype=dtype)

    If an image is sent, the data are also written.

    You can write data into the new extension using
        fits[extension].write(image)

    Alternatively you can skip calling this function and instead just use

        fits.write(image)
    or
        fits.write_image(image)

    which will create the new image extension for you with the
    appropriate structure, and write the data.

    parameters
    ----------
    img: ndarray, optional
        An image with which to determine the properties of the HDU. The
        data will be written.
    dims: sequence, optional
        A sequence describing the dimensions of the image to be created
        on disk.  You must also send a dtype=
    dtype: numpy data type
        When sending dims= also send the data type.  Can be of the
        various numpy data type declaration styles, e.g. 'f8',
        numpy.float64.
    extname: string, optional
        An optional extension name.
    extver: integer, optional
        FITS allows multiple extensions to have the same name (extname).
        These extensions can optionally specify an EXTVER version number in
        the header.  Send extver= to set a particular version, which will
        be represented in the header with keyname EXTVER.  The extver must
        be an integer > 0.  If extver is not sent, the first one will be
        selected.  If ext is an integer, the extver is ignored.
    compress: string, optional
        A string representing the compression algorithm for images,
        default None.
        Can be one of
            'RICE'
            'GZIP'
            'GZIP_2'
            'PLIO' (no unsigned or negative integers)
            'HCOMPRESS'
        (case-insensitive) See the cfitsio manual for details.
    header: FITSHDR, list, dict, optional
        This is only used to determine how many slots to reserve for
        header keywords

    restrictions
    ------------
    The File must be opened READWRITE
    """

    if (img is not None) or (img is None and dims is None):
        from_image = True
    elif dims is not None:
        from_image = False

    if from_image:
        img2send = img
        if img is not None:
            dims = img.shape
            dtstr = img.dtype.descr[0][1][1:]
            if img.size == 0:
                raise ValueError("data must have at least 1 row")

            # data must be c-contiguous and native byte order
            if not img.flags['C_CONTIGUOUS']:
                # this always makes a copy
                img2send = numpy.ascontiguousarray(img)
                array_to_native(img2send, inplace=True)
            else:
                img2send = array_to_native(img, inplace=False)

            if IS_PY3 and img2send.dtype.char == 'U':
                # for python3, we convert unicode to ascii
                # this will error if the character is not in ascii
                img2send = img2send.astype('S', copy=False)

        else:
            self._ensure_empty_image_ok()
            compress = None
            tile_dims = None

        # we get dims from the input image
        dims2send = None
    else:
        # img was None and dims was sent
        if dtype is None:
            raise ValueError("send dtype= with dims=")

        # this must work!
        dtype = numpy.dtype(dtype)
        dtstr = dtype.descr[0][1][1:]

        # use the example image to build the type in C
        img2send = numpy.zeros(1, dtype=dtype)

        # sending an array simplifies access
        dims2send = numpy.array(dims, dtype='i8', ndmin=1)

    if img2send is not None:
        if img2send.dtype.fields is not None:
            raise ValueError(
                "got record data type, expected regular ndarray")

    if extname is None:
        # will be ignored
        extname = ""
    else:
        if not isstring(extname):
            raise ValueError("extension name must be a string")
        extname = mks(extname)

    if extname is not None and extver is not None:
        extver = check_extver(extver)
    if extver is None:
        # will be ignored
        extver = 0

    comptype = get_compress_type(compress)
    tile_dims = get_tile_dims(tile_dims, dims)

    if img2send is not None:
        check_comptype_img(comptype, dtstr)

    if header is not None:
        nkeys = len(header)
    else:
        nkeys = 0

    self._FITS.create_image_hdu(img2send,
                                nkeys,
                                dims=dims2send,
                                comptype=comptype,
                                tile_dims=tile_dims,
                                extname=extname,
                                extver=extver)

    # don't rebuild the whole list unless this is the first hdu
    # to be created
    self.update_hdu_list(rebuild=False)
python
def create_image_hdu(self, img=None, dims=None, dtype=None, extname=None, extver=None, compress=None, tile_dims=None, header=None): if (img is not None) or (img is None and dims is None): from_image = True elif dims is not None: from_image = False if from_image: img2send = img if img is not None: dims = img.shape dtstr = img.dtype.descr[0][1][1:] if img.size == 0: raise ValueError("data must have at least 1 row") if not img.flags['C_CONTIGUOUS']: img2send = numpy.ascontiguousarray(img) array_to_native(img2send, inplace=True) else: img2send = array_to_native(img, inplace=False) if IS_PY3 and img2send.dtype.char == 'U': img2send = img2send.astype('S', copy=False) else: self._ensure_empty_image_ok() compress = None tile_dims = None dims2send = None else: if dtype is None: raise ValueError("send dtype= with dims=") dtype = numpy.dtype(dtype) dtstr = dtype.descr[0][1][1:] img2send = numpy.zeros(1, dtype=dtype) dims2send = numpy.array(dims, dtype='i8', ndmin=1) if img2send is not None: if img2send.dtype.fields is not None: raise ValueError( "got record data type, expected regular ndarray") if extname is None: extname = "" else: if not isstring(extname): raise ValueError("extension name must be a string") extname = mks(extname) if extname is not None and extver is not None: extver = check_extver(extver) if extver is None: extver = 0 comptype = get_compress_type(compress) tile_dims = get_tile_dims(tile_dims, dims) if img2send is not None: check_comptype_img(comptype, dtstr) if header is not None: nkeys = len(header) else: nkeys = 0 self._FITS.create_image_hdu(img2send, nkeys, dims=dims2send, comptype=comptype, tile_dims=tile_dims, extname=extname, extver=extver) self.update_hdu_list(rebuild=False)
[ "def", "create_image_hdu", "(", "self", ",", "img", "=", "None", ",", "dims", "=", "None", ",", "dtype", "=", "None", ",", "extname", "=", "None", ",", "extver", "=", "None", ",", "compress", "=", "None", ",", "tile_dims", "=", "None", ",", "header", "=", "None", ")", ":", "if", "(", "img", "is", "not", "None", ")", "or", "(", "img", "is", "None", "and", "dims", "is", "None", ")", ":", "from_image", "=", "True", "elif", "dims", "is", "not", "None", ":", "from_image", "=", "False", "if", "from_image", ":", "img2send", "=", "img", "if", "img", "is", "not", "None", ":", "dims", "=", "img", ".", "shape", "dtstr", "=", "img", ".", "dtype", ".", "descr", "[", "0", "]", "[", "1", "]", "[", "1", ":", "]", "if", "img", ".", "size", "==", "0", ":", "raise", "ValueError", "(", "\"data must have at least 1 row\"", ")", "# data must be c-contiguous and native byte order", "if", "not", "img", ".", "flags", "[", "'C_CONTIGUOUS'", "]", ":", "# this always makes a copy", "img2send", "=", "numpy", ".", "ascontiguousarray", "(", "img", ")", "array_to_native", "(", "img2send", ",", "inplace", "=", "True", ")", "else", ":", "img2send", "=", "array_to_native", "(", "img", ",", "inplace", "=", "False", ")", "if", "IS_PY3", "and", "img2send", ".", "dtype", ".", "char", "==", "'U'", ":", "# for python3, we convert unicode to ascii", "# this will error if the character is not in ascii", "img2send", "=", "img2send", ".", "astype", "(", "'S'", ",", "copy", "=", "False", ")", "else", ":", "self", ".", "_ensure_empty_image_ok", "(", ")", "compress", "=", "None", "tile_dims", "=", "None", "# we get dims from the input image", "dims2send", "=", "None", "else", ":", "# img was None and dims was sent", "if", "dtype", "is", "None", ":", "raise", "ValueError", "(", "\"send dtype= with dims=\"", ")", "# this must work!", "dtype", "=", "numpy", ".", "dtype", "(", "dtype", ")", "dtstr", "=", "dtype", ".", "descr", "[", "0", "]", "[", "1", "]", "[", "1", ":", "]", "# use the example image to build the type in C", "img2send", "=", "numpy", ".", "zeros", "(", "1", ",", "dtype", "=", "dtype", ")", "# sending an array simplifies access", "dims2send", "=", "numpy", ".", "array", "(", "dims", ",", "dtype", "=", "'i8'", ",", "ndmin", "=", "1", ")", "if", "img2send", "is", "not", "None", ":", "if", "img2send", ".", "dtype", ".", "fields", "is", "not", "None", ":", "raise", "ValueError", "(", "\"got record data type, expected regular ndarray\"", ")", "if", "extname", "is", "None", ":", "# will be ignored", "extname", "=", "\"\"", "else", ":", "if", "not", "isstring", "(", "extname", ")", ":", "raise", "ValueError", "(", "\"extension name must be a string\"", ")", "extname", "=", "mks", "(", "extname", ")", "if", "extname", "is", "not", "None", "and", "extver", "is", "not", "None", ":", "extver", "=", "check_extver", "(", "extver", ")", "if", "extver", "is", "None", ":", "# will be ignored", "extver", "=", "0", "comptype", "=", "get_compress_type", "(", "compress", ")", "tile_dims", "=", "get_tile_dims", "(", "tile_dims", ",", "dims", ")", "if", "img2send", "is", "not", "None", ":", "check_comptype_img", "(", "comptype", ",", "dtstr", ")", "if", "header", "is", "not", "None", ":", "nkeys", "=", "len", "(", "header", ")", "else", ":", "nkeys", "=", "0", "self", ".", "_FITS", ".", "create_image_hdu", "(", "img2send", ",", "nkeys", ",", "dims", "=", "dims2send", ",", "comptype", "=", "comptype", ",", "tile_dims", "=", "tile_dims", ",", "extname", "=", "extname", ",", "extver", "=", "extver", ")", "# don't rebuild the whole list unless this is the first hdu", "# to be created", "self", ".", "update_hdu_list", "(", "rebuild", "=", "False", ")" ]
Create a new, empty image HDU and reload the hdu list. Either create from an input image or from input dims and dtype fits.create_image_hdu(image, ...) fits.create_image_hdu(dims=dims, dtype=dtype) If an image is sent, the data are also written. You can write data into the new extension using fits[extension].write(image) Alternatively you can skip calling this function and instead just use fits.write(image) or fits.write_image(image) which will create the new image extension for you with the appropriate structure, and write the data. parameters ---------- img: ndarray, optional An image with which to determine the properties of the HDU. The data will be written. dims: sequence, optional A sequence describing the dimensions of the image to be created on disk. You must also send a dtype= dtype: numpy data type When sending dims= also send the data type. Can be of the various numpy data type declaration styles, e.g. 'f8', numpy.float64. extname: string, optional An optional extension name. extver: integer, optional FITS allows multiple extensions to have the same name (extname). These extensions can optionally specify an EXTVER version number in the header. Send extver= to set a particular version, which will be represented in the header with keyname EXTVER. The extver must be an integer > 0. If extver is not sent, the first one will be selected. If ext is an integer, the extver is ignored. compress: string, optional A string representing the compression algorithm for images, default None. Can be one of 'RICE' 'GZIP' 'GZIP_2' 'PLIO' (no unsigned or negative integers) 'HCOMPRESS' (case-insensitive) See the cfitsio manual for details. header: FITSHDR, list, dict, optional This is only used to determine how many slots to reserve for header keywords restrictions ------------ The File must be opened READWRITE
[ "Create", "a", "new", "empty", "image", "HDU", "and", "reload", "the", "hdu", "list", ".", "Either", "create", "from", "an", "input", "image", "or", "from", "input", "dims", "and", "dtype" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L606-L765
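A sketch of the dims=/dtype= pattern from the docstring: reserve the extension first, then write into it through the HDU; the path and extension name are placeholders.

import numpy as np
import fitsio

with fitsio.FITS('staged.fits', 'rw', clobber=True) as fits:
    fits.create_image_hdu(dims=[100, 200], dtype='f8', extname='SCI')
    fits['SCI'].write(np.ones((10, 200)), start=0)  # fill the first rows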
esheldon/fitsio
fitsio/fitslib.py
FITS._ensure_empty_image_ok
def _ensure_empty_image_ok(self):
    """
    Unless ignore_empty was set to True, only allow an empty HDU as the
    first HDU, and only if no data has been written there already
    """
    if self.ignore_empty:
        return

    if len(self) > 1:
        raise RuntimeError(
            "Cannot write None image at extension %d" % len(self))
    if 'ndims' in self[0]._info:
        raise RuntimeError("Can only write None images to extension zero, "
                           "which already exists")
python
def _ensure_empty_image_ok(self): if self.ignore_empty: return if len(self) > 1: raise RuntimeError( "Cannot write None image at extension %d" % len(self)) if 'ndims' in self[0]._info: raise RuntimeError("Can only write None images to extension zero, " "which already exists")
[ "def", "_ensure_empty_image_ok", "(", "self", ")", ":", "if", "self", ".", "ignore_empty", ":", "return", "if", "len", "(", "self", ")", ">", "1", ":", "raise", "RuntimeError", "(", "\"Cannot write None image at extension %d\"", "%", "len", "(", "self", ")", ")", "if", "'ndims'", "in", "self", "[", "0", "]", ".", "_info", ":", "raise", "RuntimeError", "(", "\"Can only write None images to extension zero, \"", "\"which already exists\"", ")" ]
Unless ignore_empty was set to True, only allow an empty HDU as the
    first HDU, and only if no data has been written there already
[ "If", "ignore_empty", "was", "not", "set", "to", "True", "we", "only", "allow", "empty", "HDU", "for", "first", "HDU", "and", "if", "there", "is", "no", "data", "there", "already" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L767-L780
esheldon/fitsio
fitsio/fitslib.py
FITS.write_table
def write_table(self, data, table_type='binary', names=None, formats=None, units=None, extname=None, extver=None, header=None, write_bitcols=False): """ Create a new table extension and write the data. The table definition is taken from the fields in the input array. If you want to append new rows to the table, access the HDU directly and use the write() function, e.g. fits[extension].append(data) parameters ---------- data: recarray A numpy array with fields. The table definition will be determined from this array. table_type: string, optional Either 'binary' or 'ascii', default 'binary' Matching is case-insensitive extname: string, optional An optional string for the extension name. extver: integer, optional FITS allows multiple extensions to have the same name (extname). These extensions can optionally specify an EXTVER version number in the header. Send extver= to set a particular version, which will be represented in the header with keyname EXTVER. The extver must be an integer > 0. If extver is not sent, the first one will be selected. If ext is an integer, the extver is ignored. units: list/dec, optional: A list of strings with units for each column. header: FITSHDR, list, dict, optional A set of header keys to write. The keys are written before the data is written to the table, preventing a resizing of the table area. Can be one of these: - FITSHDR object - list of dictionaries containing 'name','value' and optionally a 'comment' field; the order is preserved. - a dictionary of keyword-value pairs; no comments are written in this case, and the order is arbitrary. Note required keywords such as NAXIS, XTENSION, etc are cleaed out. write_bitcols: boolean, optional Write boolean arrays in the FITS bitcols format, default False restrictions ------------ The File must be opened READWRITE """ """ if data.dtype.fields == None: raise ValueError("data must have fields") if data.size == 0: raise ValueError("data must have at least 1 row") """ self.create_table_hdu(data=data, header=header, names=names, units=units, extname=extname, extver=extver, table_type=table_type, write_bitcols=write_bitcols) if header is not None: self[-1].write_keys(header) self[-1]._update_info() self[-1].write(data, names=names)
python
def write_table(self, data, table_type='binary', names=None, formats=None, units=None, extname=None, extver=None, header=None, write_bitcols=False): self.create_table_hdu(data=data, header=header, names=names, units=units, extname=extname, extver=extver, table_type=table_type, write_bitcols=write_bitcols) if header is not None: self[-1].write_keys(header) self[-1]._update_info() self[-1].write(data, names=names)
[ "def", "write_table", "(", "self", ",", "data", ",", "table_type", "=", "'binary'", ",", "names", "=", "None", ",", "formats", "=", "None", ",", "units", "=", "None", ",", "extname", "=", "None", ",", "extver", "=", "None", ",", "header", "=", "None", ",", "write_bitcols", "=", "False", ")", ":", "\"\"\"\n if data.dtype.fields == None:\n raise ValueError(\"data must have fields\")\n if data.size == 0:\n raise ValueError(\"data must have at least 1 row\")\n \"\"\"", "self", ".", "create_table_hdu", "(", "data", "=", "data", ",", "header", "=", "header", ",", "names", "=", "names", ",", "units", "=", "units", ",", "extname", "=", "extname", ",", "extver", "=", "extver", ",", "table_type", "=", "table_type", ",", "write_bitcols", "=", "write_bitcols", ")", "if", "header", "is", "not", "None", ":", "self", "[", "-", "1", "]", ".", "write_keys", "(", "header", ")", "self", "[", "-", "1", "]", ".", "_update_info", "(", ")", "self", "[", "-", "1", "]", ".", "write", "(", "data", ",", "names", "=", "names", ")" ]
Create a new table extension and write the data.

    The table definition is taken from the fields in the input array.  If
    you want to append new rows to the table, access the HDU directly and
    use the append() function, e.g.

        fits[extension].append(data)

    parameters
    ----------
    data: recarray
        A numpy array with fields.  The table definition will be
        determined from this array.
    table_type: string, optional
        Either 'binary' or 'ascii', default 'binary'
        Matching is case-insensitive
    extname: string, optional
        An optional string for the extension name.
    extver: integer, optional
        FITS allows multiple extensions to have the same name (extname).
        These extensions can optionally specify an EXTVER version number in
        the header.  Send extver= to set a particular version, which will
        be represented in the header with keyname EXTVER.  The extver must
        be an integer > 0.  If extver is not sent, the first one will be
        selected.  If ext is an integer, the extver is ignored.
    units: list, optional
        A list of strings with units for each column.
    header: FITSHDR, list, dict, optional
        A set of header keys to write. The keys are written before the data
        is written to the table, preventing a resizing of the table area.

        Can be one of these:
            - FITSHDR object
            - list of dictionaries containing 'name','value' and optionally
              a 'comment' field; the order is preserved.
            - a dictionary of keyword-value pairs; no comments are written
              in this case, and the order is arbitrary.
        Note required keywords such as NAXIS, XTENSION, etc. are cleaned
        out.
    write_bitcols: boolean, optional
        Write boolean arrays in the FITS bitcols format, default False

    restrictions
    ------------
    The File must be opened READWRITE
[ "Create", "a", "new", "table", "extension", "and", "write", "the", "data", "." ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L782-L853
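A minimal usage sketch for FITS.write_table; the path 'demo.fits', the column names, and the clobber=True keyword (assumed here to overwrite any existing file) are illustrative assumptions, not part of the record above.

    import numpy
    import fitsio

    # the table definition is taken from the fields of this array
    data = numpy.zeros(4, dtype=[('index', 'i4'), ('flux', 'f8')])
    data['index'] = numpy.arange(4)
    data['flux'] = numpy.random.uniform(size=4)

    # the file must be opened read-write; 'demo.fits' is a hypothetical path
    with fitsio.FITS('demo.fits', 'rw', clobber=True) as fits:
        fits.write_table(data, extname='CATALOG', units=['', 'counts'])
        # appending rows goes through the HDU itself, as the docstring notes
        fits['CATALOG'].append(data)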
esheldon/fitsio
fitsio/fitslib.py
FITS.create_table_hdu
def create_table_hdu(self, data=None, dtype=None, header=None, names=None, formats=None, units=None, dims=None, extname=None, extver=None, table_type='binary', write_bitcols=False): """ Create a new, empty table extension and reload the hdu list. There are three ways to do it: 1) send a numpy dtype, from which the formats in the fits file will be determined. 2) Send an array in data= keyword. this is required if you have object fields for writing to variable length columns. 3) send the names,formats and dims yourself You can then write data into the new extension using fits[extension].write(array) If you want to write to a single column fits[extension].write_column(array) But be careful as the other columns will be left zeroed. Often you will instead just use write_table to do this all atomically. fits.write_table(recarray) write_table will create the new table extension for you with the appropriate fields. parameters ---------- dtype: numpy dtype or descriptor, optional If you have an array with fields, you can just send arr.dtype. You can also use a list of tuples, e.g. [('x','f8'),('index','i4')] or a dictionary representation. data: a numpy array with fields, optional or a dictionary An array or dict from which to determine the table definition. You must use this instead of sending a descriptor if you have object array fields, as this is the only way to determine the type and max size. names: list of strings, optional The list of field names formats: list of strings, optional The TFORM format strings for each field. dims: list of strings, optional An optional list of dimension strings for each field. Should match the repeat count for the formats fields. Be careful of the order since FITS is more like fortran. See the descr2tabledef function. table_type: string, optional Either 'binary' or 'ascii', default 'binary' Matching is case-insensitive units: list of strings, optional An optional list of unit strings for each field. extname: string, optional An optional extension name. extver: integer, optional FITS allows multiple extensions to have the same name (extname). These extensions can optionally specify an EXTVER version number in the header. Send extver= to set a particular version, which will be represented in the header with keyname EXTVER. The extver must be an integer > 0. If extver is not sent, the first one will be selected. If ext is an integer, the extver is ignored. write_bitcols: bool, optional Write boolean arrays in the FITS bitcols format, default False header: FITSHDR, list, dict, optional This is only used to determine how many slots to reserve for header keywords restrictions ------------ The File must be opened READWRITE """ # record this for the TableHDU object self.keys['write_bitcols'] = write_bitcols # can leave as turn table_type_int = _extract_table_type(table_type) if data is not None: if isinstance(data, numpy.ndarray): names, formats, dims = array2tabledef( data, table_type=table_type, write_bitcols=write_bitcols) elif isinstance(data, (list, dict)): names, formats, dims = collection2tabledef( data, names=names, table_type=table_type, write_bitcols=write_bitcols) else: raise ValueError( "data must be an ndarray with fields or a dict") elif dtype is not None: dtype = numpy.dtype(dtype) names, formats, dims = descr2tabledef( dtype. 
descr, write_bitcols=write_bitcols, table_type=table_type, ) else: if names is None or formats is None: raise ValueError( "send either dtype=, data=, or names= and formats=") if not isinstance(names, list) or not isinstance(formats, list): raise ValueError("names and formats should be lists") if len(names) != len(formats): raise ValueError("names and formats must be same length") if dims is not None: if not isinstance(dims, list): raise ValueError("dims should be a list") if len(dims) != len(names): raise ValueError("names and dims must be same length") if units is not None: if not isinstance(units, list): raise ValueError("units should be a list") if len(units) != len(names): raise ValueError("names and units must be same length") if extname is None: # will be ignored extname = "" else: if not isstring(extname): raise ValueError("extension name must be a string") extname = mks(extname) if extname is not None and extver is not None: extver = check_extver(extver) if extver is None: # will be ignored extver = 0 if extname is None: # will be ignored extname = "" if header is not None: nkeys = len(header) else: nkeys = 0 # note we can create extname in the c code for tables, but not images self._FITS.create_table_hdu(table_type_int, nkeys, names, formats, tunit=units, tdim=dims, extname=extname, extver=extver) # don't rebuild the whole list unless this is the first hdu # to be created self.update_hdu_list(rebuild=False)
python
def create_table_hdu(self, data=None, dtype=None, header=None, names=None, formats=None, units=None, dims=None, extname=None, extver=None, table_type='binary', write_bitcols=False): self.keys['write_bitcols'] = write_bitcols table_type_int = _extract_table_type(table_type) if data is not None: if isinstance(data, numpy.ndarray): names, formats, dims = array2tabledef( data, table_type=table_type, write_bitcols=write_bitcols) elif isinstance(data, (list, dict)): names, formats, dims = collection2tabledef( data, names=names, table_type=table_type, write_bitcols=write_bitcols) else: raise ValueError( "data must be an ndarray with fields or a dict") elif dtype is not None: dtype = numpy.dtype(dtype) names, formats, dims = descr2tabledef( dtype. descr, write_bitcols=write_bitcols, table_type=table_type, ) else: if names is None or formats is None: raise ValueError( "send either dtype=, data=, or names= and formats=") if not isinstance(names, list) or not isinstance(formats, list): raise ValueError("names and formats should be lists") if len(names) != len(formats): raise ValueError("names and formats must be same length") if dims is not None: if not isinstance(dims, list): raise ValueError("dims should be a list") if len(dims) != len(names): raise ValueError("names and dims must be same length") if units is not None: if not isinstance(units, list): raise ValueError("units should be a list") if len(units) != len(names): raise ValueError("names and units must be same length") if extname is None: extname = "" else: if not isstring(extname): raise ValueError("extension name must be a string") extname = mks(extname) if extname is not None and extver is not None: extver = check_extver(extver) if extver is None: extver = 0 if extname is None: extname = "" if header is not None: nkeys = len(header) else: nkeys = 0 self._FITS.create_table_hdu(table_type_int, nkeys, names, formats, tunit=units, tdim=dims, extname=extname, extver=extver) self.update_hdu_list(rebuild=False)
[ "def", "create_table_hdu", "(", "self", ",", "data", "=", "None", ",", "dtype", "=", "None", ",", "header", "=", "None", ",", "names", "=", "None", ",", "formats", "=", "None", ",", "units", "=", "None", ",", "dims", "=", "None", ",", "extname", "=", "None", ",", "extver", "=", "None", ",", "table_type", "=", "'binary'", ",", "write_bitcols", "=", "False", ")", ":", "# record this for the TableHDU object", "self", ".", "keys", "[", "'write_bitcols'", "]", "=", "write_bitcols", "# can leave as turn", "table_type_int", "=", "_extract_table_type", "(", "table_type", ")", "if", "data", "is", "not", "None", ":", "if", "isinstance", "(", "data", ",", "numpy", ".", "ndarray", ")", ":", "names", ",", "formats", ",", "dims", "=", "array2tabledef", "(", "data", ",", "table_type", "=", "table_type", ",", "write_bitcols", "=", "write_bitcols", ")", "elif", "isinstance", "(", "data", ",", "(", "list", ",", "dict", ")", ")", ":", "names", ",", "formats", ",", "dims", "=", "collection2tabledef", "(", "data", ",", "names", "=", "names", ",", "table_type", "=", "table_type", ",", "write_bitcols", "=", "write_bitcols", ")", "else", ":", "raise", "ValueError", "(", "\"data must be an ndarray with fields or a dict\"", ")", "elif", "dtype", "is", "not", "None", ":", "dtype", "=", "numpy", ".", "dtype", "(", "dtype", ")", "names", ",", "formats", ",", "dims", "=", "descr2tabledef", "(", "dtype", ".", "descr", ",", "write_bitcols", "=", "write_bitcols", ",", "table_type", "=", "table_type", ",", ")", "else", ":", "if", "names", "is", "None", "or", "formats", "is", "None", ":", "raise", "ValueError", "(", "\"send either dtype=, data=, or names= and formats=\"", ")", "if", "not", "isinstance", "(", "names", ",", "list", ")", "or", "not", "isinstance", "(", "formats", ",", "list", ")", ":", "raise", "ValueError", "(", "\"names and formats should be lists\"", ")", "if", "len", "(", "names", ")", "!=", "len", "(", "formats", ")", ":", "raise", "ValueError", "(", "\"names and formats must be same length\"", ")", "if", "dims", "is", "not", "None", ":", "if", "not", "isinstance", "(", "dims", ",", "list", ")", ":", "raise", "ValueError", "(", "\"dims should be a list\"", ")", "if", "len", "(", "dims", ")", "!=", "len", "(", "names", ")", ":", "raise", "ValueError", "(", "\"names and dims must be same length\"", ")", "if", "units", "is", "not", "None", ":", "if", "not", "isinstance", "(", "units", ",", "list", ")", ":", "raise", "ValueError", "(", "\"units should be a list\"", ")", "if", "len", "(", "units", ")", "!=", "len", "(", "names", ")", ":", "raise", "ValueError", "(", "\"names and units must be same length\"", ")", "if", "extname", "is", "None", ":", "# will be ignored", "extname", "=", "\"\"", "else", ":", "if", "not", "isstring", "(", "extname", ")", ":", "raise", "ValueError", "(", "\"extension name must be a string\"", ")", "extname", "=", "mks", "(", "extname", ")", "if", "extname", "is", "not", "None", "and", "extver", "is", "not", "None", ":", "extver", "=", "check_extver", "(", "extver", ")", "if", "extver", "is", "None", ":", "# will be ignored", "extver", "=", "0", "if", "extname", "is", "None", ":", "# will be ignored", "extname", "=", "\"\"", "if", "header", "is", "not", "None", ":", "nkeys", "=", "len", "(", "header", ")", "else", ":", "nkeys", "=", "0", "# note we can create extname in the c code for tables, but not images", "self", ".", "_FITS", ".", "create_table_hdu", "(", "table_type_int", ",", "nkeys", ",", "names", ",", "formats", ",", "tunit", "=", "units", ",", "tdim", "=", "dims", ",", 
"extname", "=", "extname", ",", "extver", "=", "extver", ")", "# don't rebuild the whole list unless this is the first hdu", "# to be created", "self", ".", "update_hdu_list", "(", "rebuild", "=", "False", ")" ]
Create a new, empty table extension and reload the hdu list. There are three ways to do it: 1) send a numpy dtype, from which the formats in the fits file will be determined. 2) Send an array in the data= keyword. This is required if you have object fields for writing to variable length columns. 3) send the names, formats and dims yourself. You can then write data into the new extension using fits[extension].write(array) If you want to write to a single column fits[extension].write_column(array) But be careful as the other columns will be left zeroed. Often you will instead just use write_table to do this all atomically. fits.write_table(recarray) write_table will create the new table extension for you with the appropriate fields. parameters ---------- dtype: numpy dtype or descriptor, optional If you have an array with fields, you can just send arr.dtype. You can also use a list of tuples, e.g. [('x','f8'),('index','i4')] or a dictionary representation. data: a numpy array with fields, optional or a dictionary An array or dict from which to determine the table definition. You must use this instead of sending a descriptor if you have object array fields, as this is the only way to determine the type and max size. names: list of strings, optional The list of field names formats: list of strings, optional The TFORM format strings for each field. dims: list of strings, optional An optional list of dimension strings for each field. Should match the repeat count for the formats fields. Be careful of the order since FITS is more like Fortran. See the descr2tabledef function. table_type: string, optional Either 'binary' or 'ascii', default 'binary' Matching is case-insensitive units: list of strings, optional An optional list of unit strings for each field. extname: string, optional An optional extension name. extver: integer, optional FITS allows multiple extensions to have the same name (extname). These extensions can optionally specify an EXTVER version number in the header. Send extver= to set a particular version, which will be represented in the header with keyname EXTVER. The extver must be an integer > 0. If extver is not sent, the first one will be selected. If ext is an integer, the extver is ignored. write_bitcols: bool, optional Write boolean arrays in the FITS bitcols format, default False header: FITSHDR, list, dict, optional This is only used to determine how many slots to reserve for header keywords restrictions ------------ The File must be opened READWRITE
[ "Create", "a", "new", "empty", "table", "extension", "and", "reload", "the", "hdu", "list", "." ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L861-L1017
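A sketch of the dtype route the docstring describes: create the empty table first, then fill it through the new HDU. The path 'demo.fits', the extension name, and clobber=True are hypothetical.

    import numpy
    import fitsio

    dtype = [('x', 'f8'), ('index', 'i4')]

    with fitsio.FITS('demo.fits', 'rw', clobber=True) as fits:
        fits.create_table_hdu(dtype=dtype, extname='MYTABLE')
        arr = numpy.zeros(10, dtype=dtype)
        arr['x'] = numpy.linspace(0.0, 1.0, 10)
        arr['index'] = numpy.arange(10)
        # per the docstring, the new extension is filled with write()
        fits[-1].write(arr)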
esheldon/fitsio
fitsio/fitslib.py
FITS.update_hdu_list
def update_hdu_list(self, rebuild=True): """ Force an update of the entire HDU list Normally you don't need to call this method directly if rebuild is false or the hdu_list is not yet set, the list is rebuilt from scratch """ if not hasattr(self, 'hdu_list'): rebuild = True if rebuild: self.hdu_list = [] self.hdu_map = {} # we don't know how many hdus there are, so iterate # until we can't open any more ext_start = 0 else: # start from last ext_start = len(self) ext = ext_start while True: try: self._append_hdu_info(ext) except IOError: break except RuntimeError: break ext = ext + 1
python
def update_hdu_list(self, rebuild=True): if not hasattr(self, 'hdu_list'): rebuild = True if rebuild: self.hdu_list = [] self.hdu_map = {} ext_start = 0 else: ext_start = len(self) ext = ext_start while True: try: self._append_hdu_info(ext) except IOError: break except RuntimeError: break ext = ext + 1
[ "def", "update_hdu_list", "(", "self", ",", "rebuild", "=", "True", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'hdu_list'", ")", ":", "rebuild", "=", "True", "if", "rebuild", ":", "self", ".", "hdu_list", "=", "[", "]", "self", ".", "hdu_map", "=", "{", "}", "# we don't know how many hdus there are, so iterate", "# until we can't open any more", "ext_start", "=", "0", "else", ":", "# start from last", "ext_start", "=", "len", "(", "self", ")", "ext", "=", "ext_start", "while", "True", ":", "try", ":", "self", ".", "_append_hdu_info", "(", "ext", ")", "except", "IOError", ":", "break", "except", "RuntimeError", ":", "break", "ext", "=", "ext", "+", "1" ]
Force an update of the entire HDU list. Normally you don't need to call this method directly. If rebuild is True, or if the hdu_list is not yet set, the list is rebuilt from scratch; otherwise only newly appended HDUs are added
[ "Force", "an", "update", "of", "the", "entire", "HDU", "list" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L1019-L1051
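This is normally called for you, but a manual rescan can be forced. A sketch, where 'existing.fits' is a hypothetical path:

    import fitsio

    with fitsio.FITS('existing.fits') as fits:
        fits.update_hdu_list()  # rebuild=True rescans from extension 0
        print(len(fits.hdu_list), 'HDUs found')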
esheldon/fitsio
fitsio/fitslib.py
FITS._append_hdu_info
def _append_hdu_info(self, ext): """ internal routine append info for indiciated extension """ # raised IOError if not found hdu_type = self._FITS.movabs_hdu(ext+1) if hdu_type == IMAGE_HDU: hdu = ImageHDU(self._FITS, ext, **self.keys) elif hdu_type == BINARY_TBL: hdu = TableHDU(self._FITS, ext, **self.keys) elif hdu_type == ASCII_TBL: hdu = AsciiTableHDU(self._FITS, ext, **self.keys) else: mess = ("extension %s is of unknown type %s " "this is probably a bug") mess = mess % (ext, hdu_type) raise IOError(mess) self.hdu_list.append(hdu) self.hdu_map[ext] = hdu extname = hdu.get_extname() if not self.case_sensitive: extname = extname.lower() if extname != '': # this will guarantee we default to *first* version, # if version is not requested, using __getitem__ if extname not in self.hdu_map: self.hdu_map[extname] = hdu ver = hdu.get_extver() if ver > 0: key = '%s-%s' % (extname, ver) self.hdu_map[key] = hdu
python
def _append_hdu_info(self, ext): hdu_type = self._FITS.movabs_hdu(ext+1) if hdu_type == IMAGE_HDU: hdu = ImageHDU(self._FITS, ext, **self.keys) elif hdu_type == BINARY_TBL: hdu = TableHDU(self._FITS, ext, **self.keys) elif hdu_type == ASCII_TBL: hdu = AsciiTableHDU(self._FITS, ext, **self.keys) else: mess = ("extension %s is of unknown type %s " "this is probably a bug") mess = mess % (ext, hdu_type) raise IOError(mess) self.hdu_list.append(hdu) self.hdu_map[ext] = hdu extname = hdu.get_extname() if not self.case_sensitive: extname = extname.lower() if extname != '': if extname not in self.hdu_map: self.hdu_map[extname] = hdu ver = hdu.get_extver() if ver > 0: key = '%s-%s' % (extname, ver) self.hdu_map[key] = hdu
[ "def", "_append_hdu_info", "(", "self", ",", "ext", ")", ":", "# raised IOError if not found", "hdu_type", "=", "self", ".", "_FITS", ".", "movabs_hdu", "(", "ext", "+", "1", ")", "if", "hdu_type", "==", "IMAGE_HDU", ":", "hdu", "=", "ImageHDU", "(", "self", ".", "_FITS", ",", "ext", ",", "*", "*", "self", ".", "keys", ")", "elif", "hdu_type", "==", "BINARY_TBL", ":", "hdu", "=", "TableHDU", "(", "self", ".", "_FITS", ",", "ext", ",", "*", "*", "self", ".", "keys", ")", "elif", "hdu_type", "==", "ASCII_TBL", ":", "hdu", "=", "AsciiTableHDU", "(", "self", ".", "_FITS", ",", "ext", ",", "*", "*", "self", ".", "keys", ")", "else", ":", "mess", "=", "(", "\"extension %s is of unknown type %s \"", "\"this is probably a bug\"", ")", "mess", "=", "mess", "%", "(", "ext", ",", "hdu_type", ")", "raise", "IOError", "(", "mess", ")", "self", ".", "hdu_list", ".", "append", "(", "hdu", ")", "self", ".", "hdu_map", "[", "ext", "]", "=", "hdu", "extname", "=", "hdu", ".", "get_extname", "(", ")", "if", "not", "self", ".", "case_sensitive", ":", "extname", "=", "extname", ".", "lower", "(", ")", "if", "extname", "!=", "''", ":", "# this will guarantee we default to *first* version,", "# if version is not requested, using __getitem__", "if", "extname", "not", "in", "self", ".", "hdu_map", ":", "self", ".", "hdu_map", "[", "extname", "]", "=", "hdu", "ver", "=", "hdu", ".", "get_extver", "(", ")", "if", "ver", ">", "0", ":", "key", "=", "'%s-%s'", "%", "(", "extname", ",", "ver", ")", "self", ".", "hdu_map", "[", "key", "]", "=", "hdu" ]
internal routine to append info for the indicated extension
[ "internal", "routine" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L1053-L1090
esheldon/fitsio
fitsio/fitslib.py
FITS.next
def next(self): """ Move to the next iteration """ if self._iter_index == len(self.hdu_list): raise StopIteration hdu = self.hdu_list[self._iter_index] self._iter_index += 1 return hdu
python
def next(self): if self._iter_index == len(self.hdu_list): raise StopIteration hdu = self.hdu_list[self._iter_index] self._iter_index += 1 return hdu
[ "def", "next", "(", "self", ")", ":", "if", "self", ".", "_iter_index", "==", "len", "(", "self", ".", "hdu_list", ")", ":", "raise", "StopIteration", "hdu", "=", "self", ".", "hdu_list", "[", "self", ".", "_iter_index", "]", "self", ".", "_iter_index", "+=", "1", "return", "hdu" ]
Move to the next iteration
[ "Move", "to", "the", "next", "iteration" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L1101-L1109
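Since next() steps through hdu_list, a FITS object can be iterated directly; 'existing.fits' is a hypothetical multi-extension file.

    import fitsio

    with fitsio.FITS('existing.fits') as fits:
        for hdu in fits:
            print(hdu.get_extname(), hdu.get_exttype())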
esheldon/fitsio
fitsio/fitslib.py
FITS._extract_item
def _extract_item(self, item): """ utility function to extract an "item", meaning a extension number,name plus version. """ ver = 0 if isinstance(item, tuple): ver_sent = True nitem = len(item) if nitem == 1: ext = item[0] elif nitem == 2: ext, ver = item else: ver_sent = False ext = item return ext, ver, ver_sent
python
def _extract_item(self, item): ver = 0 if isinstance(item, tuple): ver_sent = True nitem = len(item) if nitem == 1: ext = item[0] elif nitem == 2: ext, ver = item else: ver_sent = False ext = item return ext, ver, ver_sent
[ "def", "_extract_item", "(", "self", ",", "item", ")", ":", "ver", "=", "0", "if", "isinstance", "(", "item", ",", "tuple", ")", ":", "ver_sent", "=", "True", "nitem", "=", "len", "(", "item", ")", "if", "nitem", "==", "1", ":", "ext", "=", "item", "[", "0", "]", "elif", "nitem", "==", "2", ":", "ext", ",", "ver", "=", "item", "else", ":", "ver_sent", "=", "False", "ext", "=", "item", "return", "ext", ",", "ver", ",", "ver_sent" ]
utility function to extract an "item", meaning an extension number or name, plus an optional version.
[ "utility", "function", "to", "extract", "an", "item", "meaning", "a", "extension", "number", "name", "plus", "version", "." ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/fitslib.py#L1121-L1137
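_extract_item is what lets bracket lookups take either a bare extension or an (extname, extver) tuple. A sketch, assuming 'existing.fits' hypothetically contains two extensions named 'SCI':

    import fitsio

    with fitsio.FITS('existing.fits') as fits:
        hdu = fits['SCI']        # first extension named SCI
        hdu_v2 = fits['SCI', 2]  # the tuple form selects EXTVER 2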
esheldon/fitsio
fitsio/hdu/image.py
ImageHDU._update_info
def _update_info(self): """ Call parent method and make sure this is in fact a image HDU. Set dims in C order """ super(ImageHDU, self)._update_info() if self._info['hdutype'] != IMAGE_HDU: mess = "Extension %s is not a Image HDU" % self.ext raise ValueError(mess) # convert to c order if 'dims' in self._info: self._info['dims'] = list(reversed(self._info['dims']))
python
def _update_info(self): super(ImageHDU, self)._update_info() if self._info['hdutype'] != IMAGE_HDU: mess = "Extension %s is not a Image HDU" % self.ext raise ValueError(mess) if 'dims' in self._info: self._info['dims'] = list(reversed(self._info['dims']))
[ "def", "_update_info", "(", "self", ")", ":", "super", "(", "ImageHDU", ",", "self", ")", ".", "_update_info", "(", ")", "if", "self", ".", "_info", "[", "'hdutype'", "]", "!=", "IMAGE_HDU", ":", "mess", "=", "\"Extension %s is not a Image HDU\"", "%", "self", ".", "ext", "raise", "ValueError", "(", "mess", ")", "# convert to c order", "if", "'dims'", "in", "self", ".", "_info", ":", "self", ".", "_info", "[", "'dims'", "]", "=", "list", "(", "reversed", "(", "self", ".", "_info", "[", "'dims'", "]", ")", ")" ]
Call parent method and make sure this is in fact an image HDU. Set dims in C order
[ "Call", "parent", "method", "and", "make", "sure", "this", "is", "in", "fact", "a", "image", "HDU", ".", "Set", "dims", "in", "C", "order" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/image.py#L37-L50
esheldon/fitsio
fitsio/hdu/image.py
ImageHDU.reshape
def reshape(self, dims): """ reshape an existing image to the requested dimensions parameters ---------- dims: sequence Any sequence convertible to i8 """ adims = numpy.array(dims, ndmin=1, dtype='i8') self._FITS.reshape_image(self._ext+1, adims)
python
def reshape(self, dims): adims = numpy.array(dims, ndmin=1, dtype='i8') self._FITS.reshape_image(self._ext+1, adims)
[ "def", "reshape", "(", "self", ",", "dims", ")", ":", "adims", "=", "numpy", ".", "array", "(", "dims", ",", "ndmin", "=", "1", ",", "dtype", "=", "'i8'", ")", "self", ".", "_FITS", ".", "reshape_image", "(", "self", ".", "_ext", "+", "1", ",", "adims", ")" ]
reshape an existing image to the requested dimensions parameters ---------- dims: sequence Any sequence convertible to i8
[ "reshape", "an", "existing", "image", "to", "the", "requested", "dimensions" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/image.py#L91-L102
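A sketch of growing an on-disk image in place; 'img.fits' is a hypothetical file whose extension 0 is a 2-d image, and the file must be opened read-write.

    import fitsio

    with fitsio.FITS('img.fits', 'rw') as fits:
        hdu = fits[0]
        print(hdu.get_dims())    # e.g. [100, 100]
        hdu.reshape([200, 100])  # expand to 200 rows, keeping 100 columns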
esheldon/fitsio
fitsio/hdu/image.py
ImageHDU.write
def write(self, img, start=0, **keys): """ Write the image into this HDU If data already exist in this HDU, they will be overwritten. If the image to write is larger than the image on disk, or if the start position is such that the write would extend beyond the existing dimensions, the on-disk image is expanded. parameters ---------- img: ndarray A simple numpy ndarray start: integer or sequence Where to start writing data. Can be an integer offset into the entire array, or a sequence determining where in N-dimensional space to start. """ dims = self.get_dims() if img.dtype.fields is not None: raise ValueError("got recarray, expected regular ndarray") if img.size == 0: raise ValueError("data must have at least 1 row") # data must be c-contiguous and native byte order if not img.flags['C_CONTIGUOUS']: # this always makes a copy img_send = numpy.ascontiguousarray(img) array_to_native(img_send, inplace=True) else: img_send = array_to_native(img, inplace=False) if IS_PY3 and img_send.dtype.char == 'U': # for python3, we convert unicode to ascii # this will error if the character is not in ascii img_send = img_send.astype('S', copy=False) if not numpy.isscalar(start): # convert to scalar offset # note we use the on-disk data type to get itemsize offset = _convert_full_start_to_offset(dims, start) else: offset = start # see if we need to resize the image if self.has_data(): self._expand_if_needed(dims, img.shape, start, offset) self._FITS.write_image(self._ext+1, img_send, offset+1) self._update_info()
python
def write(self, img, start=0, **keys): dims = self.get_dims() if img.dtype.fields is not None: raise ValueError("got recarray, expected regular ndarray") if img.size == 0: raise ValueError("data must have at least 1 row") if not img.flags['C_CONTIGUOUS']: img_send = numpy.ascontiguousarray(img) array_to_native(img_send, inplace=True) else: img_send = array_to_native(img, inplace=False) if IS_PY3 and img_send.dtype.char == 'U': img_send = img_send.astype('S', copy=False) if not numpy.isscalar(start): offset = _convert_full_start_to_offset(dims, start) else: offset = start if self.has_data(): self._expand_if_needed(dims, img.shape, start, offset) self._FITS.write_image(self._ext+1, img_send, offset+1) self._update_info()
[ "def", "write", "(", "self", ",", "img", ",", "start", "=", "0", ",", "*", "*", "keys", ")", ":", "dims", "=", "self", ".", "get_dims", "(", ")", "if", "img", ".", "dtype", ".", "fields", "is", "not", "None", ":", "raise", "ValueError", "(", "\"got recarray, expected regular ndarray\"", ")", "if", "img", ".", "size", "==", "0", ":", "raise", "ValueError", "(", "\"data must have at least 1 row\"", ")", "# data must be c-contiguous and native byte order", "if", "not", "img", ".", "flags", "[", "'C_CONTIGUOUS'", "]", ":", "# this always makes a copy", "img_send", "=", "numpy", ".", "ascontiguousarray", "(", "img", ")", "array_to_native", "(", "img_send", ",", "inplace", "=", "True", ")", "else", ":", "img_send", "=", "array_to_native", "(", "img", ",", "inplace", "=", "False", ")", "if", "IS_PY3", "and", "img_send", ".", "dtype", ".", "char", "==", "'U'", ":", "# for python3, we convert unicode to ascii", "# this will error if the character is not in ascii", "img_send", "=", "img_send", ".", "astype", "(", "'S'", ",", "copy", "=", "False", ")", "if", "not", "numpy", ".", "isscalar", "(", "start", ")", ":", "# convert to scalar offset", "# note we use the on-disk data type to get itemsize", "offset", "=", "_convert_full_start_to_offset", "(", "dims", ",", "start", ")", "else", ":", "offset", "=", "start", "# see if we need to resize the image", "if", "self", ".", "has_data", "(", ")", ":", "self", ".", "_expand_if_needed", "(", "dims", ",", "img", ".", "shape", ",", "start", ",", "offset", ")", "self", ".", "_FITS", ".", "write_image", "(", "self", ".", "_ext", "+", "1", ",", "img_send", ",", "offset", "+", "1", ")", "self", ".", "_update_info", "(", ")" ]
Write the image into this HDU If data already exist in this HDU, they will be overwritten. If the image to write is larger than the image on disk, or if the start position is such that the write would extend beyond the existing dimensions, the on-disk image is expanded. parameters ---------- img: ndarray A simple numpy ndarray start: integer or sequence Where to start writing data. Can be an integer offset into the entire array, or a sequence determining where in N-dimensional space to start.
[ "Write", "the", "image", "into", "this", "HDU" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/image.py#L104-L156
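A sketch of an in-place write with an N-dimensional start position; 'img.fits' is a hypothetical read-write file whose extension 0 already holds a 2-d image. Per the docstring, a write that runs past the current edge expands the image on disk.

    import numpy
    import fitsio

    with fitsio.FITS('img.fits', 'rw') as fits:
        stamp = numpy.arange(16, dtype='f4').reshape(4, 4)
        # write the 4x4 stamp with its corner at row 10, column 10
        fits[0].write(stamp, start=[10, 10])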
esheldon/fitsio
fitsio/hdu/image.py
ImageHDU.read
def read(self, **keys): """ Read the image. If the HDU is an IMAGE_HDU, read the corresponding image. Compression and scaling are dealt with properly. """ if not self.has_data(): return None dtype, shape = self._get_dtype_and_shape() array = numpy.zeros(shape, dtype=dtype) self._FITS.read_image(self._ext+1, array) return array
python
def read(self, **keys): if not self.has_data(): return None dtype, shape = self._get_dtype_and_shape() array = numpy.zeros(shape, dtype=dtype) self._FITS.read_image(self._ext+1, array) return array
[ "def", "read", "(", "self", ",", "*", "*", "keys", ")", ":", "if", "not", "self", ".", "has_data", "(", ")", ":", "return", "None", "dtype", ",", "shape", "=", "self", ".", "_get_dtype_and_shape", "(", ")", "array", "=", "numpy", ".", "zeros", "(", "shape", ",", "dtype", "=", "dtype", ")", "self", ".", "_FITS", ".", "read_image", "(", "self", ".", "_ext", "+", "1", ",", "array", ")", "return", "array" ]
Read the image. If the HDU is an IMAGE_HDU, read the corresponding image. Compression and scaling are dealt with properly.
[ "Read", "the", "image", "." ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/image.py#L158-L171
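A sketch of a full-image read; 'img.fits' is a hypothetical path.

    import fitsio

    with fitsio.FITS('img.fits') as fits:
        img = fits[0].read()  # None is returned when the HDU has no data
        if img is not None:
            print(img.shape, img.dtype)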
esheldon/fitsio
fitsio/hdu/image.py
ImageHDU._get_dtype_and_shape
def _get_dtype_and_shape(self): """ Get the numpy dtype and shape for image """ npy_dtype = self._get_image_numpy_dtype() if self._info['ndims'] != 0: shape = self._info['dims'] else: raise IOError("no image present in HDU") return npy_dtype, shape
python
def _get_dtype_and_shape(self): npy_dtype = self._get_image_numpy_dtype() if self._info['ndims'] != 0: shape = self._info['dims'] else: raise IOError("no image present in HDU") return npy_dtype, shape
[ "def", "_get_dtype_and_shape", "(", "self", ")", ":", "npy_dtype", "=", "self", ".", "_get_image_numpy_dtype", "(", ")", "if", "self", ".", "_info", "[", "'ndims'", "]", "!=", "0", ":", "shape", "=", "self", ".", "_info", "[", "'dims'", "]", "else", ":", "raise", "IOError", "(", "\"no image present in HDU\"", ")", "return", "npy_dtype", ",", "shape" ]
Get the numpy dtype and shape for image
[ "Get", "the", "numpy", "dtype", "and", "shape", "for", "image" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/image.py#L173-L184
esheldon/fitsio
fitsio/hdu/image.py
ImageHDU._get_image_numpy_dtype
def _get_image_numpy_dtype(self): """ Get the numpy dtype for the image """ try: ftype = self._info['img_equiv_type'] npy_type = _image_bitpix2npy[ftype] except KeyError: raise KeyError("unsupported fits data type: %d" % ftype) return npy_type
python
def _get_image_numpy_dtype(self): try: ftype = self._info['img_equiv_type'] npy_type = _image_bitpix2npy[ftype] except KeyError: raise KeyError("unsupported fits data type: %d" % ftype) return npy_type
[ "def", "_get_image_numpy_dtype", "(", "self", ")", ":", "try", ":", "ftype", "=", "self", ".", "_info", "[", "'img_equiv_type'", "]", "npy_type", "=", "_image_bitpix2npy", "[", "ftype", "]", "except", "KeyError", ":", "raise", "KeyError", "(", "\"unsupported fits data type: %d\"", "%", "ftype", ")", "return", "npy_type" ]
Get the numpy dtype for the image
[ "Get", "the", "numpy", "dtype", "for", "the", "image" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/image.py#L186-L196
esheldon/fitsio
fitsio/hdu/image.py
ImageHDU._read_image_slice
def _read_image_slice(self, arg): """ workhorse to read a slice """ if 'ndims' not in self._info: raise ValueError("Attempt to slice empty extension") if isinstance(arg, slice): # one-dimensional, e.g. 2:20 return self._read_image_slice((arg,)) if not isinstance(arg, tuple): raise ValueError("arguments must be slices, one for each " "dimension, e.g. [2:5] or [2:5,8:25] etc.") # should be a tuple of slices, one for each dimension # e.g. [2:3, 8:100] nd = len(arg) if nd != self._info['ndims']: raise ValueError("Got slice dimensions %d, " "expected %d" % (nd, self._info['ndims'])) targ = arg arg = [] for a in targ: if isinstance(a, slice): arg.append(a) elif isinstance(a, int): arg.append(slice(a, a+1, 1)) else: raise ValueError("arguments must be slices, e.g. 2:12") dims = self._info['dims'] arrdims = [] first = [] last = [] steps = [] # check the args and reverse dimensions since # fits is backwards from numpy dim = 0 for slc in arg: start = slc.start stop = slc.stop step = slc.step if start is None: start = 0 if stop is None: stop = dims[dim] if step is None: step = 1 if step < 1: raise ValueError("slice steps must be >= 1") if start < 0: start = dims[dim] + start if start < 0: raise IndexError("Index out of bounds") if stop < 0: stop = dims[dim] + start + 1 # move to 1-offset start = start + 1 if stop < start: raise ValueError("python slices but include at least one " "element, got %s" % slc) if stop > dims[dim]: stop = dims[dim] first.append(start) last.append(stop) steps.append(step) arrdims.append(stop-start+1) dim += 1 first.reverse() last.reverse() steps.reverse() first = numpy.array(first, dtype='i8') last = numpy.array(last, dtype='i8') steps = numpy.array(steps, dtype='i8') npy_dtype = self._get_image_numpy_dtype() array = numpy.zeros(arrdims, dtype=npy_dtype) self._FITS.read_image_slice(self._ext+1, first, last, steps, array) return array
python
def _read_image_slice(self, arg): if 'ndims' not in self._info: raise ValueError("Attempt to slice empty extension") if isinstance(arg, slice): return self._read_image_slice((arg,)) if not isinstance(arg, tuple): raise ValueError("arguments must be slices, one for each " "dimension, e.g. [2:5] or [2:5,8:25] etc.") nd = len(arg) if nd != self._info['ndims']: raise ValueError("Got slice dimensions %d, " "expected %d" % (nd, self._info['ndims'])) targ = arg arg = [] for a in targ: if isinstance(a, slice): arg.append(a) elif isinstance(a, int): arg.append(slice(a, a+1, 1)) else: raise ValueError("arguments must be slices, e.g. 2:12") dims = self._info['dims'] arrdims = [] first = [] last = [] steps = [] dim = 0 for slc in arg: start = slc.start stop = slc.stop step = slc.step if start is None: start = 0 if stop is None: stop = dims[dim] if step is None: step = 1 if step < 1: raise ValueError("slice steps must be >= 1") if start < 0: start = dims[dim] + start if start < 0: raise IndexError("Index out of bounds") if stop < 0: stop = dims[dim] + start + 1 start = start + 1 if stop < start: raise ValueError("python slices but include at least one " "element, got %s" % slc) if stop > dims[dim]: stop = dims[dim] first.append(start) last.append(stop) steps.append(step) arrdims.append(stop-start+1) dim += 1 first.reverse() last.reverse() steps.reverse() first = numpy.array(first, dtype='i8') last = numpy.array(last, dtype='i8') steps = numpy.array(steps, dtype='i8') npy_dtype = self._get_image_numpy_dtype() array = numpy.zeros(arrdims, dtype=npy_dtype) self._FITS.read_image_slice(self._ext+1, first, last, steps, array) return array
[ "def", "_read_image_slice", "(", "self", ",", "arg", ")", ":", "if", "'ndims'", "not", "in", "self", ".", "_info", ":", "raise", "ValueError", "(", "\"Attempt to slice empty extension\"", ")", "if", "isinstance", "(", "arg", ",", "slice", ")", ":", "# one-dimensional, e.g. 2:20", "return", "self", ".", "_read_image_slice", "(", "(", "arg", ",", ")", ")", "if", "not", "isinstance", "(", "arg", ",", "tuple", ")", ":", "raise", "ValueError", "(", "\"arguments must be slices, one for each \"", "\"dimension, e.g. [2:5] or [2:5,8:25] etc.\"", ")", "# should be a tuple of slices, one for each dimension", "# e.g. [2:3, 8:100]", "nd", "=", "len", "(", "arg", ")", "if", "nd", "!=", "self", ".", "_info", "[", "'ndims'", "]", ":", "raise", "ValueError", "(", "\"Got slice dimensions %d, \"", "\"expected %d\"", "%", "(", "nd", ",", "self", ".", "_info", "[", "'ndims'", "]", ")", ")", "targ", "=", "arg", "arg", "=", "[", "]", "for", "a", "in", "targ", ":", "if", "isinstance", "(", "a", ",", "slice", ")", ":", "arg", ".", "append", "(", "a", ")", "elif", "isinstance", "(", "a", ",", "int", ")", ":", "arg", ".", "append", "(", "slice", "(", "a", ",", "a", "+", "1", ",", "1", ")", ")", "else", ":", "raise", "ValueError", "(", "\"arguments must be slices, e.g. 2:12\"", ")", "dims", "=", "self", ".", "_info", "[", "'dims'", "]", "arrdims", "=", "[", "]", "first", "=", "[", "]", "last", "=", "[", "]", "steps", "=", "[", "]", "# check the args and reverse dimensions since", "# fits is backwards from numpy", "dim", "=", "0", "for", "slc", "in", "arg", ":", "start", "=", "slc", ".", "start", "stop", "=", "slc", ".", "stop", "step", "=", "slc", ".", "step", "if", "start", "is", "None", ":", "start", "=", "0", "if", "stop", "is", "None", ":", "stop", "=", "dims", "[", "dim", "]", "if", "step", "is", "None", ":", "step", "=", "1", "if", "step", "<", "1", ":", "raise", "ValueError", "(", "\"slice steps must be >= 1\"", ")", "if", "start", "<", "0", ":", "start", "=", "dims", "[", "dim", "]", "+", "start", "if", "start", "<", "0", ":", "raise", "IndexError", "(", "\"Index out of bounds\"", ")", "if", "stop", "<", "0", ":", "stop", "=", "dims", "[", "dim", "]", "+", "start", "+", "1", "# move to 1-offset", "start", "=", "start", "+", "1", "if", "stop", "<", "start", ":", "raise", "ValueError", "(", "\"python slices but include at least one \"", "\"element, got %s\"", "%", "slc", ")", "if", "stop", ">", "dims", "[", "dim", "]", ":", "stop", "=", "dims", "[", "dim", "]", "first", ".", "append", "(", "start", ")", "last", ".", "append", "(", "stop", ")", "steps", ".", "append", "(", "step", ")", "arrdims", ".", "append", "(", "stop", "-", "start", "+", "1", ")", "dim", "+=", "1", "first", ".", "reverse", "(", ")", "last", ".", "reverse", "(", ")", "steps", ".", "reverse", "(", ")", "first", "=", "numpy", ".", "array", "(", "first", ",", "dtype", "=", "'i8'", ")", "last", "=", "numpy", ".", "array", "(", "last", ",", "dtype", "=", "'i8'", ")", "steps", "=", "numpy", ".", "array", "(", "steps", ",", "dtype", "=", "'i8'", ")", "npy_dtype", "=", "self", ".", "_get_image_numpy_dtype", "(", ")", "array", "=", "numpy", ".", "zeros", "(", "arrdims", ",", "dtype", "=", "npy_dtype", ")", "self", ".", "_FITS", ".", "read_image_slice", "(", "self", ".", "_ext", "+", "1", ",", "first", ",", "last", ",", "steps", ",", "array", ")", "return", "array" ]
workhorse to read a slice
[ "workhorse", "to", "read", "a", "slice" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/image.py#L206-L295
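This method appears to be the workhorse behind bracket access on image HDUs, so slices read only the requested region from disk. A sketch, assuming 'img.fits' hypothetically holds a 2-d image in extension 0:

    import fitsio

    with fitsio.FITS('img.fits') as fits:
        cutout = fits[0][2:20, 10:30]  # one slice per dimension, step >= 1
        row = fits[0][5, :]            # integers become length-1 slices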
esheldon/fitsio
fitsio/hdu/image.py
ImageHDU._expand_if_needed
def _expand_if_needed(self, dims, write_dims, start, offset): """ expand the on-disk image if the indended write will extend beyond the existing dimensions """ from operator import mul if numpy.isscalar(start): start_is_scalar = True else: start_is_scalar = False existing_size = reduce(mul, dims, 1) required_size = offset + reduce(mul, write_dims, 1) if required_size > existing_size: print( " required size:", required_size, "existing size:", existing_size) # we need to expand the image ndim = len(dims) idim = len(write_dims) if start_is_scalar: if start == 0: start = [0]*ndim else: raise ValueError( "When expanding " "an existing image while writing, the start keyword " "must have the same number of dimensions " "as the image or be exactly 0, got %s " % start) if idim != ndim: raise ValueError( "When expanding " "an existing image while writing, the input image " "must have the same number of dimensions " "as the original. " "Got %d instead of %d" % (idim, ndim)) new_dims = [] for i in xrange(ndim): required_dim = start[i] + write_dims[i] if required_dim < dims[i]: # careful not to shrink the image! dimsize = dims[i] else: dimsize = required_dim new_dims.append(dimsize) print(" reshaping image to:", new_dims) self.reshape(new_dims)
python
def _expand_if_needed(self, dims, write_dims, start, offset): from operator import mul if numpy.isscalar(start): start_is_scalar = True else: start_is_scalar = False existing_size = reduce(mul, dims, 1) required_size = offset + reduce(mul, write_dims, 1) if required_size > existing_size: print( " required size:", required_size, "existing size:", existing_size) ndim = len(dims) idim = len(write_dims) if start_is_scalar: if start == 0: start = [0]*ndim else: raise ValueError( "When expanding " "an existing image while writing, the start keyword " "must have the same number of dimensions " "as the image or be exactly 0, got %s " % start) if idim != ndim: raise ValueError( "When expanding " "an existing image while writing, the input image " "must have the same number of dimensions " "as the original. " "Got %d instead of %d" % (idim, ndim)) new_dims = [] for i in xrange(ndim): required_dim = start[i] + write_dims[i] if required_dim < dims[i]: dimsize = dims[i] else: dimsize = required_dim new_dims.append(dimsize) print(" reshaping image to:", new_dims) self.reshape(new_dims)
[ "def", "_expand_if_needed", "(", "self", ",", "dims", ",", "write_dims", ",", "start", ",", "offset", ")", ":", "from", "operator", "import", "mul", "if", "numpy", ".", "isscalar", "(", "start", ")", ":", "start_is_scalar", "=", "True", "else", ":", "start_is_scalar", "=", "False", "existing_size", "=", "reduce", "(", "mul", ",", "dims", ",", "1", ")", "required_size", "=", "offset", "+", "reduce", "(", "mul", ",", "write_dims", ",", "1", ")", "if", "required_size", ">", "existing_size", ":", "print", "(", "\" required size:\"", ",", "required_size", ",", "\"existing size:\"", ",", "existing_size", ")", "# we need to expand the image", "ndim", "=", "len", "(", "dims", ")", "idim", "=", "len", "(", "write_dims", ")", "if", "start_is_scalar", ":", "if", "start", "==", "0", ":", "start", "=", "[", "0", "]", "*", "ndim", "else", ":", "raise", "ValueError", "(", "\"When expanding \"", "\"an existing image while writing, the start keyword \"", "\"must have the same number of dimensions \"", "\"as the image or be exactly 0, got %s \"", "%", "start", ")", "if", "idim", "!=", "ndim", ":", "raise", "ValueError", "(", "\"When expanding \"", "\"an existing image while writing, the input image \"", "\"must have the same number of dimensions \"", "\"as the original. \"", "\"Got %d instead of %d\"", "%", "(", "idim", ",", "ndim", ")", ")", "new_dims", "=", "[", "]", "for", "i", "in", "xrange", "(", "ndim", ")", ":", "required_dim", "=", "start", "[", "i", "]", "+", "write_dims", "[", "i", "]", "if", "required_dim", "<", "dims", "[", "i", "]", ":", "# careful not to shrink the image!", "dimsize", "=", "dims", "[", "i", "]", "else", ":", "dimsize", "=", "required_dim", "new_dims", ".", "append", "(", "dimsize", ")", "print", "(", "\" reshaping image to:\"", ",", "new_dims", ")", "self", ".", "reshape", "(", "new_dims", ")" ]
expand the on-disk image if the intended write will extend beyond the existing dimensions
[ "expand", "the", "on", "-", "disk", "image", "if", "the", "indended", "write", "will", "extend", "beyond", "the", "existing", "dimensions" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/image.py#L297-L350
esheldon/fitsio
fitsio/hdu/base.py
HDUBase.get_extname
def get_extname(self): """ Get the name for this extension, can be an empty string """ name = self._info['extname'] if name.strip() == '': name = self._info['hduname'] return name.strip()
python
def get_extname(self): name = self._info['extname'] if name.strip() == '': name = self._info['hduname'] return name.strip()
[ "def", "get_extname", "(", "self", ")", ":", "name", "=", "self", ".", "_info", "[", "'extname'", "]", "if", "name", ".", "strip", "(", ")", "==", "''", ":", "name", "=", "self", ".", "_info", "[", "'hduname'", "]", "return", "name", ".", "strip", "(", ")" ]
Get the name for this extension, can be an empty string
[ "Get", "the", "name", "for", "this", "extension", "can", "be", "an", "empty", "string" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/base.py#L59-L66
esheldon/fitsio
fitsio/hdu/base.py
HDUBase.get_extver
def get_extver(self): """ Get the version for this extension. Used when a name is given to multiple extensions """ ver = self._info['extver'] if ver == 0: ver = self._info['hduver'] return ver
python
def get_extver(self): ver = self._info['extver'] if ver == 0: ver = self._info['hduver'] return ver
[ "def", "get_extver", "(", "self", ")", ":", "ver", "=", "self", ".", "_info", "[", "'extver'", "]", "if", "ver", "==", "0", ":", "ver", "=", "self", ".", "_info", "[", "'hduver'", "]", "return", "ver" ]
Get the version for this extension. Used when a name is given to multiple extensions
[ "Get", "the", "version", "for", "this", "extension", "." ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/base.py#L68-L77
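A sketch combining get_extname and get_extver; 'existing.fits' is a hypothetical multi-extension file. Both methods fall back to the HDUNAME/HDUVER variants when EXTNAME/EXTVER are absent.

    import fitsio

    with fitsio.FITS('existing.fits') as fits:
        for hdu in fits:
            name = hdu.get_extname()  # '' when no name was written
            ver = hdu.get_extver()    # 0 when no version was written
            print(name, ver)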
esheldon/fitsio
fitsio/hdu/base.py
HDUBase.get_exttype
def get_exttype(self, num=False): """ Get the extension type By default the result is a string that mirrors the enumerated type names in cfitsio 'IMAGE_HDU', 'ASCII_TBL', 'BINARY_TBL' which have numeric values 0 1 2 send num=True to get the numbers. The values fitsio.IMAGE_HDU .ASCII_TBL, and .BINARY_TBL are available for comparison parameters ---------- num: bool, optional Return the numeric values. """ if num: return self._info['hdutype'] else: name = _hdu_type_map[self._info['hdutype']] return name
python
def get_exttype(self, num=False): if num: return self._info['hdutype'] else: name = _hdu_type_map[self._info['hdutype']] return name
[ "def", "get_exttype", "(", "self", ",", "num", "=", "False", ")", ":", "if", "num", ":", "return", "self", ".", "_info", "[", "'hdutype'", "]", "else", ":", "name", "=", "_hdu_type_map", "[", "self", ".", "_info", "[", "'hdutype'", "]", "]", "return", "name" ]
Get the extension type By default the result is a string that mirrors the enumerated type names in cfitsio 'IMAGE_HDU', 'ASCII_TBL', 'BINARY_TBL' which have numeric values 0, 1, 2. Send num=True to get the numbers. The values fitsio.IMAGE_HDU, .ASCII_TBL, and .BINARY_TBL are available for comparison parameters ---------- num: bool, optional Return the numeric values.
[ "Get", "the", "extension", "type" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/base.py#L79-L101
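A sketch of both return styles; 'existing.fits' is a hypothetical file whose extension 1 is a table.

    import fitsio

    with fitsio.FITS('existing.fits') as fits:
        hdu = fits[1]
        print(hdu.get_exttype())  # e.g. 'BINARY_TBL'
        if hdu.get_exttype(num=True) == fitsio.BINARY_TBL:
            print('extension 1 is a binary table')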
esheldon/fitsio
fitsio/hdu/base.py
HDUBase.get_offsets
def get_offsets(self): """ returns ------- a dictionary with these entries header_start: byte offset from beginning of the file to the start of the header data_start: byte offset from beginning of the file to the start of the data section data_end: byte offset from beginning of the file to the end of the data section Note these are also in the information dictionary, which you can access with get_info() """ return dict( header_start=self._info['header_start'], data_start=self._info['data_start'], data_end=self._info['data_end'], )
python
def get_offsets(self): return dict( header_start=self._info['header_start'], data_start=self._info['data_start'], data_end=self._info['data_end'], )
[ "def", "get_offsets", "(", "self", ")", ":", "return", "dict", "(", "header_start", "=", "self", ".", "_info", "[", "'header_start'", "]", ",", "data_start", "=", "self", ".", "_info", "[", "'data_start'", "]", ",", "data_end", "=", "self", ".", "_info", "[", "'data_end'", "]", ",", ")" ]
returns ------- a dictionary with these entries header_start: byte offset from beginning of the file to the start of the header data_start: byte offset from beginning of the file to the start of the data section data_end: byte offset from beginning of the file to the end of the data section Note these are also in the information dictionary, which you can access with get_info()
[ "returns", "-------", "a", "dictionary", "with", "these", "entries" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/base.py#L103-L126
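A sketch printing the byte layout of the first HDU; 'existing.fits' is a hypothetical path.

    import fitsio

    with fitsio.FITS('existing.fits') as fits:
        off = fits[0].get_offsets()
        print(off['header_start'], off['data_start'], off['data_end'])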
esheldon/fitsio
fitsio/hdu/base.py
HDUBase.verify_checksum
def verify_checksum(self): """ Verify the checksum in the header for this HDU. """ res = self._FITS.verify_checksum(self._ext+1) if res['dataok'] != 1: raise ValueError("data checksum failed") if res['hduok'] != 1: raise ValueError("hdu checksum failed")
python
def verify_checksum(self): res = self._FITS.verify_checksum(self._ext+1) if res['dataok'] != 1: raise ValueError("data checksum failed") if res['hduok'] != 1: raise ValueError("hdu checksum failed")
[ "def", "verify_checksum", "(", "self", ")", ":", "res", "=", "self", ".", "_FITS", ".", "verify_checksum", "(", "self", ".", "_ext", "+", "1", ")", "if", "res", "[", "'dataok'", "]", "!=", "1", ":", "raise", "ValueError", "(", "\"data checksum failed\"", ")", "if", "res", "[", "'hduok'", "]", "!=", "1", ":", "raise", "ValueError", "(", "\"hdu checksum failed\"", ")" ]
Verify the checksum in the header for this HDU.
[ "Verify", "the", "checksum", "in", "the", "header", "for", "this", "HDU", "." ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/base.py#L153-L161
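A sketch, assuming 'existing.fits' is a hypothetical file whose first HDU was written with CHECKSUM/DATASUM keys; the method raises ValueError on failure rather than returning a status.

    import fitsio

    with fitsio.FITS('existing.fits') as fits:
        try:
            fits[0].verify_checksum()
            print('checksums verified')
        except ValueError as err:
            print('verification failed:', err)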
esheldon/fitsio
fitsio/hdu/base.py
HDUBase.write_comment
def write_comment(self, comment): """ Write a comment into the header """ self._FITS.write_comment(self._ext+1, str(comment))
python
def write_comment(self, comment): self._FITS.write_comment(self._ext+1, str(comment))
[ "def", "write_comment", "(", "self", ",", "comment", ")", ":", "self", ".", "_FITS", ".", "write_comment", "(", "self", ".", "_ext", "+", "1", ",", "str", "(", "comment", ")", ")" ]
Write a comment into the header
[ "Write", "a", "comment", "into", "the", "header" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/base.py#L163-L167
esheldon/fitsio
fitsio/hdu/base.py
HDUBase.write_history
def write_history(self, history): """ Write history text into the header """ self._FITS.write_history(self._ext+1, str(history))
python
def write_history(self, history): self._FITS.write_history(self._ext+1, str(history))
[ "def", "write_history", "(", "self", ",", "history", ")", ":", "self", ".", "_FITS", ".", "write_history", "(", "self", ".", "_ext", "+", "1", ",", "str", "(", "history", ")", ")" ]
Write history text into the header
[ "Write", "history", "text", "into", "the", "header" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/base.py#L169-L173
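A sketch using write_comment and write_history together on a freshly written table; 'demo.fits' and clobber=True are hypothetical.

    import numpy
    import fitsio

    with fitsio.FITS('demo.fits', 'rw', clobber=True) as fits:
        fits.write_table(numpy.zeros(1, dtype=[('x', 'f8')]))
        fits[-1].write_comment('produced by a usage sketch')
        fits[-1].write_history('table created for demonstration')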
esheldon/fitsio
fitsio/hdu/base.py
HDUBase._write_continue
def _write_continue(self, value): """ Write history text into the header """ self._FITS.write_continue(self._ext+1, str(value))
python
def _write_continue(self, value): self._FITS.write_continue(self._ext+1, str(value))
[ "def", "_write_continue", "(", "self", ",", "value", ")", ":", "self", ".", "_FITS", ".", "write_continue", "(", "self", ".", "_ext", "+", "1", ",", "str", "(", "value", ")", ")" ]
Write a CONTINUE card into the header
[ "Write", "history", "text", "into", "the", "header" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/base.py#L175-L179
esheldon/fitsio
fitsio/hdu/base.py
HDUBase.write_key
def write_key(self, name, value, comment=""): """ Write the input value to the header parameters ---------- name: string Name of keyword to write/update value: scalar Value to write, can be string float or integer type, including numpy scalar types. comment: string, optional An optional comment to write for this key Notes ----- Write COMMENT and HISTORY using the write_comment and write_history methods """ if value is None: self._FITS.write_undefined_key(self._ext+1, str(name), str(comment)) elif isinstance(value, bool): if value: v = 1 else: v = 0 self._FITS.write_logical_key(self._ext+1, str(name), v, str(comment)) elif isinstance(value, _stypes): self._FITS.write_string_key(self._ext+1, str(name), str(value), str(comment)) elif isinstance(value, _ftypes): self._FITS.write_double_key(self._ext+1, str(name), float(value), str(comment)) elif isinstance(value, _itypes): self._FITS.write_long_key(self._ext+1, str(name), int(value), str(comment)) elif isinstance(value, (tuple, list)): vl = [str(el) for el in value] sval = ','.join(vl) self._FITS.write_string_key(self._ext+1, str(name), sval, str(comment)) else: sval = str(value) mess = ( "warning, keyword '%s' has non-standard " "value type %s, " "Converting to string: '%s'") warnings.warn(mess % (name, type(value), sval), FITSRuntimeWarning) self._FITS.write_string_key(self._ext+1, str(name), sval, str(comment))
python
def write_key(self, name, value, comment=""): if value is None: self._FITS.write_undefined_key(self._ext+1, str(name), str(comment)) elif isinstance(value, bool): if value: v = 1 else: v = 0 self._FITS.write_logical_key(self._ext+1, str(name), v, str(comment)) elif isinstance(value, _stypes): self._FITS.write_string_key(self._ext+1, str(name), str(value), str(comment)) elif isinstance(value, _ftypes): self._FITS.write_double_key(self._ext+1, str(name), float(value), str(comment)) elif isinstance(value, _itypes): self._FITS.write_long_key(self._ext+1, str(name), int(value), str(comment)) elif isinstance(value, (tuple, list)): vl = [str(el) for el in value] sval = ','.join(vl) self._FITS.write_string_key(self._ext+1, str(name), sval, str(comment)) else: sval = str(value) mess = ( "warning, keyword '%s' has non-standard " "value type %s, " "Converting to string: '%s'") warnings.warn(mess % (name, type(value), sval), FITSRuntimeWarning) self._FITS.write_string_key(self._ext+1, str(name), sval, str(comment))
[ "def", "write_key", "(", "self", ",", "name", ",", "value", ",", "comment", "=", "\"\"", ")", ":", "if", "value", "is", "None", ":", "self", ".", "_FITS", ".", "write_undefined_key", "(", "self", ".", "_ext", "+", "1", ",", "str", "(", "name", ")", ",", "str", "(", "comment", ")", ")", "elif", "isinstance", "(", "value", ",", "bool", ")", ":", "if", "value", ":", "v", "=", "1", "else", ":", "v", "=", "0", "self", ".", "_FITS", ".", "write_logical_key", "(", "self", ".", "_ext", "+", "1", ",", "str", "(", "name", ")", ",", "v", ",", "str", "(", "comment", ")", ")", "elif", "isinstance", "(", "value", ",", "_stypes", ")", ":", "self", ".", "_FITS", ".", "write_string_key", "(", "self", ".", "_ext", "+", "1", ",", "str", "(", "name", ")", ",", "str", "(", "value", ")", ",", "str", "(", "comment", ")", ")", "elif", "isinstance", "(", "value", ",", "_ftypes", ")", ":", "self", ".", "_FITS", ".", "write_double_key", "(", "self", ".", "_ext", "+", "1", ",", "str", "(", "name", ")", ",", "float", "(", "value", ")", ",", "str", "(", "comment", ")", ")", "elif", "isinstance", "(", "value", ",", "_itypes", ")", ":", "self", ".", "_FITS", ".", "write_long_key", "(", "self", ".", "_ext", "+", "1", ",", "str", "(", "name", ")", ",", "int", "(", "value", ")", ",", "str", "(", "comment", ")", ")", "elif", "isinstance", "(", "value", ",", "(", "tuple", ",", "list", ")", ")", ":", "vl", "=", "[", "str", "(", "el", ")", "for", "el", "in", "value", "]", "sval", "=", "','", ".", "join", "(", "vl", ")", "self", ".", "_FITS", ".", "write_string_key", "(", "self", ".", "_ext", "+", "1", ",", "str", "(", "name", ")", ",", "sval", ",", "str", "(", "comment", ")", ")", "else", ":", "sval", "=", "str", "(", "value", ")", "mess", "=", "(", "\"warning, keyword '%s' has non-standard \"", "\"value type %s, \"", "\"Converting to string: '%s'\"", ")", "warnings", ".", "warn", "(", "mess", "%", "(", "name", ",", "type", "(", "value", ")", ",", "sval", ")", ",", "FITSRuntimeWarning", ")", "self", ".", "_FITS", ".", "write_string_key", "(", "self", ".", "_ext", "+", "1", ",", "str", "(", "name", ")", ",", "sval", ",", "str", "(", "comment", ")", ")" ]
Write the input value to the header parameters ---------- name: string Name of keyword to write/update value: scalar Value to write, can be string, float or integer type, including numpy scalar types. comment: string, optional An optional comment to write for this key Notes ----- Write COMMENT and HISTORY using the write_comment and write_history methods
[ "Write", "the", "input", "value", "to", "the", "header" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/base.py#L181-L247
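A sketch of the value types write_key dispatches on; 'img.fits' is a hypothetical read-write file, and the keyword names are made up.

    import fitsio

    with fitsio.FITS('img.fits', 'rw') as fits:
        hdu = fits[0]
        hdu.write_key('EXPTIME', 30.0, comment='exposure time in seconds')
        hdu.write_key('OBSERVER', 'A. Nobody')  # strings use write_string_key
        hdu.write_key('COADD', True)            # booleans become FITS logicals
        hdu.write_key('UNDEF', None)            # None writes an undefined value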
esheldon/fitsio
fitsio/hdu/base.py
HDUBase.write_keys
def write_keys(self, records_in, clean=True): """ Write the keywords to the header. parameters ---------- records: FITSHDR or list or dict Can be one of these: - FITSHDR object - list of dictionaries containing 'name','value' and optionally a 'comment' field; the order is preserved. - a dictionary of keyword-value pairs; no comments are written in this case, and the order is arbitrary. clean: boolean If True, trim out the standard fits header keywords that are created on HDU creation, such as EXTEND, SIMPLE, STTYPE, TFORM, TDIM, XTENSION, BITPIX, NAXIS, etc. Notes ----- Input keys named COMMENT and HISTORY are written using the write_comment and write_history methods. """ if isinstance(records_in, FITSHDR): hdr = records_in else: hdr = FITSHDR(records_in) if clean: is_table = hasattr(self, '_table_type_str') # is_table = isinstance(self, TableHDU) hdr.clean(is_table=is_table) for r in hdr.records(): name = r['name'].upper() value = r['value'] if name == 'COMMENT': self.write_comment(value) elif name == 'HISTORY': self.write_history(value) elif name == 'CONTINUE': self._write_continue(value) else: comment = r.get('comment', '') self.write_key(name, value, comment=comment)
python
def write_keys(self, records_in, clean=True):
    if isinstance(records_in, FITSHDR):
        hdr = records_in
    else:
        hdr = FITSHDR(records_in)

    if clean:
        is_table = hasattr(self, '_table_type_str')
        hdr.clean(is_table=is_table)

    for r in hdr.records():
        name = r['name'].upper()
        value = r['value']

        if name == 'COMMENT':
            self.write_comment(value)
        elif name == 'HISTORY':
            self.write_history(value)
        elif name == 'CONTINUE':
            self._write_continue(value)
        else:
            comment = r.get('comment', '')
            self.write_key(name, value, comment=comment)
[ "def", "write_keys", "(", "self", ",", "records_in", ",", "clean", "=", "True", ")", ":", "if", "isinstance", "(", "records_in", ",", "FITSHDR", ")", ":", "hdr", "=", "records_in", "else", ":", "hdr", "=", "FITSHDR", "(", "records_in", ")", "if", "clean", ":", "is_table", "=", "hasattr", "(", "self", ",", "'_table_type_str'", ")", "# is_table = isinstance(self, TableHDU)", "hdr", ".", "clean", "(", "is_table", "=", "is_table", ")", "for", "r", "in", "hdr", ".", "records", "(", ")", ":", "name", "=", "r", "[", "'name'", "]", ".", "upper", "(", ")", "value", "=", "r", "[", "'value'", "]", "if", "name", "==", "'COMMENT'", ":", "self", ".", "write_comment", "(", "value", ")", "elif", "name", "==", "'HISTORY'", ":", "self", ".", "write_history", "(", "value", ")", "elif", "name", "==", "'CONTINUE'", ":", "self", ".", "_write_continue", "(", "value", ")", "else", ":", "comment", "=", "r", ".", "get", "(", "'comment'", ",", "''", ")", "self", ".", "write_key", "(", "name", ",", "value", ",", "comment", "=", "comment", ")" ]
Write the keywords to the header.

parameters
----------
records_in: FITSHDR or list or dict
    Can be one of these:
        - FITSHDR object
        - list of dictionaries containing 'name','value' and optionally
          a 'comment' field; the order is preserved.
        - a dictionary of keyword-value pairs; no comments are written
          in this case, and the order is arbitrary.
clean: boolean
    If True, trim out the standard fits header keywords that are
    created on HDU creation, such as EXTEND, SIMPLE, TTYPE, TFORM,
    TDIM, XTENSION, BITPIX, NAXIS, etc.

Notes
-----
Input keys named COMMENT and HISTORY are written using the
write_comment and write_history methods.
[ "Write", "the", "keywords", "to", "the", "header", "." ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/base.py#L249-L295
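A minimal sketch of write_keys, assuming an existing file such as the hypothetical 'example.fits' from the earlier sketch; note how COMMENT entries are routed through write_comment rather than write_key.

import fitsio

records = [
    {'name': 'OBSERVER', 'value': 'J. Doe', 'comment': 'who took the data'},
    {'name': 'EXPTIME', 'value': 30.0, 'comment': 'exposure time in seconds'},
    {'name': 'COMMENT', 'value': 'this line goes through write_comment'},
]

with fitsio.FITS('example.fits', 'rw') as fits:
    # clean=True (the default) strips reserved structural keywords first
    fits[-1].write_keys(records)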
esheldon/fitsio
fitsio/hdu/base.py
HDUBase._update_info
def _update_info(self):
    """
    Update metadata for this HDU
    """
    try:
        self._FITS.movabs_hdu(self._ext+1)
    except IOError:
        raise RuntimeError("no such hdu")

    self._info = self._FITS.get_hdu_info(self._ext+1)
python
def _update_info(self):
    try:
        self._FITS.movabs_hdu(self._ext+1)
    except IOError:
        raise RuntimeError("no such hdu")

    self._info = self._FITS.get_hdu_info(self._ext+1)
[ "def", "_update_info", "(", "self", ")", ":", "try", ":", "self", ".", "_FITS", ".", "movabs_hdu", "(", "self", ".", "_ext", "+", "1", ")", "except", "IOError", ":", "raise", "RuntimeError", "(", "\"no such hdu\"", ")", "self", ".", "_info", "=", "self", ".", "_FITS", ".", "get_hdu_info", "(", "self", ".", "_ext", "+", "1", ")" ]
Update metadata for this HDU
[ "Update", "metadata", "for", "this", "HDU" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/base.py#L322-L331
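_update_info is internal plumbing rather than public API, so the sketch below is illustrative only; it assumes the hypothetical 'example.fits' from the earlier sketches and that get_info() exposes the cached metadata dict. cfitsio numbers HDUs from 1, which is why the method adds 1 to the zero-based extension.

import fitsio

with fitsio.FITS('example.fits', 'rw') as fits:
    hdu = fits[-1]
    hdu.write_key('NEWKEY', 1)
    hdu._update_info()               # refresh the cached metadata dict
    print(hdu.get_info()['hdunum'])  # 1-based HDU number from cfitsio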
esheldon/fitsio
fitsio/hdu/base.py
HDUBase._get_repr_list
def _get_repr_list(self):
    """
    Get some representation data common to all HDU types
    """
    spacing = ' '*2
    text = ['']
    text.append("%sfile: %s" % (spacing, self._filename))
    text.append("%sextension: %d" % (spacing, self._info['hdunum']-1))
    text.append(
        "%stype: %s" % (spacing, _hdu_type_map[self._info['hdutype']]))

    extname = self.get_extname()
    if extname != "":
        text.append("%sextname: %s" % (spacing, extname))
    extver = self.get_extver()
    if extver != 0:
        text.append("%sextver: %s" % (spacing, extver))

    return text, spacing
python
def _get_repr_list(self):
    spacing = ' '*2
    text = ['']
    text.append("%sfile: %s" % (spacing, self._filename))
    text.append("%sextension: %d" % (spacing, self._info['hdunum']-1))
    text.append(
        "%stype: %s" % (spacing, _hdu_type_map[self._info['hdutype']]))

    extname = self.get_extname()
    if extname != "":
        text.append("%sextname: %s" % (spacing, extname))
    extver = self.get_extver()
    if extver != 0:
        text.append("%sextver: %s" % (spacing, extver))

    return text, spacing
[ "def", "_get_repr_list", "(", "self", ")", ":", "spacing", "=", "' '", "*", "2", "text", "=", "[", "''", "]", "text", ".", "append", "(", "\"%sfile: %s\"", "%", "(", "spacing", ",", "self", ".", "_filename", ")", ")", "text", ".", "append", "(", "\"%sextension: %d\"", "%", "(", "spacing", ",", "self", ".", "_info", "[", "'hdunum'", "]", "-", "1", ")", ")", "text", ".", "append", "(", "\"%stype: %s\"", "%", "(", "spacing", ",", "_hdu_type_map", "[", "self", ".", "_info", "[", "'hdutype'", "]", "]", ")", ")", "extname", "=", "self", ".", "get_extname", "(", ")", "if", "extname", "!=", "\"\"", ":", "text", ".", "append", "(", "\"%sextname: %s\"", "%", "(", "spacing", ",", "extname", ")", ")", "extver", "=", "self", ".", "get_extver", "(", ")", "if", "extver", "!=", "0", ":", "text", ".", "append", "(", "\"%sextver: %s\"", "%", "(", "spacing", ",", "extver", ")", ")", "return", "text", ",", "spacing" ]
Get some representation data common to all HDU types
[ "Get", "some", "representation", "data", "common", "to", "all", "HDU", "types" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/base.py#L333-L351
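This helper backs the printed summary of an HDU. A quick way to see its output, again assuming the hypothetical 'example.fits':

import fitsio

with fitsio.FITS('example.fits') as fits:
    # the repr is built from _get_repr_list: file, extension, type,
    # plus extname/extver when they are set
    print(fits[0])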
esheldon/fitsio
fitsio/header.py
FITSHDR.add_record
def add_record(self, record_in):
    """
    Add a new record.  Strip quotes from around strings.

    This will over-write if the key already exists, except
    for COMMENT and HISTORY fields

    parameters
    ----------
    record:
        The record, either a dict or a header card string
        or a FITSRecord or FITSCard
    """

    if (isinstance(record_in, dict) and
            'name' in record_in and 'value' in record_in):
        record = {}
        record.update(record_in)
    else:
        record = FITSRecord(record_in)

    # only append when this name already exists if it is
    # a comment or history field, otherwise simply over-write
    key = record['name'].upper()

    key_exists = key in self._record_map

    if not key_exists or key in ('COMMENT', 'HISTORY', 'CONTINUE'):
        # append new record
        self._record_list.append(record)
        index = len(self._record_list)-1
        self._index_map[key] = index
    else:
        # over-write existing
        index = self._index_map[key]
        self._record_list[index] = record

    self._record_map[key] = record
python
def add_record(self, record_in):
    if (isinstance(record_in, dict) and
            'name' in record_in and 'value' in record_in):
        record = {}
        record.update(record_in)
    else:
        record = FITSRecord(record_in)

    key = record['name'].upper()

    key_exists = key in self._record_map

    if not key_exists or key in ('COMMENT', 'HISTORY', 'CONTINUE'):
        self._record_list.append(record)
        index = len(self._record_list)-1
        self._index_map[key] = index
    else:
        index = self._index_map[key]
        self._record_list[index] = record

    self._record_map[key] = record
[ "def", "add_record", "(", "self", ",", "record_in", ")", ":", "if", "(", "isinstance", "(", "record_in", ",", "dict", ")", "and", "'name'", "in", "record_in", "and", "'value'", "in", "record_in", ")", ":", "record", "=", "{", "}", "record", ".", "update", "(", "record_in", ")", "else", ":", "record", "=", "FITSRecord", "(", "record_in", ")", "# only append when this name already exists if it is", "# a comment or history field, otherwise simply over-write", "key", "=", "record", "[", "'name'", "]", ".", "upper", "(", ")", "key_exists", "=", "key", "in", "self", ".", "_record_map", "if", "not", "key_exists", "or", "key", "in", "(", "'COMMENT'", ",", "'HISTORY'", ",", "'CONTINUE'", ")", ":", "# append new record", "self", ".", "_record_list", ".", "append", "(", "record", ")", "index", "=", "len", "(", "self", ".", "_record_list", ")", "-", "1", "self", ".", "_index_map", "[", "key", "]", "=", "index", "else", ":", "# over-write existing", "index", "=", "self", ".", "_index_map", "[", "key", "]", "self", ".", "_record_list", "[", "index", "]", "=", "record", "self", ".", "_record_map", "[", "key", "]", "=", "record" ]
Add a new record.  Strip quotes from around strings.

This will over-write if the key already exists, except
for COMMENT and HISTORY fields

parameters
----------
record:
    The record, either a dict or a header card string
    or a FITSRecord or FITSCard
[ "Add", "a", "new", "record", ".", "Strip", "quotes", "from", "around", "strings", "." ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/header.py#L133-L174
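A minimal sketch of the append-versus-overwrite behavior, using only the FITSHDR API shown in this file:

from fitsio import FITSHDR

hdr = FITSHDR()
hdr.add_record({'name': 'TELESCOP', 'value': 'Keck', 'comment': 'original'})
hdr.add_record({'name': 'TELESCOP', 'value': 'VLT'})  # over-writes in place
hdr.add_record({'name': 'HISTORY', 'value': 'first pass'})
hdr.add_record({'name': 'HISTORY', 'value': 'second pass'})  # appended, not replaced

print(hdr['TELESCOP'])  # -> 'VLT'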
esheldon/fitsio
fitsio/header.py
FITSHDR.get_comment
def get_comment(self, item):
    """
    Get the comment for the requested entry
    """
    key = item.upper()
    if key not in self._record_map:
        raise KeyError("unknown record: %s" % key)

    if 'comment' not in self._record_map[key]:
        return None
    else:
        return self._record_map[key]['comment']
python
def get_comment(self, item):
    key = item.upper()
    if key not in self._record_map:
        raise KeyError("unknown record: %s" % key)

    if 'comment' not in self._record_map[key]:
        return None
    else:
        return self._record_map[key]['comment']
[ "def", "get_comment", "(", "self", ",", "item", ")", ":", "key", "=", "item", ".", "upper", "(", ")", "if", "key", "not", "in", "self", ".", "_record_map", ":", "raise", "KeyError", "(", "\"unknown record: %s\"", "%", "key", ")", "if", "'comment'", "not", "in", "self", ".", "_record_map", "[", "key", "]", ":", "return", "None", "else", ":", "return", "self", ".", "_record_map", "[", "key", "]", "[", "'comment'", "]" ]
Get the comment for the requested entry
[ "Get", "the", "comment", "for", "the", "requested", "entry" ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/header.py#L180-L191
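A short sketch of get_comment. The lookup is case-insensitive because both add_record and get_comment upper-case the name; a record stored without a comment yields None, and an unknown key raises KeyError.

from fitsio import FITSHDR

hdr = FITSHDR()
hdr.add_record({'name': 'EXPTIME', 'value': 30.0, 'comment': 'seconds'})
hdr.add_record({'name': 'AIRMASS', 'value': 1.2})  # no comment field

print(hdr.get_comment('exptime'))  # 'seconds'; name is upper-cased internally
print(hdr.get_comment('AIRMASS'))  # None
# hdr.get_comment('MISSING') would raise KeyError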
esheldon/fitsio
fitsio/header.py
FITSHDR.delete
def delete(self, name):
    """
    Delete the specified entry if it exists.
    """
    if isinstance(name, (list, tuple)):
        for xx in name:
            self.delete(xx)
    else:
        if name in self._record_map:
            del self._record_map[name]
            self._record_list = [
                r for r in self._record_list if r['name'] != name]
python
def delete(self, name):
    if isinstance(name, (list, tuple)):
        for xx in name:
            self.delete(xx)
    else:
        if name in self._record_map:
            del self._record_map[name]
            self._record_list = [
                r for r in self._record_list if r['name'] != name]
[ "def", "delete", "(", "self", ",", "name", ")", ":", "if", "isinstance", "(", "name", ",", "(", "list", ",", "tuple", ")", ")", ":", "for", "xx", "in", "name", ":", "self", ".", "delete", "(", "xx", ")", "else", ":", "if", "name", "in", "self", ".", "_record_map", ":", "del", "self", ".", "_record_map", "[", "name", "]", "self", ".", "_record_list", "=", "[", "r", "for", "r", "in", "self", ".", "_record_list", "if", "r", "[", "'name'", "]", "!=", "name", "]" ]
Delete the specified entry if it exists.
[ "Delete", "the", "specified", "entry", "if", "it", "exists", "." ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/header.py#L205-L216
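A sketch of both call forms of delete. Unlike get_comment, delete matches the stored names directly, so use the upper-case form under which the records were filed; absent names are silently ignored. Constructing a FITSHDR from a plain dict, as below, follows the usage documented for write_keys above.

from fitsio import FITSHDR

hdr = FITSHDR({'ALPHA': 1, 'BETA': 2, 'GAMMA': 3})

hdr.delete('BETA')              # single name
hdr.delete(['ALPHA', 'NOPE'])   # list form; missing names are ignored

print([r['name'] for r in hdr.records()])  # -> ['GAMMA']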
esheldon/fitsio
fitsio/header.py
FITSHDR.clean
def clean(self, is_table=False):
    """
    Remove reserved keywords from the header.

    These are keywords that the fits writer must write in order
    to maintain consistency between header and data.

    parameters
    ----------
    is_table: bool, optional
        Set True if this is a table, so extra keywords will be cleaned
    """

    rmnames = [
        'SIMPLE', 'EXTEND', 'XTENSION', 'BITPIX', 'PCOUNT', 'GCOUNT',
        'THEAP',
        'EXTNAME',
        'BLANK',
        'ZQUANTIZ', 'ZDITHER0', 'ZIMAGE', 'ZCMPTYPE',
        'ZSIMPLE', 'ZTENSION', 'ZPCOUNT', 'ZGCOUNT',
        'ZBITPIX', 'ZEXTEND',
        # 'FZTILELN','FZALGOR',
        'CHECKSUM', 'DATASUM']

    if is_table:
        # these are not allowed in tables
        rmnames += [
            'BUNIT', 'BSCALE', 'BZERO',
        ]

    self.delete(rmnames)

    r = self._record_map.get('NAXIS', None)
    if r is not None:
        naxis = int(r['value'])
        self.delete('NAXIS')

        rmnames = ['NAXIS%d' % i for i in xrange(1, naxis+1)]
        self.delete(rmnames)

    r = self._record_map.get('ZNAXIS', None)
    self.delete('ZNAXIS')
    if r is not None:
        znaxis = int(r['value'])

        rmnames = ['ZNAXIS%d' % i for i in xrange(1, znaxis+1)]
        self.delete(rmnames)
        rmnames = ['ZTILE%d' % i for i in xrange(1, znaxis+1)]
        self.delete(rmnames)
        rmnames = ['ZNAME%d' % i for i in xrange(1, znaxis+1)]
        self.delete(rmnames)
        rmnames = ['ZVAL%d' % i for i in xrange(1, znaxis+1)]
        self.delete(rmnames)

    r = self._record_map.get('TFIELDS', None)
    if r is not None:
        tfields = int(r['value'])
        self.delete('TFIELDS')

        if tfields > 0:
            nbase = [
                'TFORM', 'TTYPE', 'TDIM', 'TUNIT', 'TSCAL', 'TZERO',
                'TNULL', 'TDISP', 'TDMIN', 'TDMAX', 'TDESC', 'TROTA',
                'TRPIX', 'TRVAL', 'TDELT', 'TCUNI',
                # 'FZALG'
            ]
            for i in xrange(1, tfields+1):
                names = ['%s%d' % (n, i) for n in nbase]
                self.delete(names)
python
def clean(self, is_table=False):
    rmnames = [
        'SIMPLE', 'EXTEND', 'XTENSION', 'BITPIX', 'PCOUNT', 'GCOUNT',
        'THEAP',
        'EXTNAME',
        'BLANK',
        'ZQUANTIZ', 'ZDITHER0', 'ZIMAGE', 'ZCMPTYPE',
        'ZSIMPLE', 'ZTENSION', 'ZPCOUNT', 'ZGCOUNT',
        'ZBITPIX', 'ZEXTEND',
        'CHECKSUM', 'DATASUM']

    if is_table:
        rmnames += [
            'BUNIT', 'BSCALE', 'BZERO',
        ]

    self.delete(rmnames)

    r = self._record_map.get('NAXIS', None)
    if r is not None:
        naxis = int(r['value'])
        self.delete('NAXIS')

        rmnames = ['NAXIS%d' % i for i in xrange(1, naxis+1)]
        self.delete(rmnames)

    r = self._record_map.get('ZNAXIS', None)
    self.delete('ZNAXIS')
    if r is not None:
        znaxis = int(r['value'])

        rmnames = ['ZNAXIS%d' % i for i in xrange(1, znaxis+1)]
        self.delete(rmnames)
        rmnames = ['ZTILE%d' % i for i in xrange(1, znaxis+1)]
        self.delete(rmnames)
        rmnames = ['ZNAME%d' % i for i in xrange(1, znaxis+1)]
        self.delete(rmnames)
        rmnames = ['ZVAL%d' % i for i in xrange(1, znaxis+1)]
        self.delete(rmnames)

    r = self._record_map.get('TFIELDS', None)
    if r is not None:
        tfields = int(r['value'])
        self.delete('TFIELDS')

        if tfields > 0:
            nbase = [
                'TFORM', 'TTYPE', 'TDIM', 'TUNIT', 'TSCAL', 'TZERO',
                'TNULL', 'TDISP', 'TDMIN', 'TDMAX', 'TDESC', 'TROTA',
                'TRPIX', 'TRVAL', 'TDELT', 'TCUNI',
            ]
            for i in xrange(1, tfields+1):
                names = ['%s%d' % (n, i) for n in nbase]
                self.delete(names)
[ "def", "clean", "(", "self", ",", "is_table", "=", "False", ")", ":", "rmnames", "=", "[", "'SIMPLE'", ",", "'EXTEND'", ",", "'XTENSION'", ",", "'BITPIX'", ",", "'PCOUNT'", ",", "'GCOUNT'", ",", "'THEAP'", ",", "'EXTNAME'", ",", "'BLANK'", ",", "'ZQUANTIZ'", ",", "'ZDITHER0'", ",", "'ZIMAGE'", ",", "'ZCMPTYPE'", ",", "'ZSIMPLE'", ",", "'ZTENSION'", ",", "'ZPCOUNT'", ",", "'ZGCOUNT'", ",", "'ZBITPIX'", ",", "'ZEXTEND'", ",", "# 'FZTILELN','FZALGOR',", "'CHECKSUM'", ",", "'DATASUM'", "]", "if", "is_table", ":", "# these are not allowed in tables", "rmnames", "+=", "[", "'BUNIT'", ",", "'BSCALE'", ",", "'BZERO'", ",", "]", "self", ".", "delete", "(", "rmnames", ")", "r", "=", "self", ".", "_record_map", ".", "get", "(", "'NAXIS'", ",", "None", ")", "if", "r", "is", "not", "None", ":", "naxis", "=", "int", "(", "r", "[", "'value'", "]", ")", "self", ".", "delete", "(", "'NAXIS'", ")", "rmnames", "=", "[", "'NAXIS%d'", "%", "i", "for", "i", "in", "xrange", "(", "1", ",", "naxis", "+", "1", ")", "]", "self", ".", "delete", "(", "rmnames", ")", "r", "=", "self", ".", "_record_map", ".", "get", "(", "'ZNAXIS'", ",", "None", ")", "self", ".", "delete", "(", "'ZNAXIS'", ")", "if", "r", "is", "not", "None", ":", "znaxis", "=", "int", "(", "r", "[", "'value'", "]", ")", "rmnames", "=", "[", "'ZNAXIS%d'", "%", "i", "for", "i", "in", "xrange", "(", "1", ",", "znaxis", "+", "1", ")", "]", "self", ".", "delete", "(", "rmnames", ")", "rmnames", "=", "[", "'ZTILE%d'", "%", "i", "for", "i", "in", "xrange", "(", "1", ",", "znaxis", "+", "1", ")", "]", "self", ".", "delete", "(", "rmnames", ")", "rmnames", "=", "[", "'ZNAME%d'", "%", "i", "for", "i", "in", "xrange", "(", "1", ",", "znaxis", "+", "1", ")", "]", "self", ".", "delete", "(", "rmnames", ")", "rmnames", "=", "[", "'ZVAL%d'", "%", "i", "for", "i", "in", "xrange", "(", "1", ",", "znaxis", "+", "1", ")", "]", "self", ".", "delete", "(", "rmnames", ")", "r", "=", "self", ".", "_record_map", ".", "get", "(", "'TFIELDS'", ",", "None", ")", "if", "r", "is", "not", "None", ":", "tfields", "=", "int", "(", "r", "[", "'value'", "]", ")", "self", ".", "delete", "(", "'TFIELDS'", ")", "if", "tfields", ">", "0", ":", "nbase", "=", "[", "'TFORM'", ",", "'TTYPE'", ",", "'TDIM'", ",", "'TUNIT'", ",", "'TSCAL'", ",", "'TZERO'", ",", "'TNULL'", ",", "'TDISP'", ",", "'TDMIN'", ",", "'TDMAX'", ",", "'TDESC'", ",", "'TROTA'", ",", "'TRPIX'", ",", "'TRVAL'", ",", "'TDELT'", ",", "'TCUNI'", ",", "# 'FZALG'", "]", "for", "i", "in", "xrange", "(", "1", ",", "tfields", "+", "1", ")", ":", "names", "=", "[", "'%s%d'", "%", "(", "n", ",", "i", ")", "for", "n", "in", "nbase", "]", "self", ".", "delete", "(", "names", ")" ]
Remove reserved keywords from the header.

These are keywords that the fits writer must write in order
to maintain consistency between header and data.

parameters
----------
is_table: bool, optional
    Set True if this is a table, so extra keywords will be cleaned
[ "Remove", "reserved", "keywords", "from", "the", "header", "." ]
train
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/header.py#L218-L288
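A sketch showing which keys survive a clean; the structural keywords below are typical of a simple image header, and only the user keyword remains afterwards.

from fitsio import FITSHDR

hdr = FITSHDR()
hdr['SIMPLE'] = True
hdr['BITPIX'] = -32
hdr['NAXIS'] = 2
hdr['NAXIS1'] = 100
hdr['NAXIS2'] = 200
hdr['OBJECT'] = 'M31'

hdr.clean()  # removes SIMPLE, BITPIX, NAXIS and the NAXISn keys it implies

print([r['name'] for r in hdr.records()])  # -> ['OBJECT']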