id
int32 0
252k
| repo
stringlengths 7
55
| path
stringlengths 4
127
| func_name
stringlengths 1
88
| original_string
stringlengths 75
19.8k
| language
stringclasses 1
value | code
stringlengths 75
19.8k
| code_tokens
sequence | docstring
stringlengths 3
17.3k
| docstring_tokens
sequence | sha
stringlengths 40
40
| url
stringlengths 87
242
|
---|---|---|---|---|---|---|---|---|---|---|---|
4,500 | fhs/pyhdf | pyhdf/V.py | VG.tagrefs | def tagrefs(self):
"""Get the tags and reference numbers of all the vgroup
members.
Args::
no argument
Returns::
list of (tag,ref) tuples, one for each vgroup member
C library equivalent : Vgettagrefs
"""
n = self._nmembers
ret = []
if n:
tags = _C.array_int32(n)
refs = _C.array_int32(n)
k = _C.Vgettagrefs(self._id, tags, refs, n)
_checkErr('tagrefs', k, "error getting tags and refs")
for m in xrange(k):
ret.append((tags[m], refs[m]))
return ret | python | def tagrefs(self):
"""Get the tags and reference numbers of all the vgroup
members.
Args::
no argument
Returns::
list of (tag,ref) tuples, one for each vgroup member
C library equivalent : Vgettagrefs
"""
n = self._nmembers
ret = []
if n:
tags = _C.array_int32(n)
refs = _C.array_int32(n)
k = _C.Vgettagrefs(self._id, tags, refs, n)
_checkErr('tagrefs', k, "error getting tags and refs")
for m in xrange(k):
ret.append((tags[m], refs[m]))
return ret | [
"def",
"tagrefs",
"(",
"self",
")",
":",
"n",
"=",
"self",
".",
"_nmembers",
"ret",
"=",
"[",
"]",
"if",
"n",
":",
"tags",
"=",
"_C",
".",
"array_int32",
"(",
"n",
")",
"refs",
"=",
"_C",
".",
"array_int32",
"(",
"n",
")",
"k",
"=",
"_C",
".",
"Vgettagrefs",
"(",
"self",
".",
"_id",
",",
"tags",
",",
"refs",
",",
"n",
")",
"_checkErr",
"(",
"'tagrefs'",
",",
"k",
",",
"\"error getting tags and refs\"",
")",
"for",
"m",
"in",
"xrange",
"(",
"k",
")",
":",
"ret",
".",
"append",
"(",
"(",
"tags",
"[",
"m",
"]",
",",
"refs",
"[",
"m",
"]",
")",
")",
"return",
"ret"
] | Get the tags and reference numbers of all the vgroup
members.
Args::
no argument
Returns::
list of (tag,ref) tuples, one for each vgroup member
C library equivalent : Vgettagrefs | [
"Get",
"the",
"tags",
"and",
"reference",
"numbers",
"of",
"all",
"the",
"vgroup",
"members",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/V.py#L1100-L1124 |
4,501 | fhs/pyhdf | pyhdf/V.py | VG.inqtagref | def inqtagref(self, tag, ref):
"""Determines if an object identified by its tag and reference
number belongs to the vgroup.
Args::
tag tag of the object to check
ref reference number of the object to check
Returns::
False (0) if the object does not belong to the vgroup,
True (1) otherwise
C library equivalent : Vinqtagref
"""
return _C.Vinqtagref(self._id, tag, ref) | python | def inqtagref(self, tag, ref):
"""Determines if an object identified by its tag and reference
number belongs to the vgroup.
Args::
tag tag of the object to check
ref reference number of the object to check
Returns::
False (0) if the object does not belong to the vgroup,
True (1) otherwise
C library equivalent : Vinqtagref
"""
return _C.Vinqtagref(self._id, tag, ref) | [
"def",
"inqtagref",
"(",
"self",
",",
"tag",
",",
"ref",
")",
":",
"return",
"_C",
".",
"Vinqtagref",
"(",
"self",
".",
"_id",
",",
"tag",
",",
"ref",
")"
] | Determines if an object identified by its tag and reference
number belongs to the vgroup.
Args::
tag tag of the object to check
ref reference number of the object to check
Returns::
False (0) if the object does not belong to the vgroup,
True (1) otherwise
C library equivalent : Vinqtagref | [
"Determines",
"if",
"an",
"object",
"identified",
"by",
"its",
"tag",
"and",
"reference",
"number",
"belongs",
"to",
"the",
"vgroup",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/V.py#L1126-L1143 |
4,502 | fhs/pyhdf | pyhdf/V.py | VG.nrefs | def nrefs(self, tag):
"""Determine the number of tags of a given type in a vgroup.
Args::
tag tag type to look for in the vgroup
Returns::
number of members identified by this tag type
C library equivalent : Vnrefs
"""
n = _C.Vnrefs(self._id, tag)
_checkErr('nrefs', n, "bad arguments")
return n | python | def nrefs(self, tag):
"""Determine the number of tags of a given type in a vgroup.
Args::
tag tag type to look for in the vgroup
Returns::
number of members identified by this tag type
C library equivalent : Vnrefs
"""
n = _C.Vnrefs(self._id, tag)
_checkErr('nrefs', n, "bad arguments")
return n | [
"def",
"nrefs",
"(",
"self",
",",
"tag",
")",
":",
"n",
"=",
"_C",
".",
"Vnrefs",
"(",
"self",
".",
"_id",
",",
"tag",
")",
"_checkErr",
"(",
"'nrefs'",
",",
"n",
",",
"\"bad arguments\"",
")",
"return",
"n"
] | Determine the number of tags of a given type in a vgroup.
Args::
tag tag type to look for in the vgroup
Returns::
number of members identified by this tag type
C library equivalent : Vnrefs | [
"Determine",
"the",
"number",
"of",
"tags",
"of",
"a",
"given",
"type",
"in",
"a",
"vgroup",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/V.py#L1145-L1161 |
4,503 | fhs/pyhdf | pyhdf/V.py | VG.attrinfo | def attrinfo(self):
"""Return info about all the vgroup attributes.
Args::
no argument
Returns::
dictionnary describing each vgroup attribute; for each attribute,
a (name,data) pair is added to the dictionary, where 'data' is
a tuple holding:
- attribute data type (one of HC.xxx constants)
- attribute order
- attribute value
- attribute size in bytes
C library equivalent : no equivalent
"""
dic = {}
for n in range(self._nattrs):
att = self.attr(n)
name, type, order, size = att.info()
dic[name] = (type, order, att.get(), size)
return dic | python | def attrinfo(self):
"""Return info about all the vgroup attributes.
Args::
no argument
Returns::
dictionnary describing each vgroup attribute; for each attribute,
a (name,data) pair is added to the dictionary, where 'data' is
a tuple holding:
- attribute data type (one of HC.xxx constants)
- attribute order
- attribute value
- attribute size in bytes
C library equivalent : no equivalent
"""
dic = {}
for n in range(self._nattrs):
att = self.attr(n)
name, type, order, size = att.info()
dic[name] = (type, order, att.get(), size)
return dic | [
"def",
"attrinfo",
"(",
"self",
")",
":",
"dic",
"=",
"{",
"}",
"for",
"n",
"in",
"range",
"(",
"self",
".",
"_nattrs",
")",
":",
"att",
"=",
"self",
".",
"attr",
"(",
"n",
")",
"name",
",",
"type",
",",
"order",
",",
"size",
"=",
"att",
".",
"info",
"(",
")",
"dic",
"[",
"name",
"]",
"=",
"(",
"type",
",",
"order",
",",
"att",
".",
"get",
"(",
")",
",",
"size",
")",
"return",
"dic"
] | Return info about all the vgroup attributes.
Args::
no argument
Returns::
dictionnary describing each vgroup attribute; for each attribute,
a (name,data) pair is added to the dictionary, where 'data' is
a tuple holding:
- attribute data type (one of HC.xxx constants)
- attribute order
- attribute value
- attribute size in bytes
C library equivalent : no equivalent | [
"Return",
"info",
"about",
"all",
"the",
"vgroup",
"attributes",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/V.py#L1218-L1245 |
4,504 | fhs/pyhdf | pyhdf/V.py | VG.findattr | def findattr(self, name):
"""Search the vgroup for a given attribute.
Args::
name attribute name
Returns::
if found, VGAttr instance describing the attribute
None otherwise
C library equivalent : Vfindattr
"""
try:
att = self.attr(name)
if att._index is None:
att = None
except HDF4Error:
att = None
return att | python | def findattr(self, name):
"""Search the vgroup for a given attribute.
Args::
name attribute name
Returns::
if found, VGAttr instance describing the attribute
None otherwise
C library equivalent : Vfindattr
"""
try:
att = self.attr(name)
if att._index is None:
att = None
except HDF4Error:
att = None
return att | [
"def",
"findattr",
"(",
"self",
",",
"name",
")",
":",
"try",
":",
"att",
"=",
"self",
".",
"attr",
"(",
"name",
")",
"if",
"att",
".",
"_index",
"is",
"None",
":",
"att",
"=",
"None",
"except",
"HDF4Error",
":",
"att",
"=",
"None",
"return",
"att"
] | Search the vgroup for a given attribute.
Args::
name attribute name
Returns::
if found, VGAttr instance describing the attribute
None otherwise
C library equivalent : Vfindattr | [
"Search",
"the",
"vgroup",
"for",
"a",
"given",
"attribute",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/V.py#L1248-L1269 |
4,505 | fhs/pyhdf | pyhdf/SD.py | SDAttr.index | def index(self):
"""Retrieve the attribute index number.
Args::
no argument
Returns::
attribute index number (starting at 0)
C library equivalent : SDfindattr
"""
self._index = _C.SDfindattr(self._obj._id, self._name)
_checkErr('find', self._index, 'illegal attribute name')
return self._index | python | def index(self):
"""Retrieve the attribute index number.
Args::
no argument
Returns::
attribute index number (starting at 0)
C library equivalent : SDfindattr
"""
self._index = _C.SDfindattr(self._obj._id, self._name)
_checkErr('find', self._index, 'illegal attribute name')
return self._index | [
"def",
"index",
"(",
"self",
")",
":",
"self",
".",
"_index",
"=",
"_C",
".",
"SDfindattr",
"(",
"self",
".",
"_obj",
".",
"_id",
",",
"self",
".",
"_name",
")",
"_checkErr",
"(",
"'find'",
",",
"self",
".",
"_index",
",",
"'illegal attribute name'",
")",
"return",
"self",
".",
"_index"
] | Retrieve the attribute index number.
Args::
no argument
Returns::
attribute index number (starting at 0)
C library equivalent : SDfindattr | [
"Retrieve",
"the",
"attribute",
"index",
"number",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L1178-L1194 |
4,506 | fhs/pyhdf | pyhdf/SD.py | SD.end | def end(self):
"""End access to the SD interface and close the HDF file.
Args::
no argument
Returns::
None
The instance should not be used afterwards.
The 'end()' method is implicitly called when the
SD instance is deleted.
C library equivalent : SDend
"""
status = _C.SDend(self._id)
_checkErr('end', status, "cannot execute")
self._id = None | python | def end(self):
"""End access to the SD interface and close the HDF file.
Args::
no argument
Returns::
None
The instance should not be used afterwards.
The 'end()' method is implicitly called when the
SD instance is deleted.
C library equivalent : SDend
"""
status = _C.SDend(self._id)
_checkErr('end', status, "cannot execute")
self._id = None | [
"def",
"end",
"(",
"self",
")",
":",
"status",
"=",
"_C",
".",
"SDend",
"(",
"self",
".",
"_id",
")",
"_checkErr",
"(",
"'end'",
",",
"status",
",",
"\"cannot execute\"",
")",
"self",
".",
"_id",
"=",
"None"
] | End access to the SD interface and close the HDF file.
Args::
no argument
Returns::
None
The instance should not be used afterwards.
The 'end()' method is implicitly called when the
SD instance is deleted.
C library equivalent : SDend | [
"End",
"access",
"to",
"the",
"SD",
"interface",
"and",
"close",
"the",
"HDF",
"file",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L1457-L1477 |
4,507 | fhs/pyhdf | pyhdf/SD.py | SD.info | def info(self):
"""Retrieve information about the SD interface.
Args::
no argument
Returns::
2-element tuple holding:
number of datasets inside the file
number of file attributes
C library equivalent : SDfileinfo
"""
status, n_datasets, n_file_attrs = _C.SDfileinfo(self._id)
_checkErr('info', status, "cannot execute")
return n_datasets, n_file_attrs | python | def info(self):
"""Retrieve information about the SD interface.
Args::
no argument
Returns::
2-element tuple holding:
number of datasets inside the file
number of file attributes
C library equivalent : SDfileinfo
"""
status, n_datasets, n_file_attrs = _C.SDfileinfo(self._id)
_checkErr('info', status, "cannot execute")
return n_datasets, n_file_attrs | [
"def",
"info",
"(",
"self",
")",
":",
"status",
",",
"n_datasets",
",",
"n_file_attrs",
"=",
"_C",
".",
"SDfileinfo",
"(",
"self",
".",
"_id",
")",
"_checkErr",
"(",
"'info'",
",",
"status",
",",
"\"cannot execute\"",
")",
"return",
"n_datasets",
",",
"n_file_attrs"
] | Retrieve information about the SD interface.
Args::
no argument
Returns::
2-element tuple holding:
number of datasets inside the file
number of file attributes
C library equivalent : SDfileinfo | [
"Retrieve",
"information",
"about",
"the",
"SD",
"interface",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L1479-L1497 |
4,508 | fhs/pyhdf | pyhdf/SD.py | SD.nametoindex | def nametoindex(self, sds_name):
"""Return the index number of a dataset given the dataset name.
Args::
sds_name : dataset name
Returns::
index number of the dataset
C library equivalent : SDnametoindex
"""
sds_idx = _C.SDnametoindex(self._id, sds_name)
_checkErr('nametoindex', sds_idx, 'non existent SDS')
return sds_idx | python | def nametoindex(self, sds_name):
"""Return the index number of a dataset given the dataset name.
Args::
sds_name : dataset name
Returns::
index number of the dataset
C library equivalent : SDnametoindex
"""
sds_idx = _C.SDnametoindex(self._id, sds_name)
_checkErr('nametoindex', sds_idx, 'non existent SDS')
return sds_idx | [
"def",
"nametoindex",
"(",
"self",
",",
"sds_name",
")",
":",
"sds_idx",
"=",
"_C",
".",
"SDnametoindex",
"(",
"self",
".",
"_id",
",",
"sds_name",
")",
"_checkErr",
"(",
"'nametoindex'",
",",
"sds_idx",
",",
"'non existent SDS'",
")",
"return",
"sds_idx"
] | Return the index number of a dataset given the dataset name.
Args::
sds_name : dataset name
Returns::
index number of the dataset
C library equivalent : SDnametoindex | [
"Return",
"the",
"index",
"number",
"of",
"a",
"dataset",
"given",
"the",
"dataset",
"name",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L1499-L1515 |
4,509 | fhs/pyhdf | pyhdf/SD.py | SD.reftoindex | def reftoindex(self, sds_ref):
"""Returns the index number of a dataset given the dataset
reference number.
Args::
sds_ref : dataset reference number
Returns::
dataset index number
C library equivalent : SDreftoindex
"""
sds_idx = _C.SDreftoindex(self._id, sds_ref)
_checkErr('reftoindex', sds_idx, 'illegal SDS ref number')
return sds_idx | python | def reftoindex(self, sds_ref):
"""Returns the index number of a dataset given the dataset
reference number.
Args::
sds_ref : dataset reference number
Returns::
dataset index number
C library equivalent : SDreftoindex
"""
sds_idx = _C.SDreftoindex(self._id, sds_ref)
_checkErr('reftoindex', sds_idx, 'illegal SDS ref number')
return sds_idx | [
"def",
"reftoindex",
"(",
"self",
",",
"sds_ref",
")",
":",
"sds_idx",
"=",
"_C",
".",
"SDreftoindex",
"(",
"self",
".",
"_id",
",",
"sds_ref",
")",
"_checkErr",
"(",
"'reftoindex'",
",",
"sds_idx",
",",
"'illegal SDS ref number'",
")",
"return",
"sds_idx"
] | Returns the index number of a dataset given the dataset
reference number.
Args::
sds_ref : dataset reference number
Returns::
dataset index number
C library equivalent : SDreftoindex | [
"Returns",
"the",
"index",
"number",
"of",
"a",
"dataset",
"given",
"the",
"dataset",
"reference",
"number",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L1517-L1534 |
4,510 | fhs/pyhdf | pyhdf/SD.py | SD.setfillmode | def setfillmode(self, fill_mode):
"""Set the fill mode for all the datasets in the file.
Args::
fill_mode : fill mode; one of :
SDC.FILL write the fill value to all the datasets
of the file by default
SDC.NOFILL do not write fill values to all datasets
of the file by default
Returns::
previous fill mode value
C library equivalent: SDsetfillmode
"""
if not fill_mode in [SDC.FILL, SDC.NOFILL]:
raise HDF4Error("bad fill mode")
old_mode = _C.SDsetfillmode(self._id, fill_mode)
_checkErr('setfillmode', old_mode, 'cannot execute')
return old_mode | python | def setfillmode(self, fill_mode):
"""Set the fill mode for all the datasets in the file.
Args::
fill_mode : fill mode; one of :
SDC.FILL write the fill value to all the datasets
of the file by default
SDC.NOFILL do not write fill values to all datasets
of the file by default
Returns::
previous fill mode value
C library equivalent: SDsetfillmode
"""
if not fill_mode in [SDC.FILL, SDC.NOFILL]:
raise HDF4Error("bad fill mode")
old_mode = _C.SDsetfillmode(self._id, fill_mode)
_checkErr('setfillmode', old_mode, 'cannot execute')
return old_mode | [
"def",
"setfillmode",
"(",
"self",
",",
"fill_mode",
")",
":",
"if",
"not",
"fill_mode",
"in",
"[",
"SDC",
".",
"FILL",
",",
"SDC",
".",
"NOFILL",
"]",
":",
"raise",
"HDF4Error",
"(",
"\"bad fill mode\"",
")",
"old_mode",
"=",
"_C",
".",
"SDsetfillmode",
"(",
"self",
".",
"_id",
",",
"fill_mode",
")",
"_checkErr",
"(",
"'setfillmode'",
",",
"old_mode",
",",
"'cannot execute'",
")",
"return",
"old_mode"
] | Set the fill mode for all the datasets in the file.
Args::
fill_mode : fill mode; one of :
SDC.FILL write the fill value to all the datasets
of the file by default
SDC.NOFILL do not write fill values to all datasets
of the file by default
Returns::
previous fill mode value
C library equivalent: SDsetfillmode | [
"Set",
"the",
"fill",
"mode",
"for",
"all",
"the",
"datasets",
"in",
"the",
"file",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L1536-L1558 |
4,511 | fhs/pyhdf | pyhdf/SD.py | SD.select | def select(self, name_or_index):
"""Locate a dataset.
Args::
name_or_index dataset name or index number
Returns::
SDS instance for the dataset
C library equivalent : SDselect
"""
if isinstance(name_or_index, type(1)):
idx = name_or_index
else:
try:
idx = self.nametoindex(name_or_index)
except HDF4Error:
raise HDF4Error("select: non-existent dataset")
id = _C.SDselect(self._id, idx)
_checkErr('select', id, "cannot execute")
return SDS(self, id) | python | def select(self, name_or_index):
"""Locate a dataset.
Args::
name_or_index dataset name or index number
Returns::
SDS instance for the dataset
C library equivalent : SDselect
"""
if isinstance(name_or_index, type(1)):
idx = name_or_index
else:
try:
idx = self.nametoindex(name_or_index)
except HDF4Error:
raise HDF4Error("select: non-existent dataset")
id = _C.SDselect(self._id, idx)
_checkErr('select', id, "cannot execute")
return SDS(self, id) | [
"def",
"select",
"(",
"self",
",",
"name_or_index",
")",
":",
"if",
"isinstance",
"(",
"name_or_index",
",",
"type",
"(",
"1",
")",
")",
":",
"idx",
"=",
"name_or_index",
"else",
":",
"try",
":",
"idx",
"=",
"self",
".",
"nametoindex",
"(",
"name_or_index",
")",
"except",
"HDF4Error",
":",
"raise",
"HDF4Error",
"(",
"\"select: non-existent dataset\"",
")",
"id",
"=",
"_C",
".",
"SDselect",
"(",
"self",
".",
"_id",
",",
"idx",
")",
"_checkErr",
"(",
"'select'",
",",
"id",
",",
"\"cannot execute\"",
")",
"return",
"SDS",
"(",
"self",
",",
"id",
")"
] | Locate a dataset.
Args::
name_or_index dataset name or index number
Returns::
SDS instance for the dataset
C library equivalent : SDselect | [
"Locate",
"a",
"dataset",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L1603-L1626 |
4,512 | fhs/pyhdf | pyhdf/SD.py | SD.attributes | def attributes(self, full=0):
"""Return a dictionnary describing every global
attribute attached to the SD interface.
Args::
full true to get complete info about each attribute
false to report only each attribute value
Returns::
Empty dictionnary if no global attribute defined
Otherwise, dictionnary where each key is the name of a
global attribute. If parameter 'full' is false,
key value is the attribute value. If 'full' is true,
key value is a tuple with the following elements:
- attribute value
- attribute index number
- attribute type
- attribute length
C library equivalent : no equivalent
"""
# Get the number of global attributes.
nsds, natts = self.info()
# Inquire each attribute
res = {}
for n in range(natts):
a = self.attr(n)
name, aType, nVal = a.info()
if full:
res[name] = (a.get(), a.index(), aType, nVal)
else:
res[name] = a.get()
return res | python | def attributes(self, full=0):
"""Return a dictionnary describing every global
attribute attached to the SD interface.
Args::
full true to get complete info about each attribute
false to report only each attribute value
Returns::
Empty dictionnary if no global attribute defined
Otherwise, dictionnary where each key is the name of a
global attribute. If parameter 'full' is false,
key value is the attribute value. If 'full' is true,
key value is a tuple with the following elements:
- attribute value
- attribute index number
- attribute type
- attribute length
C library equivalent : no equivalent
"""
# Get the number of global attributes.
nsds, natts = self.info()
# Inquire each attribute
res = {}
for n in range(natts):
a = self.attr(n)
name, aType, nVal = a.info()
if full:
res[name] = (a.get(), a.index(), aType, nVal)
else:
res[name] = a.get()
return res | [
"def",
"attributes",
"(",
"self",
",",
"full",
"=",
"0",
")",
":",
"# Get the number of global attributes.",
"nsds",
",",
"natts",
"=",
"self",
".",
"info",
"(",
")",
"# Inquire each attribute",
"res",
"=",
"{",
"}",
"for",
"n",
"in",
"range",
"(",
"natts",
")",
":",
"a",
"=",
"self",
".",
"attr",
"(",
"n",
")",
"name",
",",
"aType",
",",
"nVal",
"=",
"a",
".",
"info",
"(",
")",
"if",
"full",
":",
"res",
"[",
"name",
"]",
"=",
"(",
"a",
".",
"get",
"(",
")",
",",
"a",
".",
"index",
"(",
")",
",",
"aType",
",",
"nVal",
")",
"else",
":",
"res",
"[",
"name",
"]",
"=",
"a",
".",
"get",
"(",
")",
"return",
"res"
] | Return a dictionnary describing every global
attribute attached to the SD interface.
Args::
full true to get complete info about each attribute
false to report only each attribute value
Returns::
Empty dictionnary if no global attribute defined
Otherwise, dictionnary where each key is the name of a
global attribute. If parameter 'full' is false,
key value is the attribute value. If 'full' is true,
key value is a tuple with the following elements:
- attribute value
- attribute index number
- attribute type
- attribute length
C library equivalent : no equivalent | [
"Return",
"a",
"dictionnary",
"describing",
"every",
"global",
"attribute",
"attached",
"to",
"the",
"SD",
"interface",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L1651-L1689 |
4,513 | fhs/pyhdf | pyhdf/SD.py | SD.datasets | def datasets(self):
"""Return a dictionnary describing all the file datasets.
Args::
no argument
Returns::
Empty dictionnary if no dataset is defined.
Otherwise, dictionnary whose keys are the file dataset names,
and values are tuples describing the corresponding datasets.
Each tuple holds the following elements in order:
- tuple holding the names of the dimensions defining the
dataset coordinate axes
- tuple holding the dataset shape (dimension lengths);
if a dimension is unlimited, the reported length corresponds
to the dimension current length
- dataset type
- dataset index number
C library equivalent : no equivalent
"""
# Get number of datasets
nDs = self.info()[0]
# Inquire each var
res = {}
for n in range(nDs):
# Get dataset info.
v = self.select(n)
vName, vRank, vLen, vType, vAtt = v.info()
if vRank < 2: # need a sequence
vLen = [vLen]
# Get dimension info.
dimNames = []
dimLengths = []
for dimNum in range(vRank):
d = v.dim(dimNum)
dimNames.append(d.info()[0])
dimLengths.append(vLen[dimNum])
res[vName] = (tuple(dimNames), tuple(dimLengths),
vType, n)
return res | python | def datasets(self):
"""Return a dictionnary describing all the file datasets.
Args::
no argument
Returns::
Empty dictionnary if no dataset is defined.
Otherwise, dictionnary whose keys are the file dataset names,
and values are tuples describing the corresponding datasets.
Each tuple holds the following elements in order:
- tuple holding the names of the dimensions defining the
dataset coordinate axes
- tuple holding the dataset shape (dimension lengths);
if a dimension is unlimited, the reported length corresponds
to the dimension current length
- dataset type
- dataset index number
C library equivalent : no equivalent
"""
# Get number of datasets
nDs = self.info()[0]
# Inquire each var
res = {}
for n in range(nDs):
# Get dataset info.
v = self.select(n)
vName, vRank, vLen, vType, vAtt = v.info()
if vRank < 2: # need a sequence
vLen = [vLen]
# Get dimension info.
dimNames = []
dimLengths = []
for dimNum in range(vRank):
d = v.dim(dimNum)
dimNames.append(d.info()[0])
dimLengths.append(vLen[dimNum])
res[vName] = (tuple(dimNames), tuple(dimLengths),
vType, n)
return res | [
"def",
"datasets",
"(",
"self",
")",
":",
"# Get number of datasets",
"nDs",
"=",
"self",
".",
"info",
"(",
")",
"[",
"0",
"]",
"# Inquire each var",
"res",
"=",
"{",
"}",
"for",
"n",
"in",
"range",
"(",
"nDs",
")",
":",
"# Get dataset info.",
"v",
"=",
"self",
".",
"select",
"(",
"n",
")",
"vName",
",",
"vRank",
",",
"vLen",
",",
"vType",
",",
"vAtt",
"=",
"v",
".",
"info",
"(",
")",
"if",
"vRank",
"<",
"2",
":",
"# need a sequence",
"vLen",
"=",
"[",
"vLen",
"]",
"# Get dimension info.",
"dimNames",
"=",
"[",
"]",
"dimLengths",
"=",
"[",
"]",
"for",
"dimNum",
"in",
"range",
"(",
"vRank",
")",
":",
"d",
"=",
"v",
".",
"dim",
"(",
"dimNum",
")",
"dimNames",
".",
"append",
"(",
"d",
".",
"info",
"(",
")",
"[",
"0",
"]",
")",
"dimLengths",
".",
"append",
"(",
"vLen",
"[",
"dimNum",
"]",
")",
"res",
"[",
"vName",
"]",
"=",
"(",
"tuple",
"(",
"dimNames",
")",
",",
"tuple",
"(",
"dimLengths",
")",
",",
"vType",
",",
"n",
")",
"return",
"res"
] | Return a dictionnary describing all the file datasets.
Args::
no argument
Returns::
Empty dictionnary if no dataset is defined.
Otherwise, dictionnary whose keys are the file dataset names,
and values are tuples describing the corresponding datasets.
Each tuple holds the following elements in order:
- tuple holding the names of the dimensions defining the
dataset coordinate axes
- tuple holding the dataset shape (dimension lengths);
if a dimension is unlimited, the reported length corresponds
to the dimension current length
- dataset type
- dataset index number
C library equivalent : no equivalent | [
"Return",
"a",
"dictionnary",
"describing",
"all",
"the",
"file",
"datasets",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L1691-L1736 |
4,514 | fhs/pyhdf | pyhdf/SD.py | SDS.endaccess | def endaccess(self):
"""Terminates access to the SDS.
Args::
no argument
Returns::
None.
The SDS instance should not be used afterwards.
The 'endaccess()' method is implicitly called when
the SDS instance is deleted.
C library equivalent : SDendaccess
"""
status = _C.SDendaccess(self._id)
_checkErr('endaccess', status, "cannot execute")
self._id = None | python | def endaccess(self):
"""Terminates access to the SDS.
Args::
no argument
Returns::
None.
The SDS instance should not be used afterwards.
The 'endaccess()' method is implicitly called when
the SDS instance is deleted.
C library equivalent : SDendaccess
"""
status = _C.SDendaccess(self._id)
_checkErr('endaccess', status, "cannot execute")
self._id = None | [
"def",
"endaccess",
"(",
"self",
")",
":",
"status",
"=",
"_C",
".",
"SDendaccess",
"(",
"self",
".",
"_id",
")",
"_checkErr",
"(",
"'endaccess'",
",",
"status",
",",
"\"cannot execute\"",
")",
"self",
".",
"_id",
"=",
"None"
] | Terminates access to the SDS.
Args::
no argument
Returns::
None.
The SDS instance should not be used afterwards.
The 'endaccess()' method is implicitly called when
the SDS instance is deleted.
C library equivalent : SDendaccess | [
"Terminates",
"access",
"to",
"the",
"SDS",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L1817-L1837 |
4,515 | fhs/pyhdf | pyhdf/SD.py | SDS.dim | def dim(self, dim_index):
"""Get an SDim instance given a dimension index number.
Args::
dim_index index number of the dimension (numbering starts at 0)
C library equivalent : SDgetdimid
"""
id = _C.SDgetdimid(self._id, dim_index)
_checkErr('dim', id, 'invalid SDS identifier or dimension index')
return SDim(self, id, dim_index) | python | def dim(self, dim_index):
"""Get an SDim instance given a dimension index number.
Args::
dim_index index number of the dimension (numbering starts at 0)
C library equivalent : SDgetdimid
"""
id = _C.SDgetdimid(self._id, dim_index)
_checkErr('dim', id, 'invalid SDS identifier or dimension index')
return SDim(self, id, dim_index) | [
"def",
"dim",
"(",
"self",
",",
"dim_index",
")",
":",
"id",
"=",
"_C",
".",
"SDgetdimid",
"(",
"self",
".",
"_id",
",",
"dim_index",
")",
"_checkErr",
"(",
"'dim'",
",",
"id",
",",
"'invalid SDS identifier or dimension index'",
")",
"return",
"SDim",
"(",
"self",
",",
"id",
",",
"dim_index",
")"
] | Get an SDim instance given a dimension index number.
Args::
dim_index index number of the dimension (numbering starts at 0)
C library equivalent : SDgetdimid | [
"Get",
"an",
"SDim",
"instance",
"given",
"a",
"dimension",
"index",
"number",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L1840-L1851 |
4,516 | fhs/pyhdf | pyhdf/SD.py | SDS.get | def get(self, start=None, count=None, stride=None):
"""Read data from the dataset.
Args::
start : indices where to start reading in the data array;
default to 0 on all dimensions
count : number of values to read along each dimension;
default to the current length of all dimensions
stride : sampling interval along each dimension;
default to 1 on all dimensions
For n-dimensional datasets, those 3 parameters are entered
using lists. For one-dimensional datasets, integers
can also be used.
Note that, to read the whole dataset contents, one should
simply call the method with no argument.
Returns::
numpy array initialized with the data.
C library equivalent : SDreaddata
The dataset can also be read using the familiar indexing and
slicing notation, like ordinary python sequences.
See "High level variable access".
"""
# Obtain SDS info.
try:
sds_name, rank, dim_sizes, data_type, n_attrs = self.info()
if isinstance(dim_sizes, type(1)):
dim_sizes = [dim_sizes]
except HDF4Error:
raise HDF4Error('get : cannot execute')
# Validate args.
if start is None:
start = [0] * rank
elif isinstance(start, type(1)):
start = [start]
if count is None:
count = dim_sizes
if count[0] == 0:
count[0] = 1
elif isinstance(count, type(1)):
count = [count]
if stride is None:
stride = [1] * rank
elif isinstance(stride, type(1)):
stride = [stride]
if len(start) != rank or len(count) != rank or len(stride) != rank:
raise HDF4Error('get : start, stride or count ' \
'do not match SDS rank')
for n in range(rank):
if start[n] < 0 or start[n] + \
(abs(count[n]) - 1) * stride[n] >= dim_sizes[n]:
raise HDF4Error('get arguments violate ' \
'the size (%d) of dimension %d' \
% (dim_sizes[n], n))
if not data_type in SDC.equivNumericTypes:
raise HDF4Error('get cannot currrently deal with '\
'the SDS data type')
return _C._SDreaddata_0(self._id, data_type, start, count, stride) | python | def get(self, start=None, count=None, stride=None):
"""Read data from the dataset.
Args::
start : indices where to start reading in the data array;
default to 0 on all dimensions
count : number of values to read along each dimension;
default to the current length of all dimensions
stride : sampling interval along each dimension;
default to 1 on all dimensions
For n-dimensional datasets, those 3 parameters are entered
using lists. For one-dimensional datasets, integers
can also be used.
Note that, to read the whole dataset contents, one should
simply call the method with no argument.
Returns::
numpy array initialized with the data.
C library equivalent : SDreaddata
The dataset can also be read using the familiar indexing and
slicing notation, like ordinary python sequences.
See "High level variable access".
"""
# Obtain SDS info.
try:
sds_name, rank, dim_sizes, data_type, n_attrs = self.info()
if isinstance(dim_sizes, type(1)):
dim_sizes = [dim_sizes]
except HDF4Error:
raise HDF4Error('get : cannot execute')
# Validate args.
if start is None:
start = [0] * rank
elif isinstance(start, type(1)):
start = [start]
if count is None:
count = dim_sizes
if count[0] == 0:
count[0] = 1
elif isinstance(count, type(1)):
count = [count]
if stride is None:
stride = [1] * rank
elif isinstance(stride, type(1)):
stride = [stride]
if len(start) != rank or len(count) != rank or len(stride) != rank:
raise HDF4Error('get : start, stride or count ' \
'do not match SDS rank')
for n in range(rank):
if start[n] < 0 or start[n] + \
(abs(count[n]) - 1) * stride[n] >= dim_sizes[n]:
raise HDF4Error('get arguments violate ' \
'the size (%d) of dimension %d' \
% (dim_sizes[n], n))
if not data_type in SDC.equivNumericTypes:
raise HDF4Error('get cannot currrently deal with '\
'the SDS data type')
return _C._SDreaddata_0(self._id, data_type, start, count, stride) | [
"def",
"get",
"(",
"self",
",",
"start",
"=",
"None",
",",
"count",
"=",
"None",
",",
"stride",
"=",
"None",
")",
":",
"# Obtain SDS info.",
"try",
":",
"sds_name",
",",
"rank",
",",
"dim_sizes",
",",
"data_type",
",",
"n_attrs",
"=",
"self",
".",
"info",
"(",
")",
"if",
"isinstance",
"(",
"dim_sizes",
",",
"type",
"(",
"1",
")",
")",
":",
"dim_sizes",
"=",
"[",
"dim_sizes",
"]",
"except",
"HDF4Error",
":",
"raise",
"HDF4Error",
"(",
"'get : cannot execute'",
")",
"# Validate args.",
"if",
"start",
"is",
"None",
":",
"start",
"=",
"[",
"0",
"]",
"*",
"rank",
"elif",
"isinstance",
"(",
"start",
",",
"type",
"(",
"1",
")",
")",
":",
"start",
"=",
"[",
"start",
"]",
"if",
"count",
"is",
"None",
":",
"count",
"=",
"dim_sizes",
"if",
"count",
"[",
"0",
"]",
"==",
"0",
":",
"count",
"[",
"0",
"]",
"=",
"1",
"elif",
"isinstance",
"(",
"count",
",",
"type",
"(",
"1",
")",
")",
":",
"count",
"=",
"[",
"count",
"]",
"if",
"stride",
"is",
"None",
":",
"stride",
"=",
"[",
"1",
"]",
"*",
"rank",
"elif",
"isinstance",
"(",
"stride",
",",
"type",
"(",
"1",
")",
")",
":",
"stride",
"=",
"[",
"stride",
"]",
"if",
"len",
"(",
"start",
")",
"!=",
"rank",
"or",
"len",
"(",
"count",
")",
"!=",
"rank",
"or",
"len",
"(",
"stride",
")",
"!=",
"rank",
":",
"raise",
"HDF4Error",
"(",
"'get : start, stride or count '",
"'do not match SDS rank'",
")",
"for",
"n",
"in",
"range",
"(",
"rank",
")",
":",
"if",
"start",
"[",
"n",
"]",
"<",
"0",
"or",
"start",
"[",
"n",
"]",
"+",
"(",
"abs",
"(",
"count",
"[",
"n",
"]",
")",
"-",
"1",
")",
"*",
"stride",
"[",
"n",
"]",
">=",
"dim_sizes",
"[",
"n",
"]",
":",
"raise",
"HDF4Error",
"(",
"'get arguments violate '",
"'the size (%d) of dimension %d'",
"%",
"(",
"dim_sizes",
"[",
"n",
"]",
",",
"n",
")",
")",
"if",
"not",
"data_type",
"in",
"SDC",
".",
"equivNumericTypes",
":",
"raise",
"HDF4Error",
"(",
"'get cannot currrently deal with '",
"'the SDS data type'",
")",
"return",
"_C",
".",
"_SDreaddata_0",
"(",
"self",
".",
"_id",
",",
"data_type",
",",
"start",
",",
"count",
",",
"stride",
")"
] | Read data from the dataset.
Args::
start : indices where to start reading in the data array;
default to 0 on all dimensions
count : number of values to read along each dimension;
default to the current length of all dimensions
stride : sampling interval along each dimension;
default to 1 on all dimensions
For n-dimensional datasets, those 3 parameters are entered
using lists. For one-dimensional datasets, integers
can also be used.
Note that, to read the whole dataset contents, one should
simply call the method with no argument.
Returns::
numpy array initialized with the data.
C library equivalent : SDreaddata
The dataset can also be read using the familiar indexing and
slicing notation, like ordinary python sequences.
See "High level variable access". | [
"Read",
"data",
"from",
"the",
"dataset",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L1853-L1920 |
4,517 | fhs/pyhdf | pyhdf/SD.py | SDS.set | def set(self, data, start=None, count=None, stride=None):
"""Write data to the dataset.
Args::
data : array of data to write; can be given as a numpy
array, or as Python sequence (whose elements can be
imbricated sequences)
start : indices where to start writing in the dataset;
default to 0 on all dimensions
count : number of values to write along each dimension;
default to the current length of dataset dimensions
stride : sampling interval along each dimension;
default to 1 on all dimensions
For n-dimensional datasets, those 3 parameters are entered
using lists. For one-dimensional datasets, integers
can also be used.
Note that, to write the whole dataset at once, one has simply
to call the method with the dataset values in parameter
'data', omitting all other parameters.
Returns::
None.
C library equivalent : SDwritedata
The dataset can also be written using the familiar indexing and
slicing notation, like ordinary python sequences.
See "High level variable access".
"""
# Obtain SDS info.
try:
sds_name, rank, dim_sizes, data_type, n_attrs = self.info()
if isinstance(dim_sizes, type(1)):
dim_sizes = [dim_sizes]
except HDF4Error:
raise HDF4Error('set : cannot execute')
# Validate args.
if start is None:
start = [0] * rank
elif isinstance(start, type(1)):
start = [start]
if count is None:
count = dim_sizes
if count[0] == 0:
count[0] = 1
elif isinstance(count, type(1)):
count = [count]
if stride is None:
stride = [1] * rank
elif isinstance(stride, type(1)):
stride = [stride]
if len(start) != rank or len(count) != rank or len(stride) != rank:
raise HDF4Error('set : start, stride or count '\
'do not match SDS rank')
unlimited = self.isrecord()
for n in range(rank):
ok = 1
if start[n] < 0:
ok = 0
elif n > 0 or not unlimited:
if start[n] + (abs(count[n]) - 1) * stride[n] >= dim_sizes[n]:
ok = 0
if not ok:
raise HDF4Error('set arguments violate '\
'the size (%d) of dimension %d' \
% (dim_sizes[n], n))
# ??? Check support for UINT16
if not data_type in SDC.equivNumericTypes:
raise HDF4Error('set cannot currrently deal '\
'with the SDS data type')
_C._SDwritedata_0(self._id, data_type, start, count, data, stride) | python | def set(self, data, start=None, count=None, stride=None):
"""Write data to the dataset.
Args::
data : array of data to write; can be given as a numpy
array, or as Python sequence (whose elements can be
imbricated sequences)
start : indices where to start writing in the dataset;
default to 0 on all dimensions
count : number of values to write along each dimension;
default to the current length of dataset dimensions
stride : sampling interval along each dimension;
default to 1 on all dimensions
For n-dimensional datasets, those 3 parameters are entered
using lists. For one-dimensional datasets, integers
can also be used.
Note that, to write the whole dataset at once, one has simply
to call the method with the dataset values in parameter
'data', omitting all other parameters.
Returns::
None.
C library equivalent : SDwritedata
The dataset can also be written using the familiar indexing and
slicing notation, like ordinary python sequences.
See "High level variable access".
"""
# Obtain SDS info.
try:
sds_name, rank, dim_sizes, data_type, n_attrs = self.info()
if isinstance(dim_sizes, type(1)):
dim_sizes = [dim_sizes]
except HDF4Error:
raise HDF4Error('set : cannot execute')
# Validate args.
if start is None:
start = [0] * rank
elif isinstance(start, type(1)):
start = [start]
if count is None:
count = dim_sizes
if count[0] == 0:
count[0] = 1
elif isinstance(count, type(1)):
count = [count]
if stride is None:
stride = [1] * rank
elif isinstance(stride, type(1)):
stride = [stride]
if len(start) != rank or len(count) != rank or len(stride) != rank:
raise HDF4Error('set : start, stride or count '\
'do not match SDS rank')
unlimited = self.isrecord()
for n in range(rank):
ok = 1
if start[n] < 0:
ok = 0
elif n > 0 or not unlimited:
if start[n] + (abs(count[n]) - 1) * stride[n] >= dim_sizes[n]:
ok = 0
if not ok:
raise HDF4Error('set arguments violate '\
'the size (%d) of dimension %d' \
% (dim_sizes[n], n))
# ??? Check support for UINT16
if not data_type in SDC.equivNumericTypes:
raise HDF4Error('set cannot currrently deal '\
'with the SDS data type')
_C._SDwritedata_0(self._id, data_type, start, count, data, stride) | [
"def",
"set",
"(",
"self",
",",
"data",
",",
"start",
"=",
"None",
",",
"count",
"=",
"None",
",",
"stride",
"=",
"None",
")",
":",
"# Obtain SDS info.",
"try",
":",
"sds_name",
",",
"rank",
",",
"dim_sizes",
",",
"data_type",
",",
"n_attrs",
"=",
"self",
".",
"info",
"(",
")",
"if",
"isinstance",
"(",
"dim_sizes",
",",
"type",
"(",
"1",
")",
")",
":",
"dim_sizes",
"=",
"[",
"dim_sizes",
"]",
"except",
"HDF4Error",
":",
"raise",
"HDF4Error",
"(",
"'set : cannot execute'",
")",
"# Validate args.",
"if",
"start",
"is",
"None",
":",
"start",
"=",
"[",
"0",
"]",
"*",
"rank",
"elif",
"isinstance",
"(",
"start",
",",
"type",
"(",
"1",
")",
")",
":",
"start",
"=",
"[",
"start",
"]",
"if",
"count",
"is",
"None",
":",
"count",
"=",
"dim_sizes",
"if",
"count",
"[",
"0",
"]",
"==",
"0",
":",
"count",
"[",
"0",
"]",
"=",
"1",
"elif",
"isinstance",
"(",
"count",
",",
"type",
"(",
"1",
")",
")",
":",
"count",
"=",
"[",
"count",
"]",
"if",
"stride",
"is",
"None",
":",
"stride",
"=",
"[",
"1",
"]",
"*",
"rank",
"elif",
"isinstance",
"(",
"stride",
",",
"type",
"(",
"1",
")",
")",
":",
"stride",
"=",
"[",
"stride",
"]",
"if",
"len",
"(",
"start",
")",
"!=",
"rank",
"or",
"len",
"(",
"count",
")",
"!=",
"rank",
"or",
"len",
"(",
"stride",
")",
"!=",
"rank",
":",
"raise",
"HDF4Error",
"(",
"'set : start, stride or count '",
"'do not match SDS rank'",
")",
"unlimited",
"=",
"self",
".",
"isrecord",
"(",
")",
"for",
"n",
"in",
"range",
"(",
"rank",
")",
":",
"ok",
"=",
"1",
"if",
"start",
"[",
"n",
"]",
"<",
"0",
":",
"ok",
"=",
"0",
"elif",
"n",
">",
"0",
"or",
"not",
"unlimited",
":",
"if",
"start",
"[",
"n",
"]",
"+",
"(",
"abs",
"(",
"count",
"[",
"n",
"]",
")",
"-",
"1",
")",
"*",
"stride",
"[",
"n",
"]",
">=",
"dim_sizes",
"[",
"n",
"]",
":",
"ok",
"=",
"0",
"if",
"not",
"ok",
":",
"raise",
"HDF4Error",
"(",
"'set arguments violate '",
"'the size (%d) of dimension %d'",
"%",
"(",
"dim_sizes",
"[",
"n",
"]",
",",
"n",
")",
")",
"# ??? Check support for UINT16",
"if",
"not",
"data_type",
"in",
"SDC",
".",
"equivNumericTypes",
":",
"raise",
"HDF4Error",
"(",
"'set cannot currrently deal '",
"'with the SDS data type'",
")",
"_C",
".",
"_SDwritedata_0",
"(",
"self",
".",
"_id",
",",
"data_type",
",",
"start",
",",
"count",
",",
"data",
",",
"stride",
")"
] | Write data to the dataset.
Args::
data : array of data to write; can be given as a numpy
array, or as Python sequence (whose elements can be
imbricated sequences)
start : indices where to start writing in the dataset;
default to 0 on all dimensions
count : number of values to write along each dimension;
default to the current length of dataset dimensions
stride : sampling interval along each dimension;
default to 1 on all dimensions
For n-dimensional datasets, those 3 parameters are entered
using lists. For one-dimensional datasets, integers
can also be used.
Note that, to write the whole dataset at once, one has simply
to call the method with the dataset values in parameter
'data', omitting all other parameters.
Returns::
None.
C library equivalent : SDwritedata
The dataset can also be written using the familiar indexing and
slicing notation, like ordinary python sequences.
See "High level variable access". | [
"Write",
"data",
"to",
"the",
"dataset",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L1922-L2001 |
4,518 | fhs/pyhdf | pyhdf/SD.py | SDS.info | def info(self):
"""Retrieves information about the dataset.
Args::
no argument
Returns::
5-element tuple holding:
- dataset name
- dataset rank (number of dimensions)
- dataset shape, that is a list giving the length of each
dataset dimension; if the first dimension is unlimited, then
the first value of the list gives the current length of the
unlimited dimension
- data type (one of the SDC.xxx values)
- number of attributes defined for the dataset
C library equivalent : SDgetinfo
"""
buf = _C.array_int32(_C.H4_MAX_VAR_DIMS)
status, sds_name, rank, data_type, n_attrs = \
_C.SDgetinfo(self._id, buf)
_checkErr('info', status, "cannot execute")
dim_sizes = _array_to_ret(buf, rank)
return sds_name, rank, dim_sizes, data_type, n_attrs | python | def info(self):
"""Retrieves information about the dataset.
Args::
no argument
Returns::
5-element tuple holding:
- dataset name
- dataset rank (number of dimensions)
- dataset shape, that is a list giving the length of each
dataset dimension; if the first dimension is unlimited, then
the first value of the list gives the current length of the
unlimited dimension
- data type (one of the SDC.xxx values)
- number of attributes defined for the dataset
C library equivalent : SDgetinfo
"""
buf = _C.array_int32(_C.H4_MAX_VAR_DIMS)
status, sds_name, rank, data_type, n_attrs = \
_C.SDgetinfo(self._id, buf)
_checkErr('info', status, "cannot execute")
dim_sizes = _array_to_ret(buf, rank)
return sds_name, rank, dim_sizes, data_type, n_attrs | [
"def",
"info",
"(",
"self",
")",
":",
"buf",
"=",
"_C",
".",
"array_int32",
"(",
"_C",
".",
"H4_MAX_VAR_DIMS",
")",
"status",
",",
"sds_name",
",",
"rank",
",",
"data_type",
",",
"n_attrs",
"=",
"_C",
".",
"SDgetinfo",
"(",
"self",
".",
"_id",
",",
"buf",
")",
"_checkErr",
"(",
"'info'",
",",
"status",
",",
"\"cannot execute\"",
")",
"dim_sizes",
"=",
"_array_to_ret",
"(",
"buf",
",",
"rank",
")",
"return",
"sds_name",
",",
"rank",
",",
"dim_sizes",
",",
"data_type",
",",
"n_attrs"
] | Retrieves information about the dataset.
Args::
no argument
Returns::
5-element tuple holding:
- dataset name
- dataset rank (number of dimensions)
- dataset shape, that is a list giving the length of each
dataset dimension; if the first dimension is unlimited, then
the first value of the list gives the current length of the
unlimited dimension
- data type (one of the SDC.xxx values)
- number of attributes defined for the dataset
C library equivalent : SDgetinfo | [
"Retrieves",
"information",
"about",
"the",
"dataset",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2102-L2130 |
4,519 | fhs/pyhdf | pyhdf/SD.py | SDS.checkempty | def checkempty(self):
"""Determine whether the dataset is empty.
Args::
no argument
Returns::
True(1) if dataset is empty, False(0) if not
C library equivalent : SDcheckempty
"""
status, emptySDS = _C.SDcheckempty(self._id)
_checkErr('checkempty', status, 'invalid SDS identifier')
return emptySDS | python | def checkempty(self):
"""Determine whether the dataset is empty.
Args::
no argument
Returns::
True(1) if dataset is empty, False(0) if not
C library equivalent : SDcheckempty
"""
status, emptySDS = _C.SDcheckempty(self._id)
_checkErr('checkempty', status, 'invalid SDS identifier')
return emptySDS | [
"def",
"checkempty",
"(",
"self",
")",
":",
"status",
",",
"emptySDS",
"=",
"_C",
".",
"SDcheckempty",
"(",
"self",
".",
"_id",
")",
"_checkErr",
"(",
"'checkempty'",
",",
"status",
",",
"'invalid SDS identifier'",
")",
"return",
"emptySDS"
] | Determine whether the dataset is empty.
Args::
no argument
Returns::
True(1) if dataset is empty, False(0) if not
C library equivalent : SDcheckempty | [
"Determine",
"whether",
"the",
"dataset",
"is",
"empty",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2132-L2148 |
4,520 | fhs/pyhdf | pyhdf/SD.py | SDS.ref | def ref(self):
"""Get the reference number of the dataset.
Args::
no argument
Returns::
dataset reference number
C library equivalent : SDidtoref
"""
sds_ref = _C.SDidtoref(self._id)
_checkErr('idtoref', sds_ref, 'illegal SDS identifier')
return sds_ref | python | def ref(self):
"""Get the reference number of the dataset.
Args::
no argument
Returns::
dataset reference number
C library equivalent : SDidtoref
"""
sds_ref = _C.SDidtoref(self._id)
_checkErr('idtoref', sds_ref, 'illegal SDS identifier')
return sds_ref | [
"def",
"ref",
"(",
"self",
")",
":",
"sds_ref",
"=",
"_C",
".",
"SDidtoref",
"(",
"self",
".",
"_id",
")",
"_checkErr",
"(",
"'idtoref'",
",",
"sds_ref",
",",
"'illegal SDS identifier'",
")",
"return",
"sds_ref"
] | Get the reference number of the dataset.
Args::
no argument
Returns::
dataset reference number
C library equivalent : SDidtoref | [
"Get",
"the",
"reference",
"number",
"of",
"the",
"dataset",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2150-L2166 |
4,521 | fhs/pyhdf | pyhdf/SD.py | SDS.getcal | def getcal(self):
"""Retrieve the SDS calibration coefficients.
Args::
no argument
Returns::
5-element tuple holding:
- cal: calibration factor (attribute 'scale_factor')
- cal_error : calibration factor error
(attribute 'scale_factor_err')
- offset: calibration offset (attribute 'add_offset')
- offset_err : offset error (attribute 'add_offset_err')
- data_type : type of the data resulting from applying
the calibration formula to the dataset values
(attribute 'calibrated_nt')
An exception is raised if no calibration data are defined.
Original dataset values 'orival' are converted to calibrated
values 'calval' through the formula::
calval = cal * (orival - offset)
The calibration coefficients are part of the so-called
"standard" SDS attributes. The values inside the tuple returned
by 'getcal' are those of the following attributes, in order::
scale_factor, scale_factor_err, add_offset, add_offset_err,
calibrated_nt
C library equivalent: SDgetcal()
"""
status, cal, cal_error, offset, offset_err, data_type = \
_C.SDgetcal(self._id)
_checkErr('getcal', status, 'no calibration record')
return cal, cal_error, offset, offset_err, data_type | python | def getcal(self):
"""Retrieve the SDS calibration coefficients.
Args::
no argument
Returns::
5-element tuple holding:
- cal: calibration factor (attribute 'scale_factor')
- cal_error : calibration factor error
(attribute 'scale_factor_err')
- offset: calibration offset (attribute 'add_offset')
- offset_err : offset error (attribute 'add_offset_err')
- data_type : type of the data resulting from applying
the calibration formula to the dataset values
(attribute 'calibrated_nt')
An exception is raised if no calibration data are defined.
Original dataset values 'orival' are converted to calibrated
values 'calval' through the formula::
calval = cal * (orival - offset)
The calibration coefficients are part of the so-called
"standard" SDS attributes. The values inside the tuple returned
by 'getcal' are those of the following attributes, in order::
scale_factor, scale_factor_err, add_offset, add_offset_err,
calibrated_nt
C library equivalent: SDgetcal()
"""
status, cal, cal_error, offset, offset_err, data_type = \
_C.SDgetcal(self._id)
_checkErr('getcal', status, 'no calibration record')
return cal, cal_error, offset, offset_err, data_type | [
"def",
"getcal",
"(",
"self",
")",
":",
"status",
",",
"cal",
",",
"cal_error",
",",
"offset",
",",
"offset_err",
",",
"data_type",
"=",
"_C",
".",
"SDgetcal",
"(",
"self",
".",
"_id",
")",
"_checkErr",
"(",
"'getcal'",
",",
"status",
",",
"'no calibration record'",
")",
"return",
"cal",
",",
"cal_error",
",",
"offset",
",",
"offset_err",
",",
"data_type"
] | Retrieve the SDS calibration coefficients.
Args::
no argument
Returns::
5-element tuple holding:
- cal: calibration factor (attribute 'scale_factor')
- cal_error : calibration factor error
(attribute 'scale_factor_err')
- offset: calibration offset (attribute 'add_offset')
- offset_err : offset error (attribute 'add_offset_err')
- data_type : type of the data resulting from applying
the calibration formula to the dataset values
(attribute 'calibrated_nt')
An exception is raised if no calibration data are defined.
Original dataset values 'orival' are converted to calibrated
values 'calval' through the formula::
calval = cal * (orival - offset)
The calibration coefficients are part of the so-called
"standard" SDS attributes. The values inside the tuple returned
by 'getcal' are those of the following attributes, in order::
scale_factor, scale_factor_err, add_offset, add_offset_err,
calibrated_nt
C library equivalent: SDgetcal() | [
"Retrieve",
"the",
"SDS",
"calibration",
"coefficients",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2206-L2246 |
4,522 | fhs/pyhdf | pyhdf/SD.py | SDS.getdatastrs | def getdatastrs(self):
"""Retrieve the dataset standard string attributes.
Args::
no argument
Returns::
4-element tuple holding:
- dataset label string (attribute 'long_name')
- dataset unit (attribute 'units')
- dataset output format (attribute 'format')
- dataset coordinate system (attribute 'coordsys')
The values returned by 'getdatastrs' are part of the
so-called "standard" SDS attributes. Those 4 values
correspond respectively to the following attributes::
long_name, units, format, coordsys .
C library equivalent: SDgetdatastrs
"""
status, label, unit, format, coord_system = \
_C.SDgetdatastrs(self._id, 128)
_checkErr('getdatastrs', status, 'cannot execute')
return label, unit, format, coord_system | python | def getdatastrs(self):
"""Retrieve the dataset standard string attributes.
Args::
no argument
Returns::
4-element tuple holding:
- dataset label string (attribute 'long_name')
- dataset unit (attribute 'units')
- dataset output format (attribute 'format')
- dataset coordinate system (attribute 'coordsys')
The values returned by 'getdatastrs' are part of the
so-called "standard" SDS attributes. Those 4 values
correspond respectively to the following attributes::
long_name, units, format, coordsys .
C library equivalent: SDgetdatastrs
"""
status, label, unit, format, coord_system = \
_C.SDgetdatastrs(self._id, 128)
_checkErr('getdatastrs', status, 'cannot execute')
return label, unit, format, coord_system | [
"def",
"getdatastrs",
"(",
"self",
")",
":",
"status",
",",
"label",
",",
"unit",
",",
"format",
",",
"coord_system",
"=",
"_C",
".",
"SDgetdatastrs",
"(",
"self",
".",
"_id",
",",
"128",
")",
"_checkErr",
"(",
"'getdatastrs'",
",",
"status",
",",
"'cannot execute'",
")",
"return",
"label",
",",
"unit",
",",
"format",
",",
"coord_system"
] | Retrieve the dataset standard string attributes.
Args::
no argument
Returns::
4-element tuple holding:
- dataset label string (attribute 'long_name')
- dataset unit (attribute 'units')
- dataset output format (attribute 'format')
- dataset coordinate system (attribute 'coordsys')
The values returned by 'getdatastrs' are part of the
so-called "standard" SDS attributes. Those 4 values
correspond respectively to the following attributes::
long_name, units, format, coordsys .
C library equivalent: SDgetdatastrs | [
"Retrieve",
"the",
"dataset",
"standard",
"string",
"attributes",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2248-L2276 |
4,523 | fhs/pyhdf | pyhdf/SD.py | SDS.getrange | def getrange(self):
"""Retrieve the dataset min and max values.
Args::
no argument
Returns::
(min, max) tuple (attribute 'valid_range')
Note that those are the values as stored
by the 'setrange' method. 'getrange' does *NOT* compute the
min and max from the current dataset contents.
An exception is raised if the range is not set.
The range returned by 'getrange' is part of the so-called
"standard" SDS attributes. It corresponds to the following
attribute::
valid_range
C library equivalent: SDgetrange
"""
# Obtain SDS data type.
try:
sds_name, rank, dim_sizes, data_type, n_attrs = \
self.info()
except HDF4Error:
raise HDF4Error('getrange : invalid SDS identifier')
n_values = 1
convert = _array_to_ret
if data_type == SDC.CHAR8:
buf1 = _C.array_byte(n_values)
buf2 = _C.array_byte(n_values)
convert = _array_to_str
elif data_type in [SDC.UCHAR8, SDC.UINT8]:
buf1 = _C.array_byte(n_values)
buf2 = _C.array_byte(n_values)
elif data_type == SDC.INT8:
buf1 = _C.array_int8(n_values)
buf2 = _C.array_int8(n_values)
elif data_type == SDC.INT16:
buf1 = _C.array_int16(n_values)
buf2 = _C.array_int16(n_values)
elif data_type == SDC.UINT16:
buf1 = _C.array_uint16(n_values)
buf2 = _C.array_uint16(n_values)
elif data_type == SDC.INT32:
buf1 = _C.array_int32(n_values)
buf2 = _C.array_int32(n_values)
elif data_type == SDC.UINT32:
buf1 = _C.array_uint32(n_values)
buf2 = _C.array_uint32(n_values)
elif data_type == SDC.FLOAT32:
buf1 = _C.array_float32(n_values)
buf2 = _C.array_float32(n_values)
elif data_type == SDC.FLOAT64:
buf1 = _C.array_float64(n_values)
buf2 = _C.array_float64(n_values)
else:
raise HDF4Error("getrange: SDS has an illegal or " \
"unsupported type %d" % data)
# Note: The C routine returns the max in buf1 and the min
# in buf2. We swap the values returned by the Python
# interface, since it is more natural to return
# min first, then max.
status = _C.SDgetrange(self._id, buf1, buf2)
_checkErr('getrange', status, 'range not set')
return convert(buf2, n_values), convert(buf1, n_values) | python | def getrange(self):
"""Retrieve the dataset min and max values.
Args::
no argument
Returns::
(min, max) tuple (attribute 'valid_range')
Note that those are the values as stored
by the 'setrange' method. 'getrange' does *NOT* compute the
min and max from the current dataset contents.
An exception is raised if the range is not set.
The range returned by 'getrange' is part of the so-called
"standard" SDS attributes. It corresponds to the following
attribute::
valid_range
C library equivalent: SDgetrange
"""
# Obtain SDS data type.
try:
sds_name, rank, dim_sizes, data_type, n_attrs = \
self.info()
except HDF4Error:
raise HDF4Error('getrange : invalid SDS identifier')
n_values = 1
convert = _array_to_ret
if data_type == SDC.CHAR8:
buf1 = _C.array_byte(n_values)
buf2 = _C.array_byte(n_values)
convert = _array_to_str
elif data_type in [SDC.UCHAR8, SDC.UINT8]:
buf1 = _C.array_byte(n_values)
buf2 = _C.array_byte(n_values)
elif data_type == SDC.INT8:
buf1 = _C.array_int8(n_values)
buf2 = _C.array_int8(n_values)
elif data_type == SDC.INT16:
buf1 = _C.array_int16(n_values)
buf2 = _C.array_int16(n_values)
elif data_type == SDC.UINT16:
buf1 = _C.array_uint16(n_values)
buf2 = _C.array_uint16(n_values)
elif data_type == SDC.INT32:
buf1 = _C.array_int32(n_values)
buf2 = _C.array_int32(n_values)
elif data_type == SDC.UINT32:
buf1 = _C.array_uint32(n_values)
buf2 = _C.array_uint32(n_values)
elif data_type == SDC.FLOAT32:
buf1 = _C.array_float32(n_values)
buf2 = _C.array_float32(n_values)
elif data_type == SDC.FLOAT64:
buf1 = _C.array_float64(n_values)
buf2 = _C.array_float64(n_values)
else:
raise HDF4Error("getrange: SDS has an illegal or " \
"unsupported type %d" % data)
# Note: The C routine returns the max in buf1 and the min
# in buf2. We swap the values returned by the Python
# interface, since it is more natural to return
# min first, then max.
status = _C.SDgetrange(self._id, buf1, buf2)
_checkErr('getrange', status, 'range not set')
return convert(buf2, n_values), convert(buf1, n_values) | [
"def",
"getrange",
"(",
"self",
")",
":",
"# Obtain SDS data type.",
"try",
":",
"sds_name",
",",
"rank",
",",
"dim_sizes",
",",
"data_type",
",",
"n_attrs",
"=",
"self",
".",
"info",
"(",
")",
"except",
"HDF4Error",
":",
"raise",
"HDF4Error",
"(",
"'getrange : invalid SDS identifier'",
")",
"n_values",
"=",
"1",
"convert",
"=",
"_array_to_ret",
"if",
"data_type",
"==",
"SDC",
".",
"CHAR8",
":",
"buf1",
"=",
"_C",
".",
"array_byte",
"(",
"n_values",
")",
"buf2",
"=",
"_C",
".",
"array_byte",
"(",
"n_values",
")",
"convert",
"=",
"_array_to_str",
"elif",
"data_type",
"in",
"[",
"SDC",
".",
"UCHAR8",
",",
"SDC",
".",
"UINT8",
"]",
":",
"buf1",
"=",
"_C",
".",
"array_byte",
"(",
"n_values",
")",
"buf2",
"=",
"_C",
".",
"array_byte",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"INT8",
":",
"buf1",
"=",
"_C",
".",
"array_int8",
"(",
"n_values",
")",
"buf2",
"=",
"_C",
".",
"array_int8",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"INT16",
":",
"buf1",
"=",
"_C",
".",
"array_int16",
"(",
"n_values",
")",
"buf2",
"=",
"_C",
".",
"array_int16",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"UINT16",
":",
"buf1",
"=",
"_C",
".",
"array_uint16",
"(",
"n_values",
")",
"buf2",
"=",
"_C",
".",
"array_uint16",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"INT32",
":",
"buf1",
"=",
"_C",
".",
"array_int32",
"(",
"n_values",
")",
"buf2",
"=",
"_C",
".",
"array_int32",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"UINT32",
":",
"buf1",
"=",
"_C",
".",
"array_uint32",
"(",
"n_values",
")",
"buf2",
"=",
"_C",
".",
"array_uint32",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"FLOAT32",
":",
"buf1",
"=",
"_C",
".",
"array_float32",
"(",
"n_values",
")",
"buf2",
"=",
"_C",
".",
"array_float32",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"FLOAT64",
":",
"buf1",
"=",
"_C",
".",
"array_float64",
"(",
"n_values",
")",
"buf2",
"=",
"_C",
".",
"array_float64",
"(",
"n_values",
")",
"else",
":",
"raise",
"HDF4Error",
"(",
"\"getrange: SDS has an illegal or \"",
"\"unsupported type %d\"",
"%",
"data",
")",
"# Note: The C routine returns the max in buf1 and the min",
"# in buf2. We swap the values returned by the Python",
"# interface, since it is more natural to return",
"# min first, then max.",
"status",
"=",
"_C",
".",
"SDgetrange",
"(",
"self",
".",
"_id",
",",
"buf1",
",",
"buf2",
")",
"_checkErr",
"(",
"'getrange'",
",",
"status",
",",
"'range not set'",
")",
"return",
"convert",
"(",
"buf2",
",",
"n_values",
")",
",",
"convert",
"(",
"buf1",
",",
"n_values",
")"
] | Retrieve the dataset min and max values.
Args::
no argument
Returns::
(min, max) tuple (attribute 'valid_range')
Note that those are the values as stored
by the 'setrange' method. 'getrange' does *NOT* compute the
min and max from the current dataset contents.
An exception is raised if the range is not set.
The range returned by 'getrange' is part of the so-called
"standard" SDS attributes. It corresponds to the following
attribute::
valid_range
C library equivalent: SDgetrange | [
"Retrieve",
"the",
"dataset",
"min",
"and",
"max",
"values",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2344-L2426 |
4,524 | fhs/pyhdf | pyhdf/SD.py | SDS.setcal | def setcal(self, cal, cal_error, offset, offset_err, data_type):
"""Set the dataset calibration coefficients.
Args::
cal the calibraton factor (attribute 'scale_factor')
cal_error calibration factor error
(attribute 'scale_factor_err')
offset offset value (attribute 'add_offset')
offset_err offset error (attribute 'add_offset_err')
data_type data type of the values resulting from applying the
calibration formula to the dataset values
(one of the SDC.xxx constants)
(attribute 'calibrated_nt')
Returns::
None
See method 'getcal' for the definition of the calibration
formula.
Calibration coefficients are part of the so-called standard
SDS attributes. Calling 'setcal' is equivalent to setting
the following attributes, which correspond to the method
parameters, in order::
scale_factor, scale_factor_err, add_offset, add_offset_err,
calibrated_nt
C library equivalent: SDsetcal
"""
status = _C.SDsetcal(self._id, cal, cal_error,
offset, offset_err, data_type)
_checkErr('setcal', status, 'cannot execute') | python | def setcal(self, cal, cal_error, offset, offset_err, data_type):
"""Set the dataset calibration coefficients.
Args::
cal the calibraton factor (attribute 'scale_factor')
cal_error calibration factor error
(attribute 'scale_factor_err')
offset offset value (attribute 'add_offset')
offset_err offset error (attribute 'add_offset_err')
data_type data type of the values resulting from applying the
calibration formula to the dataset values
(one of the SDC.xxx constants)
(attribute 'calibrated_nt')
Returns::
None
See method 'getcal' for the definition of the calibration
formula.
Calibration coefficients are part of the so-called standard
SDS attributes. Calling 'setcal' is equivalent to setting
the following attributes, which correspond to the method
parameters, in order::
scale_factor, scale_factor_err, add_offset, add_offset_err,
calibrated_nt
C library equivalent: SDsetcal
"""
status = _C.SDsetcal(self._id, cal, cal_error,
offset, offset_err, data_type)
_checkErr('setcal', status, 'cannot execute') | [
"def",
"setcal",
"(",
"self",
",",
"cal",
",",
"cal_error",
",",
"offset",
",",
"offset_err",
",",
"data_type",
")",
":",
"status",
"=",
"_C",
".",
"SDsetcal",
"(",
"self",
".",
"_id",
",",
"cal",
",",
"cal_error",
",",
"offset",
",",
"offset_err",
",",
"data_type",
")",
"_checkErr",
"(",
"'setcal'",
",",
"status",
",",
"'cannot execute'",
")"
] | Set the dataset calibration coefficients.
Args::
cal the calibraton factor (attribute 'scale_factor')
cal_error calibration factor error
(attribute 'scale_factor_err')
offset offset value (attribute 'add_offset')
offset_err offset error (attribute 'add_offset_err')
data_type data type of the values resulting from applying the
calibration formula to the dataset values
(one of the SDC.xxx constants)
(attribute 'calibrated_nt')
Returns::
None
See method 'getcal' for the definition of the calibration
formula.
Calibration coefficients are part of the so-called standard
SDS attributes. Calling 'setcal' is equivalent to setting
the following attributes, which correspond to the method
parameters, in order::
scale_factor, scale_factor_err, add_offset, add_offset_err,
calibrated_nt
C library equivalent: SDsetcal | [
"Set",
"the",
"dataset",
"calibration",
"coefficients",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2428-L2463 |
4,525 | fhs/pyhdf | pyhdf/SD.py | SDS.setdatastrs | def setdatastrs(self, label, unit, format, coord_sys):
"""Set the dataset standard string type attributes.
Args::
label dataset label (attribute 'long_name')
unit dataset unit (attribute 'units')
format dataset format (attribute 'format')
coord_sys dataset coordinate system (attribute 'coordsys')
Returns::
None
Those strings are part of the so-called standard
SDS attributes. Calling 'setdatastrs' is equivalent to setting
the following attributes, which correspond to the method
parameters, in order::
long_name, units, format, coordsys
C library equivalent: SDsetdatastrs
"""
status = _C.SDsetdatastrs(self._id, label, unit, format, coord_sys)
_checkErr('setdatastrs', status, 'cannot execute') | python | def setdatastrs(self, label, unit, format, coord_sys):
"""Set the dataset standard string type attributes.
Args::
label dataset label (attribute 'long_name')
unit dataset unit (attribute 'units')
format dataset format (attribute 'format')
coord_sys dataset coordinate system (attribute 'coordsys')
Returns::
None
Those strings are part of the so-called standard
SDS attributes. Calling 'setdatastrs' is equivalent to setting
the following attributes, which correspond to the method
parameters, in order::
long_name, units, format, coordsys
C library equivalent: SDsetdatastrs
"""
status = _C.SDsetdatastrs(self._id, label, unit, format, coord_sys)
_checkErr('setdatastrs', status, 'cannot execute') | [
"def",
"setdatastrs",
"(",
"self",
",",
"label",
",",
"unit",
",",
"format",
",",
"coord_sys",
")",
":",
"status",
"=",
"_C",
".",
"SDsetdatastrs",
"(",
"self",
".",
"_id",
",",
"label",
",",
"unit",
",",
"format",
",",
"coord_sys",
")",
"_checkErr",
"(",
"'setdatastrs'",
",",
"status",
",",
"'cannot execute'",
")"
] | Set the dataset standard string type attributes.
Args::
label dataset label (attribute 'long_name')
unit dataset unit (attribute 'units')
format dataset format (attribute 'format')
coord_sys dataset coordinate system (attribute 'coordsys')
Returns::
None
Those strings are part of the so-called standard
SDS attributes. Calling 'setdatastrs' is equivalent to setting
the following attributes, which correspond to the method
parameters, in order::
long_name, units, format, coordsys
C library equivalent: SDsetdatastrs | [
"Set",
"the",
"dataset",
"standard",
"string",
"type",
"attributes",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2465-L2490 |
4,526 | fhs/pyhdf | pyhdf/SD.py | SDS.setfillvalue | def setfillvalue(self, fill_val):
"""Set the dataset fill value.
Args::
fill_val dataset fill value (attribute '_FillValue')
Returns::
None
The fill value is part of the so-called "standard" SDS
attributes. Calling 'setfillvalue' is equivalent to setting
the following attribute::
_FillValue
C library equivalent: SDsetfillvalue
"""
# Obtain SDS data type.
try:
sds_name, rank, dim_sizes, data_type, n_attrs = self.info()
except HDF4Error:
raise HDF4Error('setfillvalue : cannot execute')
n_values = 1 # Fill value stands for 1 value.
if data_type == SDC.CHAR8:
buf = _C.array_byte(n_values)
elif data_type in [SDC.UCHAR8, SDC.UINT8]:
buf = _C.array_byte(n_values)
elif data_type == SDC.INT8:
buf = _C.array_int8(n_values)
elif data_type == SDC.INT16:
buf = _C.array_int16(n_values)
elif data_type == SDC.UINT16:
buf = _C.array_uint16(n_values)
elif data_type == SDC.INT32:
buf = _C.array_int32(n_values)
elif data_type == SDC.UINT32:
buf = _C.array_uint32(n_values)
elif data_type == SDC.FLOAT32:
buf = _C.array_float32(n_values)
elif data_type == SDC.FLOAT64:
buf = _C.array_float64(n_values)
else:
raise HDF4Error("setfillvalue: SDS has an illegal or " \
"unsupported type %d" % data_type)
buf[0] = fill_val
status = _C.SDsetfillvalue(self._id, buf)
_checkErr('setfillvalue', status, 'cannot execute') | python | def setfillvalue(self, fill_val):
"""Set the dataset fill value.
Args::
fill_val dataset fill value (attribute '_FillValue')
Returns::
None
The fill value is part of the so-called "standard" SDS
attributes. Calling 'setfillvalue' is equivalent to setting
the following attribute::
_FillValue
C library equivalent: SDsetfillvalue
"""
# Obtain SDS data type.
try:
sds_name, rank, dim_sizes, data_type, n_attrs = self.info()
except HDF4Error:
raise HDF4Error('setfillvalue : cannot execute')
n_values = 1 # Fill value stands for 1 value.
if data_type == SDC.CHAR8:
buf = _C.array_byte(n_values)
elif data_type in [SDC.UCHAR8, SDC.UINT8]:
buf = _C.array_byte(n_values)
elif data_type == SDC.INT8:
buf = _C.array_int8(n_values)
elif data_type == SDC.INT16:
buf = _C.array_int16(n_values)
elif data_type == SDC.UINT16:
buf = _C.array_uint16(n_values)
elif data_type == SDC.INT32:
buf = _C.array_int32(n_values)
elif data_type == SDC.UINT32:
buf = _C.array_uint32(n_values)
elif data_type == SDC.FLOAT32:
buf = _C.array_float32(n_values)
elif data_type == SDC.FLOAT64:
buf = _C.array_float64(n_values)
else:
raise HDF4Error("setfillvalue: SDS has an illegal or " \
"unsupported type %d" % data_type)
buf[0] = fill_val
status = _C.SDsetfillvalue(self._id, buf)
_checkErr('setfillvalue', status, 'cannot execute') | [
"def",
"setfillvalue",
"(",
"self",
",",
"fill_val",
")",
":",
"# Obtain SDS data type.",
"try",
":",
"sds_name",
",",
"rank",
",",
"dim_sizes",
",",
"data_type",
",",
"n_attrs",
"=",
"self",
".",
"info",
"(",
")",
"except",
"HDF4Error",
":",
"raise",
"HDF4Error",
"(",
"'setfillvalue : cannot execute'",
")",
"n_values",
"=",
"1",
"# Fill value stands for 1 value.",
"if",
"data_type",
"==",
"SDC",
".",
"CHAR8",
":",
"buf",
"=",
"_C",
".",
"array_byte",
"(",
"n_values",
")",
"elif",
"data_type",
"in",
"[",
"SDC",
".",
"UCHAR8",
",",
"SDC",
".",
"UINT8",
"]",
":",
"buf",
"=",
"_C",
".",
"array_byte",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"INT8",
":",
"buf",
"=",
"_C",
".",
"array_int8",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"INT16",
":",
"buf",
"=",
"_C",
".",
"array_int16",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"UINT16",
":",
"buf",
"=",
"_C",
".",
"array_uint16",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"INT32",
":",
"buf",
"=",
"_C",
".",
"array_int32",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"UINT32",
":",
"buf",
"=",
"_C",
".",
"array_uint32",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"FLOAT32",
":",
"buf",
"=",
"_C",
".",
"array_float32",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"FLOAT64",
":",
"buf",
"=",
"_C",
".",
"array_float64",
"(",
"n_values",
")",
"else",
":",
"raise",
"HDF4Error",
"(",
"\"setfillvalue: SDS has an illegal or \"",
"\"unsupported type %d\"",
"%",
"data_type",
")",
"buf",
"[",
"0",
"]",
"=",
"fill_val",
"status",
"=",
"_C",
".",
"SDsetfillvalue",
"(",
"self",
".",
"_id",
",",
"buf",
")",
"_checkErr",
"(",
"'setfillvalue'",
",",
"status",
",",
"'cannot execute'",
")"
] | Set the dataset fill value.
Args::
fill_val dataset fill value (attribute '_FillValue')
Returns::
None
The fill value is part of the so-called "standard" SDS
attributes. Calling 'setfillvalue' is equivalent to setting
the following attribute::
_FillValue
C library equivalent: SDsetfillvalue | [
"Set",
"the",
"dataset",
"fill",
"value",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2492-L2552 |
4,527 | fhs/pyhdf | pyhdf/SD.py | SDS.setrange | def setrange(self, min, max):
"""Set the dataset min and max values.
Args::
min dataset minimum value (attribute 'valid_range')
max dataset maximum value (attribute 'valid_range')
Returns::
None
The data range is part of the so-called "standard" SDS
attributes. Calling method 'setrange' is equivalent to
setting the following attribute with a 2-element [min,max]
array::
valid_range
C library equivalent: SDsetrange
"""
# Obtain SDS data type.
try:
sds_name, rank, dim_sizes, data_type, n_attrs = self.info()
except HDF4Error:
raise HDF4Error('setrange : cannot execute')
n_values = 1
if data_type == SDC.CHAR8:
buf1 = _C.array_byte(n_values)
buf2 = _C.array_byte(n_values)
elif data_type in [SDC.UCHAR8, SDC.UINT8]:
buf1 = _C.array_byte(n_values)
buf2 = _C.array_byte(n_values)
elif data_type == SDC.INT8:
buf1 = _C.array_int8(n_values)
buf2 = _C.array_int8(n_values)
elif data_type == SDC.INT16:
buf1 = _C.array_int16(n_values)
buf2 = _C.array_int16(n_values)
elif data_type == SDC.UINT16:
buf1 = _C.array_uint16(n_values)
buf2 = _C.array_uint16(n_values)
elif data_type == SDC.INT32:
buf1 = _C.array_int32(n_values)
buf2 = _C.array_int32(n_values)
elif data_type == SDC.UINT32:
buf1 = _C.array_uint32(n_values)
buf2 = _C.array_uint32(n_values)
elif data_type == SDC.FLOAT32:
buf1 = _C.array_float32(n_values)
buf2 = _C.array_float32(n_values)
elif data_type == SDC.FLOAT64:
buf1 = _C.array_float64(n_values)
buf2 = _C.array_float64(n_values)
else:
raise HDF4Error("SDsetrange: SDS has an illegal or " \
"unsupported type %d" % data_type)
buf1[0] = max
buf2[0] = min
status = _C.SDsetrange(self._id, buf1, buf2)
_checkErr('setrange', status, 'cannot execute') | python | def setrange(self, min, max):
"""Set the dataset min and max values.
Args::
min dataset minimum value (attribute 'valid_range')
max dataset maximum value (attribute 'valid_range')
Returns::
None
The data range is part of the so-called "standard" SDS
attributes. Calling method 'setrange' is equivalent to
setting the following attribute with a 2-element [min,max]
array::
valid_range
C library equivalent: SDsetrange
"""
# Obtain SDS data type.
try:
sds_name, rank, dim_sizes, data_type, n_attrs = self.info()
except HDF4Error:
raise HDF4Error('setrange : cannot execute')
n_values = 1
if data_type == SDC.CHAR8:
buf1 = _C.array_byte(n_values)
buf2 = _C.array_byte(n_values)
elif data_type in [SDC.UCHAR8, SDC.UINT8]:
buf1 = _C.array_byte(n_values)
buf2 = _C.array_byte(n_values)
elif data_type == SDC.INT8:
buf1 = _C.array_int8(n_values)
buf2 = _C.array_int8(n_values)
elif data_type == SDC.INT16:
buf1 = _C.array_int16(n_values)
buf2 = _C.array_int16(n_values)
elif data_type == SDC.UINT16:
buf1 = _C.array_uint16(n_values)
buf2 = _C.array_uint16(n_values)
elif data_type == SDC.INT32:
buf1 = _C.array_int32(n_values)
buf2 = _C.array_int32(n_values)
elif data_type == SDC.UINT32:
buf1 = _C.array_uint32(n_values)
buf2 = _C.array_uint32(n_values)
elif data_type == SDC.FLOAT32:
buf1 = _C.array_float32(n_values)
buf2 = _C.array_float32(n_values)
elif data_type == SDC.FLOAT64:
buf1 = _C.array_float64(n_values)
buf2 = _C.array_float64(n_values)
else:
raise HDF4Error("SDsetrange: SDS has an illegal or " \
"unsupported type %d" % data_type)
buf1[0] = max
buf2[0] = min
status = _C.SDsetrange(self._id, buf1, buf2)
_checkErr('setrange', status, 'cannot execute') | [
"def",
"setrange",
"(",
"self",
",",
"min",
",",
"max",
")",
":",
"# Obtain SDS data type.",
"try",
":",
"sds_name",
",",
"rank",
",",
"dim_sizes",
",",
"data_type",
",",
"n_attrs",
"=",
"self",
".",
"info",
"(",
")",
"except",
"HDF4Error",
":",
"raise",
"HDF4Error",
"(",
"'setrange : cannot execute'",
")",
"n_values",
"=",
"1",
"if",
"data_type",
"==",
"SDC",
".",
"CHAR8",
":",
"buf1",
"=",
"_C",
".",
"array_byte",
"(",
"n_values",
")",
"buf2",
"=",
"_C",
".",
"array_byte",
"(",
"n_values",
")",
"elif",
"data_type",
"in",
"[",
"SDC",
".",
"UCHAR8",
",",
"SDC",
".",
"UINT8",
"]",
":",
"buf1",
"=",
"_C",
".",
"array_byte",
"(",
"n_values",
")",
"buf2",
"=",
"_C",
".",
"array_byte",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"INT8",
":",
"buf1",
"=",
"_C",
".",
"array_int8",
"(",
"n_values",
")",
"buf2",
"=",
"_C",
".",
"array_int8",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"INT16",
":",
"buf1",
"=",
"_C",
".",
"array_int16",
"(",
"n_values",
")",
"buf2",
"=",
"_C",
".",
"array_int16",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"UINT16",
":",
"buf1",
"=",
"_C",
".",
"array_uint16",
"(",
"n_values",
")",
"buf2",
"=",
"_C",
".",
"array_uint16",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"INT32",
":",
"buf1",
"=",
"_C",
".",
"array_int32",
"(",
"n_values",
")",
"buf2",
"=",
"_C",
".",
"array_int32",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"UINT32",
":",
"buf1",
"=",
"_C",
".",
"array_uint32",
"(",
"n_values",
")",
"buf2",
"=",
"_C",
".",
"array_uint32",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"FLOAT32",
":",
"buf1",
"=",
"_C",
".",
"array_float32",
"(",
"n_values",
")",
"buf2",
"=",
"_C",
".",
"array_float32",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"FLOAT64",
":",
"buf1",
"=",
"_C",
".",
"array_float64",
"(",
"n_values",
")",
"buf2",
"=",
"_C",
".",
"array_float64",
"(",
"n_values",
")",
"else",
":",
"raise",
"HDF4Error",
"(",
"\"SDsetrange: SDS has an illegal or \"",
"\"unsupported type %d\"",
"%",
"data_type",
")",
"buf1",
"[",
"0",
"]",
"=",
"max",
"buf2",
"[",
"0",
"]",
"=",
"min",
"status",
"=",
"_C",
".",
"SDsetrange",
"(",
"self",
".",
"_id",
",",
"buf1",
",",
"buf2",
")",
"_checkErr",
"(",
"'setrange'",
",",
"status",
",",
"'cannot execute'",
")"
] | Set the dataset min and max values.
Args::
min dataset minimum value (attribute 'valid_range')
max dataset maximum value (attribute 'valid_range')
Returns::
None
The data range is part of the so-called "standard" SDS
attributes. Calling method 'setrange' is equivalent to
setting the following attribute with a 2-element [min,max]
array::
valid_range
C library equivalent: SDsetrange | [
"Set",
"the",
"dataset",
"min",
"and",
"max",
"values",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2555-L2629 |
4,528 | fhs/pyhdf | pyhdf/SD.py | SDS.getcompress | def getcompress(self):
"""Retrieves info about dataset compression type and mode.
Args::
no argument
Returns::
tuple holding:
- compression type (one of the SDC.COMP_xxx constants)
- optional values, depending on the compression type
COMP_NONE 0 value no additional value
COMP_SKPHUFF 1 value : skip size
COMP_DEFLATE 1 value : gzip compression level (1 to 9)
COMP_SZIP 5 values : options mask,
pixels per block (2 to 32)
pixels per scanline,
bits per pixel (number of bits in the SDS datatype)
pixels (number of elements in the SDS)
Note: in the context of an SDS, the word "pixel"
should really be understood as meaning "data element",
eg a cell value inside a multidimensional grid.
Test the options mask against constants SDC.COMP_SZIP_NN
and SDC.COMP_SZIP_EC, eg :
if optionMask & SDC.COMP_SZIP_EC:
print "EC encoding scheme used"
An exception is raised if dataset is not compressed.
.. note::
Starting with v0.8, an exception is always raised if
pyhdf was installed with the NOCOMPRESS macro set.
C library equivalent: SDgetcompress
"""
status, comp_type, value, v2, v3, v4, v5 = _C._SDgetcompress(self._id)
_checkErr('getcompress', status, 'no compression')
if comp_type == SDC.COMP_NONE:
return (comp_type,)
elif comp_type == SDC.COMP_SZIP:
return comp_type, value, v2, v3, v4, v5
else:
return comp_type, value | python | def getcompress(self):
"""Retrieves info about dataset compression type and mode.
Args::
no argument
Returns::
tuple holding:
- compression type (one of the SDC.COMP_xxx constants)
- optional values, depending on the compression type
COMP_NONE 0 value no additional value
COMP_SKPHUFF 1 value : skip size
COMP_DEFLATE 1 value : gzip compression level (1 to 9)
COMP_SZIP 5 values : options mask,
pixels per block (2 to 32)
pixels per scanline,
bits per pixel (number of bits in the SDS datatype)
pixels (number of elements in the SDS)
Note: in the context of an SDS, the word "pixel"
should really be understood as meaning "data element",
eg a cell value inside a multidimensional grid.
Test the options mask against constants SDC.COMP_SZIP_NN
and SDC.COMP_SZIP_EC, eg :
if optionMask & SDC.COMP_SZIP_EC:
print "EC encoding scheme used"
An exception is raised if dataset is not compressed.
.. note::
Starting with v0.8, an exception is always raised if
pyhdf was installed with the NOCOMPRESS macro set.
C library equivalent: SDgetcompress
"""
status, comp_type, value, v2, v3, v4, v5 = _C._SDgetcompress(self._id)
_checkErr('getcompress', status, 'no compression')
if comp_type == SDC.COMP_NONE:
return (comp_type,)
elif comp_type == SDC.COMP_SZIP:
return comp_type, value, v2, v3, v4, v5
else:
return comp_type, value | [
"def",
"getcompress",
"(",
"self",
")",
":",
"status",
",",
"comp_type",
",",
"value",
",",
"v2",
",",
"v3",
",",
"v4",
",",
"v5",
"=",
"_C",
".",
"_SDgetcompress",
"(",
"self",
".",
"_id",
")",
"_checkErr",
"(",
"'getcompress'",
",",
"status",
",",
"'no compression'",
")",
"if",
"comp_type",
"==",
"SDC",
".",
"COMP_NONE",
":",
"return",
"(",
"comp_type",
",",
")",
"elif",
"comp_type",
"==",
"SDC",
".",
"COMP_SZIP",
":",
"return",
"comp_type",
",",
"value",
",",
"v2",
",",
"v3",
",",
"v4",
",",
"v5",
"else",
":",
"return",
"comp_type",
",",
"value"
] | Retrieves info about dataset compression type and mode.
Args::
no argument
Returns::
tuple holding:
- compression type (one of the SDC.COMP_xxx constants)
- optional values, depending on the compression type
COMP_NONE 0 value no additional value
COMP_SKPHUFF 1 value : skip size
COMP_DEFLATE 1 value : gzip compression level (1 to 9)
COMP_SZIP 5 values : options mask,
pixels per block (2 to 32)
pixels per scanline,
bits per pixel (number of bits in the SDS datatype)
pixels (number of elements in the SDS)
Note: in the context of an SDS, the word "pixel"
should really be understood as meaning "data element",
eg a cell value inside a multidimensional grid.
Test the options mask against constants SDC.COMP_SZIP_NN
and SDC.COMP_SZIP_EC, eg :
if optionMask & SDC.COMP_SZIP_EC:
print "EC encoding scheme used"
An exception is raised if dataset is not compressed.
.. note::
Starting with v0.8, an exception is always raised if
pyhdf was installed with the NOCOMPRESS macro set.
C library equivalent: SDgetcompress | [
"Retrieves",
"info",
"about",
"dataset",
"compression",
"type",
"and",
"mode",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2631-L2677 |
4,529 | fhs/pyhdf | pyhdf/SD.py | SDS.setcompress | def setcompress(self, comp_type, value=0, v2=0):
"""Compresses the dataset using a specified compression method.
Args::
comp_type compression type, identified by one of the
SDC.COMP_xxx constants
value,v2 auxiliary value(s) needed by some compression types
SDC.COMP_SKPHUFF Skipping-Huffman; compression value=data size in bytes, v2 is ignored
SDC.COMP_DEFLATE Gzip compression; value=deflate level (1 to 9), v2 is ignored
SDC.COMP_SZIP Szip compression; value=encoding scheme (SDC.COMP_SZIP_EC or
SDC.COMP_SZIP_NN), v2=pixels per block (2 to 32)
Returns::
None
.. note::
Starting with v0.8, an exception is always raised if
pyhdf was installed with the NOCOMPRESS macro set.
SDC.COMP_DEFLATE applies the GZIP compression to the dataset,
and the value varies from 1 to 9, according to the level of
compression desired.
SDC.COMP_SZIP compresses the dataset using the SZIP algorithm. See the HDF User's Guide
for details about the encoding scheme and the number of pixels per block. SZIP is new
with HDF 4.2.
'setcompress' must be called before writing to the dataset.
The dataset must be written all at once, unless it is
appendable (has an unlimited dimension). Updating the dataset
in not allowed. Refer to the HDF user's guide for more details
on how to use data compression.
C library equivalent: SDsetcompress
"""
status = _C._SDsetcompress(self._id, comp_type, value, v2)
_checkErr('setcompress', status, 'cannot execute') | python | def setcompress(self, comp_type, value=0, v2=0):
"""Compresses the dataset using a specified compression method.
Args::
comp_type compression type, identified by one of the
SDC.COMP_xxx constants
value,v2 auxiliary value(s) needed by some compression types
SDC.COMP_SKPHUFF Skipping-Huffman; compression value=data size in bytes, v2 is ignored
SDC.COMP_DEFLATE Gzip compression; value=deflate level (1 to 9), v2 is ignored
SDC.COMP_SZIP Szip compression; value=encoding scheme (SDC.COMP_SZIP_EC or
SDC.COMP_SZIP_NN), v2=pixels per block (2 to 32)
Returns::
None
.. note::
Starting with v0.8, an exception is always raised if
pyhdf was installed with the NOCOMPRESS macro set.
SDC.COMP_DEFLATE applies the GZIP compression to the dataset,
and the value varies from 1 to 9, according to the level of
compression desired.
SDC.COMP_SZIP compresses the dataset using the SZIP algorithm. See the HDF User's Guide
for details about the encoding scheme and the number of pixels per block. SZIP is new
with HDF 4.2.
'setcompress' must be called before writing to the dataset.
The dataset must be written all at once, unless it is
appendable (has an unlimited dimension). Updating the dataset
in not allowed. Refer to the HDF user's guide for more details
on how to use data compression.
C library equivalent: SDsetcompress
"""
status = _C._SDsetcompress(self._id, comp_type, value, v2)
_checkErr('setcompress', status, 'cannot execute') | [
"def",
"setcompress",
"(",
"self",
",",
"comp_type",
",",
"value",
"=",
"0",
",",
"v2",
"=",
"0",
")",
":",
"status",
"=",
"_C",
".",
"_SDsetcompress",
"(",
"self",
".",
"_id",
",",
"comp_type",
",",
"value",
",",
"v2",
")",
"_checkErr",
"(",
"'setcompress'",
",",
"status",
",",
"'cannot execute'",
")"
] | Compresses the dataset using a specified compression method.
Args::
comp_type compression type, identified by one of the
SDC.COMP_xxx constants
value,v2 auxiliary value(s) needed by some compression types
SDC.COMP_SKPHUFF Skipping-Huffman; compression value=data size in bytes, v2 is ignored
SDC.COMP_DEFLATE Gzip compression; value=deflate level (1 to 9), v2 is ignored
SDC.COMP_SZIP Szip compression; value=encoding scheme (SDC.COMP_SZIP_EC or
SDC.COMP_SZIP_NN), v2=pixels per block (2 to 32)
Returns::
None
.. note::
Starting with v0.8, an exception is always raised if
pyhdf was installed with the NOCOMPRESS macro set.
SDC.COMP_DEFLATE applies the GZIP compression to the dataset,
and the value varies from 1 to 9, according to the level of
compression desired.
SDC.COMP_SZIP compresses the dataset using the SZIP algorithm. See the HDF User's Guide
for details about the encoding scheme and the number of pixels per block. SZIP is new
with HDF 4.2.
'setcompress' must be called before writing to the dataset.
The dataset must be written all at once, unless it is
appendable (has an unlimited dimension). Updating the dataset
in not allowed. Refer to the HDF user's guide for more details
on how to use data compression.
C library equivalent: SDsetcompress | [
"Compresses",
"the",
"dataset",
"using",
"a",
"specified",
"compression",
"method",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2679-L2718 |
4,530 | fhs/pyhdf | pyhdf/SD.py | SDS.setexternalfile | def setexternalfile(self, filename, offset=0):
"""Store the dataset data in an external file.
Args::
filename external file name
offset offset in bytes where to start writing in
the external file
Returns::
None
C library equivalent : SDsetexternalfile
"""
status = _C.SDsetexternalfile(self._id, filename, offset)
_checkErr('setexternalfile', status, 'execution error') | python | def setexternalfile(self, filename, offset=0):
"""Store the dataset data in an external file.
Args::
filename external file name
offset offset in bytes where to start writing in
the external file
Returns::
None
C library equivalent : SDsetexternalfile
"""
status = _C.SDsetexternalfile(self._id, filename, offset)
_checkErr('setexternalfile', status, 'execution error') | [
"def",
"setexternalfile",
"(",
"self",
",",
"filename",
",",
"offset",
"=",
"0",
")",
":",
"status",
"=",
"_C",
".",
"SDsetexternalfile",
"(",
"self",
".",
"_id",
",",
"filename",
",",
"offset",
")",
"_checkErr",
"(",
"'setexternalfile'",
",",
"status",
",",
"'execution error'",
")"
] | Store the dataset data in an external file.
Args::
filename external file name
offset offset in bytes where to start writing in
the external file
Returns::
None
C library equivalent : SDsetexternalfile | [
"Store",
"the",
"dataset",
"data",
"in",
"an",
"external",
"file",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2721-L2738 |
4,531 | fhs/pyhdf | pyhdf/SD.py | SDS.dimensions | def dimensions(self, full=0):
"""Return a dictionnary describing every dataset dimension.
Args::
full true to get complete info about each dimension
false to report only each dimension length
Returns::
Dictionnary where each key is a dimension name. If no name
has been given to the dimension, the key is set to
'fakeDimx' where 'x' is the dimension index number.
If parameter 'full' is false, key value is the dimension
length. If 'full' is true, key value is a 5-element tuple
with the following elements:
- dimension length; for an unlimited dimension, the reported
length is the current dimension length
- dimension index number
- 1 if the dimension is unlimited, 0 otherwise
- dimension scale type, or 0 if no scale is defined for
the dimension
- number of attributes defined on the dimension
C library equivalent : no equivalent
"""
# Get the number of dimensions and their lengths.
nDims, dimLen = self.info()[1:3]
if isinstance(dimLen, int): # need a sequence
dimLen = [dimLen]
# Check if the dataset is appendable.
unlim = self.isrecord()
# Inquire each dimension
res = {}
for n in range(nDims):
d = self.dim(n)
# The length reported by info() is 0 for an unlimited dimension.
# Rather use the lengths reported by SDS.info()
name, k, scaleType, nAtt = d.info()
length = dimLen[n]
if full:
res[name] = (length, n, unlim and n == 0,
scaleType, nAtt)
else:
res[name] = length
return res | python | def dimensions(self, full=0):
"""Return a dictionnary describing every dataset dimension.
Args::
full true to get complete info about each dimension
false to report only each dimension length
Returns::
Dictionnary where each key is a dimension name. If no name
has been given to the dimension, the key is set to
'fakeDimx' where 'x' is the dimension index number.
If parameter 'full' is false, key value is the dimension
length. If 'full' is true, key value is a 5-element tuple
with the following elements:
- dimension length; for an unlimited dimension, the reported
length is the current dimension length
- dimension index number
- 1 if the dimension is unlimited, 0 otherwise
- dimension scale type, or 0 if no scale is defined for
the dimension
- number of attributes defined on the dimension
C library equivalent : no equivalent
"""
# Get the number of dimensions and their lengths.
nDims, dimLen = self.info()[1:3]
if isinstance(dimLen, int): # need a sequence
dimLen = [dimLen]
# Check if the dataset is appendable.
unlim = self.isrecord()
# Inquire each dimension
res = {}
for n in range(nDims):
d = self.dim(n)
# The length reported by info() is 0 for an unlimited dimension.
# Rather use the lengths reported by SDS.info()
name, k, scaleType, nAtt = d.info()
length = dimLen[n]
if full:
res[name] = (length, n, unlim and n == 0,
scaleType, nAtt)
else:
res[name] = length
return res | [
"def",
"dimensions",
"(",
"self",
",",
"full",
"=",
"0",
")",
":",
"# Get the number of dimensions and their lengths.",
"nDims",
",",
"dimLen",
"=",
"self",
".",
"info",
"(",
")",
"[",
"1",
":",
"3",
"]",
"if",
"isinstance",
"(",
"dimLen",
",",
"int",
")",
":",
"# need a sequence",
"dimLen",
"=",
"[",
"dimLen",
"]",
"# Check if the dataset is appendable.",
"unlim",
"=",
"self",
".",
"isrecord",
"(",
")",
"# Inquire each dimension",
"res",
"=",
"{",
"}",
"for",
"n",
"in",
"range",
"(",
"nDims",
")",
":",
"d",
"=",
"self",
".",
"dim",
"(",
"n",
")",
"# The length reported by info() is 0 for an unlimited dimension.",
"# Rather use the lengths reported by SDS.info()",
"name",
",",
"k",
",",
"scaleType",
",",
"nAtt",
"=",
"d",
".",
"info",
"(",
")",
"length",
"=",
"dimLen",
"[",
"n",
"]",
"if",
"full",
":",
"res",
"[",
"name",
"]",
"=",
"(",
"length",
",",
"n",
",",
"unlim",
"and",
"n",
"==",
"0",
",",
"scaleType",
",",
"nAtt",
")",
"else",
":",
"res",
"[",
"name",
"]",
"=",
"length",
"return",
"res"
] | Return a dictionnary describing every dataset dimension.
Args::
full true to get complete info about each dimension
false to report only each dimension length
Returns::
Dictionnary where each key is a dimension name. If no name
has been given to the dimension, the key is set to
'fakeDimx' where 'x' is the dimension index number.
If parameter 'full' is false, key value is the dimension
length. If 'full' is true, key value is a 5-element tuple
with the following elements:
- dimension length; for an unlimited dimension, the reported
length is the current dimension length
- dimension index number
- 1 if the dimension is unlimited, 0 otherwise
- dimension scale type, or 0 if no scale is defined for
the dimension
- number of attributes defined on the dimension
C library equivalent : no equivalent | [
"Return",
"a",
"dictionnary",
"describing",
"every",
"dataset",
"dimension",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2800-L2849 |
4,532 | fhs/pyhdf | pyhdf/SD.py | SDim.info | def info(self):
"""Return info about the dimension instance.
Args::
no argument
Returns::
4-element tuple holding:
- dimension name; 'fakeDimx' is returned if the dimension
has not been named yet, where 'x' is the dimension
index number
- dimension length; 0 is returned if the dimension is unlimited;
call the SDim.length() or SDS.info() methods to obtain the
current dimension length
- scale data type (one of the SDC.xxx constants); 0 is
returned if no scale has been set on the dimension
- number of attributes attached to the dimension
C library equivalent : SDdiminfo
"""
status, dim_name, dim_size, data_type, n_attrs = \
_C.SDdiminfo(self._id)
_checkErr('info', status, 'cannot execute')
return dim_name, dim_size, data_type, n_attrs | python | def info(self):
"""Return info about the dimension instance.
Args::
no argument
Returns::
4-element tuple holding:
- dimension name; 'fakeDimx' is returned if the dimension
has not been named yet, where 'x' is the dimension
index number
- dimension length; 0 is returned if the dimension is unlimited;
call the SDim.length() or SDS.info() methods to obtain the
current dimension length
- scale data type (one of the SDC.xxx constants); 0 is
returned if no scale has been set on the dimension
- number of attributes attached to the dimension
C library equivalent : SDdiminfo
"""
status, dim_name, dim_size, data_type, n_attrs = \
_C.SDdiminfo(self._id)
_checkErr('info', status, 'cannot execute')
return dim_name, dim_size, data_type, n_attrs | [
"def",
"info",
"(",
"self",
")",
":",
"status",
",",
"dim_name",
",",
"dim_size",
",",
"data_type",
",",
"n_attrs",
"=",
"_C",
".",
"SDdiminfo",
"(",
"self",
".",
"_id",
")",
"_checkErr",
"(",
"'info'",
",",
"status",
",",
"'cannot execute'",
")",
"return",
"dim_name",
",",
"dim_size",
",",
"data_type",
",",
"n_attrs"
] | Return info about the dimension instance.
Args::
no argument
Returns::
4-element tuple holding:
- dimension name; 'fakeDimx' is returned if the dimension
has not been named yet, where 'x' is the dimension
index number
- dimension length; 0 is returned if the dimension is unlimited;
call the SDim.length() or SDS.info() methods to obtain the
current dimension length
- scale data type (one of the SDC.xxx constants); 0 is
returned if no scale has been set on the dimension
- number of attributes attached to the dimension
C library equivalent : SDdiminfo | [
"Return",
"info",
"about",
"the",
"dimension",
"instance",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2892-L2918 |
4,533 | fhs/pyhdf | pyhdf/SD.py | SDim.setname | def setname(self, dim_name):
"""Set the dimension name.
Args::
dim_name dimension name; setting 2 dimensions to the same
name make the dimensions "shared"; in order to be
shared, the dimesions must be deined similarly.
Returns::
None
C library equivalent : SDsetdimname
"""
status = _C.SDsetdimname(self._id, dim_name)
_checkErr('setname', status, 'cannot execute') | python | def setname(self, dim_name):
"""Set the dimension name.
Args::
dim_name dimension name; setting 2 dimensions to the same
name make the dimensions "shared"; in order to be
shared, the dimesions must be deined similarly.
Returns::
None
C library equivalent : SDsetdimname
"""
status = _C.SDsetdimname(self._id, dim_name)
_checkErr('setname', status, 'cannot execute') | [
"def",
"setname",
"(",
"self",
",",
"dim_name",
")",
":",
"status",
"=",
"_C",
".",
"SDsetdimname",
"(",
"self",
".",
"_id",
",",
"dim_name",
")",
"_checkErr",
"(",
"'setname'",
",",
"status",
",",
"'cannot execute'",
")"
] | Set the dimension name.
Args::
dim_name dimension name; setting 2 dimensions to the same
name make the dimensions "shared"; in order to be
shared, the dimesions must be deined similarly.
Returns::
None
C library equivalent : SDsetdimname | [
"Set",
"the",
"dimension",
"name",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2939-L2956 |
4,534 | fhs/pyhdf | pyhdf/SD.py | SDim.getscale | def getscale(self):
"""Obtain the scale values along a dimension.
Args::
no argument
Returns::
list with the scale values; the list length is equal to the
dimension length; the element type is equal to the dimension
data type, as set when the 'setdimscale()' method was called.
C library equivalent : SDgetdimscale
"""
# Get dimension info. If data_type is 0, no scale have been set
# on the dimension.
status, dim_name, dim_size, data_type, n_attrs = _C.SDdiminfo(self._id)
_checkErr('getscale', status, 'cannot execute')
if data_type == 0:
raise HDF4Error("no scale set on that dimension")
# dim_size is 0 for an unlimited dimension. The actual length is
# obtained through SDgetinfo.
if dim_size == 0:
dim_size = self._sds.info()[2][self._index]
# Get scale values.
if data_type in [SDC.UCHAR8, SDC.UINT8]:
buf = _C.array_byte(dim_size)
elif data_type == SDC.INT8:
buf = _C.array_int8(dim_size)
elif data_type == SDC.INT16:
buf = _C.array_int16(dim_size)
elif data_type == SDC.UINT16:
buf = _C.array_uint16(dim_size)
elif data_type == SDC.INT32:
buf = _C.array_int32(dim_size)
elif data_type == SDC.UINT32:
buf = _C.array_uint32(dim_size)
elif data_type == SDC.FLOAT32:
buf = _C.array_float32(dim_size)
elif data_type == SDC.FLOAT64:
buf = _C.array_float64(dim_size)
else:
raise HDF4Error("getscale: dimension has an "\
"illegal or unsupported type %d" % data_type)
status = _C.SDgetdimscale(self._id, buf)
_checkErr('getscale', status, 'cannot execute')
return _array_to_ret(buf, dim_size) | python | def getscale(self):
"""Obtain the scale values along a dimension.
Args::
no argument
Returns::
list with the scale values; the list length is equal to the
dimension length; the element type is equal to the dimension
data type, as set when the 'setdimscale()' method was called.
C library equivalent : SDgetdimscale
"""
# Get dimension info. If data_type is 0, no scale have been set
# on the dimension.
status, dim_name, dim_size, data_type, n_attrs = _C.SDdiminfo(self._id)
_checkErr('getscale', status, 'cannot execute')
if data_type == 0:
raise HDF4Error("no scale set on that dimension")
# dim_size is 0 for an unlimited dimension. The actual length is
# obtained through SDgetinfo.
if dim_size == 0:
dim_size = self._sds.info()[2][self._index]
# Get scale values.
if data_type in [SDC.UCHAR8, SDC.UINT8]:
buf = _C.array_byte(dim_size)
elif data_type == SDC.INT8:
buf = _C.array_int8(dim_size)
elif data_type == SDC.INT16:
buf = _C.array_int16(dim_size)
elif data_type == SDC.UINT16:
buf = _C.array_uint16(dim_size)
elif data_type == SDC.INT32:
buf = _C.array_int32(dim_size)
elif data_type == SDC.UINT32:
buf = _C.array_uint32(dim_size)
elif data_type == SDC.FLOAT32:
buf = _C.array_float32(dim_size)
elif data_type == SDC.FLOAT64:
buf = _C.array_float64(dim_size)
else:
raise HDF4Error("getscale: dimension has an "\
"illegal or unsupported type %d" % data_type)
status = _C.SDgetdimscale(self._id, buf)
_checkErr('getscale', status, 'cannot execute')
return _array_to_ret(buf, dim_size) | [
"def",
"getscale",
"(",
"self",
")",
":",
"# Get dimension info. If data_type is 0, no scale have been set",
"# on the dimension.",
"status",
",",
"dim_name",
",",
"dim_size",
",",
"data_type",
",",
"n_attrs",
"=",
"_C",
".",
"SDdiminfo",
"(",
"self",
".",
"_id",
")",
"_checkErr",
"(",
"'getscale'",
",",
"status",
",",
"'cannot execute'",
")",
"if",
"data_type",
"==",
"0",
":",
"raise",
"HDF4Error",
"(",
"\"no scale set on that dimension\"",
")",
"# dim_size is 0 for an unlimited dimension. The actual length is",
"# obtained through SDgetinfo.",
"if",
"dim_size",
"==",
"0",
":",
"dim_size",
"=",
"self",
".",
"_sds",
".",
"info",
"(",
")",
"[",
"2",
"]",
"[",
"self",
".",
"_index",
"]",
"# Get scale values.",
"if",
"data_type",
"in",
"[",
"SDC",
".",
"UCHAR8",
",",
"SDC",
".",
"UINT8",
"]",
":",
"buf",
"=",
"_C",
".",
"array_byte",
"(",
"dim_size",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"INT8",
":",
"buf",
"=",
"_C",
".",
"array_int8",
"(",
"dim_size",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"INT16",
":",
"buf",
"=",
"_C",
".",
"array_int16",
"(",
"dim_size",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"UINT16",
":",
"buf",
"=",
"_C",
".",
"array_uint16",
"(",
"dim_size",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"INT32",
":",
"buf",
"=",
"_C",
".",
"array_int32",
"(",
"dim_size",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"UINT32",
":",
"buf",
"=",
"_C",
".",
"array_uint32",
"(",
"dim_size",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"FLOAT32",
":",
"buf",
"=",
"_C",
".",
"array_float32",
"(",
"dim_size",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"FLOAT64",
":",
"buf",
"=",
"_C",
".",
"array_float64",
"(",
"dim_size",
")",
"else",
":",
"raise",
"HDF4Error",
"(",
"\"getscale: dimension has an \"",
"\"illegal or unsupported type %d\"",
"%",
"data_type",
")",
"status",
"=",
"_C",
".",
"SDgetdimscale",
"(",
"self",
".",
"_id",
",",
"buf",
")",
"_checkErr",
"(",
"'getscale'",
",",
"status",
",",
"'cannot execute'",
")",
"return",
"_array_to_ret",
"(",
"buf",
",",
"dim_size",
")"
] | Obtain the scale values along a dimension.
Args::
no argument
Returns::
list with the scale values; the list length is equal to the
dimension length; the element type is equal to the dimension
data type, as set when the 'setdimscale()' method was called.
C library equivalent : SDgetdimscale | [
"Obtain",
"the",
"scale",
"values",
"along",
"a",
"dimension",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L2959-L3018 |
4,535 | fhs/pyhdf | pyhdf/SD.py | SDim.setscale | def setscale(self, data_type, scale):
"""Initialize the scale values along the dimension.
Args::
data_type data type code (one of the SDC.xxx constants)
scale sequence holding the scale values; the number of
values must match the current length of the dataset
along that dimension
C library equivalent : SDsetdimscale
Setting a scale on a dimension generates what HDF calls a
"coordinate variable". This is a rank 1 dataset similar to any
other dataset, which is created to hold the scale values. The
dataset name is identical to that of the dimension on which
setscale() is called, and the data type passed in 'data_type'
determines the type of the dataset. To distinguish between such
a dataset and a "normal" dataset, call the iscoordvar() method
of the dataset instance.
"""
try:
n_values = len(scale)
except:
n_values = 1
# Validate args
info = self._sds.info()
if info[1] == 1:
dim_size = info[2]
else:
dim_size = info[2][self._index]
if n_values != dim_size:
raise HDF4Error('number of scale values (%d) does not match ' \
'dimension size (%d)' % (n_values, dim_size))
if data_type == SDC.CHAR8:
buf = _C.array_byte(n_values)
# Allow a string as the scale argument.
# Becomes a noop if already a list.
scale = list(scale)
for n in range(n_values):
scale[n] = ord(scale[n])
elif data_type in [SDC.UCHAR8, SDC.UINT8]:
buf = _C.array_byte(n_values)
elif data_type == SDC.INT8:
buf = _C.array_int8(n_values)
elif data_type == SDC.INT16:
buf = _C.array_int16(n_values)
elif data_type == SDC.UINT16:
buf = _C.array_uint16(n_values)
elif data_type == SDC.INT32:
buf = _C.array_int32(n_values)
elif data_type == SDC.UINT32:
buf = _C.array_uint32(n_values)
elif data_type == SDC.FLOAT32:
buf = _C.array_float32(n_values)
elif data_type == SDC.FLOAT64:
buf = _C.array_float64(n_values)
else:
raise HDF4Error("setscale: illegal or usupported data_type")
if n_values == 1:
buf[0] = scale
else:
for n in range(n_values):
buf[n] = scale[n]
status = _C.SDsetdimscale(self._id, n_values, data_type, buf)
_checkErr('setscale', status, 'cannot execute') | python | def setscale(self, data_type, scale):
"""Initialize the scale values along the dimension.
Args::
data_type data type code (one of the SDC.xxx constants)
scale sequence holding the scale values; the number of
values must match the current length of the dataset
along that dimension
C library equivalent : SDsetdimscale
Setting a scale on a dimension generates what HDF calls a
"coordinate variable". This is a rank 1 dataset similar to any
other dataset, which is created to hold the scale values. The
dataset name is identical to that of the dimension on which
setscale() is called, and the data type passed in 'data_type'
determines the type of the dataset. To distinguish between such
a dataset and a "normal" dataset, call the iscoordvar() method
of the dataset instance.
"""
try:
n_values = len(scale)
except:
n_values = 1
# Validate args
info = self._sds.info()
if info[1] == 1:
dim_size = info[2]
else:
dim_size = info[2][self._index]
if n_values != dim_size:
raise HDF4Error('number of scale values (%d) does not match ' \
'dimension size (%d)' % (n_values, dim_size))
if data_type == SDC.CHAR8:
buf = _C.array_byte(n_values)
# Allow a string as the scale argument.
# Becomes a noop if already a list.
scale = list(scale)
for n in range(n_values):
scale[n] = ord(scale[n])
elif data_type in [SDC.UCHAR8, SDC.UINT8]:
buf = _C.array_byte(n_values)
elif data_type == SDC.INT8:
buf = _C.array_int8(n_values)
elif data_type == SDC.INT16:
buf = _C.array_int16(n_values)
elif data_type == SDC.UINT16:
buf = _C.array_uint16(n_values)
elif data_type == SDC.INT32:
buf = _C.array_int32(n_values)
elif data_type == SDC.UINT32:
buf = _C.array_uint32(n_values)
elif data_type == SDC.FLOAT32:
buf = _C.array_float32(n_values)
elif data_type == SDC.FLOAT64:
buf = _C.array_float64(n_values)
else:
raise HDF4Error("setscale: illegal or usupported data_type")
if n_values == 1:
buf[0] = scale
else:
for n in range(n_values):
buf[n] = scale[n]
status = _C.SDsetdimscale(self._id, n_values, data_type, buf)
_checkErr('setscale', status, 'cannot execute') | [
"def",
"setscale",
"(",
"self",
",",
"data_type",
",",
"scale",
")",
":",
"try",
":",
"n_values",
"=",
"len",
"(",
"scale",
")",
"except",
":",
"n_values",
"=",
"1",
"# Validate args",
"info",
"=",
"self",
".",
"_sds",
".",
"info",
"(",
")",
"if",
"info",
"[",
"1",
"]",
"==",
"1",
":",
"dim_size",
"=",
"info",
"[",
"2",
"]",
"else",
":",
"dim_size",
"=",
"info",
"[",
"2",
"]",
"[",
"self",
".",
"_index",
"]",
"if",
"n_values",
"!=",
"dim_size",
":",
"raise",
"HDF4Error",
"(",
"'number of scale values (%d) does not match '",
"'dimension size (%d)'",
"%",
"(",
"n_values",
",",
"dim_size",
")",
")",
"if",
"data_type",
"==",
"SDC",
".",
"CHAR8",
":",
"buf",
"=",
"_C",
".",
"array_byte",
"(",
"n_values",
")",
"# Allow a string as the scale argument.",
"# Becomes a noop if already a list.",
"scale",
"=",
"list",
"(",
"scale",
")",
"for",
"n",
"in",
"range",
"(",
"n_values",
")",
":",
"scale",
"[",
"n",
"]",
"=",
"ord",
"(",
"scale",
"[",
"n",
"]",
")",
"elif",
"data_type",
"in",
"[",
"SDC",
".",
"UCHAR8",
",",
"SDC",
".",
"UINT8",
"]",
":",
"buf",
"=",
"_C",
".",
"array_byte",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"INT8",
":",
"buf",
"=",
"_C",
".",
"array_int8",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"INT16",
":",
"buf",
"=",
"_C",
".",
"array_int16",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"UINT16",
":",
"buf",
"=",
"_C",
".",
"array_uint16",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"INT32",
":",
"buf",
"=",
"_C",
".",
"array_int32",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"UINT32",
":",
"buf",
"=",
"_C",
".",
"array_uint32",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"FLOAT32",
":",
"buf",
"=",
"_C",
".",
"array_float32",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"SDC",
".",
"FLOAT64",
":",
"buf",
"=",
"_C",
".",
"array_float64",
"(",
"n_values",
")",
"else",
":",
"raise",
"HDF4Error",
"(",
"\"setscale: illegal or usupported data_type\"",
")",
"if",
"n_values",
"==",
"1",
":",
"buf",
"[",
"0",
"]",
"=",
"scale",
"else",
":",
"for",
"n",
"in",
"range",
"(",
"n_values",
")",
":",
"buf",
"[",
"n",
"]",
"=",
"scale",
"[",
"n",
"]",
"status",
"=",
"_C",
".",
"SDsetdimscale",
"(",
"self",
".",
"_id",
",",
"n_values",
",",
"data_type",
",",
"buf",
")",
"_checkErr",
"(",
"'setscale'",
",",
"status",
",",
"'cannot execute'",
")"
] | Initialize the scale values along the dimension.
Args::
data_type data type code (one of the SDC.xxx constants)
scale sequence holding the scale values; the number of
values must match the current length of the dataset
along that dimension
C library equivalent : SDsetdimscale
Setting a scale on a dimension generates what HDF calls a
"coordinate variable". This is a rank 1 dataset similar to any
other dataset, which is created to hold the scale values. The
dataset name is identical to that of the dimension on which
setscale() is called, and the data type passed in 'data_type'
determines the type of the dataset. To distinguish between such
a dataset and a "normal" dataset, call the iscoordvar() method
of the dataset instance. | [
"Initialize",
"the",
"scale",
"values",
"along",
"the",
"dimension",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L3020-L3098 |
4,536 | fhs/pyhdf | pyhdf/SD.py | SDim.getstrs | def getstrs(self):
"""Retrieve the dimension standard string attributes.
Args::
no argument
Returns::
3-element tuple holding:
-dimension label (attribute 'long_name')
-dimension unit (attribute 'units')
-dimension format (attribute 'format')
An exception is raised if the standard attributes have
not been set.
C library equivalent: SDgetdimstrs
"""
status, label, unit, format = _C.SDgetdimstrs(self._id, 128)
_checkErr('getstrs', status, 'cannot execute')
return label, unit, format | python | def getstrs(self):
"""Retrieve the dimension standard string attributes.
Args::
no argument
Returns::
3-element tuple holding:
-dimension label (attribute 'long_name')
-dimension unit (attribute 'units')
-dimension format (attribute 'format')
An exception is raised if the standard attributes have
not been set.
C library equivalent: SDgetdimstrs
"""
status, label, unit, format = _C.SDgetdimstrs(self._id, 128)
_checkErr('getstrs', status, 'cannot execute')
return label, unit, format | [
"def",
"getstrs",
"(",
"self",
")",
":",
"status",
",",
"label",
",",
"unit",
",",
"format",
"=",
"_C",
".",
"SDgetdimstrs",
"(",
"self",
".",
"_id",
",",
"128",
")",
"_checkErr",
"(",
"'getstrs'",
",",
"status",
",",
"'cannot execute'",
")",
"return",
"label",
",",
"unit",
",",
"format"
] | Retrieve the dimension standard string attributes.
Args::
no argument
Returns::
3-element tuple holding:
-dimension label (attribute 'long_name')
-dimension unit (attribute 'units')
-dimension format (attribute 'format')
An exception is raised if the standard attributes have
not been set.
C library equivalent: SDgetdimstrs | [
"Retrieve",
"the",
"dimension",
"standard",
"string",
"attributes",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L3100-L3122 |
4,537 | fhs/pyhdf | pyhdf/SD.py | SDim.setstrs | def setstrs(self, label, unit, format):
"""Set the dimension standard string attributes.
Args::
label dimension label (attribute 'long_name')
unit dimension unit (attribute 'units')
format dimension format (attribute 'format')
Returns::
None
C library equivalent: SDsetdimstrs
"""
status = _C.SDsetdimstrs(self._id, label, unit, format)
_checkErr('setstrs', status, 'cannot execute') | python | def setstrs(self, label, unit, format):
"""Set the dimension standard string attributes.
Args::
label dimension label (attribute 'long_name')
unit dimension unit (attribute 'units')
format dimension format (attribute 'format')
Returns::
None
C library equivalent: SDsetdimstrs
"""
status = _C.SDsetdimstrs(self._id, label, unit, format)
_checkErr('setstrs', status, 'cannot execute') | [
"def",
"setstrs",
"(",
"self",
",",
"label",
",",
"unit",
",",
"format",
")",
":",
"status",
"=",
"_C",
".",
"SDsetdimstrs",
"(",
"self",
".",
"_id",
",",
"label",
",",
"unit",
",",
"format",
")",
"_checkErr",
"(",
"'setstrs'",
",",
"status",
",",
"'cannot execute'",
")"
] | Set the dimension standard string attributes.
Args::
label dimension label (attribute 'long_name')
unit dimension unit (attribute 'units')
format dimension format (attribute 'format')
Returns::
None
C library equivalent: SDsetdimstrs | [
"Set",
"the",
"dimension",
"standard",
"string",
"attributes",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/SD.py#L3124-L3141 |
4,538 | fhs/pyhdf | pyhdf/VS.py | VS.attach | def attach(self, num_name, write=0):
"""Locate an existing vdata or create a new vdata in the HDF file,
returning a VD instance.
Args::
num_name Name or reference number of the vdata. An existing vdata
can be specified either through its reference number or
its name. Use -1 to create a new vdata.
Note that uniqueness is not imposed on vdatas names,
whereas refnums are guaranteed to be unique. Thus
knowledge of its reference number may be the only way
to get at a wanted vdata.
write Set to 0 to open the vdata in read-only mode,
set to 1 to open it in write mode
Returns::
VD instance representing the vdata
C library equivalent : VSattach
After creating a new vdata (num_name == -1), fields must be
defined using method fdefine() of the VD instance, and those
fields must be allocated to the vdata with method setfields().
Same results can be achieved, but more simply, by calling the
create() method of the VS instance.
"""
mode = write and 'w' or 'r'
if isinstance(num_name, str):
num = self.find(num_name)
else:
num = num_name
vd = _C.VSattach(self._hdf_inst._id, num, mode)
if vd < 0:
_checkErr('attach', vd, 'cannot attach vdata')
return VD(self, vd) | python | def attach(self, num_name, write=0):
"""Locate an existing vdata or create a new vdata in the HDF file,
returning a VD instance.
Args::
num_name Name or reference number of the vdata. An existing vdata
can be specified either through its reference number or
its name. Use -1 to create a new vdata.
Note that uniqueness is not imposed on vdatas names,
whereas refnums are guaranteed to be unique. Thus
knowledge of its reference number may be the only way
to get at a wanted vdata.
write Set to 0 to open the vdata in read-only mode,
set to 1 to open it in write mode
Returns::
VD instance representing the vdata
C library equivalent : VSattach
After creating a new vdata (num_name == -1), fields must be
defined using method fdefine() of the VD instance, and those
fields must be allocated to the vdata with method setfields().
Same results can be achieved, but more simply, by calling the
create() method of the VS instance.
"""
mode = write and 'w' or 'r'
if isinstance(num_name, str):
num = self.find(num_name)
else:
num = num_name
vd = _C.VSattach(self._hdf_inst._id, num, mode)
if vd < 0:
_checkErr('attach', vd, 'cannot attach vdata')
return VD(self, vd) | [
"def",
"attach",
"(",
"self",
",",
"num_name",
",",
"write",
"=",
"0",
")",
":",
"mode",
"=",
"write",
"and",
"'w'",
"or",
"'r'",
"if",
"isinstance",
"(",
"num_name",
",",
"str",
")",
":",
"num",
"=",
"self",
".",
"find",
"(",
"num_name",
")",
"else",
":",
"num",
"=",
"num_name",
"vd",
"=",
"_C",
".",
"VSattach",
"(",
"self",
".",
"_hdf_inst",
".",
"_id",
",",
"num",
",",
"mode",
")",
"if",
"vd",
"<",
"0",
":",
"_checkErr",
"(",
"'attach'",
",",
"vd",
",",
"'cannot attach vdata'",
")",
"return",
"VD",
"(",
"self",
",",
"vd",
")"
] | Locate an existing vdata or create a new vdata in the HDF file,
returning a VD instance.
Args::
num_name Name or reference number of the vdata. An existing vdata
can be specified either through its reference number or
its name. Use -1 to create a new vdata.
Note that uniqueness is not imposed on vdatas names,
whereas refnums are guaranteed to be unique. Thus
knowledge of its reference number may be the only way
to get at a wanted vdata.
write Set to 0 to open the vdata in read-only mode,
set to 1 to open it in write mode
Returns::
VD instance representing the vdata
C library equivalent : VSattach
After creating a new vdata (num_name == -1), fields must be
defined using method fdefine() of the VD instance, and those
fields must be allocated to the vdata with method setfields().
Same results can be achieved, but more simply, by calling the
create() method of the VS instance. | [
"Locate",
"an",
"existing",
"vdata",
"or",
"create",
"a",
"new",
"vdata",
"in",
"the",
"HDF",
"file",
"returning",
"a",
"VD",
"instance",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/VS.py#L872-L911 |
4,539 | fhs/pyhdf | pyhdf/VS.py | VS.create | def create(self, name, fields):
"""Create a new vdata, setting its name and allocating
its fields.
Args::
name Name to assign to the vdata
fields Sequence of field definitions. Each field definition
is a sequence with the following elements in order:
- field name
- field type (one of HC.xxx constants)
- field order (number of values)
Fields are allocated to the vdata in the given order
Returns::
VD instance representing the created vdata
Calling the create() method is equivalent to the following calls:
- vd = attach(-1,1), to create a new vdata and open it in
write mode
- vd._name = name, to set the vdata name
- vd.fdefine(...), to define the name, type and order of
each field
- vd.setfields(...), to allocate fields to the vdata
C library equivalent : no equivalent
"""
try:
# Create new vdata (-1), open in write mode (1)
vd = self.attach(-1, 1)
# Set vdata name
vd._name = name
# Define fields
allNames = []
for name, type, order in fields:
vd.fdefine(name, type, order)
allNames.append(name)
# Allocate fields to the vdata
vd.setfields(*allNames)
return vd
except HDF4Error as msg:
raise HDF4Error("error creating vdata (%s)" % msg) | python | def create(self, name, fields):
"""Create a new vdata, setting its name and allocating
its fields.
Args::
name Name to assign to the vdata
fields Sequence of field definitions. Each field definition
is a sequence with the following elements in order:
- field name
- field type (one of HC.xxx constants)
- field order (number of values)
Fields are allocated to the vdata in the given order
Returns::
VD instance representing the created vdata
Calling the create() method is equivalent to the following calls:
- vd = attach(-1,1), to create a new vdata and open it in
write mode
- vd._name = name, to set the vdata name
- vd.fdefine(...), to define the name, type and order of
each field
- vd.setfields(...), to allocate fields to the vdata
C library equivalent : no equivalent
"""
try:
# Create new vdata (-1), open in write mode (1)
vd = self.attach(-1, 1)
# Set vdata name
vd._name = name
# Define fields
allNames = []
for name, type, order in fields:
vd.fdefine(name, type, order)
allNames.append(name)
# Allocate fields to the vdata
vd.setfields(*allNames)
return vd
except HDF4Error as msg:
raise HDF4Error("error creating vdata (%s)" % msg) | [
"def",
"create",
"(",
"self",
",",
"name",
",",
"fields",
")",
":",
"try",
":",
"# Create new vdata (-1), open in write mode (1)",
"vd",
"=",
"self",
".",
"attach",
"(",
"-",
"1",
",",
"1",
")",
"# Set vdata name",
"vd",
".",
"_name",
"=",
"name",
"# Define fields",
"allNames",
"=",
"[",
"]",
"for",
"name",
",",
"type",
",",
"order",
"in",
"fields",
":",
"vd",
".",
"fdefine",
"(",
"name",
",",
"type",
",",
"order",
")",
"allNames",
".",
"append",
"(",
"name",
")",
"# Allocate fields to the vdata",
"vd",
".",
"setfields",
"(",
"*",
"allNames",
")",
"return",
"vd",
"except",
"HDF4Error",
"as",
"msg",
":",
"raise",
"HDF4Error",
"(",
"\"error creating vdata (%s)\"",
"%",
"msg",
")"
] | Create a new vdata, setting its name and allocating
its fields.
Args::
name Name to assign to the vdata
fields Sequence of field definitions. Each field definition
is a sequence with the following elements in order:
- field name
- field type (one of HC.xxx constants)
- field order (number of values)
Fields are allocated to the vdata in the given order
Returns::
VD instance representing the created vdata
Calling the create() method is equivalent to the following calls:
- vd = attach(-1,1), to create a new vdata and open it in
write mode
- vd._name = name, to set the vdata name
- vd.fdefine(...), to define the name, type and order of
each field
- vd.setfields(...), to allocate fields to the vdata
C library equivalent : no equivalent | [
"Create",
"a",
"new",
"vdata",
"setting",
"its",
"name",
"and",
"allocating",
"its",
"fields",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/VS.py#L913-L959 |
4,540 | fhs/pyhdf | pyhdf/VS.py | VS.next | def next(self, vRef):
"""Get the reference number of the vdata following a given
vdata.
Args::
vRef Reference number of the vdata preceding the one
we require. Set to -1 to get the first vdata in
the HDF file. Knowing its reference number,
the vdata can then be opened (attached) by passing this
reference number to the attach() method.
Returns::
Reference number of the vdata following the one given
by argument vref
An exception is raised if no vdata follows the one given by vRef.
C library equivalent : VSgetid
"""
num = _C.VSgetid(self._hdf_inst._id, vRef)
_checkErr('next', num, 'cannot get next vdata')
return num | python | def next(self, vRef):
"""Get the reference number of the vdata following a given
vdata.
Args::
vRef Reference number of the vdata preceding the one
we require. Set to -1 to get the first vdata in
the HDF file. Knowing its reference number,
the vdata can then be opened (attached) by passing this
reference number to the attach() method.
Returns::
Reference number of the vdata following the one given
by argument vref
An exception is raised if no vdata follows the one given by vRef.
C library equivalent : VSgetid
"""
num = _C.VSgetid(self._hdf_inst._id, vRef)
_checkErr('next', num, 'cannot get next vdata')
return num | [
"def",
"next",
"(",
"self",
",",
"vRef",
")",
":",
"num",
"=",
"_C",
".",
"VSgetid",
"(",
"self",
".",
"_hdf_inst",
".",
"_id",
",",
"vRef",
")",
"_checkErr",
"(",
"'next'",
",",
"num",
",",
"'cannot get next vdata'",
")",
"return",
"num"
] | Get the reference number of the vdata following a given
vdata.
Args::
vRef Reference number of the vdata preceding the one
we require. Set to -1 to get the first vdata in
the HDF file. Knowing its reference number,
the vdata can then be opened (attached) by passing this
reference number to the attach() method.
Returns::
Reference number of the vdata following the one given
by argument vref
An exception is raised if no vdata follows the one given by vRef.
C library equivalent : VSgetid | [
"Get",
"the",
"reference",
"number",
"of",
"the",
"vdata",
"following",
"a",
"given",
"vdata",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/VS.py#L984-L1008 |
4,541 | fhs/pyhdf | pyhdf/VS.py | VS.vdatainfo | def vdatainfo(self, listAttr=0):
"""Return info about all the file vdatas.
Args::
listAttr Set to 0 to ignore vdatas used to store attribute
values, 1 to list them (see the VD._isattr readonly
attribute)
Returns::
List of vdata descriptions. Each vdata is described as
a 9-element tuple, composed of the following:
- vdata name
- vdata class
- vdata reference number
- vdata number of records
- vdata number of fields
- vdata number of attributes
- vdata record size in bytes
- vdata tag number
- vdata interlace mode
C library equivalent : no equivalent
"""
lst = []
ref = -1 # start at beginning
while True:
try:
nxtRef = self.next(ref)
except HDF4Error: # no vdata left
break
# Attach the vdata and check for an "attribute" vdata.
ref = nxtRef
vdObj = self.attach(ref)
if listAttr or not vdObj._isattr:
# Append a list of vdata properties.
lst.append((vdObj._name,
vdObj._class,
vdObj._refnum,
vdObj._nrecs,
vdObj._nfields,
vdObj._nattrs,
vdObj._recsize,
vdObj._tag,
vdObj._interlace))
vdObj.detach()
return lst | python | def vdatainfo(self, listAttr=0):
"""Return info about all the file vdatas.
Args::
listAttr Set to 0 to ignore vdatas used to store attribute
values, 1 to list them (see the VD._isattr readonly
attribute)
Returns::
List of vdata descriptions. Each vdata is described as
a 9-element tuple, composed of the following:
- vdata name
- vdata class
- vdata reference number
- vdata number of records
- vdata number of fields
- vdata number of attributes
- vdata record size in bytes
- vdata tag number
- vdata interlace mode
C library equivalent : no equivalent
"""
lst = []
ref = -1 # start at beginning
while True:
try:
nxtRef = self.next(ref)
except HDF4Error: # no vdata left
break
# Attach the vdata and check for an "attribute" vdata.
ref = nxtRef
vdObj = self.attach(ref)
if listAttr or not vdObj._isattr:
# Append a list of vdata properties.
lst.append((vdObj._name,
vdObj._class,
vdObj._refnum,
vdObj._nrecs,
vdObj._nfields,
vdObj._nattrs,
vdObj._recsize,
vdObj._tag,
vdObj._interlace))
vdObj.detach()
return lst | [
"def",
"vdatainfo",
"(",
"self",
",",
"listAttr",
"=",
"0",
")",
":",
"lst",
"=",
"[",
"]",
"ref",
"=",
"-",
"1",
"# start at beginning",
"while",
"True",
":",
"try",
":",
"nxtRef",
"=",
"self",
".",
"next",
"(",
"ref",
")",
"except",
"HDF4Error",
":",
"# no vdata left",
"break",
"# Attach the vdata and check for an \"attribute\" vdata.",
"ref",
"=",
"nxtRef",
"vdObj",
"=",
"self",
".",
"attach",
"(",
"ref",
")",
"if",
"listAttr",
"or",
"not",
"vdObj",
".",
"_isattr",
":",
"# Append a list of vdata properties.",
"lst",
".",
"append",
"(",
"(",
"vdObj",
".",
"_name",
",",
"vdObj",
".",
"_class",
",",
"vdObj",
".",
"_refnum",
",",
"vdObj",
".",
"_nrecs",
",",
"vdObj",
".",
"_nfields",
",",
"vdObj",
".",
"_nattrs",
",",
"vdObj",
".",
"_recsize",
",",
"vdObj",
".",
"_tag",
",",
"vdObj",
".",
"_interlace",
")",
")",
"vdObj",
".",
"detach",
"(",
")",
"return",
"lst"
] | Return info about all the file vdatas.
Args::
listAttr Set to 0 to ignore vdatas used to store attribute
values, 1 to list them (see the VD._isattr readonly
attribute)
Returns::
List of vdata descriptions. Each vdata is described as
a 9-element tuple, composed of the following:
- vdata name
- vdata class
- vdata reference number
- vdata number of records
- vdata number of fields
- vdata number of attributes
- vdata record size in bytes
- vdata tag number
- vdata interlace mode
C library equivalent : no equivalent | [
"Return",
"info",
"about",
"all",
"the",
"file",
"vdatas",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/VS.py#L1010-L1060 |
4,542 | fhs/pyhdf | pyhdf/VS.py | VS.storedata | def storedata(self, fieldName, values, data_type, vName, vClass):
"""Create and initialize a single field vdata, returning
the vdata reference number.
Args::
fieldName Name of the single field in the vadata to create
values Sequence of values to store in the field;. Each value can
itself be a sequence, in which case the field will be
multivalued (all second-level sequences must be of
the same length)
data_type Values type (one of HC.xxx constants). All values
must be of the same type
vName Name of the vdata to create
vClass Vdata class (string)
Returns::
vdata reference number
C library equivalent : VHstoredata / VHstoredatam
"""
# See if the field is multi-valued.
nrecs = len(values)
if type(values[0]) in [list, tuple]:
order = len(values[0])
# Replace input list with a flattened list.
newValues = []
for el in values:
for e in el:
newValues.append(e)
values = newValues
else:
order = 1
n_values = nrecs * order
if data_type == HC.CHAR8:
buf = _C.array_byte(n_values)
# Allow values to be passed as a string.
# Noop if a list is passed.
values = list(values)
for n in range(n_values):
values[n] = ord(values[n])
elif data_type in [HC.UCHAR8, HC.UINT8]:
buf = _C.array_byte(n_values)
elif data_type == HC.INT8:
# SWIG refuses negative values here. We found that if we
# pass them as byte values, it will work.
buf = _C.array_int8(n_values)
values = list(values)
for n in range(n_values):
v = values[n]
if v >= 0:
v &= 0x7f
else:
v = abs(v) & 0x7f
if v:
v = 256 - v
else:
v = 128 # -128 in 2s complement
values[n] = v
elif data_type == HC.INT16:
buf = _C.array_int16(n_values)
elif data_type == HC.UINT16:
buf = _C.array_uint16(n_values)
elif data_type == HC.INT32:
buf = _C.array_int32(n_values)
elif data_type == HC.UINT32:
buf = _C.array_uint32(n_values)
elif data_type == HC.FLOAT32:
buf = _C.array_float32(n_values)
elif data_type == HC.FLOAT64:
buf = _C.array_float64(n_values)
else:
raise HDF4Error("storedata: illegal or unimplemented data_type")
for n in range(n_values):
buf[n] = values[n]
if order == 1:
vd = _C.VHstoredata(self._hdf_inst._id, fieldName, buf,
nrecs, data_type, vName, vClass)
else:
vd = _C.VHstoredatam(self._hdf_inst._id, fieldName, buf,
nrecs, data_type, vName, vClass, order)
_checkErr('storedata', vd, 'cannot create vdata')
return vd | python | def storedata(self, fieldName, values, data_type, vName, vClass):
"""Create and initialize a single field vdata, returning
the vdata reference number.
Args::
fieldName Name of the single field in the vadata to create
values Sequence of values to store in the field;. Each value can
itself be a sequence, in which case the field will be
multivalued (all second-level sequences must be of
the same length)
data_type Values type (one of HC.xxx constants). All values
must be of the same type
vName Name of the vdata to create
vClass Vdata class (string)
Returns::
vdata reference number
C library equivalent : VHstoredata / VHstoredatam
"""
# See if the field is multi-valued.
nrecs = len(values)
if type(values[0]) in [list, tuple]:
order = len(values[0])
# Replace input list with a flattened list.
newValues = []
for el in values:
for e in el:
newValues.append(e)
values = newValues
else:
order = 1
n_values = nrecs * order
if data_type == HC.CHAR8:
buf = _C.array_byte(n_values)
# Allow values to be passed as a string.
# Noop if a list is passed.
values = list(values)
for n in range(n_values):
values[n] = ord(values[n])
elif data_type in [HC.UCHAR8, HC.UINT8]:
buf = _C.array_byte(n_values)
elif data_type == HC.INT8:
# SWIG refuses negative values here. We found that if we
# pass them as byte values, it will work.
buf = _C.array_int8(n_values)
values = list(values)
for n in range(n_values):
v = values[n]
if v >= 0:
v &= 0x7f
else:
v = abs(v) & 0x7f
if v:
v = 256 - v
else:
v = 128 # -128 in 2s complement
values[n] = v
elif data_type == HC.INT16:
buf = _C.array_int16(n_values)
elif data_type == HC.UINT16:
buf = _C.array_uint16(n_values)
elif data_type == HC.INT32:
buf = _C.array_int32(n_values)
elif data_type == HC.UINT32:
buf = _C.array_uint32(n_values)
elif data_type == HC.FLOAT32:
buf = _C.array_float32(n_values)
elif data_type == HC.FLOAT64:
buf = _C.array_float64(n_values)
else:
raise HDF4Error("storedata: illegal or unimplemented data_type")
for n in range(n_values):
buf[n] = values[n]
if order == 1:
vd = _C.VHstoredata(self._hdf_inst._id, fieldName, buf,
nrecs, data_type, vName, vClass)
else:
vd = _C.VHstoredatam(self._hdf_inst._id, fieldName, buf,
nrecs, data_type, vName, vClass, order)
_checkErr('storedata', vd, 'cannot create vdata')
return vd | [
"def",
"storedata",
"(",
"self",
",",
"fieldName",
",",
"values",
",",
"data_type",
",",
"vName",
",",
"vClass",
")",
":",
"# See if the field is multi-valued.",
"nrecs",
"=",
"len",
"(",
"values",
")",
"if",
"type",
"(",
"values",
"[",
"0",
"]",
")",
"in",
"[",
"list",
",",
"tuple",
"]",
":",
"order",
"=",
"len",
"(",
"values",
"[",
"0",
"]",
")",
"# Replace input list with a flattened list.",
"newValues",
"=",
"[",
"]",
"for",
"el",
"in",
"values",
":",
"for",
"e",
"in",
"el",
":",
"newValues",
".",
"append",
"(",
"e",
")",
"values",
"=",
"newValues",
"else",
":",
"order",
"=",
"1",
"n_values",
"=",
"nrecs",
"*",
"order",
"if",
"data_type",
"==",
"HC",
".",
"CHAR8",
":",
"buf",
"=",
"_C",
".",
"array_byte",
"(",
"n_values",
")",
"# Allow values to be passed as a string.",
"# Noop if a list is passed.",
"values",
"=",
"list",
"(",
"values",
")",
"for",
"n",
"in",
"range",
"(",
"n_values",
")",
":",
"values",
"[",
"n",
"]",
"=",
"ord",
"(",
"values",
"[",
"n",
"]",
")",
"elif",
"data_type",
"in",
"[",
"HC",
".",
"UCHAR8",
",",
"HC",
".",
"UINT8",
"]",
":",
"buf",
"=",
"_C",
".",
"array_byte",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"HC",
".",
"INT8",
":",
"# SWIG refuses negative values here. We found that if we",
"# pass them as byte values, it will work.",
"buf",
"=",
"_C",
".",
"array_int8",
"(",
"n_values",
")",
"values",
"=",
"list",
"(",
"values",
")",
"for",
"n",
"in",
"range",
"(",
"n_values",
")",
":",
"v",
"=",
"values",
"[",
"n",
"]",
"if",
"v",
">=",
"0",
":",
"v",
"&=",
"0x7f",
"else",
":",
"v",
"=",
"abs",
"(",
"v",
")",
"&",
"0x7f",
"if",
"v",
":",
"v",
"=",
"256",
"-",
"v",
"else",
":",
"v",
"=",
"128",
"# -128 in 2s complement",
"values",
"[",
"n",
"]",
"=",
"v",
"elif",
"data_type",
"==",
"HC",
".",
"INT16",
":",
"buf",
"=",
"_C",
".",
"array_int16",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"HC",
".",
"UINT16",
":",
"buf",
"=",
"_C",
".",
"array_uint16",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"HC",
".",
"INT32",
":",
"buf",
"=",
"_C",
".",
"array_int32",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"HC",
".",
"UINT32",
":",
"buf",
"=",
"_C",
".",
"array_uint32",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"HC",
".",
"FLOAT32",
":",
"buf",
"=",
"_C",
".",
"array_float32",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"HC",
".",
"FLOAT64",
":",
"buf",
"=",
"_C",
".",
"array_float64",
"(",
"n_values",
")",
"else",
":",
"raise",
"HDF4Error",
"(",
"\"storedata: illegal or unimplemented data_type\"",
")",
"for",
"n",
"in",
"range",
"(",
"n_values",
")",
":",
"buf",
"[",
"n",
"]",
"=",
"values",
"[",
"n",
"]",
"if",
"order",
"==",
"1",
":",
"vd",
"=",
"_C",
".",
"VHstoredata",
"(",
"self",
".",
"_hdf_inst",
".",
"_id",
",",
"fieldName",
",",
"buf",
",",
"nrecs",
",",
"data_type",
",",
"vName",
",",
"vClass",
")",
"else",
":",
"vd",
"=",
"_C",
".",
"VHstoredatam",
"(",
"self",
".",
"_hdf_inst",
".",
"_id",
",",
"fieldName",
",",
"buf",
",",
"nrecs",
",",
"data_type",
",",
"vName",
",",
"vClass",
",",
"order",
")",
"_checkErr",
"(",
"'storedata'",
",",
"vd",
",",
"'cannot create vdata'",
")",
"return",
"vd"
] | Create and initialize a single field vdata, returning
the vdata reference number.
Args::
fieldName Name of the single field in the vadata to create
values Sequence of values to store in the field;. Each value can
itself be a sequence, in which case the field will be
multivalued (all second-level sequences must be of
the same length)
data_type Values type (one of HC.xxx constants). All values
must be of the same type
vName Name of the vdata to create
vClass Vdata class (string)
Returns::
vdata reference number
C library equivalent : VHstoredata / VHstoredatam | [
"Create",
"and",
"initialize",
"a",
"single",
"field",
"vdata",
"returning",
"the",
"vdata",
"reference",
"number",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/VS.py#L1062-L1159 |
4,543 | fhs/pyhdf | pyhdf/VS.py | VD.field | def field(self, name_index):
"""Get a VDField instance representing a field of the vdata.
Args::
name_index name or index number of the field
Returns::
VDfield instance representing the field
C library equivalent : no equivalent
"""
# Transform a name to an index number
if isinstance(name_index, str):
status, index = _C.VSfindex(self._id, name_index)
_checkErr('field', status, "illegal field name: %s" % name_index)
else:
n = _C.VFnfields(self._id)
_checkErr('field', n, 'cannot execute')
index = name_index
if index >= n:
raise HDF4Error("field: illegal index number")
return VDField(self, index) | python | def field(self, name_index):
"""Get a VDField instance representing a field of the vdata.
Args::
name_index name or index number of the field
Returns::
VDfield instance representing the field
C library equivalent : no equivalent
"""
# Transform a name to an index number
if isinstance(name_index, str):
status, index = _C.VSfindex(self._id, name_index)
_checkErr('field', status, "illegal field name: %s" % name_index)
else:
n = _C.VFnfields(self._id)
_checkErr('field', n, 'cannot execute')
index = name_index
if index >= n:
raise HDF4Error("field: illegal index number")
return VDField(self, index) | [
"def",
"field",
"(",
"self",
",",
"name_index",
")",
":",
"# Transform a name to an index number",
"if",
"isinstance",
"(",
"name_index",
",",
"str",
")",
":",
"status",
",",
"index",
"=",
"_C",
".",
"VSfindex",
"(",
"self",
".",
"_id",
",",
"name_index",
")",
"_checkErr",
"(",
"'field'",
",",
"status",
",",
"\"illegal field name: %s\"",
"%",
"name_index",
")",
"else",
":",
"n",
"=",
"_C",
".",
"VFnfields",
"(",
"self",
".",
"_id",
")",
"_checkErr",
"(",
"'field'",
",",
"n",
",",
"'cannot execute'",
")",
"index",
"=",
"name_index",
"if",
"index",
">=",
"n",
":",
"raise",
"HDF4Error",
"(",
"\"field: illegal index number\"",
")",
"return",
"VDField",
"(",
"self",
",",
"index",
")"
] | Get a VDField instance representing a field of the vdata.
Args::
name_index name or index number of the field
Returns::
VDfield instance representing the field
C library equivalent : no equivalent | [
"Get",
"a",
"VDField",
"instance",
"representing",
"a",
"field",
"of",
"the",
"vdata",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/VS.py#L1480-L1504 |
4,544 | fhs/pyhdf | pyhdf/VS.py | VD.seek | def seek(self, recIndex):
"""Seek to the beginning of the record identified by its
record index. A succeeding read will load this record in
memory.
Args::
recIndex index of the record in the vdata; numbering
starts at 0. Legal values range from 0
(start of vdata) to the current number of
records (at end of vdata).
Returns::
record index
An exception is raised if an attempt is made to seek beyond the
last record.
The C API prohibits seeking past the next-to-last record,
forcing one to read the last record to advance to the end
of the vdata. The python API removes this limitation.
Seeking to the end of the vdata can also be done by calling
method ``seekend()``.
C library equivalent : VSseek
"""
if recIndex > self._nrecs - 1:
if recIndex == self._nrecs:
return self.seekend()
else:
raise HDF4Error("attempt to seek past last record")
n = _C.VSseek(self._id, recIndex)
_checkErr('seek', n, 'cannot seek')
self._offset = n
return n | python | def seek(self, recIndex):
"""Seek to the beginning of the record identified by its
record index. A succeeding read will load this record in
memory.
Args::
recIndex index of the record in the vdata; numbering
starts at 0. Legal values range from 0
(start of vdata) to the current number of
records (at end of vdata).
Returns::
record index
An exception is raised if an attempt is made to seek beyond the
last record.
The C API prohibits seeking past the next-to-last record,
forcing one to read the last record to advance to the end
of the vdata. The python API removes this limitation.
Seeking to the end of the vdata can also be done by calling
method ``seekend()``.
C library equivalent : VSseek
"""
if recIndex > self._nrecs - 1:
if recIndex == self._nrecs:
return self.seekend()
else:
raise HDF4Error("attempt to seek past last record")
n = _C.VSseek(self._id, recIndex)
_checkErr('seek', n, 'cannot seek')
self._offset = n
return n | [
"def",
"seek",
"(",
"self",
",",
"recIndex",
")",
":",
"if",
"recIndex",
">",
"self",
".",
"_nrecs",
"-",
"1",
":",
"if",
"recIndex",
"==",
"self",
".",
"_nrecs",
":",
"return",
"self",
".",
"seekend",
"(",
")",
"else",
":",
"raise",
"HDF4Error",
"(",
"\"attempt to seek past last record\"",
")",
"n",
"=",
"_C",
".",
"VSseek",
"(",
"self",
".",
"_id",
",",
"recIndex",
")",
"_checkErr",
"(",
"'seek'",
",",
"n",
",",
"'cannot seek'",
")",
"self",
".",
"_offset",
"=",
"n",
"return",
"n"
] | Seek to the beginning of the record identified by its
record index. A succeeding read will load this record in
memory.
Args::
recIndex index of the record in the vdata; numbering
starts at 0. Legal values range from 0
(start of vdata) to the current number of
records (at end of vdata).
Returns::
record index
An exception is raised if an attempt is made to seek beyond the
last record.
The C API prohibits seeking past the next-to-last record,
forcing one to read the last record to advance to the end
of the vdata. The python API removes this limitation.
Seeking to the end of the vdata can also be done by calling
method ``seekend()``.
C library equivalent : VSseek | [
"Seek",
"to",
"the",
"beginning",
"of",
"the",
"record",
"identified",
"by",
"its",
"record",
"index",
".",
"A",
"succeeding",
"read",
"will",
"load",
"this",
"record",
"in",
"memory",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/VS.py#L1507-L1544 |
4,545 | fhs/pyhdf | pyhdf/VS.py | VD.inquire | def inquire(self):
"""Retrieve info about the vdata.
Args::
no argument
Returns::
5-element tuple with the following elements:
-number of records in the vdata
-interlace mode
-list of vdata field names
-size in bytes of the vdata record
-name of the vdata
C library equivalent : VSinquire
"""
status, nRecs, interlace, fldNames, size, vName = \
_C.VSinquire(self._id)
_checkErr('inquire', status, "cannot query vdata info")
return nRecs, interlace, fldNames.split(','), size, vName | python | def inquire(self):
"""Retrieve info about the vdata.
Args::
no argument
Returns::
5-element tuple with the following elements:
-number of records in the vdata
-interlace mode
-list of vdata field names
-size in bytes of the vdata record
-name of the vdata
C library equivalent : VSinquire
"""
status, nRecs, interlace, fldNames, size, vName = \
_C.VSinquire(self._id)
_checkErr('inquire', status, "cannot query vdata info")
return nRecs, interlace, fldNames.split(','), size, vName | [
"def",
"inquire",
"(",
"self",
")",
":",
"status",
",",
"nRecs",
",",
"interlace",
",",
"fldNames",
",",
"size",
",",
"vName",
"=",
"_C",
".",
"VSinquire",
"(",
"self",
".",
"_id",
")",
"_checkErr",
"(",
"'inquire'",
",",
"status",
",",
"\"cannot query vdata info\"",
")",
"return",
"nRecs",
",",
"interlace",
",",
"fldNames",
".",
"split",
"(",
"','",
")",
",",
"size",
",",
"vName"
] | Retrieve info about the vdata.
Args::
no argument
Returns::
5-element tuple with the following elements:
-number of records in the vdata
-interlace mode
-list of vdata field names
-size in bytes of the vdata record
-name of the vdata
C library equivalent : VSinquire | [
"Retrieve",
"info",
"about",
"the",
"vdata",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/VS.py#L1861-L1883 |
4,546 | fhs/pyhdf | pyhdf/VS.py | VD.fieldinfo | def fieldinfo(self):
"""Retrieve info about all vdata fields.
Args::
no argument
Returns::
list where each element describes a field of the vdata;
each field is described by an 7-element tuple containing
the following elements:
- field name
- field data type (one of HC.xxx constants)
- field order
- number of attributes attached to the field
- field index number
- field external size
- field internal size
C library equivalent : no equivalent
"""
lst = []
for n in range(self._nfields):
fld = self.field(n)
lst.append((fld._name,
fld._type,
fld._order,
fld._nattrs,
fld._index,
fld._esize,
fld._isize))
return lst | python | def fieldinfo(self):
"""Retrieve info about all vdata fields.
Args::
no argument
Returns::
list where each element describes a field of the vdata;
each field is described by an 7-element tuple containing
the following elements:
- field name
- field data type (one of HC.xxx constants)
- field order
- number of attributes attached to the field
- field index number
- field external size
- field internal size
C library equivalent : no equivalent
"""
lst = []
for n in range(self._nfields):
fld = self.field(n)
lst.append((fld._name,
fld._type,
fld._order,
fld._nattrs,
fld._index,
fld._esize,
fld._isize))
return lst | [
"def",
"fieldinfo",
"(",
"self",
")",
":",
"lst",
"=",
"[",
"]",
"for",
"n",
"in",
"range",
"(",
"self",
".",
"_nfields",
")",
":",
"fld",
"=",
"self",
".",
"field",
"(",
"n",
")",
"lst",
".",
"append",
"(",
"(",
"fld",
".",
"_name",
",",
"fld",
".",
"_type",
",",
"fld",
".",
"_order",
",",
"fld",
".",
"_nattrs",
",",
"fld",
".",
"_index",
",",
"fld",
".",
"_esize",
",",
"fld",
".",
"_isize",
")",
")",
"return",
"lst"
] | Retrieve info about all vdata fields.
Args::
no argument
Returns::
list where each element describes a field of the vdata;
each field is described by an 7-element tuple containing
the following elements:
- field name
- field data type (one of HC.xxx constants)
- field order
- number of attributes attached to the field
- field index number
- field external size
- field internal size
C library equivalent : no equivalent | [
"Retrieve",
"info",
"about",
"all",
"vdata",
"fields",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/VS.py#L1886-L1921 |
4,547 | fhs/pyhdf | pyhdf/VS.py | VD.sizeof | def sizeof(self, fields):
"""Retrieve the size in bytes of the given fields.
Args::
fields sequence of field names to query
Returns::
total size of the fields in bytes
C library equivalent : VSsizeof
"""
if type(fields) in [tuple, list]:
str = ','.join(fields)
else:
str = fields
n = _C.VSsizeof(self._id, str)
_checkErr('sizeof', n, "cannot retrieve field sizes")
return n | python | def sizeof(self, fields):
"""Retrieve the size in bytes of the given fields.
Args::
fields sequence of field names to query
Returns::
total size of the fields in bytes
C library equivalent : VSsizeof
"""
if type(fields) in [tuple, list]:
str = ','.join(fields)
else:
str = fields
n = _C.VSsizeof(self._id, str)
_checkErr('sizeof', n, "cannot retrieve field sizes")
return n | [
"def",
"sizeof",
"(",
"self",
",",
"fields",
")",
":",
"if",
"type",
"(",
"fields",
")",
"in",
"[",
"tuple",
",",
"list",
"]",
":",
"str",
"=",
"','",
".",
"join",
"(",
"fields",
")",
"else",
":",
"str",
"=",
"fields",
"n",
"=",
"_C",
".",
"VSsizeof",
"(",
"self",
".",
"_id",
",",
"str",
")",
"_checkErr",
"(",
"'sizeof'",
",",
"n",
",",
"\"cannot retrieve field sizes\"",
")",
"return",
"n"
] | Retrieve the size in bytes of the given fields.
Args::
fields sequence of field names to query
Returns::
total size of the fields in bytes
C library equivalent : VSsizeof | [
"Retrieve",
"the",
"size",
"in",
"bytes",
"of",
"the",
"given",
"fields",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/VS.py#L1923-L1943 |
4,548 | fhs/pyhdf | pyhdf/VS.py | VD.fexist | def fexist(self, fields):
"""Check if a vdata contains a given set of fields.
Args::
fields sequence of field names whose presence in the
vdata must be checked
Returns::
true (1) if the given fields are present
false (0) otherwise
C library equivalent : VSfexist
"""
if type(fields) in [tuple, list]:
str = ','.join(fields)
else:
str = fields
ret = _C.VSfexist(self._id, str)
if ret < 0:
return 0
else:
return 1 | python | def fexist(self, fields):
"""Check if a vdata contains a given set of fields.
Args::
fields sequence of field names whose presence in the
vdata must be checked
Returns::
true (1) if the given fields are present
false (0) otherwise
C library equivalent : VSfexist
"""
if type(fields) in [tuple, list]:
str = ','.join(fields)
else:
str = fields
ret = _C.VSfexist(self._id, str)
if ret < 0:
return 0
else:
return 1 | [
"def",
"fexist",
"(",
"self",
",",
"fields",
")",
":",
"if",
"type",
"(",
"fields",
")",
"in",
"[",
"tuple",
",",
"list",
"]",
":",
"str",
"=",
"','",
".",
"join",
"(",
"fields",
")",
"else",
":",
"str",
"=",
"fields",
"ret",
"=",
"_C",
".",
"VSfexist",
"(",
"self",
".",
"_id",
",",
"str",
")",
"if",
"ret",
"<",
"0",
":",
"return",
"0",
"else",
":",
"return",
"1"
] | Check if a vdata contains a given set of fields.
Args::
fields sequence of field names whose presence in the
vdata must be checked
Returns::
true (1) if the given fields are present
false (0) otherwise
C library equivalent : VSfexist | [
"Check",
"if",
"a",
"vdata",
"contains",
"a",
"given",
"set",
"of",
"fields",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/VS.py#L1945-L1969 |
4,549 | fhs/pyhdf | pyhdf/VS.py | VDField.find | def find(self, name):
"""Search the field for a given attribute.
Args::
name attribute name
Returns::
if found, VDAttr instance describing the attribute
None otherwise
C library equivalent : VSfindattr
"""
try:
att = self.attr(name)
if att._index is None:
att = None
except HDF4Error:
att = None
return att | python | def find(self, name):
"""Search the field for a given attribute.
Args::
name attribute name
Returns::
if found, VDAttr instance describing the attribute
None otherwise
C library equivalent : VSfindattr
"""
try:
att = self.attr(name)
if att._index is None:
att = None
except HDF4Error:
att = None
return att | [
"def",
"find",
"(",
"self",
",",
"name",
")",
":",
"try",
":",
"att",
"=",
"self",
".",
"attr",
"(",
"name",
")",
"if",
"att",
".",
"_index",
"is",
"None",
":",
"att",
"=",
"None",
"except",
"HDF4Error",
":",
"att",
"=",
"None",
"return",
"att"
] | Search the field for a given attribute.
Args::
name attribute name
Returns::
if found, VDAttr instance describing the attribute
None otherwise
C library equivalent : VSfindattr | [
"Search",
"the",
"field",
"for",
"a",
"given",
"attribute",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/VS.py#L2236-L2257 |
4,550 | fhs/pyhdf | pyhdf/VS.py | VDAttr.set | def set(self, data_type, values):
"""Set the attribute value.
Args::
data_type : attribute data type (see constants HC.xxx)
values : attribute value(s); specify a list to create
a multi-valued attribute; a string valued
attribute can be created by setting 'data_type'
to HC.CHAR8 and 'values' to the corresponding
string
If the attribute already exists, it will be
updated. However, it is illegal to try to change
its data type or its order (number of values).
Returns::
None
C library equivalent : VSsetattr
"""
try:
n_values = len(values)
except:
values = [values]
n_values = 1
if data_type == HC.CHAR8:
buf = _C.array_byte(n_values)
# Allow values to be passed as a string.
# Noop if a list is passed.
values = list(values)
for n in range(n_values):
if not isinstance(values[n], int):
values[n] = ord(values[n])
elif data_type in [HC.UCHAR8, HC.UINT8]:
buf = _C.array_byte(n_values)
elif data_type == HC.INT8:
# SWIG refuses negative values here. We found that if we
# pass them as byte values, it will work.
buf = _C.array_int8(n_values)
values = list(values)
for n in range(n_values):
v = values[n]
if v >= 0:
v &= 0x7f
else:
v = abs(v) & 0x7f
if v:
v = 256 - v
else:
v = 128 # -128 in 2s complement
values[n] = v
elif data_type == HC.INT16:
buf = _C.array_int16(n_values)
elif data_type == HC.UINT16:
buf = _C.array_uint16(n_values)
elif data_type == HC.INT32:
buf = _C.array_int32(n_values)
elif data_type == HC.UINT32:
buf = _C.array_uint32(n_values)
elif data_type == HC.FLOAT32:
buf = _C.array_float32(n_values)
elif data_type == HC.FLOAT64:
buf = _C.array_float64(n_values)
else:
raise HDF4Error("set: illegal or unimplemented data_type")
for n in range(n_values):
buf[n] = values[n]
status = _C.VSsetattr(self._vd_inst._id, self._fIndex, self._name,
data_type, n_values, buf)
_checkErr('attr', status, 'cannot execute')
# Update the attribute index
self._index = _C.VSfindattr(self._vd_inst._id, self._fIndex,
self._name);
if self._index < 0:
raise HDF4Error("set: error retrieving attribute index") | python | def set(self, data_type, values):
"""Set the attribute value.
Args::
data_type : attribute data type (see constants HC.xxx)
values : attribute value(s); specify a list to create
a multi-valued attribute; a string valued
attribute can be created by setting 'data_type'
to HC.CHAR8 and 'values' to the corresponding
string
If the attribute already exists, it will be
updated. However, it is illegal to try to change
its data type or its order (number of values).
Returns::
None
C library equivalent : VSsetattr
"""
try:
n_values = len(values)
except:
values = [values]
n_values = 1
if data_type == HC.CHAR8:
buf = _C.array_byte(n_values)
# Allow values to be passed as a string.
# Noop if a list is passed.
values = list(values)
for n in range(n_values):
if not isinstance(values[n], int):
values[n] = ord(values[n])
elif data_type in [HC.UCHAR8, HC.UINT8]:
buf = _C.array_byte(n_values)
elif data_type == HC.INT8:
# SWIG refuses negative values here. We found that if we
# pass them as byte values, it will work.
buf = _C.array_int8(n_values)
values = list(values)
for n in range(n_values):
v = values[n]
if v >= 0:
v &= 0x7f
else:
v = abs(v) & 0x7f
if v:
v = 256 - v
else:
v = 128 # -128 in 2s complement
values[n] = v
elif data_type == HC.INT16:
buf = _C.array_int16(n_values)
elif data_type == HC.UINT16:
buf = _C.array_uint16(n_values)
elif data_type == HC.INT32:
buf = _C.array_int32(n_values)
elif data_type == HC.UINT32:
buf = _C.array_uint32(n_values)
elif data_type == HC.FLOAT32:
buf = _C.array_float32(n_values)
elif data_type == HC.FLOAT64:
buf = _C.array_float64(n_values)
else:
raise HDF4Error("set: illegal or unimplemented data_type")
for n in range(n_values):
buf[n] = values[n]
status = _C.VSsetattr(self._vd_inst._id, self._fIndex, self._name,
data_type, n_values, buf)
_checkErr('attr', status, 'cannot execute')
# Update the attribute index
self._index = _C.VSfindattr(self._vd_inst._id, self._fIndex,
self._name);
if self._index < 0:
raise HDF4Error("set: error retrieving attribute index") | [
"def",
"set",
"(",
"self",
",",
"data_type",
",",
"values",
")",
":",
"try",
":",
"n_values",
"=",
"len",
"(",
"values",
")",
"except",
":",
"values",
"=",
"[",
"values",
"]",
"n_values",
"=",
"1",
"if",
"data_type",
"==",
"HC",
".",
"CHAR8",
":",
"buf",
"=",
"_C",
".",
"array_byte",
"(",
"n_values",
")",
"# Allow values to be passed as a string.",
"# Noop if a list is passed.",
"values",
"=",
"list",
"(",
"values",
")",
"for",
"n",
"in",
"range",
"(",
"n_values",
")",
":",
"if",
"not",
"isinstance",
"(",
"values",
"[",
"n",
"]",
",",
"int",
")",
":",
"values",
"[",
"n",
"]",
"=",
"ord",
"(",
"values",
"[",
"n",
"]",
")",
"elif",
"data_type",
"in",
"[",
"HC",
".",
"UCHAR8",
",",
"HC",
".",
"UINT8",
"]",
":",
"buf",
"=",
"_C",
".",
"array_byte",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"HC",
".",
"INT8",
":",
"# SWIG refuses negative values here. We found that if we",
"# pass them as byte values, it will work.",
"buf",
"=",
"_C",
".",
"array_int8",
"(",
"n_values",
")",
"values",
"=",
"list",
"(",
"values",
")",
"for",
"n",
"in",
"range",
"(",
"n_values",
")",
":",
"v",
"=",
"values",
"[",
"n",
"]",
"if",
"v",
">=",
"0",
":",
"v",
"&=",
"0x7f",
"else",
":",
"v",
"=",
"abs",
"(",
"v",
")",
"&",
"0x7f",
"if",
"v",
":",
"v",
"=",
"256",
"-",
"v",
"else",
":",
"v",
"=",
"128",
"# -128 in 2s complement",
"values",
"[",
"n",
"]",
"=",
"v",
"elif",
"data_type",
"==",
"HC",
".",
"INT16",
":",
"buf",
"=",
"_C",
".",
"array_int16",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"HC",
".",
"UINT16",
":",
"buf",
"=",
"_C",
".",
"array_uint16",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"HC",
".",
"INT32",
":",
"buf",
"=",
"_C",
".",
"array_int32",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"HC",
".",
"UINT32",
":",
"buf",
"=",
"_C",
".",
"array_uint32",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"HC",
".",
"FLOAT32",
":",
"buf",
"=",
"_C",
".",
"array_float32",
"(",
"n_values",
")",
"elif",
"data_type",
"==",
"HC",
".",
"FLOAT64",
":",
"buf",
"=",
"_C",
".",
"array_float64",
"(",
"n_values",
")",
"else",
":",
"raise",
"HDF4Error",
"(",
"\"set: illegal or unimplemented data_type\"",
")",
"for",
"n",
"in",
"range",
"(",
"n_values",
")",
":",
"buf",
"[",
"n",
"]",
"=",
"values",
"[",
"n",
"]",
"status",
"=",
"_C",
".",
"VSsetattr",
"(",
"self",
".",
"_vd_inst",
".",
"_id",
",",
"self",
".",
"_fIndex",
",",
"self",
".",
"_name",
",",
"data_type",
",",
"n_values",
",",
"buf",
")",
"_checkErr",
"(",
"'attr'",
",",
"status",
",",
"'cannot execute'",
")",
"# Update the attribute index",
"self",
".",
"_index",
"=",
"_C",
".",
"VSfindattr",
"(",
"self",
".",
"_vd_inst",
".",
"_id",
",",
"self",
".",
"_fIndex",
",",
"self",
".",
"_name",
")",
"if",
"self",
".",
"_index",
"<",
"0",
":",
"raise",
"HDF4Error",
"(",
"\"set: error retrieving attribute index\"",
")"
] | Set the attribute value.
Args::
data_type : attribute data type (see constants HC.xxx)
values : attribute value(s); specify a list to create
a multi-valued attribute; a string valued
attribute can be created by setting 'data_type'
to HC.CHAR8 and 'values' to the corresponding
string
If the attribute already exists, it will be
updated. However, it is illegal to try to change
its data type or its order (number of values).
Returns::
None
C library equivalent : VSsetattr | [
"Set",
"the",
"attribute",
"value",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/VS.py#L2406-L2493 |
4,551 | fhs/pyhdf | pyhdf/HDF.py | getlibversion | def getlibversion():
"""Get the library version info.
Args:
no argument
Returns:
4-element tuple with the following components:
-major version number (int)
-minor version number (int)
-complete library version number (int)
-additional information (string)
C library equivalent : Hgetlibversion
"""
status, major_v, minor_v, release, info = _C.Hgetlibversion()
_checkErr('getlibversion', status, "cannot get lib version")
return major_v, minor_v, release, info | python | def getlibversion():
"""Get the library version info.
Args:
no argument
Returns:
4-element tuple with the following components:
-major version number (int)
-minor version number (int)
-complete library version number (int)
-additional information (string)
C library equivalent : Hgetlibversion
"""
status, major_v, minor_v, release, info = _C.Hgetlibversion()
_checkErr('getlibversion', status, "cannot get lib version")
return major_v, minor_v, release, info | [
"def",
"getlibversion",
"(",
")",
":",
"status",
",",
"major_v",
",",
"minor_v",
",",
"release",
",",
"info",
"=",
"_C",
".",
"Hgetlibversion",
"(",
")",
"_checkErr",
"(",
"'getlibversion'",
",",
"status",
",",
"\"cannot get lib version\"",
")",
"return",
"major_v",
",",
"minor_v",
",",
"release",
",",
"info"
] | Get the library version info.
Args:
no argument
Returns:
4-element tuple with the following components:
-major version number (int)
-minor version number (int)
-complete library version number (int)
-additional information (string)
C library equivalent : Hgetlibversion | [
"Get",
"the",
"library",
"version",
"info",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/HDF.py#L99-L116 |
4,552 | fhs/pyhdf | pyhdf/HDF.py | HDF.getfileversion | def getfileversion(self):
"""Get file version info.
Args:
no argument
Returns:
4-element tuple with the following components:
-major version number (int)
-minor version number (int)
-complete library version number (int)
-additional information (string)
C library equivalent : Hgetlibversion
"""
status, major_v, minor_v, release, info = _C.Hgetfileversion(self._id)
_checkErr('getfileversion', status, "cannot get file version")
return major_v, minor_v, release, info | python | def getfileversion(self):
"""Get file version info.
Args:
no argument
Returns:
4-element tuple with the following components:
-major version number (int)
-minor version number (int)
-complete library version number (int)
-additional information (string)
C library equivalent : Hgetlibversion
"""
status, major_v, minor_v, release, info = _C.Hgetfileversion(self._id)
_checkErr('getfileversion', status, "cannot get file version")
return major_v, minor_v, release, info | [
"def",
"getfileversion",
"(",
"self",
")",
":",
"status",
",",
"major_v",
",",
"minor_v",
",",
"release",
",",
"info",
"=",
"_C",
".",
"Hgetfileversion",
"(",
"self",
".",
"_id",
")",
"_checkErr",
"(",
"'getfileversion'",
",",
"status",
",",
"\"cannot get file version\"",
")",
"return",
"major_v",
",",
"minor_v",
",",
"release",
",",
"info"
] | Get file version info.
Args:
no argument
Returns:
4-element tuple with the following components:
-major version number (int)
-minor version number (int)
-complete library version number (int)
-additional information (string)
C library equivalent : Hgetlibversion | [
"Get",
"file",
"version",
"info",
"."
] | dbdc1810a74a38df50dcad81fe903e239d2b388d | https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/HDF.py#L244-L261 |
4,553 | mattmakai/underwear | underwear/run_underwear.py | colorize | def colorize(lead, num, color):
""" Print 'lead' = 'num' in 'color' """
if num != 0 and ANSIBLE_COLOR and color is not None:
return "%s%s%-15s" % (stringc(lead, color), stringc("=", color), stringc(str(num), color))
else:
return "%s=%-4s" % (lead, str(num)) | python | def colorize(lead, num, color):
""" Print 'lead' = 'num' in 'color' """
if num != 0 and ANSIBLE_COLOR and color is not None:
return "%s%s%-15s" % (stringc(lead, color), stringc("=", color), stringc(str(num), color))
else:
return "%s=%-4s" % (lead, str(num)) | [
"def",
"colorize",
"(",
"lead",
",",
"num",
",",
"color",
")",
":",
"if",
"num",
"!=",
"0",
"and",
"ANSIBLE_COLOR",
"and",
"color",
"is",
"not",
"None",
":",
"return",
"\"%s%s%-15s\"",
"%",
"(",
"stringc",
"(",
"lead",
",",
"color",
")",
",",
"stringc",
"(",
"\"=\"",
",",
"color",
")",
",",
"stringc",
"(",
"str",
"(",
"num",
")",
",",
"color",
")",
")",
"else",
":",
"return",
"\"%s=%-4s\"",
"%",
"(",
"lead",
",",
"str",
"(",
"num",
")",
")"
] | Print 'lead' = 'num' in 'color' | [
"Print",
"lead",
"=",
"num",
"in",
"color"
] | 7c484c7937d2df86dc569d411249ba366ed43ead | https://github.com/mattmakai/underwear/blob/7c484c7937d2df86dc569d411249ba366ed43ead/underwear/run_underwear.py#L24-L29 |
4,554 | zapier/django-drip | drip/admin.py | DripAdmin.timeline | def timeline(self, request, drip_id, into_past, into_future):
"""
Return a list of people who should get emails.
"""
from django.shortcuts import render, get_object_or_404
drip = get_object_or_404(Drip, id=drip_id)
shifted_drips = []
seen_users = set()
for shifted_drip in drip.drip.walk(into_past=int(into_past), into_future=int(into_future)+1):
shifted_drip.prune()
shifted_drips.append({
'drip': shifted_drip,
'qs': shifted_drip.get_queryset().exclude(id__in=seen_users)
})
seen_users.update(shifted_drip.get_queryset().values_list('id', flat=True))
return render(request, 'drip/timeline.html', locals()) | python | def timeline(self, request, drip_id, into_past, into_future):
"""
Return a list of people who should get emails.
"""
from django.shortcuts import render, get_object_or_404
drip = get_object_or_404(Drip, id=drip_id)
shifted_drips = []
seen_users = set()
for shifted_drip in drip.drip.walk(into_past=int(into_past), into_future=int(into_future)+1):
shifted_drip.prune()
shifted_drips.append({
'drip': shifted_drip,
'qs': shifted_drip.get_queryset().exclude(id__in=seen_users)
})
seen_users.update(shifted_drip.get_queryset().values_list('id', flat=True))
return render(request, 'drip/timeline.html', locals()) | [
"def",
"timeline",
"(",
"self",
",",
"request",
",",
"drip_id",
",",
"into_past",
",",
"into_future",
")",
":",
"from",
"django",
".",
"shortcuts",
"import",
"render",
",",
"get_object_or_404",
"drip",
"=",
"get_object_or_404",
"(",
"Drip",
",",
"id",
"=",
"drip_id",
")",
"shifted_drips",
"=",
"[",
"]",
"seen_users",
"=",
"set",
"(",
")",
"for",
"shifted_drip",
"in",
"drip",
".",
"drip",
".",
"walk",
"(",
"into_past",
"=",
"int",
"(",
"into_past",
")",
",",
"into_future",
"=",
"int",
"(",
"into_future",
")",
"+",
"1",
")",
":",
"shifted_drip",
".",
"prune",
"(",
")",
"shifted_drips",
".",
"append",
"(",
"{",
"'drip'",
":",
"shifted_drip",
",",
"'qs'",
":",
"shifted_drip",
".",
"get_queryset",
"(",
")",
".",
"exclude",
"(",
"id__in",
"=",
"seen_users",
")",
"}",
")",
"seen_users",
".",
"update",
"(",
"shifted_drip",
".",
"get_queryset",
"(",
")",
".",
"values_list",
"(",
"'id'",
",",
"flat",
"=",
"True",
")",
")",
"return",
"render",
"(",
"request",
",",
"'drip/timeline.html'",
",",
"locals",
"(",
")",
")"
] | Return a list of people who should get emails. | [
"Return",
"a",
"list",
"of",
"people",
"who",
"should",
"get",
"emails",
"."
] | ffbef6927a1a20f4c353ecb108c1b484502d2b29 | https://github.com/zapier/django-drip/blob/ffbef6927a1a20f4c353ecb108c1b484502d2b29/drip/admin.py#L33-L51 |
4,555 | zapier/django-drip | drip/drips.py | DripBase.walk | def walk(self, into_past=0, into_future=0):
"""
Walk over a date range and create new instances of self with new ranges.
"""
walked_range = []
for shift in range(-into_past, into_future):
kwargs = dict(drip_model=self.drip_model,
name=self.name,
now_shift_kwargs={'days': shift})
walked_range.append(self.__class__(**kwargs))
return walked_range | python | def walk(self, into_past=0, into_future=0):
"""
Walk over a date range and create new instances of self with new ranges.
"""
walked_range = []
for shift in range(-into_past, into_future):
kwargs = dict(drip_model=self.drip_model,
name=self.name,
now_shift_kwargs={'days': shift})
walked_range.append(self.__class__(**kwargs))
return walked_range | [
"def",
"walk",
"(",
"self",
",",
"into_past",
"=",
"0",
",",
"into_future",
"=",
"0",
")",
":",
"walked_range",
"=",
"[",
"]",
"for",
"shift",
"in",
"range",
"(",
"-",
"into_past",
",",
"into_future",
")",
":",
"kwargs",
"=",
"dict",
"(",
"drip_model",
"=",
"self",
".",
"drip_model",
",",
"name",
"=",
"self",
".",
"name",
",",
"now_shift_kwargs",
"=",
"{",
"'days'",
":",
"shift",
"}",
")",
"walked_range",
".",
"append",
"(",
"self",
".",
"__class__",
"(",
"*",
"*",
"kwargs",
")",
")",
"return",
"walked_range"
] | Walk over a date range and create new instances of self with new ranges. | [
"Walk",
"over",
"a",
"date",
"range",
"and",
"create",
"new",
"instances",
"of",
"self",
"with",
"new",
"ranges",
"."
] | ffbef6927a1a20f4c353ecb108c1b484502d2b29 | https://github.com/zapier/django-drip/blob/ffbef6927a1a20f4c353ecb108c1b484502d2b29/drip/drips.py#L146-L156 |
4,556 | zapier/django-drip | drip/drips.py | DripBase.run | def run(self):
"""
Get the queryset, prune sent people, and send it.
"""
if not self.drip_model.enabled:
return None
self.prune()
count = self.send()
return count | python | def run(self):
"""
Get the queryset, prune sent people, and send it.
"""
if not self.drip_model.enabled:
return None
self.prune()
count = self.send()
return count | [
"def",
"run",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"drip_model",
".",
"enabled",
":",
"return",
"None",
"self",
".",
"prune",
"(",
")",
"count",
"=",
"self",
".",
"send",
"(",
")",
"return",
"count"
] | Get the queryset, prune sent people, and send it. | [
"Get",
"the",
"queryset",
"prune",
"sent",
"people",
"and",
"send",
"it",
"."
] | ffbef6927a1a20f4c353ecb108c1b484502d2b29 | https://github.com/zapier/django-drip/blob/ffbef6927a1a20f4c353ecb108c1b484502d2b29/drip/drips.py#L194-L204 |
4,557 | zapier/django-drip | drip/drips.py | DripBase.prune | def prune(self):
"""
Do an exclude for all Users who have a SentDrip already.
"""
target_user_ids = self.get_queryset().values_list('id', flat=True)
exclude_user_ids = SentDrip.objects.filter(date__lt=conditional_now(),
drip=self.drip_model,
user__id__in=target_user_ids)\
.values_list('user_id', flat=True)
self._queryset = self.get_queryset().exclude(id__in=exclude_user_ids) | python | def prune(self):
"""
Do an exclude for all Users who have a SentDrip already.
"""
target_user_ids = self.get_queryset().values_list('id', flat=True)
exclude_user_ids = SentDrip.objects.filter(date__lt=conditional_now(),
drip=self.drip_model,
user__id__in=target_user_ids)\
.values_list('user_id', flat=True)
self._queryset = self.get_queryset().exclude(id__in=exclude_user_ids) | [
"def",
"prune",
"(",
"self",
")",
":",
"target_user_ids",
"=",
"self",
".",
"get_queryset",
"(",
")",
".",
"values_list",
"(",
"'id'",
",",
"flat",
"=",
"True",
")",
"exclude_user_ids",
"=",
"SentDrip",
".",
"objects",
".",
"filter",
"(",
"date__lt",
"=",
"conditional_now",
"(",
")",
",",
"drip",
"=",
"self",
".",
"drip_model",
",",
"user__id__in",
"=",
"target_user_ids",
")",
".",
"values_list",
"(",
"'user_id'",
",",
"flat",
"=",
"True",
")",
"self",
".",
"_queryset",
"=",
"self",
".",
"get_queryset",
"(",
")",
".",
"exclude",
"(",
"id__in",
"=",
"exclude_user_ids",
")"
] | Do an exclude for all Users who have a SentDrip already. | [
"Do",
"an",
"exclude",
"for",
"all",
"Users",
"who",
"have",
"a",
"SentDrip",
"already",
"."
] | ffbef6927a1a20f4c353ecb108c1b484502d2b29 | https://github.com/zapier/django-drip/blob/ffbef6927a1a20f4c353ecb108c1b484502d2b29/drip/drips.py#L206-L215 |
4,558 | zapier/django-drip | drip/drips.py | DripBase.send | def send(self):
"""
Send the message to each user on the queryset.
Create SentDrip for each user that gets a message.
Returns count of created SentDrips.
"""
if not self.from_email:
self.from_email = getattr(settings, 'DRIP_FROM_EMAIL', settings.DEFAULT_FROM_EMAIL)
MessageClass = message_class_for(self.drip_model.message_class)
count = 0
for user in self.get_queryset():
message_instance = MessageClass(self, user)
try:
result = message_instance.message.send()
if result:
SentDrip.objects.create(
drip=self.drip_model,
user=user,
from_email=self.from_email,
from_email_name=self.from_email_name,
subject=message_instance.subject,
body=message_instance.body
)
count += 1
except Exception as e:
logging.error("Failed to send drip %s to user %s: %s" % (self.drip_model.id, user, e))
return count | python | def send(self):
"""
Send the message to each user on the queryset.
Create SentDrip for each user that gets a message.
Returns count of created SentDrips.
"""
if not self.from_email:
self.from_email = getattr(settings, 'DRIP_FROM_EMAIL', settings.DEFAULT_FROM_EMAIL)
MessageClass = message_class_for(self.drip_model.message_class)
count = 0
for user in self.get_queryset():
message_instance = MessageClass(self, user)
try:
result = message_instance.message.send()
if result:
SentDrip.objects.create(
drip=self.drip_model,
user=user,
from_email=self.from_email,
from_email_name=self.from_email_name,
subject=message_instance.subject,
body=message_instance.body
)
count += 1
except Exception as e:
logging.error("Failed to send drip %s to user %s: %s" % (self.drip_model.id, user, e))
return count | [
"def",
"send",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"from_email",
":",
"self",
".",
"from_email",
"=",
"getattr",
"(",
"settings",
",",
"'DRIP_FROM_EMAIL'",
",",
"settings",
".",
"DEFAULT_FROM_EMAIL",
")",
"MessageClass",
"=",
"message_class_for",
"(",
"self",
".",
"drip_model",
".",
"message_class",
")",
"count",
"=",
"0",
"for",
"user",
"in",
"self",
".",
"get_queryset",
"(",
")",
":",
"message_instance",
"=",
"MessageClass",
"(",
"self",
",",
"user",
")",
"try",
":",
"result",
"=",
"message_instance",
".",
"message",
".",
"send",
"(",
")",
"if",
"result",
":",
"SentDrip",
".",
"objects",
".",
"create",
"(",
"drip",
"=",
"self",
".",
"drip_model",
",",
"user",
"=",
"user",
",",
"from_email",
"=",
"self",
".",
"from_email",
",",
"from_email_name",
"=",
"self",
".",
"from_email_name",
",",
"subject",
"=",
"message_instance",
".",
"subject",
",",
"body",
"=",
"message_instance",
".",
"body",
")",
"count",
"+=",
"1",
"except",
"Exception",
"as",
"e",
":",
"logging",
".",
"error",
"(",
"\"Failed to send drip %s to user %s: %s\"",
"%",
"(",
"self",
".",
"drip_model",
".",
"id",
",",
"user",
",",
"e",
")",
")",
"return",
"count"
] | Send the message to each user on the queryset.
Create SentDrip for each user that gets a message.
Returns count of created SentDrips. | [
"Send",
"the",
"message",
"to",
"each",
"user",
"on",
"the",
"queryset",
"."
] | ffbef6927a1a20f4c353ecb108c1b484502d2b29 | https://github.com/zapier/django-drip/blob/ffbef6927a1a20f4c353ecb108c1b484502d2b29/drip/drips.py#L217-L248 |
4,559 | ladybug-tools/ladybug | ladybug/euclid.py | Vector2.angle | def angle(self, other):
"""Return the angle to the vector other"""
return math.acos(self.dot(other) / (self.magnitude() * other.magnitude())) | python | def angle(self, other):
"""Return the angle to the vector other"""
return math.acos(self.dot(other) / (self.magnitude() * other.magnitude())) | [
"def",
"angle",
"(",
"self",
",",
"other",
")",
":",
"return",
"math",
".",
"acos",
"(",
"self",
".",
"dot",
"(",
"other",
")",
"/",
"(",
"self",
".",
"magnitude",
"(",
")",
"*",
"other",
".",
"magnitude",
"(",
")",
")",
")"
] | Return the angle to the vector other | [
"Return",
"the",
"angle",
"to",
"the",
"vector",
"other"
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/euclid.py#L298-L300 |
4,560 | ladybug-tools/ladybug | ladybug/euclid.py | Vector2.project | def project(self, other):
"""Return one vector projected on the vector other"""
n = other.normalized()
return self.dot(n) * n | python | def project(self, other):
"""Return one vector projected on the vector other"""
n = other.normalized()
return self.dot(n) * n | [
"def",
"project",
"(",
"self",
",",
"other",
")",
":",
"n",
"=",
"other",
".",
"normalized",
"(",
")",
"return",
"self",
".",
"dot",
"(",
"n",
")",
"*",
"n"
] | Return one vector projected on the vector other | [
"Return",
"one",
"vector",
"projected",
"on",
"the",
"vector",
"other"
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/euclid.py#L302-L305 |
4,561 | ladybug-tools/ladybug | ladybug/euclid.py | Vector3.rotate_around | def rotate_around(self, axis, theta):
"""Return the vector rotated around axis through angle theta.
Right hand rule applies.
"""
# Adapted from equations published by Glenn Murray.
# http://inside.mines.edu/~gmurray/ArbitraryAxisRotation/ArbitraryAxisRotation.html
x, y, z = self.x, self.y, self.z
u, v, w = axis.x, axis.y, axis.z
# Extracted common factors for simplicity and efficiency
r2 = u**2 + v**2 + w**2
r = math.sqrt(r2)
ct = math.cos(theta)
st = math.sin(theta) / r
dt = (u * x + v * y + w * z) * (1 - ct) / r2
return Vector3((u * dt + x * ct + (-w * y + v * z) * st),
(v * dt + y * ct + (w * x - u * z) * st),
(w * dt + z * ct + (-v * x + u * y) * st)) | python | def rotate_around(self, axis, theta):
"""Return the vector rotated around axis through angle theta.
Right hand rule applies.
"""
# Adapted from equations published by Glenn Murray.
# http://inside.mines.edu/~gmurray/ArbitraryAxisRotation/ArbitraryAxisRotation.html
x, y, z = self.x, self.y, self.z
u, v, w = axis.x, axis.y, axis.z
# Extracted common factors for simplicity and efficiency
r2 = u**2 + v**2 + w**2
r = math.sqrt(r2)
ct = math.cos(theta)
st = math.sin(theta) / r
dt = (u * x + v * y + w * z) * (1 - ct) / r2
return Vector3((u * dt + x * ct + (-w * y + v * z) * st),
(v * dt + y * ct + (w * x - u * z) * st),
(w * dt + z * ct + (-v * x + u * y) * st)) | [
"def",
"rotate_around",
"(",
"self",
",",
"axis",
",",
"theta",
")",
":",
"# Adapted from equations published by Glenn Murray.",
"# http://inside.mines.edu/~gmurray/ArbitraryAxisRotation/ArbitraryAxisRotation.html",
"x",
",",
"y",
",",
"z",
"=",
"self",
".",
"x",
",",
"self",
".",
"y",
",",
"self",
".",
"z",
"u",
",",
"v",
",",
"w",
"=",
"axis",
".",
"x",
",",
"axis",
".",
"y",
",",
"axis",
".",
"z",
"# Extracted common factors for simplicity and efficiency",
"r2",
"=",
"u",
"**",
"2",
"+",
"v",
"**",
"2",
"+",
"w",
"**",
"2",
"r",
"=",
"math",
".",
"sqrt",
"(",
"r2",
")",
"ct",
"=",
"math",
".",
"cos",
"(",
"theta",
")",
"st",
"=",
"math",
".",
"sin",
"(",
"theta",
")",
"/",
"r",
"dt",
"=",
"(",
"u",
"*",
"x",
"+",
"v",
"*",
"y",
"+",
"w",
"*",
"z",
")",
"*",
"(",
"1",
"-",
"ct",
")",
"/",
"r2",
"return",
"Vector3",
"(",
"(",
"u",
"*",
"dt",
"+",
"x",
"*",
"ct",
"+",
"(",
"-",
"w",
"*",
"y",
"+",
"v",
"*",
"z",
")",
"*",
"st",
")",
",",
"(",
"v",
"*",
"dt",
"+",
"y",
"*",
"ct",
"+",
"(",
"w",
"*",
"x",
"-",
"u",
"*",
"z",
")",
"*",
"st",
")",
",",
"(",
"w",
"*",
"dt",
"+",
"z",
"*",
"ct",
"+",
"(",
"-",
"v",
"*",
"x",
"+",
"u",
"*",
"y",
")",
"*",
"st",
")",
")"
] | Return the vector rotated around axis through angle theta.
Right hand rule applies. | [
"Return",
"the",
"vector",
"rotated",
"around",
"axis",
"through",
"angle",
"theta",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/euclid.py#L588-L607 |
4,562 | ladybug-tools/ladybug | ladybug/futil.py | preparedir | def preparedir(target_dir, remove_content=True):
"""Prepare a folder for analysis.
This method creates the folder if it is not created, and removes the file in
the folder if the folder already existed.
"""
if os.path.isdir(target_dir):
if remove_content:
nukedir(target_dir, False)
return True
else:
try:
os.makedirs(target_dir)
return True
except Exception as e:
print("Failed to create folder: %s\n%s" % (target_dir, e))
return False | python | def preparedir(target_dir, remove_content=True):
"""Prepare a folder for analysis.
This method creates the folder if it is not created, and removes the file in
the folder if the folder already existed.
"""
if os.path.isdir(target_dir):
if remove_content:
nukedir(target_dir, False)
return True
else:
try:
os.makedirs(target_dir)
return True
except Exception as e:
print("Failed to create folder: %s\n%s" % (target_dir, e))
return False | [
"def",
"preparedir",
"(",
"target_dir",
",",
"remove_content",
"=",
"True",
")",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"target_dir",
")",
":",
"if",
"remove_content",
":",
"nukedir",
"(",
"target_dir",
",",
"False",
")",
"return",
"True",
"else",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"target_dir",
")",
"return",
"True",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"\"Failed to create folder: %s\\n%s\"",
"%",
"(",
"target_dir",
",",
"e",
")",
")",
"return",
"False"
] | Prepare a folder for analysis.
This method creates the folder if it is not created, and removes the file in
the folder if the folder already existed. | [
"Prepare",
"a",
"folder",
"for",
"analysis",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/futil.py#L20-L36 |
4,563 | ladybug-tools/ladybug | ladybug/futil.py | nukedir | def nukedir(target_dir, rmdir=False):
"""Delete all the files inside target_dir.
Usage:
nukedir("c:/ladybug/libs", True)
"""
d = os.path.normpath(target_dir)
if not os.path.isdir(d):
return
files = os.listdir(d)
for f in files:
if f == '.' or f == '..':
continue
path = os.path.join(d, f)
if os.path.isdir(path):
nukedir(path)
else:
try:
os.remove(path)
except Exception:
print("Failed to remove %s" % path)
if rmdir:
try:
os.rmdir(d)
except Exception:
print("Failed to remove %s" % d) | python | def nukedir(target_dir, rmdir=False):
"""Delete all the files inside target_dir.
Usage:
nukedir("c:/ladybug/libs", True)
"""
d = os.path.normpath(target_dir)
if not os.path.isdir(d):
return
files = os.listdir(d)
for f in files:
if f == '.' or f == '..':
continue
path = os.path.join(d, f)
if os.path.isdir(path):
nukedir(path)
else:
try:
os.remove(path)
except Exception:
print("Failed to remove %s" % path)
if rmdir:
try:
os.rmdir(d)
except Exception:
print("Failed to remove %s" % d) | [
"def",
"nukedir",
"(",
"target_dir",
",",
"rmdir",
"=",
"False",
")",
":",
"d",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"target_dir",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"d",
")",
":",
"return",
"files",
"=",
"os",
".",
"listdir",
"(",
"d",
")",
"for",
"f",
"in",
"files",
":",
"if",
"f",
"==",
"'.'",
"or",
"f",
"==",
"'..'",
":",
"continue",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"d",
",",
"f",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"nukedir",
"(",
"path",
")",
"else",
":",
"try",
":",
"os",
".",
"remove",
"(",
"path",
")",
"except",
"Exception",
":",
"print",
"(",
"\"Failed to remove %s\"",
"%",
"path",
")",
"if",
"rmdir",
":",
"try",
":",
"os",
".",
"rmdir",
"(",
"d",
")",
"except",
"Exception",
":",
"print",
"(",
"\"Failed to remove %s\"",
"%",
"d",
")"
] | Delete all the files inside target_dir.
Usage:
nukedir("c:/ladybug/libs", True) | [
"Delete",
"all",
"the",
"files",
"inside",
"target_dir",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/futil.py#L39-L69 |
4,564 | ladybug-tools/ladybug | ladybug/futil.py | write_to_file_by_name | def write_to_file_by_name(folder, fname, data, mkdir=False):
"""Write a string of data to file by filename and folder.
Args:
folder: Target folder (e.g. c:/ladybug).
fname: File name (e.g. testPts.pts).
data: Any data as string.
mkdir: Set to True to create the directory if doesn't exist (Default: False).
"""
if not os.path.isdir(folder):
if mkdir:
preparedir(folder)
else:
created = preparedir(folder, False)
if not created:
raise ValueError("Failed to find %s." % folder)
file_path = os.path.join(folder, fname)
with open(file_path, writemode) as outf:
try:
outf.write(str(data))
return file_path
except Exception as e:
raise IOError("Failed to write %s to file:\n\t%s" % (fname, str(e))) | python | def write_to_file_by_name(folder, fname, data, mkdir=False):
"""Write a string of data to file by filename and folder.
Args:
folder: Target folder (e.g. c:/ladybug).
fname: File name (e.g. testPts.pts).
data: Any data as string.
mkdir: Set to True to create the directory if doesn't exist (Default: False).
"""
if not os.path.isdir(folder):
if mkdir:
preparedir(folder)
else:
created = preparedir(folder, False)
if not created:
raise ValueError("Failed to find %s." % folder)
file_path = os.path.join(folder, fname)
with open(file_path, writemode) as outf:
try:
outf.write(str(data))
return file_path
except Exception as e:
raise IOError("Failed to write %s to file:\n\t%s" % (fname, str(e))) | [
"def",
"write_to_file_by_name",
"(",
"folder",
",",
"fname",
",",
"data",
",",
"mkdir",
"=",
"False",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"folder",
")",
":",
"if",
"mkdir",
":",
"preparedir",
"(",
"folder",
")",
"else",
":",
"created",
"=",
"preparedir",
"(",
"folder",
",",
"False",
")",
"if",
"not",
"created",
":",
"raise",
"ValueError",
"(",
"\"Failed to find %s.\"",
"%",
"folder",
")",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"folder",
",",
"fname",
")",
"with",
"open",
"(",
"file_path",
",",
"writemode",
")",
"as",
"outf",
":",
"try",
":",
"outf",
".",
"write",
"(",
"str",
"(",
"data",
")",
")",
"return",
"file_path",
"except",
"Exception",
"as",
"e",
":",
"raise",
"IOError",
"(",
"\"Failed to write %s to file:\\n\\t%s\"",
"%",
"(",
"fname",
",",
"str",
"(",
"e",
")",
")",
")"
] | Write a string of data to file by filename and folder.
Args:
folder: Target folder (e.g. c:/ladybug).
fname: File name (e.g. testPts.pts).
data: Any data as string.
mkdir: Set to True to create the directory if doesn't exist (Default: False). | [
"Write",
"a",
"string",
"of",
"data",
"to",
"file",
"by",
"filename",
"and",
"folder",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/futil.py#L72-L96 |
4,565 | ladybug-tools/ladybug | ladybug/futil.py | copy_files_to_folder | def copy_files_to_folder(files, target_folder, overwrite=True):
"""Copy a list of files to a new target folder.
Returns:
A list of fullpath of the new files.
"""
if not files:
return []
for f in files:
target = os.path.join(target_folder, os.path.split(f)[-1])
if target == f:
# both file path are the same!
return target
if os.path.exists(target):
if overwrite:
# remove the file before copying
try:
os.remove(target)
except Exception:
raise IOError("Failed to remove %s" % f)
else:
shutil.copy(f, target)
else:
continue
else:
print('Copying %s to %s' % (os.path.split(f)[-1],
os.path.normpath(target_folder)))
shutil.copy(f, target)
return [os.path.join(target_folder, os.path.split(f)[-1]) for f in files] | python | def copy_files_to_folder(files, target_folder, overwrite=True):
"""Copy a list of files to a new target folder.
Returns:
A list of fullpath of the new files.
"""
if not files:
return []
for f in files:
target = os.path.join(target_folder, os.path.split(f)[-1])
if target == f:
# both file path are the same!
return target
if os.path.exists(target):
if overwrite:
# remove the file before copying
try:
os.remove(target)
except Exception:
raise IOError("Failed to remove %s" % f)
else:
shutil.copy(f, target)
else:
continue
else:
print('Copying %s to %s' % (os.path.split(f)[-1],
os.path.normpath(target_folder)))
shutil.copy(f, target)
return [os.path.join(target_folder, os.path.split(f)[-1]) for f in files] | [
"def",
"copy_files_to_folder",
"(",
"files",
",",
"target_folder",
",",
"overwrite",
"=",
"True",
")",
":",
"if",
"not",
"files",
":",
"return",
"[",
"]",
"for",
"f",
"in",
"files",
":",
"target",
"=",
"os",
".",
"path",
".",
"join",
"(",
"target_folder",
",",
"os",
".",
"path",
".",
"split",
"(",
"f",
")",
"[",
"-",
"1",
"]",
")",
"if",
"target",
"==",
"f",
":",
"# both file path are the same!",
"return",
"target",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"target",
")",
":",
"if",
"overwrite",
":",
"# remove the file before copying",
"try",
":",
"os",
".",
"remove",
"(",
"target",
")",
"except",
"Exception",
":",
"raise",
"IOError",
"(",
"\"Failed to remove %s\"",
"%",
"f",
")",
"else",
":",
"shutil",
".",
"copy",
"(",
"f",
",",
"target",
")",
"else",
":",
"continue",
"else",
":",
"print",
"(",
"'Copying %s to %s'",
"%",
"(",
"os",
".",
"path",
".",
"split",
"(",
"f",
")",
"[",
"-",
"1",
"]",
",",
"os",
".",
"path",
".",
"normpath",
"(",
"target_folder",
")",
")",
")",
"shutil",
".",
"copy",
"(",
"f",
",",
"target",
")",
"return",
"[",
"os",
".",
"path",
".",
"join",
"(",
"target_folder",
",",
"os",
".",
"path",
".",
"split",
"(",
"f",
")",
"[",
"-",
"1",
"]",
")",
"for",
"f",
"in",
"files",
"]"
] | Copy a list of files to a new target folder.
Returns:
A list of fullpath of the new files. | [
"Copy",
"a",
"list",
"of",
"files",
"to",
"a",
"new",
"target",
"folder",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/futil.py#L111-L143 |
4,566 | ladybug-tools/ladybug | ladybug/futil.py | bat_to_sh | def bat_to_sh(file_path):
"""Convert honeybee .bat file to .sh file.
WARNING: This is a very simple function and doesn't handle any edge cases.
"""
sh_file = file_path[:-4] + '.sh'
with open(file_path, 'rb') as inf, open(sh_file, 'wb') as outf:
outf.write('#!/usr/bin/env bash\n\n')
for line in inf:
# pass the path lines, etc to get to the commands
if line.strip():
continue
else:
break
for line in inf:
if line.startswith('echo'):
continue
modified_line = line.replace('c:\\radiance\\bin\\', '').replace('\\', '/')
outf.write(modified_line)
print('bash file is created at:\n\t%s' % sh_file)
# Heroku - Make command.sh executable
st = os.stat(sh_file)
os.chmod(sh_file, st.st_mode | 0o111)
return sh_file | python | def bat_to_sh(file_path):
"""Convert honeybee .bat file to .sh file.
WARNING: This is a very simple function and doesn't handle any edge cases.
"""
sh_file = file_path[:-4] + '.sh'
with open(file_path, 'rb') as inf, open(sh_file, 'wb') as outf:
outf.write('#!/usr/bin/env bash\n\n')
for line in inf:
# pass the path lines, etc to get to the commands
if line.strip():
continue
else:
break
for line in inf:
if line.startswith('echo'):
continue
modified_line = line.replace('c:\\radiance\\bin\\', '').replace('\\', '/')
outf.write(modified_line)
print('bash file is created at:\n\t%s' % sh_file)
# Heroku - Make command.sh executable
st = os.stat(sh_file)
os.chmod(sh_file, st.st_mode | 0o111)
return sh_file | [
"def",
"bat_to_sh",
"(",
"file_path",
")",
":",
"sh_file",
"=",
"file_path",
"[",
":",
"-",
"4",
"]",
"+",
"'.sh'",
"with",
"open",
"(",
"file_path",
",",
"'rb'",
")",
"as",
"inf",
",",
"open",
"(",
"sh_file",
",",
"'wb'",
")",
"as",
"outf",
":",
"outf",
".",
"write",
"(",
"'#!/usr/bin/env bash\\n\\n'",
")",
"for",
"line",
"in",
"inf",
":",
"# pass the path lines, etc to get to the commands",
"if",
"line",
".",
"strip",
"(",
")",
":",
"continue",
"else",
":",
"break",
"for",
"line",
"in",
"inf",
":",
"if",
"line",
".",
"startswith",
"(",
"'echo'",
")",
":",
"continue",
"modified_line",
"=",
"line",
".",
"replace",
"(",
"'c:\\\\radiance\\\\bin\\\\'",
",",
"''",
")",
".",
"replace",
"(",
"'\\\\'",
",",
"'/'",
")",
"outf",
".",
"write",
"(",
"modified_line",
")",
"print",
"(",
"'bash file is created at:\\n\\t%s'",
"%",
"sh_file",
")",
"# Heroku - Make command.sh executable",
"st",
"=",
"os",
".",
"stat",
"(",
"sh_file",
")",
"os",
".",
"chmod",
"(",
"sh_file",
",",
"st",
".",
"st_mode",
"|",
"0o111",
")",
"return",
"sh_file"
] | Convert honeybee .bat file to .sh file.
WARNING: This is a very simple function and doesn't handle any edge cases. | [
"Convert",
"honeybee",
".",
"bat",
"file",
"to",
".",
"sh",
"file",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/futil.py#L146-L171 |
4,567 | ladybug-tools/ladybug | ladybug/futil.py | _download_py2 | def _download_py2(link, path, __hdr__):
"""Download a file from a link in Python 2."""
try:
req = urllib2.Request(link, headers=__hdr__)
u = urllib2.urlopen(req)
except Exception as e:
raise Exception(' Download failed with the error:\n{}'.format(e))
with open(path, 'wb') as outf:
for l in u:
outf.write(l)
u.close() | python | def _download_py2(link, path, __hdr__):
"""Download a file from a link in Python 2."""
try:
req = urllib2.Request(link, headers=__hdr__)
u = urllib2.urlopen(req)
except Exception as e:
raise Exception(' Download failed with the error:\n{}'.format(e))
with open(path, 'wb') as outf:
for l in u:
outf.write(l)
u.close() | [
"def",
"_download_py2",
"(",
"link",
",",
"path",
",",
"__hdr__",
")",
":",
"try",
":",
"req",
"=",
"urllib2",
".",
"Request",
"(",
"link",
",",
"headers",
"=",
"__hdr__",
")",
"u",
"=",
"urllib2",
".",
"urlopen",
"(",
"req",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"Exception",
"(",
"' Download failed with the error:\\n{}'",
".",
"format",
"(",
"e",
")",
")",
"with",
"open",
"(",
"path",
",",
"'wb'",
")",
"as",
"outf",
":",
"for",
"l",
"in",
"u",
":",
"outf",
".",
"write",
"(",
"l",
")",
"u",
".",
"close",
"(",
")"
] | Download a file from a link in Python 2. | [
"Download",
"a",
"file",
"from",
"a",
"link",
"in",
"Python",
"2",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/futil.py#L174-L185 |
4,568 | ladybug-tools/ladybug | ladybug/futil.py | _download_py3 | def _download_py3(link, path, __hdr__):
"""Download a file from a link in Python 3."""
try:
req = urllib.request.Request(link, headers=__hdr__)
u = urllib.request.urlopen(req)
except Exception as e:
raise Exception(' Download failed with the error:\n{}'.format(e))
with open(path, 'wb') as outf:
for l in u:
outf.write(l)
u.close() | python | def _download_py3(link, path, __hdr__):
"""Download a file from a link in Python 3."""
try:
req = urllib.request.Request(link, headers=__hdr__)
u = urllib.request.urlopen(req)
except Exception as e:
raise Exception(' Download failed with the error:\n{}'.format(e))
with open(path, 'wb') as outf:
for l in u:
outf.write(l)
u.close() | [
"def",
"_download_py3",
"(",
"link",
",",
"path",
",",
"__hdr__",
")",
":",
"try",
":",
"req",
"=",
"urllib",
".",
"request",
".",
"Request",
"(",
"link",
",",
"headers",
"=",
"__hdr__",
")",
"u",
"=",
"urllib",
".",
"request",
".",
"urlopen",
"(",
"req",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"Exception",
"(",
"' Download failed with the error:\\n{}'",
".",
"format",
"(",
"e",
")",
")",
"with",
"open",
"(",
"path",
",",
"'wb'",
")",
"as",
"outf",
":",
"for",
"l",
"in",
"u",
":",
"outf",
".",
"write",
"(",
"l",
")",
"u",
".",
"close",
"(",
")"
] | Download a file from a link in Python 3. | [
"Download",
"a",
"file",
"from",
"a",
"link",
"in",
"Python",
"3",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/futil.py#L188-L199 |
4,569 | ladybug-tools/ladybug | ladybug/futil.py | download_file_by_name | def download_file_by_name(url, target_folder, file_name, mkdir=False):
"""Download a file to a directory.
Args:
url: A string to a valid URL.
target_folder: Target folder for download (e.g. c:/ladybug)
file_name: File name (e.g. testPts.zip).
mkdir: Set to True to create the directory if doesn't exist (Default: False)
"""
# headers to "spoof" the download as coming from a browser (needed for E+ site)
__hdr__ = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 '
'(KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Accept': 'text/html,application/xhtml+xml,'
'application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive'}
# create the target directory.
if not os.path.isdir(target_folder):
if mkdir:
preparedir(target_folder)
else:
created = preparedir(target_folder, False)
if not created:
raise ValueError("Failed to find %s." % target_folder)
file_path = os.path.join(target_folder, file_name)
if (sys.version_info < (3, 0)):
_download_py2(url, file_path, __hdr__)
else:
_download_py3(url, file_path, __hdr__) | python | def download_file_by_name(url, target_folder, file_name, mkdir=False):
"""Download a file to a directory.
Args:
url: A string to a valid URL.
target_folder: Target folder for download (e.g. c:/ladybug)
file_name: File name (e.g. testPts.zip).
mkdir: Set to True to create the directory if doesn't exist (Default: False)
"""
# headers to "spoof" the download as coming from a browser (needed for E+ site)
__hdr__ = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 '
'(KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Accept': 'text/html,application/xhtml+xml,'
'application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive'}
# create the target directory.
if not os.path.isdir(target_folder):
if mkdir:
preparedir(target_folder)
else:
created = preparedir(target_folder, False)
if not created:
raise ValueError("Failed to find %s." % target_folder)
file_path = os.path.join(target_folder, file_name)
if (sys.version_info < (3, 0)):
_download_py2(url, file_path, __hdr__)
else:
_download_py3(url, file_path, __hdr__) | [
"def",
"download_file_by_name",
"(",
"url",
",",
"target_folder",
",",
"file_name",
",",
"mkdir",
"=",
"False",
")",
":",
"# headers to \"spoof\" the download as coming from a browser (needed for E+ site)",
"__hdr__",
"=",
"{",
"'User-Agent'",
":",
"'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 '",
"'(KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11'",
",",
"'Accept'",
":",
"'text/html,application/xhtml+xml,'",
"'application/xml;q=0.9,*/*;q=0.8'",
",",
"'Accept-Charset'",
":",
"'ISO-8859-1,utf-8;q=0.7,*;q=0.3'",
",",
"'Accept-Encoding'",
":",
"'none'",
",",
"'Accept-Language'",
":",
"'en-US,en;q=0.8'",
",",
"'Connection'",
":",
"'keep-alive'",
"}",
"# create the target directory.",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"target_folder",
")",
":",
"if",
"mkdir",
":",
"preparedir",
"(",
"target_folder",
")",
"else",
":",
"created",
"=",
"preparedir",
"(",
"target_folder",
",",
"False",
")",
"if",
"not",
"created",
":",
"raise",
"ValueError",
"(",
"\"Failed to find %s.\"",
"%",
"target_folder",
")",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"target_folder",
",",
"file_name",
")",
"if",
"(",
"sys",
".",
"version_info",
"<",
"(",
"3",
",",
"0",
")",
")",
":",
"_download_py2",
"(",
"url",
",",
"file_path",
",",
"__hdr__",
")",
"else",
":",
"_download_py3",
"(",
"url",
",",
"file_path",
",",
"__hdr__",
")"
] | Download a file to a directory.
Args:
url: A string to a valid URL.
target_folder: Target folder for download (e.g. c:/ladybug)
file_name: File name (e.g. testPts.zip).
mkdir: Set to True to create the directory if doesn't exist (Default: False) | [
"Download",
"a",
"file",
"to",
"a",
"directory",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/futil.py#L202-L234 |
4,570 | ladybug-tools/ladybug | ladybug/futil.py | unzip_file | def unzip_file(source_file, dest_dir=None, mkdir=False):
"""Unzip a compressed file.
Args:
source_file: Full path to a valid compressed file (e.g. c:/ladybug/testPts.zip)
dest_dir: Target folder to extract to (e.g. c:/ladybug).
Default is set to the same directory as the source file.
mkdir: Set to True to create the directory if doesn't exist (Default: False)
"""
# set default dest_dir and create it if need be.
if dest_dir is None:
dest_dir, fname = os.path.split(source_file)
elif not os.path.isdir(dest_dir):
if mkdir:
preparedir(dest_dir)
else:
created = preparedir(dest_dir, False)
if not created:
raise ValueError("Failed to find %s." % dest_dir)
# extract files to destination
with zipfile.ZipFile(source_file) as zf:
for member in zf.infolist():
words = member.filename.split('\\')
for word in words[:-1]:
drive, word = os.path.splitdrive(word)
head, word = os.path.split(word)
if word in (os.curdir, os.pardir, ''):
continue
dest_dir = os.path.join(dest_dir, word)
zf.extract(member, dest_dir) | python | def unzip_file(source_file, dest_dir=None, mkdir=False):
"""Unzip a compressed file.
Args:
source_file: Full path to a valid compressed file (e.g. c:/ladybug/testPts.zip)
dest_dir: Target folder to extract to (e.g. c:/ladybug).
Default is set to the same directory as the source file.
mkdir: Set to True to create the directory if doesn't exist (Default: False)
"""
# set default dest_dir and create it if need be.
if dest_dir is None:
dest_dir, fname = os.path.split(source_file)
elif not os.path.isdir(dest_dir):
if mkdir:
preparedir(dest_dir)
else:
created = preparedir(dest_dir, False)
if not created:
raise ValueError("Failed to find %s." % dest_dir)
# extract files to destination
with zipfile.ZipFile(source_file) as zf:
for member in zf.infolist():
words = member.filename.split('\\')
for word in words[:-1]:
drive, word = os.path.splitdrive(word)
head, word = os.path.split(word)
if word in (os.curdir, os.pardir, ''):
continue
dest_dir = os.path.join(dest_dir, word)
zf.extract(member, dest_dir) | [
"def",
"unzip_file",
"(",
"source_file",
",",
"dest_dir",
"=",
"None",
",",
"mkdir",
"=",
"False",
")",
":",
"# set default dest_dir and create it if need be.",
"if",
"dest_dir",
"is",
"None",
":",
"dest_dir",
",",
"fname",
"=",
"os",
".",
"path",
".",
"split",
"(",
"source_file",
")",
"elif",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"dest_dir",
")",
":",
"if",
"mkdir",
":",
"preparedir",
"(",
"dest_dir",
")",
"else",
":",
"created",
"=",
"preparedir",
"(",
"dest_dir",
",",
"False",
")",
"if",
"not",
"created",
":",
"raise",
"ValueError",
"(",
"\"Failed to find %s.\"",
"%",
"dest_dir",
")",
"# extract files to destination",
"with",
"zipfile",
".",
"ZipFile",
"(",
"source_file",
")",
"as",
"zf",
":",
"for",
"member",
"in",
"zf",
".",
"infolist",
"(",
")",
":",
"words",
"=",
"member",
".",
"filename",
".",
"split",
"(",
"'\\\\'",
")",
"for",
"word",
"in",
"words",
"[",
":",
"-",
"1",
"]",
":",
"drive",
",",
"word",
"=",
"os",
".",
"path",
".",
"splitdrive",
"(",
"word",
")",
"head",
",",
"word",
"=",
"os",
".",
"path",
".",
"split",
"(",
"word",
")",
"if",
"word",
"in",
"(",
"os",
".",
"curdir",
",",
"os",
".",
"pardir",
",",
"''",
")",
":",
"continue",
"dest_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dest_dir",
",",
"word",
")",
"zf",
".",
"extract",
"(",
"member",
",",
"dest_dir",
")"
] | Unzip a compressed file.
Args:
source_file: Full path to a valid compressed file (e.g. c:/ladybug/testPts.zip)
dest_dir: Target folder to extract to (e.g. c:/ladybug).
Default is set to the same directory as the source file.
mkdir: Set to True to create the directory if doesn't exist (Default: False) | [
"Unzip",
"a",
"compressed",
"file",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/futil.py#L249-L279 |
4,571 | ladybug-tools/ladybug | ladybug/futil.py | csv_to_matrix | def csv_to_matrix(csv_file_path):
"""Load a CSV file into a Python matrix of strings.
Args:
csv_file_path: Full path to a valid CSV file (e.g. c:/ladybug/test.csv)
"""
mtx = []
with open(csv_file_path) as csv_data_file:
for row in csv_data_file:
mtx.append(row.split(','))
return mtx | python | def csv_to_matrix(csv_file_path):
"""Load a CSV file into a Python matrix of strings.
Args:
csv_file_path: Full path to a valid CSV file (e.g. c:/ladybug/test.csv)
"""
mtx = []
with open(csv_file_path) as csv_data_file:
for row in csv_data_file:
mtx.append(row.split(','))
return mtx | [
"def",
"csv_to_matrix",
"(",
"csv_file_path",
")",
":",
"mtx",
"=",
"[",
"]",
"with",
"open",
"(",
"csv_file_path",
")",
"as",
"csv_data_file",
":",
"for",
"row",
"in",
"csv_data_file",
":",
"mtx",
".",
"append",
"(",
"row",
".",
"split",
"(",
"','",
")",
")",
"return",
"mtx"
] | Load a CSV file into a Python matrix of strings.
Args:
csv_file_path: Full path to a valid CSV file (e.g. c:/ladybug/test.csv) | [
"Load",
"a",
"CSV",
"file",
"into",
"a",
"Python",
"matrix",
"of",
"strings",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/futil.py#L282-L292 |
4,572 | ladybug-tools/ladybug | ladybug/futil.py | csv_to_num_matrix | def csv_to_num_matrix(csv_file_path):
"""Load a CSV file consisting only of numbers into a Python matrix of floats.
Args:
csv_file_path: Full path to a valid CSV file (e.g. c:/ladybug/test.csv)
"""
mtx = []
with open(csv_file_path) as csv_data_file:
for row in csv_data_file:
mtx.append([float(val) for val in row.split(',')])
return mtx | python | def csv_to_num_matrix(csv_file_path):
"""Load a CSV file consisting only of numbers into a Python matrix of floats.
Args:
csv_file_path: Full path to a valid CSV file (e.g. c:/ladybug/test.csv)
"""
mtx = []
with open(csv_file_path) as csv_data_file:
for row in csv_data_file:
mtx.append([float(val) for val in row.split(',')])
return mtx | [
"def",
"csv_to_num_matrix",
"(",
"csv_file_path",
")",
":",
"mtx",
"=",
"[",
"]",
"with",
"open",
"(",
"csv_file_path",
")",
"as",
"csv_data_file",
":",
"for",
"row",
"in",
"csv_data_file",
":",
"mtx",
".",
"append",
"(",
"[",
"float",
"(",
"val",
")",
"for",
"val",
"in",
"row",
".",
"split",
"(",
"','",
")",
"]",
")",
"return",
"mtx"
] | Load a CSV file consisting only of numbers into a Python matrix of floats.
Args:
csv_file_path: Full path to a valid CSV file (e.g. c:/ladybug/test.csv) | [
"Load",
"a",
"CSV",
"file",
"consisting",
"only",
"of",
"numbers",
"into",
"a",
"Python",
"matrix",
"of",
"floats",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/futil.py#L295-L305 |
4,573 | ladybug-tools/ladybug | ladybug/stat.py | STAT.from_json | def from_json(cls, data):
""" Create STAT from json dictionary.
Args:
data: {
'location': {} , // ladybug location schema
'ashrae_climate_zone': str,
'koppen_climate_zone': str,
'extreme_cold_week': {}, // ladybug analysis period schema
'extreme_hot_week': {}, // ladybug analysis period schema
'typical_weeks': {}, // dict of ladybug analysis period schemas
'heating_dict': {}, // dict containing heating design conditions
'cooling_dict': {}, // dict containing cooling design conditions
"monthly_db_50": [], // list of 12 float values for each month
"monthly_wb_50": [], // list of 12 float values for each month
"monthly_db_range_50": [], // list of 12 float values for each month
"monthly_wb_range_50": [], // list of 12 float values for each month
"monthly_db_100": [], // list of 12 float values for each month
"monthly_wb_100": [], // list of 12 float values for each month
"monthly_db_20": [], // list of 12 float values for each month
"monthly_wb_20": [], // list of 12 float values for each month
"monthly_db_04": [], // list of 12 float values for each month
"monthly_wb_04": [], // list of 12 float values for each month
"monthly_wind": [], // list of 12 float values for each month
"monthly_wind_dirs": [], // matrix with 12 cols for months of the year
and 8 rows for the cardinal directions.
"standard_pressure_at_elev": float, // float value for pressure in Pa
"monthly_tau_beam":[], // list of 12 float values for each month
"monthly_tau_diffuse": [] // list of 12 float values for each month
}
"""
# Initialize the class with all data missing
stat_ob = cls(None)
# Check required and optional keys
option_keys_none = ('ashrae_climate_zone', 'koppen_climate_zone',
'extreme_cold_week', 'extreme_hot_week',
'standard_pressure_at_elev')
option_keys_list = ('monthly_db_50', 'monthly_wb_50',
'monthly_db_range_50', 'monthly_wb_range_50',
'monthly_db_100', 'monthly_wb_100', 'monthly_db_20',
'monthly_wb_20', 'monthly_db_04', 'monthly_wb_04',
'monthly_wind', 'monthly_wind_dirs',
'monthly_tau_beam', 'monthly_tau_diffuse')
option_keys_dict = ('typical_weeks', 'heating_dict', 'cooling_dict')
assert 'location' in data, 'Required key "location" is missing!'
for key in option_keys_none:
if key not in data:
data[key] = None
for key in option_keys_list:
if key not in data:
data[key] = []
for key in option_keys_dict:
if key not in data:
data[key] = {}
# assign the properties of the dictionary to the stat object.
stat_ob._location = Location.from_json(data['location'])
stat_ob._ashrae_climate_zone = data['ashrae_climate_zone']
stat_ob._koppen_climate_zone = data['koppen_climate_zone']
stat_ob._extreme_cold_week = AnalysisPeriod.from_json(data['extreme_cold_week'])\
if data['extreme_cold_week'] else None
stat_ob._extreme_hot_week = AnalysisPeriod.from_json(data['extreme_hot_week'])\
if data['extreme_hot_week'] else None
stat_ob._typical_weeks = {}
for key, val in data['typical_weeks'].items():
if isinstance(val, list):
stat_ob._typical_weeks[key] = [AnalysisPeriod.from_json(v) for v in val]
else:
stat_ob._typical_weeks[key] = AnalysisPeriod.from_json(val)
stat_ob._winter_des_day_dict = data['heating_dict']
stat_ob._summer_des_day_dict = data['cooling_dict']
stat_ob._monthly_db_50 = data['monthly_db_50']
stat_ob._monthly_wb_50 = data['monthly_wb_50']
stat_ob._monthly_db_range_50 = data['monthly_db_range_50']
stat_ob._monthly_wb_range_50 = data['monthly_wb_range_50']
stat_ob._monthly_db_100 = data['monthly_db_100']
stat_ob._monthly_wb_100 = data['monthly_wb_100']
stat_ob._monthly_db_20 = data['monthly_db_20']
stat_ob._monthly_wb_20 = data['monthly_wb_20']
stat_ob._monthly_db_04 = data['monthly_db_04']
stat_ob._monthly_wb_04 = data['monthly_wb_04']
stat_ob._monthly_wind = data['monthly_wind']
stat_ob._monthly_wind_dirs = data['monthly_wind_dirs']
stat_ob._stand_press_at_elev = data['standard_pressure_at_elev']
stat_ob._monthly_tau_beam = data['monthly_tau_beam']
stat_ob._monthly_tau_diffuse = data['monthly_tau_diffuse']
return stat_ob | python | def from_json(cls, data):
""" Create STAT from json dictionary.
Args:
data: {
'location': {} , // ladybug location schema
'ashrae_climate_zone': str,
'koppen_climate_zone': str,
'extreme_cold_week': {}, // ladybug analysis period schema
'extreme_hot_week': {}, // ladybug analysis period schema
'typical_weeks': {}, // dict of ladybug analysis period schemas
'heating_dict': {}, // dict containing heating design conditions
'cooling_dict': {}, // dict containing cooling design conditions
"monthly_db_50": [], // list of 12 float values for each month
"monthly_wb_50": [], // list of 12 float values for each month
"monthly_db_range_50": [], // list of 12 float values for each month
"monthly_wb_range_50": [], // list of 12 float values for each month
"monthly_db_100": [], // list of 12 float values for each month
"monthly_wb_100": [], // list of 12 float values for each month
"monthly_db_20": [], // list of 12 float values for each month
"monthly_wb_20": [], // list of 12 float values for each month
"monthly_db_04": [], // list of 12 float values for each month
"monthly_wb_04": [], // list of 12 float values for each month
"monthly_wind": [], // list of 12 float values for each month
"monthly_wind_dirs": [], // matrix with 12 cols for months of the year
and 8 rows for the cardinal directions.
"standard_pressure_at_elev": float, // float value for pressure in Pa
"monthly_tau_beam":[], // list of 12 float values for each month
"monthly_tau_diffuse": [] // list of 12 float values for each month
}
"""
# Initialize the class with all data missing
stat_ob = cls(None)
# Check required and optional keys
option_keys_none = ('ashrae_climate_zone', 'koppen_climate_zone',
'extreme_cold_week', 'extreme_hot_week',
'standard_pressure_at_elev')
option_keys_list = ('monthly_db_50', 'monthly_wb_50',
'monthly_db_range_50', 'monthly_wb_range_50',
'monthly_db_100', 'monthly_wb_100', 'monthly_db_20',
'monthly_wb_20', 'monthly_db_04', 'monthly_wb_04',
'monthly_wind', 'monthly_wind_dirs',
'monthly_tau_beam', 'monthly_tau_diffuse')
option_keys_dict = ('typical_weeks', 'heating_dict', 'cooling_dict')
assert 'location' in data, 'Required key "location" is missing!'
for key in option_keys_none:
if key not in data:
data[key] = None
for key in option_keys_list:
if key not in data:
data[key] = []
for key in option_keys_dict:
if key not in data:
data[key] = {}
# assign the properties of the dictionary to the stat object.
stat_ob._location = Location.from_json(data['location'])
stat_ob._ashrae_climate_zone = data['ashrae_climate_zone']
stat_ob._koppen_climate_zone = data['koppen_climate_zone']
stat_ob._extreme_cold_week = AnalysisPeriod.from_json(data['extreme_cold_week'])\
if data['extreme_cold_week'] else None
stat_ob._extreme_hot_week = AnalysisPeriod.from_json(data['extreme_hot_week'])\
if data['extreme_hot_week'] else None
stat_ob._typical_weeks = {}
for key, val in data['typical_weeks'].items():
if isinstance(val, list):
stat_ob._typical_weeks[key] = [AnalysisPeriod.from_json(v) for v in val]
else:
stat_ob._typical_weeks[key] = AnalysisPeriod.from_json(val)
stat_ob._winter_des_day_dict = data['heating_dict']
stat_ob._summer_des_day_dict = data['cooling_dict']
stat_ob._monthly_db_50 = data['monthly_db_50']
stat_ob._monthly_wb_50 = data['monthly_wb_50']
stat_ob._monthly_db_range_50 = data['monthly_db_range_50']
stat_ob._monthly_wb_range_50 = data['monthly_wb_range_50']
stat_ob._monthly_db_100 = data['monthly_db_100']
stat_ob._monthly_wb_100 = data['monthly_wb_100']
stat_ob._monthly_db_20 = data['monthly_db_20']
stat_ob._monthly_wb_20 = data['monthly_wb_20']
stat_ob._monthly_db_04 = data['monthly_db_04']
stat_ob._monthly_wb_04 = data['monthly_wb_04']
stat_ob._monthly_wind = data['monthly_wind']
stat_ob._monthly_wind_dirs = data['monthly_wind_dirs']
stat_ob._stand_press_at_elev = data['standard_pressure_at_elev']
stat_ob._monthly_tau_beam = data['monthly_tau_beam']
stat_ob._monthly_tau_diffuse = data['monthly_tau_diffuse']
return stat_ob | [
"def",
"from_json",
"(",
"cls",
",",
"data",
")",
":",
"# Initialize the class with all data missing",
"stat_ob",
"=",
"cls",
"(",
"None",
")",
"# Check required and optional keys",
"option_keys_none",
"=",
"(",
"'ashrae_climate_zone'",
",",
"'koppen_climate_zone'",
",",
"'extreme_cold_week'",
",",
"'extreme_hot_week'",
",",
"'standard_pressure_at_elev'",
")",
"option_keys_list",
"=",
"(",
"'monthly_db_50'",
",",
"'monthly_wb_50'",
",",
"'monthly_db_range_50'",
",",
"'monthly_wb_range_50'",
",",
"'monthly_db_100'",
",",
"'monthly_wb_100'",
",",
"'monthly_db_20'",
",",
"'monthly_wb_20'",
",",
"'monthly_db_04'",
",",
"'monthly_wb_04'",
",",
"'monthly_wind'",
",",
"'monthly_wind_dirs'",
",",
"'monthly_tau_beam'",
",",
"'monthly_tau_diffuse'",
")",
"option_keys_dict",
"=",
"(",
"'typical_weeks'",
",",
"'heating_dict'",
",",
"'cooling_dict'",
")",
"assert",
"'location'",
"in",
"data",
",",
"'Required key \"location\" is missing!'",
"for",
"key",
"in",
"option_keys_none",
":",
"if",
"key",
"not",
"in",
"data",
":",
"data",
"[",
"key",
"]",
"=",
"None",
"for",
"key",
"in",
"option_keys_list",
":",
"if",
"key",
"not",
"in",
"data",
":",
"data",
"[",
"key",
"]",
"=",
"[",
"]",
"for",
"key",
"in",
"option_keys_dict",
":",
"if",
"key",
"not",
"in",
"data",
":",
"data",
"[",
"key",
"]",
"=",
"{",
"}",
"# assign the properties of the dictionary to the stat object.",
"stat_ob",
".",
"_location",
"=",
"Location",
".",
"from_json",
"(",
"data",
"[",
"'location'",
"]",
")",
"stat_ob",
".",
"_ashrae_climate_zone",
"=",
"data",
"[",
"'ashrae_climate_zone'",
"]",
"stat_ob",
".",
"_koppen_climate_zone",
"=",
"data",
"[",
"'koppen_climate_zone'",
"]",
"stat_ob",
".",
"_extreme_cold_week",
"=",
"AnalysisPeriod",
".",
"from_json",
"(",
"data",
"[",
"'extreme_cold_week'",
"]",
")",
"if",
"data",
"[",
"'extreme_cold_week'",
"]",
"else",
"None",
"stat_ob",
".",
"_extreme_hot_week",
"=",
"AnalysisPeriod",
".",
"from_json",
"(",
"data",
"[",
"'extreme_hot_week'",
"]",
")",
"if",
"data",
"[",
"'extreme_hot_week'",
"]",
"else",
"None",
"stat_ob",
".",
"_typical_weeks",
"=",
"{",
"}",
"for",
"key",
",",
"val",
"in",
"data",
"[",
"'typical_weeks'",
"]",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"val",
",",
"list",
")",
":",
"stat_ob",
".",
"_typical_weeks",
"[",
"key",
"]",
"=",
"[",
"AnalysisPeriod",
".",
"from_json",
"(",
"v",
")",
"for",
"v",
"in",
"val",
"]",
"else",
":",
"stat_ob",
".",
"_typical_weeks",
"[",
"key",
"]",
"=",
"AnalysisPeriod",
".",
"from_json",
"(",
"val",
")",
"stat_ob",
".",
"_winter_des_day_dict",
"=",
"data",
"[",
"'heating_dict'",
"]",
"stat_ob",
".",
"_summer_des_day_dict",
"=",
"data",
"[",
"'cooling_dict'",
"]",
"stat_ob",
".",
"_monthly_db_50",
"=",
"data",
"[",
"'monthly_db_50'",
"]",
"stat_ob",
".",
"_monthly_wb_50",
"=",
"data",
"[",
"'monthly_wb_50'",
"]",
"stat_ob",
".",
"_monthly_db_range_50",
"=",
"data",
"[",
"'monthly_db_range_50'",
"]",
"stat_ob",
".",
"_monthly_wb_range_50",
"=",
"data",
"[",
"'monthly_wb_range_50'",
"]",
"stat_ob",
".",
"_monthly_db_100",
"=",
"data",
"[",
"'monthly_db_100'",
"]",
"stat_ob",
".",
"_monthly_wb_100",
"=",
"data",
"[",
"'monthly_wb_100'",
"]",
"stat_ob",
".",
"_monthly_db_20",
"=",
"data",
"[",
"'monthly_db_20'",
"]",
"stat_ob",
".",
"_monthly_wb_20",
"=",
"data",
"[",
"'monthly_wb_20'",
"]",
"stat_ob",
".",
"_monthly_db_04",
"=",
"data",
"[",
"'monthly_db_04'",
"]",
"stat_ob",
".",
"_monthly_wb_04",
"=",
"data",
"[",
"'monthly_wb_04'",
"]",
"stat_ob",
".",
"_monthly_wind",
"=",
"data",
"[",
"'monthly_wind'",
"]",
"stat_ob",
".",
"_monthly_wind_dirs",
"=",
"data",
"[",
"'monthly_wind_dirs'",
"]",
"stat_ob",
".",
"_stand_press_at_elev",
"=",
"data",
"[",
"'standard_pressure_at_elev'",
"]",
"stat_ob",
".",
"_monthly_tau_beam",
"=",
"data",
"[",
"'monthly_tau_beam'",
"]",
"stat_ob",
".",
"_monthly_tau_diffuse",
"=",
"data",
"[",
"'monthly_tau_diffuse'",
"]",
"return",
"stat_ob"
] | Create STAT from json dictionary.
Args:
data: {
'location': {} , // ladybug location schema
'ashrae_climate_zone': str,
'koppen_climate_zone': str,
'extreme_cold_week': {}, // ladybug analysis period schema
'extreme_hot_week': {}, // ladybug analysis period schema
'typical_weeks': {}, // dict of ladybug analysis period schemas
'heating_dict': {}, // dict containing heating design conditions
'cooling_dict': {}, // dict containing cooling design conditions
"monthly_db_50": [], // list of 12 float values for each month
"monthly_wb_50": [], // list of 12 float values for each month
"monthly_db_range_50": [], // list of 12 float values for each month
"monthly_wb_range_50": [], // list of 12 float values for each month
"monthly_db_100": [], // list of 12 float values for each month
"monthly_wb_100": [], // list of 12 float values for each month
"monthly_db_20": [], // list of 12 float values for each month
"monthly_wb_20": [], // list of 12 float values for each month
"monthly_db_04": [], // list of 12 float values for each month
"monthly_wb_04": [], // list of 12 float values for each month
"monthly_wind": [], // list of 12 float values for each month
"monthly_wind_dirs": [], // matrix with 12 cols for months of the year
and 8 rows for the cardinal directions.
"standard_pressure_at_elev": float, // float value for pressure in Pa
"monthly_tau_beam":[], // list of 12 float values for each month
"monthly_tau_diffuse": [] // list of 12 float values for each month
} | [
"Create",
"STAT",
"from",
"json",
"dictionary",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/stat.py#L87-L175 |
4,574 | ladybug-tools/ladybug | ladybug/stat.py | STAT.monthly_cooling_design_days_050 | def monthly_cooling_design_days_050(self):
"""A list of 12 objects representing monthly 5.0% cooling design days."""
if self.monthly_found is False or self._monthly_db_50 == [] \
or self._monthly_wb_50 == []:
return []
else:
db_conds = [DryBulbCondition(x, y) for x, y in zip(
self._monthly_db_50, self._monthly_db_range_50)]
hu_conds = [HumidityCondition(
'Wetbulb', x, self._stand_press_at_elev) for x in self._monthly_wb_50]
ws_conds = self.monthly_wind_conditions
sky_conds = self.monthly_clear_sky_conditions
return [DesignDay(
'5% Cooling Design Day for {}'.format(self._months[i]),
'SummerDesignDay', self._location,
db_conds[i], hu_conds[i], ws_conds[i], sky_conds[i])
for i in xrange(12)] | python | def monthly_cooling_design_days_050(self):
"""A list of 12 objects representing monthly 5.0% cooling design days."""
if self.monthly_found is False or self._monthly_db_50 == [] \
or self._monthly_wb_50 == []:
return []
else:
db_conds = [DryBulbCondition(x, y) for x, y in zip(
self._monthly_db_50, self._monthly_db_range_50)]
hu_conds = [HumidityCondition(
'Wetbulb', x, self._stand_press_at_elev) for x in self._monthly_wb_50]
ws_conds = self.monthly_wind_conditions
sky_conds = self.monthly_clear_sky_conditions
return [DesignDay(
'5% Cooling Design Day for {}'.format(self._months[i]),
'SummerDesignDay', self._location,
db_conds[i], hu_conds[i], ws_conds[i], sky_conds[i])
for i in xrange(12)] | [
"def",
"monthly_cooling_design_days_050",
"(",
"self",
")",
":",
"if",
"self",
".",
"monthly_found",
"is",
"False",
"or",
"self",
".",
"_monthly_db_50",
"==",
"[",
"]",
"or",
"self",
".",
"_monthly_wb_50",
"==",
"[",
"]",
":",
"return",
"[",
"]",
"else",
":",
"db_conds",
"=",
"[",
"DryBulbCondition",
"(",
"x",
",",
"y",
")",
"for",
"x",
",",
"y",
"in",
"zip",
"(",
"self",
".",
"_monthly_db_50",
",",
"self",
".",
"_monthly_db_range_50",
")",
"]",
"hu_conds",
"=",
"[",
"HumidityCondition",
"(",
"'Wetbulb'",
",",
"x",
",",
"self",
".",
"_stand_press_at_elev",
")",
"for",
"x",
"in",
"self",
".",
"_monthly_wb_50",
"]",
"ws_conds",
"=",
"self",
".",
"monthly_wind_conditions",
"sky_conds",
"=",
"self",
".",
"monthly_clear_sky_conditions",
"return",
"[",
"DesignDay",
"(",
"'5% Cooling Design Day for {}'",
".",
"format",
"(",
"self",
".",
"_months",
"[",
"i",
"]",
")",
",",
"'SummerDesignDay'",
",",
"self",
".",
"_location",
",",
"db_conds",
"[",
"i",
"]",
",",
"hu_conds",
"[",
"i",
"]",
",",
"ws_conds",
"[",
"i",
"]",
",",
"sky_conds",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"xrange",
"(",
"12",
")",
"]"
] | A list of 12 objects representing monthly 5.0% cooling design days. | [
"A",
"list",
"of",
"12",
"objects",
"representing",
"monthly",
"5",
".",
"0%",
"cooling",
"design",
"days",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/stat.py#L497-L513 |
4,575 | ladybug-tools/ladybug | ladybug/stat.py | STAT.monthly_cooling_design_days_100 | def monthly_cooling_design_days_100(self):
"""A list of 12 objects representing monthly 10.0% cooling design days."""
if self.monthly_found is False or self._monthly_db_100 == [] \
or self._monthly_wb_100 == []:
return []
else:
db_conds = [DryBulbCondition(x, y) for x, y in zip(
self._monthly_db_100, self._monthly_db_range_50)]
hu_conds = [HumidityCondition(
'Wetbulb', x, self._stand_press_at_elev) for x in self._monthly_wb_100]
ws_conds = self.monthly_wind_conditions
sky_conds = self.monthly_clear_sky_conditions
return [DesignDay(
'10% Cooling Design Day for {}'.format(self._months[i]),
'SummerDesignDay', self._location,
db_conds[i], hu_conds[i], ws_conds[i], sky_conds[i])
for i in xrange(12)] | python | def monthly_cooling_design_days_100(self):
"""A list of 12 objects representing monthly 10.0% cooling design days."""
if self.monthly_found is False or self._monthly_db_100 == [] \
or self._monthly_wb_100 == []:
return []
else:
db_conds = [DryBulbCondition(x, y) for x, y in zip(
self._monthly_db_100, self._monthly_db_range_50)]
hu_conds = [HumidityCondition(
'Wetbulb', x, self._stand_press_at_elev) for x in self._monthly_wb_100]
ws_conds = self.monthly_wind_conditions
sky_conds = self.monthly_clear_sky_conditions
return [DesignDay(
'10% Cooling Design Day for {}'.format(self._months[i]),
'SummerDesignDay', self._location,
db_conds[i], hu_conds[i], ws_conds[i], sky_conds[i])
for i in xrange(12)] | [
"def",
"monthly_cooling_design_days_100",
"(",
"self",
")",
":",
"if",
"self",
".",
"monthly_found",
"is",
"False",
"or",
"self",
".",
"_monthly_db_100",
"==",
"[",
"]",
"or",
"self",
".",
"_monthly_wb_100",
"==",
"[",
"]",
":",
"return",
"[",
"]",
"else",
":",
"db_conds",
"=",
"[",
"DryBulbCondition",
"(",
"x",
",",
"y",
")",
"for",
"x",
",",
"y",
"in",
"zip",
"(",
"self",
".",
"_monthly_db_100",
",",
"self",
".",
"_monthly_db_range_50",
")",
"]",
"hu_conds",
"=",
"[",
"HumidityCondition",
"(",
"'Wetbulb'",
",",
"x",
",",
"self",
".",
"_stand_press_at_elev",
")",
"for",
"x",
"in",
"self",
".",
"_monthly_wb_100",
"]",
"ws_conds",
"=",
"self",
".",
"monthly_wind_conditions",
"sky_conds",
"=",
"self",
".",
"monthly_clear_sky_conditions",
"return",
"[",
"DesignDay",
"(",
"'10% Cooling Design Day for {}'",
".",
"format",
"(",
"self",
".",
"_months",
"[",
"i",
"]",
")",
",",
"'SummerDesignDay'",
",",
"self",
".",
"_location",
",",
"db_conds",
"[",
"i",
"]",
",",
"hu_conds",
"[",
"i",
"]",
",",
"ws_conds",
"[",
"i",
"]",
",",
"sky_conds",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"xrange",
"(",
"12",
")",
"]"
] | A list of 12 objects representing monthly 10.0% cooling design days. | [
"A",
"list",
"of",
"12",
"objects",
"representing",
"monthly",
"10",
".",
"0%",
"cooling",
"design",
"days",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/stat.py#L516-L532 |
4,576 | ladybug-tools/ladybug | ladybug/stat.py | STAT.monthly_cooling_design_days_020 | def monthly_cooling_design_days_020(self):
"""A list of 12 objects representing monthly 2.0% cooling design days."""
if self.monthly_found is False or self._monthly_db_20 == [] \
or self._monthly_wb_20 == []:
return []
else:
db_conds = [DryBulbCondition(x, y) for x, y in zip(
self._monthly_db_20, self._monthly_db_range_50)]
hu_conds = [HumidityCondition(
'Wetbulb', x, self._stand_press_at_elev) for x in self._monthly_wb_20]
ws_conds = self.monthly_wind_conditions
sky_conds = self.monthly_clear_sky_conditions
return [DesignDay(
'2% Cooling Design Day for {}'.format(self._months[i]),
'SummerDesignDay', self._location,
db_conds[i], hu_conds[i], ws_conds[i], sky_conds[i])
for i in xrange(12)] | python | def monthly_cooling_design_days_020(self):
"""A list of 12 objects representing monthly 2.0% cooling design days."""
if self.monthly_found is False or self._monthly_db_20 == [] \
or self._monthly_wb_20 == []:
return []
else:
db_conds = [DryBulbCondition(x, y) for x, y in zip(
self._monthly_db_20, self._monthly_db_range_50)]
hu_conds = [HumidityCondition(
'Wetbulb', x, self._stand_press_at_elev) for x in self._monthly_wb_20]
ws_conds = self.monthly_wind_conditions
sky_conds = self.monthly_clear_sky_conditions
return [DesignDay(
'2% Cooling Design Day for {}'.format(self._months[i]),
'SummerDesignDay', self._location,
db_conds[i], hu_conds[i], ws_conds[i], sky_conds[i])
for i in xrange(12)] | [
"def",
"monthly_cooling_design_days_020",
"(",
"self",
")",
":",
"if",
"self",
".",
"monthly_found",
"is",
"False",
"or",
"self",
".",
"_monthly_db_20",
"==",
"[",
"]",
"or",
"self",
".",
"_monthly_wb_20",
"==",
"[",
"]",
":",
"return",
"[",
"]",
"else",
":",
"db_conds",
"=",
"[",
"DryBulbCondition",
"(",
"x",
",",
"y",
")",
"for",
"x",
",",
"y",
"in",
"zip",
"(",
"self",
".",
"_monthly_db_20",
",",
"self",
".",
"_monthly_db_range_50",
")",
"]",
"hu_conds",
"=",
"[",
"HumidityCondition",
"(",
"'Wetbulb'",
",",
"x",
",",
"self",
".",
"_stand_press_at_elev",
")",
"for",
"x",
"in",
"self",
".",
"_monthly_wb_20",
"]",
"ws_conds",
"=",
"self",
".",
"monthly_wind_conditions",
"sky_conds",
"=",
"self",
".",
"monthly_clear_sky_conditions",
"return",
"[",
"DesignDay",
"(",
"'2% Cooling Design Day for {}'",
".",
"format",
"(",
"self",
".",
"_months",
"[",
"i",
"]",
")",
",",
"'SummerDesignDay'",
",",
"self",
".",
"_location",
",",
"db_conds",
"[",
"i",
"]",
",",
"hu_conds",
"[",
"i",
"]",
",",
"ws_conds",
"[",
"i",
"]",
",",
"sky_conds",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"xrange",
"(",
"12",
")",
"]"
] | A list of 12 objects representing monthly 2.0% cooling design days. | [
"A",
"list",
"of",
"12",
"objects",
"representing",
"monthly",
"2",
".",
"0%",
"cooling",
"design",
"days",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/stat.py#L535-L551 |
4,577 | ladybug-tools/ladybug | ladybug/stat.py | STAT.monthly_cooling_design_days_004 | def monthly_cooling_design_days_004(self):
"""A list of 12 objects representing monthly 0.4% cooling design days."""
if self.monthly_found is False or self._monthly_db_04 == [] \
or self._monthly_wb_04 == []:
return []
else:
db_conds = [DryBulbCondition(x, y) for x, y in zip(
self._monthly_db_04, self._monthly_db_range_50)]
hu_conds = [HumidityCondition(
'Wetbulb', x, self._stand_press_at_elev) for x in self._monthly_wb_04]
ws_conds = self.monthly_wind_conditions
sky_conds = self.monthly_clear_sky_conditions
return [DesignDay(
'0.4% Cooling Design Day for {}'.format(self._months[i]),
'SummerDesignDay', self._location,
db_conds[i], hu_conds[i], ws_conds[i], sky_conds[i])
for i in xrange(12)] | python | def monthly_cooling_design_days_004(self):
"""A list of 12 objects representing monthly 0.4% cooling design days."""
if self.monthly_found is False or self._monthly_db_04 == [] \
or self._monthly_wb_04 == []:
return []
else:
db_conds = [DryBulbCondition(x, y) for x, y in zip(
self._monthly_db_04, self._monthly_db_range_50)]
hu_conds = [HumidityCondition(
'Wetbulb', x, self._stand_press_at_elev) for x in self._monthly_wb_04]
ws_conds = self.monthly_wind_conditions
sky_conds = self.monthly_clear_sky_conditions
return [DesignDay(
'0.4% Cooling Design Day for {}'.format(self._months[i]),
'SummerDesignDay', self._location,
db_conds[i], hu_conds[i], ws_conds[i], sky_conds[i])
for i in xrange(12)] | [
"def",
"monthly_cooling_design_days_004",
"(",
"self",
")",
":",
"if",
"self",
".",
"monthly_found",
"is",
"False",
"or",
"self",
".",
"_monthly_db_04",
"==",
"[",
"]",
"or",
"self",
".",
"_monthly_wb_04",
"==",
"[",
"]",
":",
"return",
"[",
"]",
"else",
":",
"db_conds",
"=",
"[",
"DryBulbCondition",
"(",
"x",
",",
"y",
")",
"for",
"x",
",",
"y",
"in",
"zip",
"(",
"self",
".",
"_monthly_db_04",
",",
"self",
".",
"_monthly_db_range_50",
")",
"]",
"hu_conds",
"=",
"[",
"HumidityCondition",
"(",
"'Wetbulb'",
",",
"x",
",",
"self",
".",
"_stand_press_at_elev",
")",
"for",
"x",
"in",
"self",
".",
"_monthly_wb_04",
"]",
"ws_conds",
"=",
"self",
".",
"monthly_wind_conditions",
"sky_conds",
"=",
"self",
".",
"monthly_clear_sky_conditions",
"return",
"[",
"DesignDay",
"(",
"'0.4% Cooling Design Day for {}'",
".",
"format",
"(",
"self",
".",
"_months",
"[",
"i",
"]",
")",
",",
"'SummerDesignDay'",
",",
"self",
".",
"_location",
",",
"db_conds",
"[",
"i",
"]",
",",
"hu_conds",
"[",
"i",
"]",
",",
"ws_conds",
"[",
"i",
"]",
",",
"sky_conds",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"xrange",
"(",
"12",
")",
"]"
] | A list of 12 objects representing monthly 0.4% cooling design days. | [
"A",
"list",
"of",
"12",
"objects",
"representing",
"monthly",
"0",
".",
"4%",
"cooling",
"design",
"days",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/stat.py#L554-L570 |
4,578 | ladybug-tools/ladybug | ladybug/stat.py | STAT.monthly_wind_conditions | def monthly_wind_conditions(self):
"""A list of 12 monthly wind conditions that are used on the design days."""
return [WindCondition(x, y) for x, y in zip(
self._monthly_wind, self.monthly_wind_dirs)] | python | def monthly_wind_conditions(self):
"""A list of 12 monthly wind conditions that are used on the design days."""
return [WindCondition(x, y) for x, y in zip(
self._monthly_wind, self.monthly_wind_dirs)] | [
"def",
"monthly_wind_conditions",
"(",
"self",
")",
":",
"return",
"[",
"WindCondition",
"(",
"x",
",",
"y",
")",
"for",
"x",
",",
"y",
"in",
"zip",
"(",
"self",
".",
"_monthly_wind",
",",
"self",
".",
"monthly_wind_dirs",
")",
"]"
] | A list of 12 monthly wind conditions that are used on the design days. | [
"A",
"list",
"of",
"12",
"monthly",
"wind",
"conditions",
"that",
"are",
"used",
"on",
"the",
"design",
"days",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/stat.py#L598-L601 |
4,579 | ladybug-tools/ladybug | ladybug/stat.py | STAT.monthly_wind_dirs | def monthly_wind_dirs(self):
"""A list of prevailing wind directions for each month."""
mwd = zip(*self._monthly_wind_dirs)
return [self._wind_dirs[mon.index(max(mon))] for mon in mwd] | python | def monthly_wind_dirs(self):
"""A list of prevailing wind directions for each month."""
mwd = zip(*self._monthly_wind_dirs)
return [self._wind_dirs[mon.index(max(mon))] for mon in mwd] | [
"def",
"monthly_wind_dirs",
"(",
"self",
")",
":",
"mwd",
"=",
"zip",
"(",
"*",
"self",
".",
"_monthly_wind_dirs",
")",
"return",
"[",
"self",
".",
"_wind_dirs",
"[",
"mon",
".",
"index",
"(",
"max",
"(",
"mon",
")",
")",
"]",
"for",
"mon",
"in",
"mwd",
"]"
] | A list of prevailing wind directions for each month. | [
"A",
"list",
"of",
"prevailing",
"wind",
"directions",
"for",
"each",
"month",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/stat.py#L609-L612 |
4,580 | ladybug-tools/ladybug | ladybug/stat.py | STAT.monthly_clear_sky_conditions | def monthly_clear_sky_conditions(self):
"""A list of 12 monthly clear sky conditions that are used on the design days."""
if self._monthly_tau_diffuse is [] or self._monthly_tau_beam is []:
return [OriginalClearSkyCondition(i, 21) for i in xrange(1, 13)]
return [RevisedClearSkyCondition(i, 21, x, y) for i, x, y in zip(
list(xrange(1, 13)), self._monthly_tau_beam, self._monthly_tau_diffuse)] | python | def monthly_clear_sky_conditions(self):
"""A list of 12 monthly clear sky conditions that are used on the design days."""
if self._monthly_tau_diffuse is [] or self._monthly_tau_beam is []:
return [OriginalClearSkyCondition(i, 21) for i in xrange(1, 13)]
return [RevisedClearSkyCondition(i, 21, x, y) for i, x, y in zip(
list(xrange(1, 13)), self._monthly_tau_beam, self._monthly_tau_diffuse)] | [
"def",
"monthly_clear_sky_conditions",
"(",
"self",
")",
":",
"if",
"self",
".",
"_monthly_tau_diffuse",
"is",
"[",
"]",
"or",
"self",
".",
"_monthly_tau_beam",
"is",
"[",
"]",
":",
"return",
"[",
"OriginalClearSkyCondition",
"(",
"i",
",",
"21",
")",
"for",
"i",
"in",
"xrange",
"(",
"1",
",",
"13",
")",
"]",
"return",
"[",
"RevisedClearSkyCondition",
"(",
"i",
",",
"21",
",",
"x",
",",
"y",
")",
"for",
"i",
",",
"x",
",",
"y",
"in",
"zip",
"(",
"list",
"(",
"xrange",
"(",
"1",
",",
"13",
")",
")",
",",
"self",
".",
"_monthly_tau_beam",
",",
"self",
".",
"_monthly_tau_diffuse",
")",
"]"
] | A list of 12 monthly clear sky conditions that are used on the design days. | [
"A",
"list",
"of",
"12",
"monthly",
"clear",
"sky",
"conditions",
"that",
"are",
"used",
"on",
"the",
"design",
"days",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/stat.py#L615-L620 |
4,581 | ladybug-tools/ladybug | ladybug/stat.py | STAT.to_json | def to_json(self):
"""Convert the STAT object to a dictionary."""
def jsonify_dict(base_dict):
new_dict = {}
for key, val in base_dict.items():
if isinstance(val, list):
new_dict[key] = [v.to_json() for v in val]
else:
new_dict[key] = val.to_json()
return new_dict
return {
'location': self.location.to_json(),
'ashrae_climate_zone': self.ashrae_climate_zone,
'koppen_climate_zone': self.koppen_climate_zone,
'extreme_cold_week': self.extreme_cold_week.to_json()
if self.extreme_cold_week else None,
'extreme_hot_week': self.extreme_hot_week.to_json()
if self.extreme_cold_week else None,
'typical_weeks': jsonify_dict(self._typical_weeks),
'heating_dict': self._winter_des_day_dict,
'cooling_dict': self._summer_des_day_dict,
"monthly_db_50": self._monthly_db_50,
"monthly_wb_50": self._monthly_wb_50,
"monthly_db_range_50": self._monthly_db_range_50,
"monthly_wb_range_50": self._monthly_wb_range_50,
"monthly_db_100": self._monthly_db_100,
"monthly_wb_100": self._monthly_wb_100,
"monthly_db_20": self._monthly_db_20,
"monthly_wb_20": self._monthly_wb_20,
"monthly_db_04": self._monthly_db_04,
"monthly_wb_04": self._monthly_wb_04,
"monthly_wind": self._monthly_wind,
"monthly_wind_dirs": self._monthly_wind_dirs,
"standard_pressure_at_elev": self.standard_pressure_at_elev,
"monthly_tau_beam": self.monthly_tau_beam,
"monthly_tau_diffuse": self.monthly_tau_diffuse
} | python | def to_json(self):
"""Convert the STAT object to a dictionary."""
def jsonify_dict(base_dict):
new_dict = {}
for key, val in base_dict.items():
if isinstance(val, list):
new_dict[key] = [v.to_json() for v in val]
else:
new_dict[key] = val.to_json()
return new_dict
return {
'location': self.location.to_json(),
'ashrae_climate_zone': self.ashrae_climate_zone,
'koppen_climate_zone': self.koppen_climate_zone,
'extreme_cold_week': self.extreme_cold_week.to_json()
if self.extreme_cold_week else None,
'extreme_hot_week': self.extreme_hot_week.to_json()
if self.extreme_cold_week else None,
'typical_weeks': jsonify_dict(self._typical_weeks),
'heating_dict': self._winter_des_day_dict,
'cooling_dict': self._summer_des_day_dict,
"monthly_db_50": self._monthly_db_50,
"monthly_wb_50": self._monthly_wb_50,
"monthly_db_range_50": self._monthly_db_range_50,
"monthly_wb_range_50": self._monthly_wb_range_50,
"monthly_db_100": self._monthly_db_100,
"monthly_wb_100": self._monthly_wb_100,
"monthly_db_20": self._monthly_db_20,
"monthly_wb_20": self._monthly_wb_20,
"monthly_db_04": self._monthly_db_04,
"monthly_wb_04": self._monthly_wb_04,
"monthly_wind": self._monthly_wind,
"monthly_wind_dirs": self._monthly_wind_dirs,
"standard_pressure_at_elev": self.standard_pressure_at_elev,
"monthly_tau_beam": self.monthly_tau_beam,
"monthly_tau_diffuse": self.monthly_tau_diffuse
} | [
"def",
"to_json",
"(",
"self",
")",
":",
"def",
"jsonify_dict",
"(",
"base_dict",
")",
":",
"new_dict",
"=",
"{",
"}",
"for",
"key",
",",
"val",
"in",
"base_dict",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"val",
",",
"list",
")",
":",
"new_dict",
"[",
"key",
"]",
"=",
"[",
"v",
".",
"to_json",
"(",
")",
"for",
"v",
"in",
"val",
"]",
"else",
":",
"new_dict",
"[",
"key",
"]",
"=",
"val",
".",
"to_json",
"(",
")",
"return",
"new_dict",
"return",
"{",
"'location'",
":",
"self",
".",
"location",
".",
"to_json",
"(",
")",
",",
"'ashrae_climate_zone'",
":",
"self",
".",
"ashrae_climate_zone",
",",
"'koppen_climate_zone'",
":",
"self",
".",
"koppen_climate_zone",
",",
"'extreme_cold_week'",
":",
"self",
".",
"extreme_cold_week",
".",
"to_json",
"(",
")",
"if",
"self",
".",
"extreme_cold_week",
"else",
"None",
",",
"'extreme_hot_week'",
":",
"self",
".",
"extreme_hot_week",
".",
"to_json",
"(",
")",
"if",
"self",
".",
"extreme_cold_week",
"else",
"None",
",",
"'typical_weeks'",
":",
"jsonify_dict",
"(",
"self",
".",
"_typical_weeks",
")",
",",
"'heating_dict'",
":",
"self",
".",
"_winter_des_day_dict",
",",
"'cooling_dict'",
":",
"self",
".",
"_summer_des_day_dict",
",",
"\"monthly_db_50\"",
":",
"self",
".",
"_monthly_db_50",
",",
"\"monthly_wb_50\"",
":",
"self",
".",
"_monthly_wb_50",
",",
"\"monthly_db_range_50\"",
":",
"self",
".",
"_monthly_db_range_50",
",",
"\"monthly_wb_range_50\"",
":",
"self",
".",
"_monthly_wb_range_50",
",",
"\"monthly_db_100\"",
":",
"self",
".",
"_monthly_db_100",
",",
"\"monthly_wb_100\"",
":",
"self",
".",
"_monthly_wb_100",
",",
"\"monthly_db_20\"",
":",
"self",
".",
"_monthly_db_20",
",",
"\"monthly_wb_20\"",
":",
"self",
".",
"_monthly_wb_20",
",",
"\"monthly_db_04\"",
":",
"self",
".",
"_monthly_db_04",
",",
"\"monthly_wb_04\"",
":",
"self",
".",
"_monthly_wb_04",
",",
"\"monthly_wind\"",
":",
"self",
".",
"_monthly_wind",
",",
"\"monthly_wind_dirs\"",
":",
"self",
".",
"_monthly_wind_dirs",
",",
"\"standard_pressure_at_elev\"",
":",
"self",
".",
"standard_pressure_at_elev",
",",
"\"monthly_tau_beam\"",
":",
"self",
".",
"monthly_tau_beam",
",",
"\"monthly_tau_diffuse\"",
":",
"self",
".",
"monthly_tau_diffuse",
"}"
] | Convert the STAT object to a dictionary. | [
"Convert",
"the",
"STAT",
"object",
"to",
"a",
"dictionary",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/stat.py#L642-L678 |
4,582 | ladybug-tools/ladybug | ladybug/datatype/base.py | DataTypeBase.from_json | def from_json(cls, data):
"""Create a data type from a dictionary.
Args:
data: Data as a dictionary.
{
"name": data type name of the data type as a string
"data_type": the class name of the data type as a string
"base_unit": the base unit of the data type
}
"""
assert 'name' in data, 'Required keyword "name" is missing!'
assert 'data_type' in data, 'Required keyword "data_type" is missing!'
if cls._type_enumeration is None:
cls._type_enumeration = _DataTypeEnumeration(import_modules=False)
if data['data_type'] == 'GenericType':
assert 'base_unit' in data, \
'Keyword "base_unit" is missing and is required for GenericType.'
return cls._type_enumeration._GENERICTYPE(data['name'], data['base_unit'])
elif data['data_type'] in cls._type_enumeration._TYPES:
clss = cls._type_enumeration._TYPES[data['data_type']]
if data['data_type'] == data['name'].title().replace(' ', ''):
return clss()
else:
instance = clss()
instance._name = data['name']
return instance
else:
raise ValueError(
'Data Type {} could not be recognized'.format(data['data_type'])) | python | def from_json(cls, data):
"""Create a data type from a dictionary.
Args:
data: Data as a dictionary.
{
"name": data type name of the data type as a string
"data_type": the class name of the data type as a string
"base_unit": the base unit of the data type
}
"""
assert 'name' in data, 'Required keyword "name" is missing!'
assert 'data_type' in data, 'Required keyword "data_type" is missing!'
if cls._type_enumeration is None:
cls._type_enumeration = _DataTypeEnumeration(import_modules=False)
if data['data_type'] == 'GenericType':
assert 'base_unit' in data, \
'Keyword "base_unit" is missing and is required for GenericType.'
return cls._type_enumeration._GENERICTYPE(data['name'], data['base_unit'])
elif data['data_type'] in cls._type_enumeration._TYPES:
clss = cls._type_enumeration._TYPES[data['data_type']]
if data['data_type'] == data['name'].title().replace(' ', ''):
return clss()
else:
instance = clss()
instance._name = data['name']
return instance
else:
raise ValueError(
'Data Type {} could not be recognized'.format(data['data_type'])) | [
"def",
"from_json",
"(",
"cls",
",",
"data",
")",
":",
"assert",
"'name'",
"in",
"data",
",",
"'Required keyword \"name\" is missing!'",
"assert",
"'data_type'",
"in",
"data",
",",
"'Required keyword \"data_type\" is missing!'",
"if",
"cls",
".",
"_type_enumeration",
"is",
"None",
":",
"cls",
".",
"_type_enumeration",
"=",
"_DataTypeEnumeration",
"(",
"import_modules",
"=",
"False",
")",
"if",
"data",
"[",
"'data_type'",
"]",
"==",
"'GenericType'",
":",
"assert",
"'base_unit'",
"in",
"data",
",",
"'Keyword \"base_unit\" is missing and is required for GenericType.'",
"return",
"cls",
".",
"_type_enumeration",
".",
"_GENERICTYPE",
"(",
"data",
"[",
"'name'",
"]",
",",
"data",
"[",
"'base_unit'",
"]",
")",
"elif",
"data",
"[",
"'data_type'",
"]",
"in",
"cls",
".",
"_type_enumeration",
".",
"_TYPES",
":",
"clss",
"=",
"cls",
".",
"_type_enumeration",
".",
"_TYPES",
"[",
"data",
"[",
"'data_type'",
"]",
"]",
"if",
"data",
"[",
"'data_type'",
"]",
"==",
"data",
"[",
"'name'",
"]",
".",
"title",
"(",
")",
".",
"replace",
"(",
"' '",
",",
"''",
")",
":",
"return",
"clss",
"(",
")",
"else",
":",
"instance",
"=",
"clss",
"(",
")",
"instance",
".",
"_name",
"=",
"data",
"[",
"'name'",
"]",
"return",
"instance",
"else",
":",
"raise",
"ValueError",
"(",
"'Data Type {} could not be recognized'",
".",
"format",
"(",
"data",
"[",
"'data_type'",
"]",
")",
")"
] | Create a data type from a dictionary.
Args:
data: Data as a dictionary.
{
"name": data type name of the data type as a string
"data_type": the class name of the data type as a string
"base_unit": the base unit of the data type
} | [
"Create",
"a",
"data",
"type",
"from",
"a",
"dictionary",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/datatype/base.py#L70-L100 |
4,583 | ladybug-tools/ladybug | ladybug/datatype/base.py | DataTypeBase.is_unit_acceptable | def is_unit_acceptable(self, unit, raise_exception=True):
"""Check if a certain unit is acceptable for the data type.
Args:
unit: A text string representing the abbreviated unit.
raise_exception: Set to True to raise an exception if not acceptable.
"""
_is_acceptable = unit in self.units
if _is_acceptable or raise_exception is False:
return _is_acceptable
else:
raise ValueError(
'{0} is not an acceptable unit type for {1}. '
'Choose from the following: {2}'.format(
unit, self.__class__.__name__, self.units
)
) | python | def is_unit_acceptable(self, unit, raise_exception=True):
"""Check if a certain unit is acceptable for the data type.
Args:
unit: A text string representing the abbreviated unit.
raise_exception: Set to True to raise an exception if not acceptable.
"""
_is_acceptable = unit in self.units
if _is_acceptable or raise_exception is False:
return _is_acceptable
else:
raise ValueError(
'{0} is not an acceptable unit type for {1}. '
'Choose from the following: {2}'.format(
unit, self.__class__.__name__, self.units
)
) | [
"def",
"is_unit_acceptable",
"(",
"self",
",",
"unit",
",",
"raise_exception",
"=",
"True",
")",
":",
"_is_acceptable",
"=",
"unit",
"in",
"self",
".",
"units",
"if",
"_is_acceptable",
"or",
"raise_exception",
"is",
"False",
":",
"return",
"_is_acceptable",
"else",
":",
"raise",
"ValueError",
"(",
"'{0} is not an acceptable unit type for {1}. '",
"'Choose from the following: {2}'",
".",
"format",
"(",
"unit",
",",
"self",
".",
"__class__",
".",
"__name__",
",",
"self",
".",
"units",
")",
")"
] | Check if a certain unit is acceptable for the data type.
Args:
unit: A text string representing the abbreviated unit.
raise_exception: Set to True to raise an exception if not acceptable. | [
"Check",
"if",
"a",
"certain",
"unit",
"is",
"acceptable",
"for",
"the",
"data",
"type",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/datatype/base.py#L102-L119 |
4,584 | ladybug-tools/ladybug | ladybug/datatype/base.py | DataTypeBase._is_numeric | def _is_numeric(self, values):
"""Check to be sure values are numbers before doing numerical operations."""
if len(values) > 0:
assert isinstance(values[0], (float, int)), \
"values must be numbers to perform math operations. Got {}".format(
type(values[0]))
return True | python | def _is_numeric(self, values):
"""Check to be sure values are numbers before doing numerical operations."""
if len(values) > 0:
assert isinstance(values[0], (float, int)), \
"values must be numbers to perform math operations. Got {}".format(
type(values[0]))
return True | [
"def",
"_is_numeric",
"(",
"self",
",",
"values",
")",
":",
"if",
"len",
"(",
"values",
")",
">",
"0",
":",
"assert",
"isinstance",
"(",
"values",
"[",
"0",
"]",
",",
"(",
"float",
",",
"int",
")",
")",
",",
"\"values must be numbers to perform math operations. Got {}\"",
".",
"format",
"(",
"type",
"(",
"values",
"[",
"0",
"]",
")",
")",
"return",
"True"
] | Check to be sure values are numbers before doing numerical operations. | [
"Check",
"to",
"be",
"sure",
"values",
"are",
"numbers",
"before",
"doing",
"numerical",
"operations",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/datatype/base.py#L188-L194 |
4,585 | ladybug-tools/ladybug | ladybug/datatype/base.py | DataTypeBase._to_unit_base | def _to_unit_base(self, base_unit, values, unit, from_unit):
"""Return values in a given unit given the input from_unit."""
self._is_numeric(values)
namespace = {'self': self, 'values': values}
if not from_unit == base_unit:
self.is_unit_acceptable(from_unit, True)
statement = '[self._{}_to_{}(val) for val in values]'.format(
self._clean(from_unit), self._clean(base_unit))
values = eval(statement, namespace)
namespace['values'] = values
if not unit == base_unit:
self.is_unit_acceptable(unit, True)
statement = '[self._{}_to_{}(val) for val in values]'.format(
self._clean(base_unit), self._clean(unit))
values = eval(statement, namespace)
return values | python | def _to_unit_base(self, base_unit, values, unit, from_unit):
"""Return values in a given unit given the input from_unit."""
self._is_numeric(values)
namespace = {'self': self, 'values': values}
if not from_unit == base_unit:
self.is_unit_acceptable(from_unit, True)
statement = '[self._{}_to_{}(val) for val in values]'.format(
self._clean(from_unit), self._clean(base_unit))
values = eval(statement, namespace)
namespace['values'] = values
if not unit == base_unit:
self.is_unit_acceptable(unit, True)
statement = '[self._{}_to_{}(val) for val in values]'.format(
self._clean(base_unit), self._clean(unit))
values = eval(statement, namespace)
return values | [
"def",
"_to_unit_base",
"(",
"self",
",",
"base_unit",
",",
"values",
",",
"unit",
",",
"from_unit",
")",
":",
"self",
".",
"_is_numeric",
"(",
"values",
")",
"namespace",
"=",
"{",
"'self'",
":",
"self",
",",
"'values'",
":",
"values",
"}",
"if",
"not",
"from_unit",
"==",
"base_unit",
":",
"self",
".",
"is_unit_acceptable",
"(",
"from_unit",
",",
"True",
")",
"statement",
"=",
"'[self._{}_to_{}(val) for val in values]'",
".",
"format",
"(",
"self",
".",
"_clean",
"(",
"from_unit",
")",
",",
"self",
".",
"_clean",
"(",
"base_unit",
")",
")",
"values",
"=",
"eval",
"(",
"statement",
",",
"namespace",
")",
"namespace",
"[",
"'values'",
"]",
"=",
"values",
"if",
"not",
"unit",
"==",
"base_unit",
":",
"self",
".",
"is_unit_acceptable",
"(",
"unit",
",",
"True",
")",
"statement",
"=",
"'[self._{}_to_{}(val) for val in values]'",
".",
"format",
"(",
"self",
".",
"_clean",
"(",
"base_unit",
")",
",",
"self",
".",
"_clean",
"(",
"unit",
")",
")",
"values",
"=",
"eval",
"(",
"statement",
",",
"namespace",
")",
"return",
"values"
] | Return values in a given unit given the input from_unit. | [
"Return",
"values",
"in",
"a",
"given",
"unit",
"given",
"the",
"input",
"from_unit",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/datatype/base.py#L196-L211 |
4,586 | ladybug-tools/ladybug | ladybug/datatype/base.py | DataTypeBase.name | def name(self):
"""The data type name."""
if self._name is None:
return re.sub(r"(?<=\w)([A-Z])", r" \1", self.__class__.__name__)
else:
return self._name | python | def name(self):
"""The data type name."""
if self._name is None:
return re.sub(r"(?<=\w)([A-Z])", r" \1", self.__class__.__name__)
else:
return self._name | [
"def",
"name",
"(",
"self",
")",
":",
"if",
"self",
".",
"_name",
"is",
"None",
":",
"return",
"re",
".",
"sub",
"(",
"r\"(?<=\\w)([A-Z])\"",
",",
"r\" \\1\"",
",",
"self",
".",
"__class__",
".",
"__name__",
")",
"else",
":",
"return",
"self",
".",
"_name"
] | The data type name. | [
"The",
"data",
"type",
"name",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/datatype/base.py#L222-L227 |
4,587 | ladybug-tools/ladybug | ladybug/header.py | Header.from_json | def from_json(cls, data):
"""Create a header from a dictionary.
Args:
data: {
"data_type": {}, //Type of data (e.g. Temperature)
"unit": string,
"analysis_period": {} // A Ladybug AnalysisPeriod
"metadata": {}, // A dictionary of metadata
}
"""
# assign default values
assert 'data_type' in data, 'Required keyword "data_type" is missing!'
keys = ('data_type', 'unit', 'analysis_period', 'metadata')
for key in keys:
if key not in data:
data[key] = None
data_type = DataTypeBase.from_json(data['data_type'])
ap = AnalysisPeriod.from_json(data['analysis_period'])
return cls(data_type, data['unit'], ap, data['metadata']) | python | def from_json(cls, data):
"""Create a header from a dictionary.
Args:
data: {
"data_type": {}, //Type of data (e.g. Temperature)
"unit": string,
"analysis_period": {} // A Ladybug AnalysisPeriod
"metadata": {}, // A dictionary of metadata
}
"""
# assign default values
assert 'data_type' in data, 'Required keyword "data_type" is missing!'
keys = ('data_type', 'unit', 'analysis_period', 'metadata')
for key in keys:
if key not in data:
data[key] = None
data_type = DataTypeBase.from_json(data['data_type'])
ap = AnalysisPeriod.from_json(data['analysis_period'])
return cls(data_type, data['unit'], ap, data['metadata']) | [
"def",
"from_json",
"(",
"cls",
",",
"data",
")",
":",
"# assign default values",
"assert",
"'data_type'",
"in",
"data",
",",
"'Required keyword \"data_type\" is missing!'",
"keys",
"=",
"(",
"'data_type'",
",",
"'unit'",
",",
"'analysis_period'",
",",
"'metadata'",
")",
"for",
"key",
"in",
"keys",
":",
"if",
"key",
"not",
"in",
"data",
":",
"data",
"[",
"key",
"]",
"=",
"None",
"data_type",
"=",
"DataTypeBase",
".",
"from_json",
"(",
"data",
"[",
"'data_type'",
"]",
")",
"ap",
"=",
"AnalysisPeriod",
".",
"from_json",
"(",
"data",
"[",
"'analysis_period'",
"]",
")",
"return",
"cls",
"(",
"data_type",
",",
"data",
"[",
"'unit'",
"]",
",",
"ap",
",",
"data",
"[",
"'metadata'",
"]",
")"
] | Create a header from a dictionary.
Args:
data: {
"data_type": {}, //Type of data (e.g. Temperature)
"unit": string,
"analysis_period": {} // A Ladybug AnalysisPeriod
"metadata": {}, // A dictionary of metadata
} | [
"Create",
"a",
"header",
"from",
"a",
"dictionary",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/header.py#L58-L78 |
4,588 | ladybug-tools/ladybug | ladybug/header.py | Header.duplicate | def duplicate(self):
"""Return a copy of the header."""
a_per = self.analysis_period.duplicate() if self.analysis_period else None
return self.__class__(self.data_type, self.unit,
a_per, deepcopy(self.metadata)) | python | def duplicate(self):
"""Return a copy of the header."""
a_per = self.analysis_period.duplicate() if self.analysis_period else None
return self.__class__(self.data_type, self.unit,
a_per, deepcopy(self.metadata)) | [
"def",
"duplicate",
"(",
"self",
")",
":",
"a_per",
"=",
"self",
".",
"analysis_period",
".",
"duplicate",
"(",
")",
"if",
"self",
".",
"analysis_period",
"else",
"None",
"return",
"self",
".",
"__class__",
"(",
"self",
".",
"data_type",
",",
"self",
".",
"unit",
",",
"a_per",
",",
"deepcopy",
"(",
"self",
".",
"metadata",
")",
")"
] | Return a copy of the header. | [
"Return",
"a",
"copy",
"of",
"the",
"header",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/header.py#L105-L109 |
4,589 | ladybug-tools/ladybug | ladybug/header.py | Header.to_tuple | def to_tuple(self):
"""Return Ladybug header as a list."""
return (
self.data_type,
self.unit,
self.analysis_period,
self.metadata
) | python | def to_tuple(self):
"""Return Ladybug header as a list."""
return (
self.data_type,
self.unit,
self.analysis_period,
self.metadata
) | [
"def",
"to_tuple",
"(",
"self",
")",
":",
"return",
"(",
"self",
".",
"data_type",
",",
"self",
".",
"unit",
",",
"self",
".",
"analysis_period",
",",
"self",
".",
"metadata",
")"
] | Return Ladybug header as a list. | [
"Return",
"Ladybug",
"header",
"as",
"a",
"list",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/header.py#L111-L118 |
4,590 | ladybug-tools/ladybug | ladybug/header.py | Header.to_json | def to_json(self):
"""Return a header as a dictionary."""
a_per = self.analysis_period.to_json() if self.analysis_period else None
return {'data_type': self.data_type.to_json(),
'unit': self.unit,
'analysis_period': a_per,
'metadata': self.metadata} | python | def to_json(self):
"""Return a header as a dictionary."""
a_per = self.analysis_period.to_json() if self.analysis_period else None
return {'data_type': self.data_type.to_json(),
'unit': self.unit,
'analysis_period': a_per,
'metadata': self.metadata} | [
"def",
"to_json",
"(",
"self",
")",
":",
"a_per",
"=",
"self",
".",
"analysis_period",
".",
"to_json",
"(",
")",
"if",
"self",
".",
"analysis_period",
"else",
"None",
"return",
"{",
"'data_type'",
":",
"self",
".",
"data_type",
".",
"to_json",
"(",
")",
",",
"'unit'",
":",
"self",
".",
"unit",
",",
"'analysis_period'",
":",
"a_per",
",",
"'metadata'",
":",
"self",
".",
"metadata",
"}"
] | Return a header as a dictionary. | [
"Return",
"a",
"header",
"as",
"a",
"dictionary",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/header.py#L124-L130 |
4,591 | ladybug-tools/ladybug | ladybug/skymodel.py | ashrae_clear_sky | def ashrae_clear_sky(altitudes, month, sky_clearness=1):
"""Calculate solar flux for an original ASHRAE Clear Sky
Args:
altitudes: A list of solar altitudes in degrees
month: An integer (1-12) indicating the month the altitudes belong to
sky_clearness: A factor that will be multiplied by the output of
the model. This is to help account for locations where clear,
dry skies predominate (e.g., at high elevations) or,
conversely, where hazy and humid conditions are frequent. See
Threlkeld and Jordan (1958) for recommended values. Typical
values range from 0.95 to 1.05 and are usually never more
than 1.2. Default is set to 1.0.
Returns:
dir_norm_rad: A list of direct normal radiation values for each
of the connected altitudes in W/m2.
dif_horiz_rad: A list of diffuse horizontall radiation values for each
of the connected altitudes in W/m2.
"""
# apparent solar irradiation at air mass m = 0
MONTHLY_A = [1202, 1187, 1164, 1130, 1106, 1092, 1093, 1107, 1136,
1166, 1190, 1204]
# atmospheric extinction coefficient
MONTHLY_B = [0.141, 0.142, 0.149, 0.164, 0.177, 0.185, 0.186, 0.182,
0.165, 0.152, 0.144, 0.141]
dir_norm_rad = []
dif_horiz_rad = []
for i, alt in enumerate(altitudes):
if alt > 0:
try:
dir_norm = MONTHLY_A[month - 1] / (math.exp(
MONTHLY_B[month - 1] / (math.sin(math.radians(alt)))))
diff_horiz = 0.17 * dir_norm * math.sin(math.radians(alt))
dir_norm_rad.append(dir_norm * sky_clearness)
dif_horiz_rad.append(diff_horiz * sky_clearness)
except OverflowError:
# very small altitude values
dir_norm_rad.append(0)
dif_horiz_rad.append(0)
else:
# night time
dir_norm_rad.append(0)
dif_horiz_rad.append(0)
return dir_norm_rad, dif_horiz_rad | python | def ashrae_clear_sky(altitudes, month, sky_clearness=1):
"""Calculate solar flux for an original ASHRAE Clear Sky
Args:
altitudes: A list of solar altitudes in degrees
month: An integer (1-12) indicating the month the altitudes belong to
sky_clearness: A factor that will be multiplied by the output of
the model. This is to help account for locations where clear,
dry skies predominate (e.g., at high elevations) or,
conversely, where hazy and humid conditions are frequent. See
Threlkeld and Jordan (1958) for recommended values. Typical
values range from 0.95 to 1.05 and are usually never more
than 1.2. Default is set to 1.0.
Returns:
dir_norm_rad: A list of direct normal radiation values for each
of the connected altitudes in W/m2.
dif_horiz_rad: A list of diffuse horizontall radiation values for each
of the connected altitudes in W/m2.
"""
# apparent solar irradiation at air mass m = 0
MONTHLY_A = [1202, 1187, 1164, 1130, 1106, 1092, 1093, 1107, 1136,
1166, 1190, 1204]
# atmospheric extinction coefficient
MONTHLY_B = [0.141, 0.142, 0.149, 0.164, 0.177, 0.185, 0.186, 0.182,
0.165, 0.152, 0.144, 0.141]
dir_norm_rad = []
dif_horiz_rad = []
for i, alt in enumerate(altitudes):
if alt > 0:
try:
dir_norm = MONTHLY_A[month - 1] / (math.exp(
MONTHLY_B[month - 1] / (math.sin(math.radians(alt)))))
diff_horiz = 0.17 * dir_norm * math.sin(math.radians(alt))
dir_norm_rad.append(dir_norm * sky_clearness)
dif_horiz_rad.append(diff_horiz * sky_clearness)
except OverflowError:
# very small altitude values
dir_norm_rad.append(0)
dif_horiz_rad.append(0)
else:
# night time
dir_norm_rad.append(0)
dif_horiz_rad.append(0)
return dir_norm_rad, dif_horiz_rad | [
"def",
"ashrae_clear_sky",
"(",
"altitudes",
",",
"month",
",",
"sky_clearness",
"=",
"1",
")",
":",
"# apparent solar irradiation at air mass m = 0",
"MONTHLY_A",
"=",
"[",
"1202",
",",
"1187",
",",
"1164",
",",
"1130",
",",
"1106",
",",
"1092",
",",
"1093",
",",
"1107",
",",
"1136",
",",
"1166",
",",
"1190",
",",
"1204",
"]",
"# atmospheric extinction coefficient",
"MONTHLY_B",
"=",
"[",
"0.141",
",",
"0.142",
",",
"0.149",
",",
"0.164",
",",
"0.177",
",",
"0.185",
",",
"0.186",
",",
"0.182",
",",
"0.165",
",",
"0.152",
",",
"0.144",
",",
"0.141",
"]",
"dir_norm_rad",
"=",
"[",
"]",
"dif_horiz_rad",
"=",
"[",
"]",
"for",
"i",
",",
"alt",
"in",
"enumerate",
"(",
"altitudes",
")",
":",
"if",
"alt",
">",
"0",
":",
"try",
":",
"dir_norm",
"=",
"MONTHLY_A",
"[",
"month",
"-",
"1",
"]",
"/",
"(",
"math",
".",
"exp",
"(",
"MONTHLY_B",
"[",
"month",
"-",
"1",
"]",
"/",
"(",
"math",
".",
"sin",
"(",
"math",
".",
"radians",
"(",
"alt",
")",
")",
")",
")",
")",
"diff_horiz",
"=",
"0.17",
"*",
"dir_norm",
"*",
"math",
".",
"sin",
"(",
"math",
".",
"radians",
"(",
"alt",
")",
")",
"dir_norm_rad",
".",
"append",
"(",
"dir_norm",
"*",
"sky_clearness",
")",
"dif_horiz_rad",
".",
"append",
"(",
"diff_horiz",
"*",
"sky_clearness",
")",
"except",
"OverflowError",
":",
"# very small altitude values",
"dir_norm_rad",
".",
"append",
"(",
"0",
")",
"dif_horiz_rad",
".",
"append",
"(",
"0",
")",
"else",
":",
"# night time",
"dir_norm_rad",
".",
"append",
"(",
"0",
")",
"dif_horiz_rad",
".",
"append",
"(",
"0",
")",
"return",
"dir_norm_rad",
",",
"dif_horiz_rad"
] | Calculate solar flux for an original ASHRAE Clear Sky
Args:
altitudes: A list of solar altitudes in degrees
month: An integer (1-12) indicating the month the altitudes belong to
sky_clearness: A factor that will be multiplied by the output of
the model. This is to help account for locations where clear,
dry skies predominate (e.g., at high elevations) or,
conversely, where hazy and humid conditions are frequent. See
Threlkeld and Jordan (1958) for recommended values. Typical
values range from 0.95 to 1.05 and are usually never more
than 1.2. Default is set to 1.0.
Returns:
dir_norm_rad: A list of direct normal radiation values for each
of the connected altitudes in W/m2.
dif_horiz_rad: A list of diffuse horizontall radiation values for each
of the connected altitudes in W/m2. | [
"Calculate",
"solar",
"flux",
"for",
"an",
"original",
"ASHRAE",
"Clear",
"Sky"
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/skymodel.py#L11-L57 |
4,592 | ladybug-tools/ladybug | ladybug/skymodel.py | zhang_huang_solar | def zhang_huang_solar(alt, cloud_cover, relative_humidity,
dry_bulb_present, dry_bulb_t3_hrs, wind_speed,
irr_0=1355):
"""Calculate global horizontal solar irradiance using the Zhang-Huang model.
Note:
[1] Zhang, Q.Y. and Huang, Y.J. 2002. "Development of typical year weather files
for Chinese locations", LBNL-51436, ASHRAE Transactions, Vol. 108, Part 2.
Args:
alt: A solar altitude in degrees.
cloud_cover: A float value between 0 and 10 that represents the sky cloud cover
in tenths (0 = clear; 10 = completely overcast)
relative_humidity: A float value between 0 and 100 that represents
the relative humidity in percent.
dry_bulb_present: A float value that represents the dry bulb
temperature at the time of interest (in degrees C).
dry_bulb_t3_hrs: A float value that represents the dry bulb
temperature at three hours before the time of interest (in degrees C).
wind_speed: A float value that represents the wind speed in m/s.
irr_0 = Optional extraterrestrial solar constant (W/m2).
Default is to use the average value over the earth's orbit (1355).
Returns:
glob_ir: A global horizontall radiation value in W/m2.
"""
# zhang-huang solar model regression constants
C0, C1, C2, C3, C4, C5, D_COEFF, K_COEFF = 0.5598, 0.4982, \
-0.6762, 0.02842, -0.00317, 0.014, -17.853, 0.843
# start assuming night time
glob_ir = 0
if alt > 0:
# get sin of the altitude
sin_alt = math.sin(math.radians(alt))
# shortened and converted versions of the input parameters
cc, rh, n_temp, n3_temp, w_spd = cloud_cover / 10.0, \
relative_humidity, dry_bulb_present, dry_bulb_t3_hrs, wind_speed
# calculate zhang-huang global radiation
glob_ir = ((irr_0 * sin_alt *
(C0 + (C1 * cc) + (C2 * cc**2) +
(C3 * (n_temp - n3_temp)) +
(C4 * rh) + (C5 * w_spd))) + D_COEFF) / K_COEFF
if glob_ir < 0:
glob_ir = 0
return glob_ir | python | def zhang_huang_solar(alt, cloud_cover, relative_humidity,
dry_bulb_present, dry_bulb_t3_hrs, wind_speed,
irr_0=1355):
"""Calculate global horizontal solar irradiance using the Zhang-Huang model.
Note:
[1] Zhang, Q.Y. and Huang, Y.J. 2002. "Development of typical year weather files
for Chinese locations", LBNL-51436, ASHRAE Transactions, Vol. 108, Part 2.
Args:
alt: A solar altitude in degrees.
cloud_cover: A float value between 0 and 10 that represents the sky cloud cover
in tenths (0 = clear; 10 = completely overcast)
relative_humidity: A float value between 0 and 100 that represents
the relative humidity in percent.
dry_bulb_present: A float value that represents the dry bulb
temperature at the time of interest (in degrees C).
dry_bulb_t3_hrs: A float value that represents the dry bulb
temperature at three hours before the time of interest (in degrees C).
wind_speed: A float value that represents the wind speed in m/s.
irr_0 = Optional extraterrestrial solar constant (W/m2).
Default is to use the average value over the earth's orbit (1355).
Returns:
glob_ir: A global horizontall radiation value in W/m2.
"""
# zhang-huang solar model regression constants
C0, C1, C2, C3, C4, C5, D_COEFF, K_COEFF = 0.5598, 0.4982, \
-0.6762, 0.02842, -0.00317, 0.014, -17.853, 0.843
# start assuming night time
glob_ir = 0
if alt > 0:
# get sin of the altitude
sin_alt = math.sin(math.radians(alt))
# shortened and converted versions of the input parameters
cc, rh, n_temp, n3_temp, w_spd = cloud_cover / 10.0, \
relative_humidity, dry_bulb_present, dry_bulb_t3_hrs, wind_speed
# calculate zhang-huang global radiation
glob_ir = ((irr_0 * sin_alt *
(C0 + (C1 * cc) + (C2 * cc**2) +
(C3 * (n_temp - n3_temp)) +
(C4 * rh) + (C5 * w_spd))) + D_COEFF) / K_COEFF
if glob_ir < 0:
glob_ir = 0
return glob_ir | [
"def",
"zhang_huang_solar",
"(",
"alt",
",",
"cloud_cover",
",",
"relative_humidity",
",",
"dry_bulb_present",
",",
"dry_bulb_t3_hrs",
",",
"wind_speed",
",",
"irr_0",
"=",
"1355",
")",
":",
"# zhang-huang solar model regression constants",
"C0",
",",
"C1",
",",
"C2",
",",
"C3",
",",
"C4",
",",
"C5",
",",
"D_COEFF",
",",
"K_COEFF",
"=",
"0.5598",
",",
"0.4982",
",",
"-",
"0.6762",
",",
"0.02842",
",",
"-",
"0.00317",
",",
"0.014",
",",
"-",
"17.853",
",",
"0.843",
"# start assuming night time",
"glob_ir",
"=",
"0",
"if",
"alt",
">",
"0",
":",
"# get sin of the altitude",
"sin_alt",
"=",
"math",
".",
"sin",
"(",
"math",
".",
"radians",
"(",
"alt",
")",
")",
"# shortened and converted versions of the input parameters",
"cc",
",",
"rh",
",",
"n_temp",
",",
"n3_temp",
",",
"w_spd",
"=",
"cloud_cover",
"/",
"10.0",
",",
"relative_humidity",
",",
"dry_bulb_present",
",",
"dry_bulb_t3_hrs",
",",
"wind_speed",
"# calculate zhang-huang global radiation",
"glob_ir",
"=",
"(",
"(",
"irr_0",
"*",
"sin_alt",
"*",
"(",
"C0",
"+",
"(",
"C1",
"*",
"cc",
")",
"+",
"(",
"C2",
"*",
"cc",
"**",
"2",
")",
"+",
"(",
"C3",
"*",
"(",
"n_temp",
"-",
"n3_temp",
")",
")",
"+",
"(",
"C4",
"*",
"rh",
")",
"+",
"(",
"C5",
"*",
"w_spd",
")",
")",
")",
"+",
"D_COEFF",
")",
"/",
"K_COEFF",
"if",
"glob_ir",
"<",
"0",
":",
"glob_ir",
"=",
"0",
"return",
"glob_ir"
] | Calculate global horizontal solar irradiance using the Zhang-Huang model.
Note:
[1] Zhang, Q.Y. and Huang, Y.J. 2002. "Development of typical year weather files
for Chinese locations", LBNL-51436, ASHRAE Transactions, Vol. 108, Part 2.
Args:
alt: A solar altitude in degrees.
cloud_cover: A float value between 0 and 10 that represents the sky cloud cover
in tenths (0 = clear; 10 = completely overcast)
relative_humidity: A float value between 0 and 100 that represents
the relative humidity in percent.
dry_bulb_present: A float value that represents the dry bulb
temperature at the time of interest (in degrees C).
dry_bulb_t3_hrs: A float value that represents the dry bulb
temperature at three hours before the time of interest (in degrees C).
wind_speed: A float value that represents the wind speed in m/s.
irr_0 = Optional extraterrestrial solar constant (W/m2).
Default is to use the average value over the earth's orbit (1355).
Returns:
glob_ir: A global horizontall radiation value in W/m2. | [
"Calculate",
"global",
"horizontal",
"solar",
"irradiance",
"using",
"the",
"Zhang",
"-",
"Huang",
"model",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/skymodel.py#L112-L161 |
4,593 | ladybug-tools/ladybug | ladybug/skymodel.py | zhang_huang_solar_split | def zhang_huang_solar_split(altitudes, doys, cloud_cover, relative_humidity,
dry_bulb_present, dry_bulb_t3_hrs, wind_speed,
atm_pressure, use_disc=False):
"""Calculate direct and diffuse solar irradiance using the Zhang-Huang model.
By default, this function uses the DIRINT method (aka. Perez split) to split global
irradiance into direct and diffuse. This is the same method used by EnergyPlus.
Args:
altitudes: A list of solar altitudes in degrees.
doys: A list of days of the year that correspond to the altitudes.
cloud_cover: A list of float values between 0 and 10 that represents cloud cover
in tenths (0 = clear; 10 = completely overcast)
relative_humidity: A list of float values between 0 and 100 that represents
the relative humidity in percent.
dry_bulb_present: A list of float values that represents the dry bulb
temperature at the time of interest (in degrees C).
dry_bulb_t3_hrs: A list of float values that represents the dry bulb
temperature at three hours before the time of interest (in degrees C).
wind_speed: A list of float values that represents the wind speed in m/s.
atm_pressure: A list of float values that represent the
atmospheric pressure in Pa.
use_disc: Set to True to use the original DISC model as opposed to the
newer and more accurate DIRINT model. Default is False.
Returns:
dir_norm_rad: A list of direct normal radiation values for each
of the connected altitudes in W/m2.
dif_horiz_rad: A list of diffuse horizontall radiation values for each
of the connected altitudes in W/m2.
"""
# Calculate global horizontal irradiance using the original zhang-huang model
glob_ir = []
for i in range(len(altitudes)):
ghi = zhang_huang_solar(altitudes[i], cloud_cover[i], relative_humidity[i],
dry_bulb_present[i], dry_bulb_t3_hrs[i], wind_speed[i])
glob_ir.append(ghi)
if use_disc is False:
# Calculate dew point temperature to improve the splitting of direct + diffuse
temp_dew = [dew_point_from_db_rh(dry_bulb_present[i], relative_humidity[i])
for i in range(len(glob_ir))]
# Split global rad into direct + diffuse using dirint method (aka. Perez split)
dir_norm_rad = dirint(glob_ir, altitudes, doys, atm_pressure,
use_delta_kt_prime=True, temp_dew=temp_dew)
# Calculate diffuse horizontal from dni and ghi.
dif_horiz_rad = [glob_ir[i] -
(dir_norm_rad[i] * math.sin(math.radians(altitudes[i])))
for i in range(len(glob_ir))]
else:
dir_norm_rad = []
dif_horiz_rad = []
for i in range(len(glob_ir)):
dni, kt, am = disc(glob_ir[i], altitudes[i], doys[i], atm_pressure[i])
dhi = glob_ir[i] - (dni * math.sin(math.radians(altitudes[i])))
dir_norm_rad.append(dni)
dif_horiz_rad.append(dhi)
return dir_norm_rad, dif_horiz_rad | python | def zhang_huang_solar_split(altitudes, doys, cloud_cover, relative_humidity,
dry_bulb_present, dry_bulb_t3_hrs, wind_speed,
atm_pressure, use_disc=False):
"""Calculate direct and diffuse solar irradiance using the Zhang-Huang model.
By default, this function uses the DIRINT method (aka. Perez split) to split global
irradiance into direct and diffuse. This is the same method used by EnergyPlus.
Args:
altitudes: A list of solar altitudes in degrees.
doys: A list of days of the year that correspond to the altitudes.
cloud_cover: A list of float values between 0 and 10 that represents cloud cover
in tenths (0 = clear; 10 = completely overcast)
relative_humidity: A list of float values between 0 and 100 that represents
the relative humidity in percent.
dry_bulb_present: A list of float values that represents the dry bulb
temperature at the time of interest (in degrees C).
dry_bulb_t3_hrs: A list of float values that represents the dry bulb
temperature at three hours before the time of interest (in degrees C).
wind_speed: A list of float values that represents the wind speed in m/s.
atm_pressure: A list of float values that represent the
atmospheric pressure in Pa.
use_disc: Set to True to use the original DISC model as opposed to the
newer and more accurate DIRINT model. Default is False.
Returns:
dir_norm_rad: A list of direct normal radiation values for each
of the connected altitudes in W/m2.
dif_horiz_rad: A list of diffuse horizontall radiation values for each
of the connected altitudes in W/m2.
"""
# Calculate global horizontal irradiance using the original zhang-huang model
glob_ir = []
for i in range(len(altitudes)):
ghi = zhang_huang_solar(altitudes[i], cloud_cover[i], relative_humidity[i],
dry_bulb_present[i], dry_bulb_t3_hrs[i], wind_speed[i])
glob_ir.append(ghi)
if use_disc is False:
# Calculate dew point temperature to improve the splitting of direct + diffuse
temp_dew = [dew_point_from_db_rh(dry_bulb_present[i], relative_humidity[i])
for i in range(len(glob_ir))]
# Split global rad into direct + diffuse using dirint method (aka. Perez split)
dir_norm_rad = dirint(glob_ir, altitudes, doys, atm_pressure,
use_delta_kt_prime=True, temp_dew=temp_dew)
# Calculate diffuse horizontal from dni and ghi.
dif_horiz_rad = [glob_ir[i] -
(dir_norm_rad[i] * math.sin(math.radians(altitudes[i])))
for i in range(len(glob_ir))]
else:
dir_norm_rad = []
dif_horiz_rad = []
for i in range(len(glob_ir)):
dni, kt, am = disc(glob_ir[i], altitudes[i], doys[i], atm_pressure[i])
dhi = glob_ir[i] - (dni * math.sin(math.radians(altitudes[i])))
dir_norm_rad.append(dni)
dif_horiz_rad.append(dhi)
return dir_norm_rad, dif_horiz_rad | [
"def",
"zhang_huang_solar_split",
"(",
"altitudes",
",",
"doys",
",",
"cloud_cover",
",",
"relative_humidity",
",",
"dry_bulb_present",
",",
"dry_bulb_t3_hrs",
",",
"wind_speed",
",",
"atm_pressure",
",",
"use_disc",
"=",
"False",
")",
":",
"# Calculate global horizontal irradiance using the original zhang-huang model",
"glob_ir",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"altitudes",
")",
")",
":",
"ghi",
"=",
"zhang_huang_solar",
"(",
"altitudes",
"[",
"i",
"]",
",",
"cloud_cover",
"[",
"i",
"]",
",",
"relative_humidity",
"[",
"i",
"]",
",",
"dry_bulb_present",
"[",
"i",
"]",
",",
"dry_bulb_t3_hrs",
"[",
"i",
"]",
",",
"wind_speed",
"[",
"i",
"]",
")",
"glob_ir",
".",
"append",
"(",
"ghi",
")",
"if",
"use_disc",
"is",
"False",
":",
"# Calculate dew point temperature to improve the splitting of direct + diffuse",
"temp_dew",
"=",
"[",
"dew_point_from_db_rh",
"(",
"dry_bulb_present",
"[",
"i",
"]",
",",
"relative_humidity",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"glob_ir",
")",
")",
"]",
"# Split global rad into direct + diffuse using dirint method (aka. Perez split)",
"dir_norm_rad",
"=",
"dirint",
"(",
"glob_ir",
",",
"altitudes",
",",
"doys",
",",
"atm_pressure",
",",
"use_delta_kt_prime",
"=",
"True",
",",
"temp_dew",
"=",
"temp_dew",
")",
"# Calculate diffuse horizontal from dni and ghi.",
"dif_horiz_rad",
"=",
"[",
"glob_ir",
"[",
"i",
"]",
"-",
"(",
"dir_norm_rad",
"[",
"i",
"]",
"*",
"math",
".",
"sin",
"(",
"math",
".",
"radians",
"(",
"altitudes",
"[",
"i",
"]",
")",
")",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"glob_ir",
")",
")",
"]",
"else",
":",
"dir_norm_rad",
"=",
"[",
"]",
"dif_horiz_rad",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"glob_ir",
")",
")",
":",
"dni",
",",
"kt",
",",
"am",
"=",
"disc",
"(",
"glob_ir",
"[",
"i",
"]",
",",
"altitudes",
"[",
"i",
"]",
",",
"doys",
"[",
"i",
"]",
",",
"atm_pressure",
"[",
"i",
"]",
")",
"dhi",
"=",
"glob_ir",
"[",
"i",
"]",
"-",
"(",
"dni",
"*",
"math",
".",
"sin",
"(",
"math",
".",
"radians",
"(",
"altitudes",
"[",
"i",
"]",
")",
")",
")",
"dir_norm_rad",
".",
"append",
"(",
"dni",
")",
"dif_horiz_rad",
".",
"append",
"(",
"dhi",
")",
"return",
"dir_norm_rad",
",",
"dif_horiz_rad"
] | Calculate direct and diffuse solar irradiance using the Zhang-Huang model.
By default, this function uses the DIRINT method (aka. Perez split) to split global
irradiance into direct and diffuse. This is the same method used by EnergyPlus.
Args:
altitudes: A list of solar altitudes in degrees.
doys: A list of days of the year that correspond to the altitudes.
cloud_cover: A list of float values between 0 and 10 that represents cloud cover
in tenths (0 = clear; 10 = completely overcast)
relative_humidity: A list of float values between 0 and 100 that represents
the relative humidity in percent.
dry_bulb_present: A list of float values that represents the dry bulb
temperature at the time of interest (in degrees C).
dry_bulb_t3_hrs: A list of float values that represents the dry bulb
temperature at three hours before the time of interest (in degrees C).
wind_speed: A list of float values that represents the wind speed in m/s.
atm_pressure: A list of float values that represent the
atmospheric pressure in Pa.
use_disc: Set to True to use the original DISC model as opposed to the
newer and more accurate DIRINT model. Default is False.
Returns:
dir_norm_rad: A list of direct normal radiation values for each
of the connected altitudes in W/m2.
dif_horiz_rad: A list of diffuse horizontall radiation values for each
of the connected altitudes in W/m2. | [
"Calculate",
"direct",
"and",
"diffuse",
"solar",
"irradiance",
"using",
"the",
"Zhang",
"-",
"Huang",
"model",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/skymodel.py#L164-L224 |
4,594 | ladybug-tools/ladybug | ladybug/skymodel.py | calc_horizontal_infrared | def calc_horizontal_infrared(sky_cover, dry_bulb, dew_point):
"""Calculate horizontal infrared radiation intensity.
See EnergyPlus Enrineering Reference for more information:
https://bigladdersoftware.com/epx/docs/8-9/engineering-reference/climate-calculations.html#sky-radiation-modeling
Note:
[1] Walton, G. N. 1983. Thermal Analysis Research Program Reference Manual.
NBSSIR 83-2655. National Bureau of Standards, p. 21.
[2] Clark, G. and C. Allen, “The Estimation of Atmospheric Radiation for
Clear and Cloudy Skies,” Proceedings 2nd National Passive Solar Conference
(AS/ISES), 1978, pp. 675-678.
Args:
sky_cover: A float value between 0 and 10 that represents the opaque
sky cover in tenths (0 = clear; 10 = completely overcast)
dry_bulb: A float value that represents the dry bulb temperature
in degrees C.
dew_point: A float value that represents the dew point temperature
in degrees C.
Returns:
horiz_ir: A horizontal infrared radiation intensity value in W/m2.
"""
# stefan-boltzmann constant
SIGMA = 5.6697e-8
# convert to kelvin
db_k = dry_bulb + 273.15
dp_k = dew_point + 273.15
# calculate sky emissivity and horizontal ir
sky_emiss = (0.787 + (0.764 * math.log(dp_k / 273.15))) * \
(1 + (0.022 * sky_cover) - (0.0035 * (sky_cover ** 2)) +
(0.00028 * (sky_cover ** 3)))
horiz_ir = sky_emiss * SIGMA * (db_k ** 4)
return horiz_ir | python | def calc_horizontal_infrared(sky_cover, dry_bulb, dew_point):
"""Calculate horizontal infrared radiation intensity.
See EnergyPlus Enrineering Reference for more information:
https://bigladdersoftware.com/epx/docs/8-9/engineering-reference/climate-calculations.html#sky-radiation-modeling
Note:
[1] Walton, G. N. 1983. Thermal Analysis Research Program Reference Manual.
NBSSIR 83-2655. National Bureau of Standards, p. 21.
[2] Clark, G. and C. Allen, “The Estimation of Atmospheric Radiation for
Clear and Cloudy Skies,” Proceedings 2nd National Passive Solar Conference
(AS/ISES), 1978, pp. 675-678.
Args:
sky_cover: A float value between 0 and 10 that represents the opaque
sky cover in tenths (0 = clear; 10 = completely overcast)
dry_bulb: A float value that represents the dry bulb temperature
in degrees C.
dew_point: A float value that represents the dew point temperature
in degrees C.
Returns:
horiz_ir: A horizontal infrared radiation intensity value in W/m2.
"""
# stefan-boltzmann constant
SIGMA = 5.6697e-8
# convert to kelvin
db_k = dry_bulb + 273.15
dp_k = dew_point + 273.15
# calculate sky emissivity and horizontal ir
sky_emiss = (0.787 + (0.764 * math.log(dp_k / 273.15))) * \
(1 + (0.022 * sky_cover) - (0.0035 * (sky_cover ** 2)) +
(0.00028 * (sky_cover ** 3)))
horiz_ir = sky_emiss * SIGMA * (db_k ** 4)
return horiz_ir | [
"def",
"calc_horizontal_infrared",
"(",
"sky_cover",
",",
"dry_bulb",
",",
"dew_point",
")",
":",
"# stefan-boltzmann constant",
"SIGMA",
"=",
"5.6697e-8",
"# convert to kelvin",
"db_k",
"=",
"dry_bulb",
"+",
"273.15",
"dp_k",
"=",
"dew_point",
"+",
"273.15",
"# calculate sky emissivity and horizontal ir",
"sky_emiss",
"=",
"(",
"0.787",
"+",
"(",
"0.764",
"*",
"math",
".",
"log",
"(",
"dp_k",
"/",
"273.15",
")",
")",
")",
"*",
"(",
"1",
"+",
"(",
"0.022",
"*",
"sky_cover",
")",
"-",
"(",
"0.0035",
"*",
"(",
"sky_cover",
"**",
"2",
")",
")",
"+",
"(",
"0.00028",
"*",
"(",
"sky_cover",
"**",
"3",
")",
")",
")",
"horiz_ir",
"=",
"sky_emiss",
"*",
"SIGMA",
"*",
"(",
"db_k",
"**",
"4",
")",
"return",
"horiz_ir"
] | Calculate horizontal infrared radiation intensity.
See EnergyPlus Enrineering Reference for more information:
https://bigladdersoftware.com/epx/docs/8-9/engineering-reference/climate-calculations.html#sky-radiation-modeling
Note:
[1] Walton, G. N. 1983. Thermal Analysis Research Program Reference Manual.
NBSSIR 83-2655. National Bureau of Standards, p. 21.
[2] Clark, G. and C. Allen, “The Estimation of Atmospheric Radiation for
Clear and Cloudy Skies,” Proceedings 2nd National Passive Solar Conference
(AS/ISES), 1978, pp. 675-678.
Args:
sky_cover: A float value between 0 and 10 that represents the opaque
sky cover in tenths (0 = clear; 10 = completely overcast)
dry_bulb: A float value that represents the dry bulb temperature
in degrees C.
dew_point: A float value that represents the dew point temperature
in degrees C.
Returns:
horiz_ir: A horizontal infrared radiation intensity value in W/m2. | [
"Calculate",
"horizontal",
"infrared",
"radiation",
"intensity",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/skymodel.py#L230-L267 |
4,595 | ladybug-tools/ladybug | ladybug/legendparameters.py | LegendParameters.set_domain | def set_domain(self, values):
"""Set domain of the colors based on min and max of a list of values."""
_flattenedList = sorted(flatten(values))
self.domain = tuple(_flattenedList[0] if d == 'min' else d for d in self.domain)
self.domain = tuple(_flattenedList[-1] if d == 'max' else d for d in self.domain) | python | def set_domain(self, values):
"""Set domain of the colors based on min and max of a list of values."""
_flattenedList = sorted(flatten(values))
self.domain = tuple(_flattenedList[0] if d == 'min' else d for d in self.domain)
self.domain = tuple(_flattenedList[-1] if d == 'max' else d for d in self.domain) | [
"def",
"set_domain",
"(",
"self",
",",
"values",
")",
":",
"_flattenedList",
"=",
"sorted",
"(",
"flatten",
"(",
"values",
")",
")",
"self",
".",
"domain",
"=",
"tuple",
"(",
"_flattenedList",
"[",
"0",
"]",
"if",
"d",
"==",
"'min'",
"else",
"d",
"for",
"d",
"in",
"self",
".",
"domain",
")",
"self",
".",
"domain",
"=",
"tuple",
"(",
"_flattenedList",
"[",
"-",
"1",
"]",
"if",
"d",
"==",
"'max'",
"else",
"d",
"for",
"d",
"in",
"self",
".",
"domain",
")"
] | Set domain of the colors based on min and max of a list of values. | [
"Set",
"domain",
"of",
"the",
"colors",
"based",
"on",
"min",
"and",
"max",
"of",
"a",
"list",
"of",
"values",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/legendparameters.py#L80-L84 |
4,596 | ladybug-tools/ladybug | ladybug/datacollection.py | HourlyDiscontinuousCollection.timestep_text | def timestep_text(self):
"""Return a text string representing the timestep of the collection."""
if self.header.analysis_period.timestep == 1:
return 'Hourly'
else:
return '{} Minute'.format(int(60 / self.header.analysis_period.timestep)) | python | def timestep_text(self):
"""Return a text string representing the timestep of the collection."""
if self.header.analysis_period.timestep == 1:
return 'Hourly'
else:
return '{} Minute'.format(int(60 / self.header.analysis_period.timestep)) | [
"def",
"timestep_text",
"(",
"self",
")",
":",
"if",
"self",
".",
"header",
".",
"analysis_period",
".",
"timestep",
"==",
"1",
":",
"return",
"'Hourly'",
"else",
":",
"return",
"'{} Minute'",
".",
"format",
"(",
"int",
"(",
"60",
"/",
"self",
".",
"header",
".",
"analysis_period",
".",
"timestep",
")",
")"
] | Return a text string representing the timestep of the collection. | [
"Return",
"a",
"text",
"string",
"representing",
"the",
"timestep",
"of",
"the",
"collection",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/datacollection.py#L96-L101 |
4,597 | ladybug-tools/ladybug | ladybug/datacollection.py | HourlyDiscontinuousCollection.moys_dict | def moys_dict(self):
"""Return a dictionary of this collection's values where the keys are the moys.
This is useful for aligning the values with another list of datetimes.
"""
moy_dict = {}
for val, dt in zip(self.values, self.datetimes):
moy_dict[dt.moy] = val
return moy_dict | python | def moys_dict(self):
"""Return a dictionary of this collection's values where the keys are the moys.
This is useful for aligning the values with another list of datetimes.
"""
moy_dict = {}
for val, dt in zip(self.values, self.datetimes):
moy_dict[dt.moy] = val
return moy_dict | [
"def",
"moys_dict",
"(",
"self",
")",
":",
"moy_dict",
"=",
"{",
"}",
"for",
"val",
",",
"dt",
"in",
"zip",
"(",
"self",
".",
"values",
",",
"self",
".",
"datetimes",
")",
":",
"moy_dict",
"[",
"dt",
".",
"moy",
"]",
"=",
"val",
"return",
"moy_dict"
] | Return a dictionary of this collection's values where the keys are the moys.
This is useful for aligning the values with another list of datetimes. | [
"Return",
"a",
"dictionary",
"of",
"this",
"collection",
"s",
"values",
"where",
"the",
"keys",
"are",
"the",
"moys",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/datacollection.py#L104-L112 |
4,598 | ladybug-tools/ladybug | ladybug/datacollection.py | HourlyDiscontinuousCollection.filter_by_analysis_period | def filter_by_analysis_period(self, analysis_period):
"""
Filter a Data Collection based on an analysis period.
Args:
analysis period: A Ladybug analysis period
Return:
A new Data Collection with filtered data
"""
self._check_analysis_period(analysis_period)
_filtered_data = self.filter_by_moys(analysis_period.moys)
_filtered_data.header._analysis_period = analysis_period
return _filtered_data | python | def filter_by_analysis_period(self, analysis_period):
"""
Filter a Data Collection based on an analysis period.
Args:
analysis period: A Ladybug analysis period
Return:
A new Data Collection with filtered data
"""
self._check_analysis_period(analysis_period)
_filtered_data = self.filter_by_moys(analysis_period.moys)
_filtered_data.header._analysis_period = analysis_period
return _filtered_data | [
"def",
"filter_by_analysis_period",
"(",
"self",
",",
"analysis_period",
")",
":",
"self",
".",
"_check_analysis_period",
"(",
"analysis_period",
")",
"_filtered_data",
"=",
"self",
".",
"filter_by_moys",
"(",
"analysis_period",
".",
"moys",
")",
"_filtered_data",
".",
"header",
".",
"_analysis_period",
"=",
"analysis_period",
"return",
"_filtered_data"
] | Filter a Data Collection based on an analysis period.
Args:
analysis period: A Ladybug analysis period
Return:
A new Data Collection with filtered data | [
"Filter",
"a",
"Data",
"Collection",
"based",
"on",
"an",
"analysis",
"period",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/datacollection.py#L114-L127 |
4,599 | ladybug-tools/ladybug | ladybug/datacollection.py | HourlyDiscontinuousCollection.group_by_month_per_hour | def group_by_month_per_hour(self):
"""Return a dictionary of this collection's values grouped by each month per hour.
Key values are tuples of 2 integers:
The first represents the month of the year between 1-12.
The first represents the hour of the day between 0-24.
(eg. (12, 23) for December at 11 PM)
"""
data_by_month_per_hour = OrderedDict()
for m in xrange(1, 13):
for h in xrange(0, 24):
data_by_month_per_hour[(m, h)] = []
for v, dt in zip(self.values, self.datetimes):
data_by_month_per_hour[(dt.month, dt.hour)].append(v)
return data_by_month_per_hour | python | def group_by_month_per_hour(self):
"""Return a dictionary of this collection's values grouped by each month per hour.
Key values are tuples of 2 integers:
The first represents the month of the year between 1-12.
The first represents the hour of the day between 0-24.
(eg. (12, 23) for December at 11 PM)
"""
data_by_month_per_hour = OrderedDict()
for m in xrange(1, 13):
for h in xrange(0, 24):
data_by_month_per_hour[(m, h)] = []
for v, dt in zip(self.values, self.datetimes):
data_by_month_per_hour[(dt.month, dt.hour)].append(v)
return data_by_month_per_hour | [
"def",
"group_by_month_per_hour",
"(",
"self",
")",
":",
"data_by_month_per_hour",
"=",
"OrderedDict",
"(",
")",
"for",
"m",
"in",
"xrange",
"(",
"1",
",",
"13",
")",
":",
"for",
"h",
"in",
"xrange",
"(",
"0",
",",
"24",
")",
":",
"data_by_month_per_hour",
"[",
"(",
"m",
",",
"h",
")",
"]",
"=",
"[",
"]",
"for",
"v",
",",
"dt",
"in",
"zip",
"(",
"self",
".",
"values",
",",
"self",
".",
"datetimes",
")",
":",
"data_by_month_per_hour",
"[",
"(",
"dt",
".",
"month",
",",
"dt",
".",
"hour",
")",
"]",
".",
"append",
"(",
"v",
")",
"return",
"data_by_month_per_hour"
] | Return a dictionary of this collection's values grouped by each month per hour.
Key values are tuples of 2 integers:
The first represents the month of the year between 1-12.
The first represents the hour of the day between 0-24.
(eg. (12, 23) for December at 11 PM) | [
"Return",
"a",
"dictionary",
"of",
"this",
"collection",
"s",
"values",
"grouped",
"by",
"each",
"month",
"per",
"hour",
"."
] | c08b7308077a48d5612f644943f92d5b5dade583 | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/datacollection.py#L214-L228 |
Subsets and Splits