Dataset schema:

| column | dtype | observed range |
|---|---|---|
| repository_name | string | lengths 5–67 |
| func_path_in_repository | string | lengths 4–234 |
| func_name | string | lengths 0–314 |
| whole_func_string | string | lengths 52–3.87M |
| language | string | 6 classes |
| func_code_string | string | lengths 39–1.84M |
| func_code_tokens | list | lengths 15–672k |
| func_documentation_string | string | lengths 1–47.2k |
| func_documentation_tokens | list | lengths 1–3.92k |
| split_name | string | 1 class |
| func_code_url | string | lengths 85–339 |
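Rows of this schema are typically consumed with the Hugging Face `datasets` library. A minimal sketch follows; the dataset path `"user/code-search-net-dump"` is a hypothetical placeholder, since the real dataset name is not given in this dump:

```python
# Hedged sketch: stream rows of a CodeSearchNet-style dataset and read a few
# of the columns listed above. The dataset path is a hypothetical placeholder.
from datasets import load_dataset

ds = load_dataset("user/code-search-net-dump", split="train", streaming=True)
for row in ds:
    print(row["repository_name"], row["func_name"])
    print(row["func_code_url"])
    print(row["func_documentation_string"][:80])
    break
```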
- repository_name: amzn/ion-python
- func_path_in_repository: amazon/ion/simple_types.py
- func_name: `_IonNature.from_value`
- language: python
- split_name: train
- func_code_url: https://github.com/amzn/ion-python/blob/0b21fa3ba7755f55f745e4aa970d86343b82449d/amazon/ion/simple_types.py#L93-L109

```python
def from_value(cls, ion_type, value, annotations=()):
    """Constructs a value as a copy with an associated Ion type and annotations.

    Args:
        ion_type (IonType): The associated Ion type.
        value (Any): The value to construct from, generally of type ``cls``.
        annotations (Sequence[unicode]): The sequence of Unicode strings decorating this value.
    """
    if value is None:
        value = IonPyNull()
    else:
        args, kwargs = cls._to_constructor_args(value)
        value = cls(*args, **kwargs)
    value.ion_event = None
    value.ion_type = ion_type
    value.ion_annotations = annotations
    return value
```
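As a usage sketch (hedged: `_IonNature` is a private mixin, so `from_value` is normally reached through a public wrapper type; `IonPyInt` and `IonType` are assumed to come from `amazon.ion` as in this module's imports):

```python
# Sketch: build an annotated Ion integer via a public _IonNature subclass.
from amazon.ion.core import IonType
from amazon.ion.simple_types import IonPyInt

meters = IonPyInt.from_value(IonType.INT, 5, annotations=(u'meters',))
print(int(meters))             # 5, behaves like the wrapped value
print(meters.ion_type)         # IonType.INT
print(meters.ion_annotations)  # ('meters',)
```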
- repository_name: amzn/ion-python
- func_path_in_repository: amazon/ion/simple_types.py
- func_name: `_IonNature.to_event`
- language: python
- split_name: train
- func_code_url: https://github.com/amzn/ion-python/blob/0b21fa3ba7755f55f745e4aa970d86343b82449d/amazon/ion/simple_types.py#L111-L128

```python
def to_event(self, event_type, field_name=None, depth=None):
    """Constructs an IonEvent from this _IonNature value.

    Args:
        event_type (IonEventType): The type of the resulting event.
        field_name (Optional[text]): The field name associated with this value, if any.
        depth (Optional[int]): The depth of this value.

    Returns:
        An IonEvent with the properties from this value.
    """
    if self.ion_event is None:
        value = self
        if isinstance(self, IonPyNull):
            value = None
        self.ion_event = IonEvent(event_type, ion_type=self.ion_type, value=value, field_name=field_name,
                                  annotations=self.ion_annotations, depth=depth)
    return self.ion_event
```
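A companion sketch (same assumptions as above, plus `IonEventType` from `amazon.ion.core`) showing the event caching that the `if self.ion_event is None` guard implements:

```python
# Sketch: convert a wrapped value to an IonEvent; the event is cached.
from amazon.ion.core import IonEventType, IonType
from amazon.ion.simple_types import IonPyInt

value = IonPyInt.from_value(IonType.INT, 5)
event = value.to_event(IonEventType.SCALAR)
assert value.to_event(IonEventType.SCALAR) is event  # cached on value.ion_event
```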
- repository_name: XML-Security/signxml
- func_path_in_repository: signxml/util/__init__.py
- func_name: `bytes_to_long`
- language: python
- split_name: train
- func_code_url: https://github.com/XML-Security/signxml/blob/16503242617e9b25e5c2c9ced5ef18a06ffde146/signxml/util/__init__.py#L41-L61

```python
def bytes_to_long(s):
    """bytes_to_long(string) : long
    Convert a byte string to a long integer.

    This is (essentially) the inverse of long_to_bytes().
    """
    if isinstance(s, int):
        # On Python 2, indexing into a bytearray returns a byte string; on Python 3, an int.
        return s
    acc = 0
    if USING_PYTHON2:
        acc = long(acc)  # noqa
    unpack = struct.unpack
    length = len(s)
    if length % 4:
        extra = (4 - length % 4)
        s = b'\000' * extra + s
        length = length + extra
    for i in range(0, length, 4):
        acc = (acc << 32) + unpack(b'>I', s[i:i + 4])[0]
    return acc
```
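Because the function left-pads the input to a multiple of four bytes and then folds in big-endian 32-bit words, its behavior can be pinned down with a few checks:

```python
# bytes_to_long reads the byte string as a big-endian unsigned integer.
assert bytes_to_long(b'\x01\x00') == 256
assert bytes_to_long(b'\xff') == 255   # left-padded to b'\x00\x00\x00\xff' first
assert bytes_to_long(42) == 42         # Python 3 bytearray indexing yields ints
```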
- repository_name: XML-Security/signxml
- func_path_in_repository: signxml/util/__init__.py
- func_name: `long_to_bytes`
- language: python
- split_name: train
- func_code_url: https://github.com/XML-Security/signxml/blob/16503242617e9b25e5c2c9ced5ef18a06ffde146/signxml/util/__init__.py#L64-L93

```python
def long_to_bytes(n, blocksize=0):
    """long_to_bytes(n:long, blocksize:int) : string
    Convert a long integer to a byte string.

    If optional blocksize is given and greater than zero, pad the front of the
    byte string with binary zeros so that the length is a multiple of
    blocksize.
    """
    # after much testing, this algorithm was deemed to be the fastest
    s = b''
    if USING_PYTHON2:
        n = long(n)  # noqa
    pack = struct.pack
    while n > 0:
        s = pack(b'>I', n & 0xffffffff) + s
        n = n >> 32
    # strip off leading zeros
    for i in range(len(s)):
        if s[i] != b'\000'[0]:
            break
    else:
        # only happens when n == 0
        s = b'\000'
        i = 0
    s = s[i:]
    # add back some pad bytes. this could be done more efficiently w.r.t. the
    # de-padding being done above, but sigh...
    if blocksize > 0 and len(s) % blocksize:
        s = (blocksize - len(s) % blocksize) * b'\000' + s
    return s
```
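The inverse direction, including the `n == 0` and `blocksize` edge cases handled explicitly in the code:

```python
assert long_to_bytes(256) == b'\x01\x00'                       # leading zeros stripped
assert long_to_bytes(256, blocksize=4) == b'\x00\x00\x01\x00'  # front-padded to 4
assert long_to_bytes(0) == b'\x00'                             # the n == 0 branch
assert bytes_to_long(long_to_bytes(123456789)) == 123456789    # round trip
```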
- repository_name: XML-Security/signxml
- func_path_in_repository: signxml/util/__init__.py
- func_name: `raw_p_sha1`
- language: python
- split_name: train
- func_code_url: https://github.com/XML-Security/signxml/blob/16503242617e9b25e5c2c9ced5ef18a06ffde146/signxml/util/__init__.py#L162-L186

```python
def raw_p_sha1(secret, seed, sizes=()):
    """
    Derive one or more keys from secret and seed.
    (See specs part 6, 6.7.5 and RFC 2246 - TLS v1.0)
    Lengths of keys will match the sizes argument.

    Source: https://github.com/FreeOpcUa/python-opcua

    key_sizes = (signature_key_size, symmetric_key_size, 16)
    (sigkey, key, init_vec) = p_sha1(nonce2, nonce1, key_sizes)
    """
    full_size = 0
    for size in sizes:
        full_size += size

    result = b''
    accum = seed
    while len(result) < full_size:
        accum = hmac_sha1(secret, accum)
        result += hmac_sha1(secret, accum + seed)

    parts = []
    for size in sizes:
        parts.append(result[:size])
        result = result[size:]
    return tuple(parts)
```
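A hedged sketch of the derivation call from the docstring with concrete sizes; it assumes `raw_p_sha1` is imported from `signxml.util`:

```python
# Derive a 24-byte signing key, a 16-byte key and a 16-byte IV from two nonces.
from signxml.util import raw_p_sha1

sigkey, key, init_vec = raw_p_sha1(b'nonce2', b'nonce1', sizes=(24, 16, 16))
assert (len(sigkey), len(key), len(init_vec)) == (24, 16, 16)
```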
- repository_name: XML-Security/signxml
- func_path_in_repository: signxml/util/__init__.py
- func_name: `verify_x509_cert_chain`
- language: python
- split_name: train
- func_code_url: https://github.com/XML-Security/signxml/blob/16503242617e9b25e5c2c9ced5ef18a06ffde146/signxml/util/__init__.py#L209-L242

```python
def verify_x509_cert_chain(cert_chain, ca_pem_file=None, ca_path=None):
    """
    Look at certs in the cert chain and add them to the store one by one.
    Return the cert at the end of the chain. That is the cert to be used by the caller for verifying.

    From https://www.w3.org/TR/xmldsig-core2/#sec-X509Data:
    "All certificates appearing in an X509Data element must relate to the validation key by either containing it
    or being part of a certification chain that terminates in a certificate containing the validation key.
    No ordering is implied by the above constraints"
    """
    from OpenSSL import SSL
    context = SSL.Context(SSL.TLSv1_METHOD)
    if ca_pem_file is None and ca_path is None:
        import certifi
        ca_pem_file = certifi.where()
    context.load_verify_locations(ensure_bytes(ca_pem_file, none_ok=True), capath=ca_path)
    store = context.get_cert_store()
    certs = list(reversed(cert_chain))
    end_of_chain, last_error = None, None
    while len(certs) > 0:
        for cert in certs:
            try:
                end_of_chain = _add_cert_to_store(store, cert)
                certs.remove(cert)
                break
            except RedundantCert:
                certs.remove(cert)
                if end_of_chain is None:
                    end_of_chain = cert
                break
            except Exception as e:
                last_error = e
        else:
            raise last_error
    return end_of_chain
```
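A usage sketch under stated assumptions: the PEM file names are placeholders, and the chain may be passed in any order, per the XMLDSig text quoted in the docstring:

```python
# Sketch: resolve the end-entity certificate from an unordered chain.
# "intermediate.pem", "leaf.pem" and "ca-bundle.pem" are placeholder files.
from OpenSSL.crypto import load_certificate, FILETYPE_PEM
from signxml.util import verify_x509_cert_chain

chain = [load_certificate(FILETYPE_PEM, open(name, "rb").read())
         for name in ("intermediate.pem", "leaf.pem")]
signing_cert = verify_x509_cert_chain(chain, ca_pem_file="ca-bundle.pem")
print(signing_cert.get_subject().commonName)
```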
- repository_name: XML-Security/signxml
- func_path_in_repository: signxml/__init__.py
- func_name: `_remove_sig`
- language: python
- split_name: train
- func_code_url: https://github.com/XML-Security/signxml/blob/16503242617e9b25e5c2c9ced5ef18a06ffde146/signxml/__init__.py#L39-L69

```python
def _remove_sig(signature, idempotent=False):
    """
    Remove the signature node from its parent, keeping any tail element.
    This is needed for enveloped signatures.

    :param signature: Signature to remove from payload
    :type signature: XML ElementTree Element
    :param idempotent:
        If True, don't raise an error if signature is already detached from parent.
    :type idempotent: boolean
    """
    try:
        signaturep = next(signature.iterancestors())
    except StopIteration:
        if idempotent:
            return
        raise ValueError("Can't remove the root signature node")
    if signature.tail is not None:
        try:
            signatures = next(signature.itersiblings(preceding=True))
        except StopIteration:
            if signaturep.text is not None:
                signaturep.text = signaturep.text + signature.tail
            else:
                signaturep.text = signature.tail
        else:
            if signatures.tail is not None:
                signatures.tail = signatures.tail + signature.tail
            else:
                signatures.tail = signature.tail
    signaturep.remove(signature)
```
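The tail bookkeeping is the subtle part, so a small demonstration (hedged: `_remove_sig` is a private module-level helper, imported here only for illustration):

```python
# Sketch: detaching an enveloped Signature while preserving its tail text.
from lxml import etree
from signxml import _remove_sig

doc = etree.fromstring(
    b'<Root><Signature xmlns="http://www.w3.org/2000/09/xmldsig#"/>tail</Root>')
sig = doc.find("{http://www.w3.org/2000/09/xmldsig#}Signature")
_remove_sig(sig)
assert doc.text == "tail" and len(doc) == 0  # tail moved onto the parent's text
```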
- repository_name: XML-Security/signxml
- func_path_in_repository: signxml/__init__.py
- func_name: `XMLSigner.sign`
- language: python
- split_name: train
- func_code_url: https://github.com/XML-Security/signxml/blob/16503242617e9b25e5c2c9ced5ef18a06ffde146/signxml/__init__.py#L283-L409

```python
def sign(self, data, key=None, passphrase=None, cert=None, reference_uri=None, key_name=None, key_info=None,
         id_attribute=None):
    """
    Sign the data and return the root element of the resulting XML tree.

    :param data: Data to sign
    :type data: String, file-like object, or XML ElementTree Element API compatible object
    :param key:
        Key to be used for signing. When signing with a certificate or RSA/DSA/ECDSA key, this can be a string
        containing a PEM-formatted key, or a :py:class:`cryptography.hazmat.primitives.interfaces.RSAPublicKey`,
        :py:class:`cryptography.hazmat.primitives.interfaces.DSAPublicKey`, or
        :py:class:`cryptography.hazmat.primitives.interfaces.EllipticCurvePublicKey` object. When signing with an
        HMAC, this should be a string containing the shared secret.
    :type key:
        string, :py:class:`cryptography.hazmat.primitives.interfaces.RSAPublicKey`,
        :py:class:`cryptography.hazmat.primitives.interfaces.DSAPublicKey`, or
        :py:class:`cryptography.hazmat.primitives.interfaces.EllipticCurvePublicKey` object
    :param passphrase: Passphrase to use to decrypt the key, if any.
    :type passphrase: string
    :param cert:
        X.509 certificate to use for signing. This should be a string containing a PEM-formatted certificate, or an
        array of strings or OpenSSL.crypto.X509 objects containing the certificate and a chain of intermediate
        certificates.
    :type cert: string, array of strings, or array of OpenSSL.crypto.X509 objects
    :param reference_uri:
        Custom reference URI or list of reference URIs to incorporate into the signature. When ``method`` is set to
        ``detached`` or ``enveloped``, reference URIs are set to this value and only the referenced elements are
        signed.
    :type reference_uri: string or list
    :param key_name: Add a KeyName element in the KeyInfo element that may be used by the signer to communicate a
        key identifier to the recipient. Typically, KeyName contains an identifier related to the key pair used to
        sign the message.
    :type key_name: string
    :param key_info: A custom KeyInfo element to insert in the signature. Use this to supply
        ``<wsse:SecurityTokenReference>`` or other custom key references.
    :type key_info: :py:class:`lxml.etree.Element`
    :param id_attribute:
        Name of the attribute whose value ``URI`` refers to. By default, SignXML will search for "Id", then "ID".
    :type id_attribute: string
    :returns:
        A :py:class:`lxml.etree.Element` object representing the root of the XML tree containing the signature and
        the payload data.

    To specify the location of an enveloped signature within **data**, insert a
    ``<ds:Signature Id="placeholder"></ds:Signature>`` element in **data** (where
    "ds" is the "http://www.w3.org/2000/09/xmldsig#" namespace). This element will
    be replaced by the generated signature, and excised when generating the digest.
    """
    if id_attribute is not None:
        self.id_attributes = (id_attribute, )
    if isinstance(cert, (str, bytes)):
        cert_chain = list(iterate_pem(cert))
    else:
        cert_chain = cert
    if isinstance(reference_uri, (str, bytes)):
        reference_uris = [reference_uri]
    else:
        reference_uris = reference_uri
    sig_root, doc_root, c14n_inputs, reference_uris = self._unpack(data, reference_uris)
    signed_info_element, signature_value_element = self._build_sig(sig_root, reference_uris, c14n_inputs)
    if key is None:
        raise InvalidInput('Parameter "key" is required')
    signed_info_c14n = self._c14n(signed_info_element, algorithm=self.c14n_alg)
    if self.sign_alg.startswith("hmac-"):
        from cryptography.hazmat.primitives.hmac import HMAC
        signer = HMAC(key=key,
                      algorithm=self._get_hmac_digest_method_by_tag(self.sign_alg),
                      backend=default_backend())
        signer.update(signed_info_c14n)
        signature_value_element.text = ensure_str(b64encode(signer.finalize()))
        sig_root.append(signature_value_element)
    elif any(self.sign_alg.startswith(i) for i in ["dsa-", "rsa-", "ecdsa-"]):
        if isinstance(key, (str, bytes)):
            from cryptography.hazmat.primitives.serialization import load_pem_private_key
            key = load_pem_private_key(key, password=passphrase, backend=default_backend())
        hash_alg = self._get_signature_digest_method_by_tag(self.sign_alg)
        if self.sign_alg.startswith("dsa-"):
            signature = key.sign(signed_info_c14n, algorithm=hash_alg)
        elif self.sign_alg.startswith("ecdsa-"):
            signature = key.sign(signed_info_c14n, signature_algorithm=ec.ECDSA(algorithm=hash_alg))
        elif self.sign_alg.startswith("rsa-"):
            signature = key.sign(signed_info_c14n, padding=PKCS1v15(), algorithm=hash_alg)
        else:
            raise NotImplementedError()
        if self.sign_alg.startswith("dsa-"):
            # Note: The output of the DSA signer is a DER-encoded ASN.1 sequence of two DER integers.
            from asn1crypto.algos import DSASignature
            decoded_signature = DSASignature.load(signature).native
            r = decoded_signature['r']
            s = decoded_signature['s']
            signature = long_to_bytes(r).rjust(32, b"\0") + long_to_bytes(s).rjust(32, b"\0")
        signature_value_element.text = ensure_str(b64encode(signature))
        if key_info is None:
            key_info = SubElement(sig_root, ds_tag("KeyInfo"))
            if key_name is not None:
                keyname = SubElement(key_info, ds_tag("KeyName"))
                keyname.text = key_name
            if cert_chain is None:
                self._serialize_key_value(key, key_info)
            else:
                x509_data = SubElement(key_info, ds_tag("X509Data"))
                for cert in cert_chain:
                    x509_certificate = SubElement(x509_data, ds_tag("X509Certificate"))
                    if isinstance(cert, (str, bytes)):
                        x509_certificate.text = strip_pem_header(cert)
                    else:
                        from OpenSSL.crypto import dump_certificate, FILETYPE_PEM
                        x509_certificate.text = strip_pem_header(dump_certificate(FILETYPE_PEM, cert))
        else:
            sig_root.append(key_info)
    else:
        raise NotImplementedError()
    if self.method == methods.enveloping:
        for c14n_input in c14n_inputs:
            doc_root.append(c14n_input)
    return doc_root if self.method == methods.enveloped else sig_root
```
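An end-to-end sketch in the style of the signxml README; `"example.key"` and `"example.pem"` are placeholder files holding a PEM private key and certificate:

```python
# Sketch: enveloped signature over a small document, default XMLSigner settings.
from lxml import etree
from signxml import XMLSigner

data = etree.fromstring('<Root><Value>42</Value></Root>')
key_pem = open("example.key", "rb").read()    # placeholder private key
cert_pem = open("example.pem", "rb").read()   # placeholder certificate
signed_root = XMLSigner().sign(data, key=key_pem, cert=cert_pem)
print(etree.tostring(signed_root, pretty_print=True).decode())
```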
- repository_name: XML-Security/signxml
- func_path_in_repository: signxml/__init__.py
- func_name: `XMLVerifier.verify`
- language: python
- split_name: train

```python
def verify(self, data, require_x509=True, x509_cert=None, cert_subject_name=None, ca_pem_file=None, ca_path=None,
           hmac_key=None, validate_schema=True, parser=None, uri_resolver=None, id_attribute=None,
           expect_references=1):
    """
    Verify the XML signature supplied in the data and return the XML node signed by the signature, or raise an
    exception if the signature is not valid. By default, this requires the signature to be generated using a valid
    X.509 certificate. To enable other means of signature validation, set the **require_x509** argument to `False`.

    .. admonition:: See what is signed

        It is important to understand and follow the best practice rule of "See what is signed" when verifying XML
        signatures. The gist of this rule is: if your application neglects to verify that the information it
        trusts is what was actually signed, the attacker can supply a valid signature but point you to malicious
        data that wasn't signed by that signature.

        In SignXML, you can ensure that the information signed is what you expect to be signed by only trusting
        the data returned by the ``verify()`` method. The return value is the XML node or string that was signed.
        Also, depending on the signature settings used, comments in the XML data may not be subject to signing, so
        may need to be untrusted.

        **Recommended reading:** http://www.w3.org/TR/xmldsig-bestpractices/#practices-applications

    .. admonition:: Establish trust

        If you do not supply any keyword arguments to ``verify()``, the default behavior is to trust **any** valid
        XML signature generated using a valid X.509 certificate trusted by your system's CA store. This means
        anyone can get an SSL certificate and generate a signature that you will trust. To establish trust in the
        signer, use the ``x509_cert`` argument to specify a certificate that was pre-shared out-of-band (e.g. via
        SAML metadata, as shown in :ref:`Verifying SAML assertions <verifying-saml-assertions>`), or
        ``cert_subject_name`` to specify a subject name that must be in the signing X.509 certificate given by the
        signature (verified as if it were a domain name), or ``ca_pem_file``/``ca_path`` to give a custom CA.

    :param data: Signature data to verify
    :type data: String, file-like object, or XML ElementTree Element API compatible object
    :param require_x509:
        If ``True``, a valid X.509 certificate-based signature with an established chain of trust is required to
        pass validation. If ``False``, other types of valid signatures (e.g. HMAC or RSA public key) are accepted.
    :type require_x509: boolean
    :param x509_cert:
        A trusted external X.509 certificate, given as a PEM-formatted string or OpenSSL.crypto.X509 object, to
        use for verification. Overrides any X.509 certificate information supplied by the signature. If left set
        to ``None``, requires that the signature supply a valid X.509 certificate chain that validates against the
        known certificate authorities. Implies **require_x509=True**.
    :type x509_cert: string or OpenSSL.crypto.X509
    :param ca_pem_file:
        Filename of a PEM file containing certificate authority information to use when verifying
        certificate-based signatures.
    :type ca_pem_file: string or bytes
    :param ca_path:
        Path to a directory containing PEM-formatted certificate authority files to use when verifying
        certificate-based signatures. If neither **ca_pem_file** nor **ca_path** is given, the Mozilla CA bundle
        provided by :py:mod:`certifi` will be loaded.
    :type ca_path: string
    :param cert_subject_name:
        Subject Common Name to check the signing X.509 certificate against. Implies **require_x509=True**.
    :type cert_subject_name: string
    :param hmac_key: If using HMAC, a string containing the shared secret.
    :type hmac_key: string
    :param validate_schema: Whether to validate **data** against the XML Signature schema.
    :type validate_schema: boolean
    :param parser: Custom XML parser instance to use when parsing **data**.
    :type parser: :py:class:`lxml.etree.XMLParser` compatible parser
    :param uri_resolver: Function to use to resolve reference URIs that don't start with "#".
    :type uri_resolver: callable
    :param id_attribute:
        Name of the attribute whose value ``URI`` refers to. By default, SignXML will search for "Id", then "ID".
    :type id_attribute: string
    :param expect_references:
        Number of references to expect in the signature. If this is not 1, an array of VerifyResults is returned.
        If set to a non-integer, any number of references is accepted (otherwise a mismatch raises an error).
    :type expect_references: int or boolean

    :raises: :py:class:`cryptography.exceptions.InvalidSignature`
    :returns: VerifyResult object with the signed data, signed xml and signature xml
    :rtype: VerifyResult
    """
    self.hmac_key = hmac_key
    self.require_x509 = require_x509
    self.x509_cert = x509_cert
    self._parser = parser
    if x509_cert:
        self.require_x509 = True
    if id_attribute is not None:
        self.id_attributes = (id_attribute, )
    root = self.get_root(data)
    if root.tag == ds_tag("Signature"):
        signature_ref = root
    else:
        signature_ref = self._find(root, "Signature", anywhere=True)
    # HACK: deep copy won't keep root's namespaces
    signature = fromstring(etree.tostring(signature_ref), parser=parser)
    if validate_schema:
        self.schema().assertValid(signature)
    signed_info = self._find(signature, "SignedInfo")
    c14n_method = self._find(signed_info, "CanonicalizationMethod")
    c14n_algorithm = c14n_method.get("Algorithm")
    signature_method = self._find(signed_info, "SignatureMethod")
    signature_value = self._find(signature, "SignatureValue")
    signature_alg = signature_method.get("Algorithm")
    raw_signature = b64decode(signature_value.text)
    x509_data = signature.find("ds:KeyInfo/ds:X509Data", namespaces=namespaces)
    signed_info_c14n = self._c14n(signed_info, algorithm=c14n_algorithm)
    if x509_data is not None or self.require_x509:
        from OpenSSL.crypto import load_certificate, X509, FILETYPE_PEM, verify, Error as OpenSSLCryptoError
        if self.x509_cert is None:
            if x509_data is None:
                raise InvalidInput("Expected a X.509 certificate based signature")
            certs = [cert.text for cert in self._findall(x509_data, "X509Certificate")]
            if not certs:
                msg = "Expected to find an X509Certificate element in the signature"
                msg += " (X509SubjectName, X509SKI are not supported)"
                raise InvalidInput(msg)
            cert_chain = [load_certificate(FILETYPE_PEM, add_pem_header(cert)) for cert in certs]
            signing_cert = verify_x509_cert_chain(cert_chain, ca_pem_file=ca_pem_file, ca_path=ca_path)
        elif isinstance(self.x509_cert, X509):
            signing_cert = self.x509_cert
        else:
            signing_cert = load_certificate(FILETYPE_PEM, add_pem_header(self.x509_cert))
        if cert_subject_name and signing_cert.get_subject().commonName != cert_subject_name:
            raise InvalidSignature("Certificate subject common name mismatch")
        signature_digest_method = self._get_signature_digest_method(signature_alg).name
        try:
            verify(signing_cert, raw_signature, signed_info_c14n, signature_digest_method)
        except OpenSSLCryptoError as e:
            try:
                lib, func, reason = e.args[0][0]
            except Exception:
                reason = e
            raise InvalidSignature("Signature verification failed: {}".format(reason))
        # TODO: CN verification goes here
        # TODO: require one of the following to be set: either x509_cert or (ca_pem_file or ca_path) or common_name
        # Use ssl.match_hostname or code from it to perform match
    elif "hmac-sha" in signature_alg:
        if self.hmac_key is None:
            raise InvalidInput('Parameter "hmac_key" is required when verifying a HMAC signature')
        from cryptography.hazmat.primitives.hmac import HMAC
        signer = HMAC(key=ensure_bytes(self.hmac_key),
                      algorithm=self._get_hmac_digest_method(signature_alg),
                      backend=default_backend())
        signer.update(signed_info_c14n)
        if raw_signature != signer.finalize():
            raise InvalidSignature("Signature mismatch (HMAC)")
    else:
        key_value = signature.find("ds:KeyInfo/ds:KeyValue", namespaces=namespaces)
        if key_value is None:
            raise InvalidInput("Expected to find either KeyValue or X509Data XML element in KeyInfo")
        self._verify_signature_with_pubkey(signed_info_c14n, raw_signature, key_value, signature_alg)
    verify_results = []
    for reference in self._findall(signed_info, "Reference"):
        transforms = self._find(reference, "Transforms", require=False)
        digest_algorithm = self._find(reference, "DigestMethod").get("Algorithm")
        digest_value = self._find(reference, "DigestValue")
        payload = self._resolve_reference(root, reference, uri_resolver=uri_resolver)
        payload_c14n = self._apply_transforms(payload, transforms, signature_ref, c14n_algorithm)
        if digest_value.text != self._get_digest(payload_c14n, self._get_digest_method(digest_algorithm)):
            raise InvalidDigest("Digest mismatch for reference {}".format(len(verify_results)))
        # We return the signed XML (and only that) to ensure no access to unsigned data happens
        try:
            payload_c14n_xml = fromstring(payload_c14n)
        except etree.XMLSyntaxError:
            payload_c14n_xml = None
        verify_results.append(VerifyResult(payload_c14n, payload_c14n_xml, signature))
    if type(expect_references) is int and len(verify_results) != expect_references:
        msg = "Expected to find {} references, but found {}"
        raise InvalidSignature(msg.format(expect_references, len(verify_results)))
    return verify_results if expect_references > 1 else verify_results[0]
```
:type x509_cert: string or OpenSSL.crypto.X509
:param ca_pem_file:
Filename of a PEM file containing certificate authority information to use when verifying certificate-based
signatures.
:type ca_pem_file: string or bytes
:param ca_path:
Path to a directory containing PEM-formatted certificate authority files to use when verifying
certificate-based signatures. If neither **ca_pem_file** nor **ca_path** is given, the Mozilla CA bundle
provided by :py:mod:`certifi` will be loaded.
:type ca_path: string
:param cert_subject_name:
Subject Common Name to check the signing X.509 certificate against. Implies **require_x509=True**.
:type cert_subject_name: string
:param hmac_key: If using HMAC, a string containing the shared secret.
:type hmac_key: string
:param validate_schema: Whether to validate **data** against the XML Signature schema.
:type validate_schema: boolean
:param parser: Custom XML parser instance to use when parsing **data**.
:type parser: :py:class:`lxml.etree.XMLParser` compatible parser
:param uri_resolver: Function to use to resolve reference URIs that don't start with "#".
:type uri_resolver: callable
:param id_attribute:
Name of the attribute whose value ``URI`` refers to. By default, SignXML will search for "Id", then "ID".
:type id_attribute: string
:param expect_references:
Number of references to expect in the signature. If this is not 1, an array of VerifyResults is returned.
If set to a non-integer, any number of references is accepted (otherwise a mismatch raises an error).
:type expect_references: int or boolean
:raises: :py:class:`cryptography.exceptions.InvalidSignature`
:returns: VerifyResult object with the signed data, signed xml and signature xml
:rtype: VerifyResult | [
"Verify",
"the",
"XML",
"signature",
"supplied",
"in",
"the",
"data",
"and",
"return",
"the",
"XML",
"node",
"signed",
"by",
"the",
"signature",
"or",
"raise",
"an",
"exception",
"if",
"the",
"signature",
"is",
"not",
"valid",
".",
"By",
"default",
"this",
"requires",
"the",
"signature",
"to",
"be",
"generated",
"using",
"a",
"valid",
"X",
".",
"509",
"certificate",
".",
"To",
"enable",
"other",
"means",
"of",
"signature",
"validation",
"set",
"the",
"**",
"require_x509",
"**",
"argument",
"to",
"False",
"."
]
| train | https://github.com/XML-Security/signxml/blob/16503242617e9b25e5c2c9ced5ef18a06ffde146/signxml/__init__.py#L596-L779 |
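The `verify()` record above stresses trusting only the returned signed payload. A minimal usage sketch, assuming the signxml 2.x `XMLVerifier` entry point; the file names and certificate are placeholders, not part of the record:

```python
# Hedged sketch: verify a signed document against a pre-shared certificate.
from signxml import XMLVerifier  # assumed entry point exposing verify()

signed_doc = open("signed.xml", "rb").read()   # placeholder input file
pinned_cert = open("signer.pem").read()        # pre-shared X.509 cert (PEM)

# Pinning x509_cert establishes trust in a specific signer instead of
# accepting any certificate that chains to the system CA store.
result = XMLVerifier().verify(signed_doc, x509_cert=pinned_cert)

# "See what is signed": only the returned node is covered by the signature.
trusted_root = result.signed_xml
```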
cenkalti/github-flask | flask_github.py | GitHub.authorize | def authorize(self, scope=None, redirect_uri=None, state=None):
"""
Redirect to GitHub and request access to a user's data.
:param scope: List of `Scopes`_ for which to request access, formatted
as a string or comma delimited list of scopes as a
string. Defaults to ``None``, resulting in granting
read-only access to public information (includes public
user profile info, public repository info, and gists).
For more information on this, see the examples
presented in the GitHub API `Scopes`_ documentation, or
see the examples provided below.
:type scope: str
:param redirect_uri: `Redirect URL`_ to which to redirect the user
after authentication. Defaults to ``None``,
resulting in using the default redirect URL for
the OAuth application as defined in GitHub. This
URL can differ from the callback URL defined in
your GitHub application, however it must be a
subdirectory of the specified callback URL,
otherwise a :class:`GitHubError` is raised. For more
information on this, see the examples presented
in the GitHub API `Redirect URL`_ documentation,
or see the example provided below.
:type redirect_uri: str
:param state: An unguessable random string. It is used to protect
against cross-site request forgery attacks.
:type state: str
For example, if we wanted to use this method to get read/write access
to user profile information, in addition to read-write access to code,
commit status, etc., we would need to use the `Scopes`_ ``user`` and
``repo`` when calling this method.
.. code-block:: python
github.authorize(scope="user,repo")
Additionally, if we wanted to specify a different redirect URL
following authorization.
.. code-block:: python
# Our application's callback URL is "http://example.com/callback"
redirect_uri="http://example.com/callback/my/path"
github.authorize(scope="user,repo", redirect_uri=redirect_uri)
.. _Scopes: https://developer.github.com/v3/oauth/#scopes
.. _Redirect URL: https://developer.github.com/v3/oauth/#redirect-urls
"""
_logger.debug("Called authorize()")
params = {'client_id': self.client_id}
if scope:
params['scope'] = scope
if redirect_uri:
params['redirect_uri'] = redirect_uri
if state:
params['state'] = state
url = self.auth_url + 'authorize?' + urlencode(params)
_logger.debug("Redirecting to %s", url)
return redirect(url) | python | def authorize(self, scope=None, redirect_uri=None, state=None):
_logger.debug("Called authorize()")
params = {'client_id': self.client_id}
if scope:
params['scope'] = scope
if redirect_uri:
params['redirect_uri'] = redirect_uri
if state:
params['state'] = state
url = self.auth_url + 'authorize?' + urlencode(params)
_logger.debug("Redirecting to %s", url)
return redirect(url) | [
"def",
"authorize",
"(",
"self",
",",
"scope",
"=",
"None",
",",
"redirect_uri",
"=",
"None",
",",
"state",
"=",
"None",
")",
":",
"_logger",
".",
"debug",
"(",
"\"Called authorize()\"",
")",
"params",
"=",
"{",
"'client_id'",
":",
"self",
".",
"client_id",
"}",
"if",
"scope",
":",
"params",
"[",
"'scope'",
"]",
"=",
"scope",
"if",
"redirect_uri",
":",
"params",
"[",
"'redirect_uri'",
"]",
"=",
"redirect_uri",
"if",
"state",
":",
"params",
"[",
"'state'",
"]",
"=",
"state",
"url",
"=",
"self",
".",
"auth_url",
"+",
"'authorize?'",
"+",
"urlencode",
"(",
"params",
")",
"_logger",
".",
"debug",
"(",
"\"Redirecting to %s\"",
",",
"url",
")",
"return",
"redirect",
"(",
"url",
")"
]
| Redirect to GitHub and request access to a user's data.
:param scope: List of `Scopes`_ for which to request access, formatted
as a string or comma delimited list of scopes as a
string. Defaults to ``None``, resulting in granting
read-only access to public information (includes public
user profile info, public repository info, and gists).
For more information on this, see the examples
presented in the GitHub API `Scopes`_ documentation, or
see the examples provided below.
:type scope: str
:param redirect_uri: `Redirect URL`_ to which to redirect the user
after authentication. Defaults to ``None``,
resulting in using the default redirect URL for
the OAuth application as defined in GitHub. This
URL can differ from the callback URL defined in
your GitHub application, however it must be a
subdirectory of the specified callback URL,
otherwise a :class:`GitHubError` is raised. For more
information on this, see the examples presented
in the GitHub API `Redirect URL`_ documentation,
or see the example provided below.
:type redirect_uri: str
:param state: An unguessable random string. It is used to protect
against cross-site request forgery attacks.
:type state: str
For example, if we wanted to use this method to get read/write access
to user profile information, in addition to read-write access to code,
commit status, etc., we would need to use the `Scopes`_ ``user`` and
``repo`` when calling this method.
.. code-block:: python
github.authorize(scope="user,repo")
Additionally, if we wanted to specify a different redirect URL
following authorization.
.. code-block:: python
# Our application's callback URL is "http://example.com/callback"
redirect_uri="http://example.com/callback/my/path"
github.authorize(scope="user,repo", redirect_uri=redirect_uri)
.. _Scopes: https://developer.github.com/v3/oauth/#scopes
.. _Redirect URL: https://developer.github.com/v3/oauth/#redirect-urls | [
"Redirect",
"to",
"GitHub",
"and",
"request",
"access",
"to",
"a",
"user",
"s",
"data",
"."
]
| train | https://github.com/cenkalti/github-flask/blob/9f58d61b7d328cef857edbb5c64a5d3f716367cb/flask_github.py#L104-L168 |
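A hypothetical Flask wiring for the `authorize()` helper above; the app setup, config values, and route name are assumptions based on the GitHub-Flask README, not part of the record:

```python
# Sketch: redirect users to GitHub's OAuth consent page.
from flask import Flask
from flask_github import GitHub

app = Flask(__name__)
app.config["GITHUB_CLIENT_ID"] = "your-client-id"          # placeholder
app.config["GITHUB_CLIENT_SECRET"] = "your-client-secret"  # placeholder
github = GitHub(app)

@app.route("/login")
def login():
    # Returns a redirect response pointing at GitHub's authorize endpoint.
    return github.authorize(scope="user,repo")
```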
cenkalti/github-flask | flask_github.py | GitHub.authorized_handler | def authorized_handler(self, f):
"""
Decorator for the route that is used as the callback for authorizing
with GitHub. This callback URL can be set in the settings for the app
or passed in during authorization.
"""
@wraps(f)
def decorated(*args, **kwargs):
if 'code' in request.args:
data = self._handle_response()
else:
data = self._handle_invalid_response()
return f(*((data,) + args), **kwargs)
return decorated | python | def authorized_handler(self, f):
@wraps(f)
def decorated(*args, **kwargs):
if 'code' in request.args:
data = self._handle_response()
else:
data = self._handle_invalid_response()
return f(*((data,) + args), **kwargs)
return decorated | [
"def",
"authorized_handler",
"(",
"self",
",",
"f",
")",
":",
"@",
"wraps",
"(",
"f",
")",
"def",
"decorated",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"'code'",
"in",
"request",
".",
"args",
":",
"data",
"=",
"self",
".",
"_handle_response",
"(",
")",
"else",
":",
"data",
"=",
"self",
".",
"_handle_invalid_response",
"(",
")",
"return",
"f",
"(",
"*",
"(",
"(",
"data",
",",
")",
"+",
"args",
")",
",",
"*",
"*",
"kwargs",
")",
"return",
"decorated"
]
| Decorator for the route that is used as the callback for authorizing
with GitHub. This callback URL can be set in the settings for the app
or passed in during authorization. | [
"Decorator",
"for",
"the",
"route",
"that",
"is",
"used",
"as",
"the",
"callback",
"for",
"authorizing",
"with",
"GitHub",
".",
"This",
"callback",
"URL",
"can",
"be",
"set",
"in",
"the",
"settings",
"for",
"the",
"app",
"or",
"passed",
"in",
"during",
"authorization",
"."
]
| train | https://github.com/cenkalti/github-flask/blob/9f58d61b7d328cef857edbb5c64a5d3f716367cb/flask_github.py#L170-L184 |
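Continuing the sketch above, a hypothetical callback route decorated with `authorized_handler`; the session storage and redirect targets are assumptions:

```python
# Sketch: receive the OAuth access token after GitHub redirects back.
from flask import redirect, session

@app.route("/github-callback")
@github.authorized_handler
def authorized(access_token):
    # access_token is None when the user denied access or the exchange failed.
    if access_token is None:
        return redirect("/login-failed")
    session["github_token"] = access_token
    return redirect("/")
```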
cenkalti/github-flask | flask_github.py | GitHub._handle_response | def _handle_response(self):
"""
Handles the response after the redirect to GitHub. This response
determines whether the user has allowed this application access. If
so, we then send a POST request for the access token used to
authenticate requests to GitHub.
"""
_logger.debug("Handling response from GitHub")
params = {
'code': request.args.get('code'),
'client_id': self.client_id,
'client_secret': self.client_secret
}
url = self.auth_url + 'access_token'
_logger.debug("POSTing to %s", url)
_logger.debug(params)
response = self.session.post(url, data=params)
data = parse_qs(response.content)
_logger.debug("response.content = %s", data)
for k, v in data.items():
if len(v) == 1:
data[k] = v[0]
token = data.get(b'access_token', None)
if token is not None:
token = token.decode('ascii')
return token | python | def _handle_response(self):
_logger.debug("Handling response from GitHub")
params = {
'code': request.args.get('code'),
'client_id': self.client_id,
'client_secret': self.client_secret
}
url = self.auth_url + 'access_token'
_logger.debug("POSTing to %s", url)
_logger.debug(params)
response = self.session.post(url, data=params)
data = parse_qs(response.content)
_logger.debug("response.content = %s", data)
for k, v in data.items():
if len(v) == 1:
data[k] = v[0]
token = data.get(b'access_token', None)
if token is not None:
token = token.decode('ascii')
return token | [
"def",
"_handle_response",
"(",
"self",
")",
":",
"_logger",
".",
"debug",
"(",
"\"Handling response from GitHub\"",
")",
"params",
"=",
"{",
"'code'",
":",
"request",
".",
"args",
".",
"get",
"(",
"'code'",
")",
",",
"'client_id'",
":",
"self",
".",
"client_id",
",",
"'client_secret'",
":",
"self",
".",
"client_secret",
"}",
"url",
"=",
"self",
".",
"auth_url",
"+",
"'access_token'",
"_logger",
".",
"debug",
"(",
"\"POSTing to %s\"",
",",
"url",
")",
"_logger",
".",
"debug",
"(",
"params",
")",
"response",
"=",
"self",
".",
"session",
".",
"post",
"(",
"url",
",",
"data",
"=",
"params",
")",
"data",
"=",
"parse_qs",
"(",
"response",
".",
"content",
")",
"_logger",
".",
"debug",
"(",
"\"response.content = %s\"",
",",
"data",
")",
"for",
"k",
",",
"v",
"in",
"data",
".",
"items",
"(",
")",
":",
"if",
"len",
"(",
"v",
")",
"==",
"1",
":",
"data",
"[",
"k",
"]",
"=",
"v",
"[",
"0",
"]",
"token",
"=",
"data",
".",
"get",
"(",
"b'access_token'",
",",
"None",
")",
"if",
"token",
"is",
"not",
"None",
":",
"token",
"=",
"token",
".",
"decode",
"(",
"'ascii'",
")",
"return",
"token"
]
| Handles the response after the redirect to GitHub. This response
determines whether the user has allowed this application access. If
so, we then send a POST request for the access token used to
authenticate requests to GitHub. | [
"Handles",
"response",
"after",
"the",
"redirect",
"to",
"GitHub",
".",
"This",
"response",
"determines",
"if",
"the",
"user",
"has",
"allowed",
"the",
"this",
"application",
"access",
".",
"If",
"we",
"were",
"then",
"we",
"send",
"a",
"POST",
"request",
"for",
"the",
"access_key",
"used",
"to",
"authenticate",
"requests",
"to",
"GitHub",
"."
]
| train | https://github.com/cenkalti/github-flask/blob/9f58d61b7d328cef857edbb5c64a5d3f716367cb/flask_github.py#L186-L212 |
cenkalti/github-flask | flask_github.py | GitHub.raw_request | def raw_request(self, method, resource, access_token=None, **kwargs):
"""
Makes a HTTP request and returns the raw
:class:`~requests.Response` object.
"""
headers = self._pop_headers(kwargs)
headers['Authorization'] = self._get_authorization_header(access_token)
url = self._get_resource_url(resource)
return self.session.request(method, url, allow_redirects=True, headers=headers, **kwargs) | python | def raw_request(self, method, resource, access_token=None, **kwargs):
headers = self._pop_headers(kwargs)
headers['Authorization'] = self._get_authorization_header(access_token)
url = self._get_resource_url(resource)
return self.session.request(method, url, allow_redirects=True, headers=headers, **kwargs) | [
"def",
"raw_request",
"(",
"self",
",",
"method",
",",
"resource",
",",
"access_token",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"headers",
"=",
"self",
".",
"_pop_headers",
"(",
"kwargs",
")",
"headers",
"[",
"'Authorization'",
"]",
"=",
"self",
".",
"_get_authorization_header",
"(",
"access_token",
")",
"url",
"=",
"self",
".",
"_get_resource_url",
"(",
"resource",
")",
"return",
"self",
".",
"session",
".",
"request",
"(",
"method",
",",
"url",
",",
"allow_redirects",
"=",
"True",
",",
"headers",
"=",
"headers",
",",
"*",
"*",
"kwargs",
")"
]
| Makes an HTTP request and returns the raw
:class:`~requests.Response` object. | [
"Makes",
"a",
"HTTP",
"request",
"and",
"returns",
"the",
"raw",
":",
"class",
":",
"~requests",
".",
"Response",
"object",
"."
]
| train | https://github.com/cenkalti/github-flask/blob/9f58d61b7d328cef857edbb5c64a5d3f716367cb/flask_github.py#L217-L226 |
cenkalti/github-flask | flask_github.py | GitHub.request | def request(self, method, resource, all_pages=False, **kwargs):
"""
Makes a request to the given endpoint.
Keyword arguments are passed to the :meth:`~requests.request` method.
If the content type of the response is JSON, it will be decoded
automatically and a dictionary will be returned.
Otherwise the :class:`~requests.Response` object is returned.
"""
response = self.raw_request(method, resource, **kwargs)
if not is_valid_response(response):
raise GitHubError(response)
if is_json_response(response):
result = response.json()
while all_pages and response.links.get('next'):
url = response.links['next']['url']
response = self.raw_request(method, url, **kwargs)
if not is_valid_response(response) or \
not is_json_response(response):
raise GitHubError(response)
body = response.json()
if isinstance(body, list):
result += body
elif isinstance(body, dict) and 'items' in body:
result['items'] += body['items']
else:
raise GitHubError(response)
return result
else:
return response | python | def request(self, method, resource, all_pages=False, **kwargs):
response = self.raw_request(method, resource, **kwargs)
if not is_valid_response(response):
raise GitHubError(response)
if is_json_response(response):
result = response.json()
while all_pages and response.links.get('next'):
url = response.links['next']['url']
response = self.raw_request(method, url, **kwargs)
if not is_valid_response(response) or \
not is_json_response(response):
raise GitHubError(response)
body = response.json()
if isinstance(body, list):
result += body
elif isinstance(body, dict) and 'items' in body:
result['items'] += body['items']
else:
raise GitHubError(response)
return result
else:
return response | [
"def",
"request",
"(",
"self",
",",
"method",
",",
"resource",
",",
"all_pages",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"response",
"=",
"self",
".",
"raw_request",
"(",
"method",
",",
"resource",
",",
"*",
"*",
"kwargs",
")",
"if",
"not",
"is_valid_response",
"(",
"response",
")",
":",
"raise",
"GitHubError",
"(",
"response",
")",
"if",
"is_json_response",
"(",
"response",
")",
":",
"result",
"=",
"response",
".",
"json",
"(",
")",
"while",
"all_pages",
"and",
"response",
".",
"links",
".",
"get",
"(",
"'next'",
")",
":",
"url",
"=",
"response",
".",
"links",
"[",
"'next'",
"]",
"[",
"'url'",
"]",
"response",
"=",
"self",
".",
"raw_request",
"(",
"method",
",",
"url",
",",
"*",
"*",
"kwargs",
")",
"if",
"not",
"is_valid_response",
"(",
"response",
")",
"or",
"not",
"is_json_response",
"(",
"response",
")",
":",
"raise",
"GitHubError",
"(",
"response",
")",
"body",
"=",
"response",
".",
"json",
"(",
")",
"if",
"isinstance",
"(",
"body",
",",
"list",
")",
":",
"result",
"+=",
"body",
"elif",
"isinstance",
"(",
"body",
",",
"dict",
")",
"and",
"'items'",
"in",
"body",
":",
"result",
"[",
"'items'",
"]",
"+=",
"body",
"[",
"'items'",
"]",
"else",
":",
"raise",
"GitHubError",
"(",
"response",
")",
"return",
"result",
"else",
":",
"return",
"response"
]
| Makes a request to the given endpoint.
Keyword arguments are passed to the :meth:`~requests.request` method.
If the content type of the response is JSON, it will be decoded
automatically and a dictionary will be returned.
Otherwise the :class:`~requests.Response` object is returned. | [
"Makes",
"a",
"request",
"to",
"the",
"given",
"endpoint",
".",
"Keyword",
"arguments",
"are",
"passed",
"to",
"the",
":",
"meth",
":",
"~requests",
".",
"request",
"method",
".",
"If",
"the",
"content",
"type",
"of",
"the",
"response",
"is",
"JSON",
"it",
"will",
"be",
"decoded",
"automatically",
"and",
"a",
"dictionary",
"will",
"be",
"returned",
".",
"Otherwise",
"the",
":",
"class",
":",
"~requests",
".",
"Response",
"object",
"is",
"returned",
"."
]
| train | https://github.com/cenkalti/github-flask/blob/9f58d61b7d328cef857edbb5c64a5d3f716367cb/flask_github.py#L250-L281 |
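Because `request()` follows `Link: rel="next"` headers when `all_pages=True`, a paginated collection can be fetched in one call. A hedged sketch; the endpoint and token source are placeholders carried over from the callback sketch above:

```python
# Sketch: collect every page of a paginated endpoint into one list.
issues = github.request(
    "GET",
    "repos/cenkalti/github-flask/issues",  # placeholder endpoint
    all_pages=True,                        # walk the pagination links
    access_token=session["github_token"],  # forwarded down to raw_request()
)
print(len(issues))
```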
cenkalti/github-flask | flask_github.py | GitHub.get | def get(self, resource, params=None, **kwargs):
"""Shortcut for ``request('GET', resource)``."""
return self.request('GET', resource, params=params, **kwargs) | python | def get(self, resource, params=None, **kwargs):
return self.request('GET', resource, params=params, **kwargs) | [
"def",
"get",
"(",
"self",
",",
"resource",
",",
"params",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"request",
"(",
"'GET'",
",",
"resource",
",",
"params",
"=",
"params",
",",
"*",
"*",
"kwargs",
")"
]
| Shortcut for ``request('GET', resource)``. | [
"Shortcut",
"for",
"request",
"(",
"GET",
"resource",
")",
"."
]
| train | https://github.com/cenkalti/github-flask/blob/9f58d61b7d328cef857edbb5c64a5d3f716367cb/flask_github.py#L283-L285 |
cenkalti/github-flask | flask_github.py | GitHub.post | def post(self, resource, data=None, **kwargs):
"""Shortcut for ``request('POST', resource)``.
Use this to make POST request since it will also encode ``data`` to
'application/json' format."""
headers = dict(kwargs.pop('headers', {}))
headers.setdefault('Content-Type', 'application/json')
data = json.dumps(data)
return self.request('POST', resource, headers=headers,
data=data, **kwargs) | python | def post(self, resource, data=None, **kwargs):
headers = dict(kwargs.pop('headers', {}))
headers.setdefault('Content-Type', 'application/json')
data = json.dumps(data)
return self.request('POST', resource, headers=headers,
data=data, **kwargs) | [
"def",
"post",
"(",
"self",
",",
"resource",
",",
"data",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"headers",
"=",
"dict",
"(",
"kwargs",
".",
"pop",
"(",
"'headers'",
",",
"{",
"}",
")",
")",
"headers",
".",
"setdefault",
"(",
"'Content-Type'",
",",
"'application/json'",
")",
"data",
"=",
"json",
".",
"dumps",
"(",
"data",
")",
"return",
"self",
".",
"request",
"(",
"'POST'",
",",
"resource",
",",
"headers",
"=",
"headers",
",",
"data",
"=",
"data",
",",
"*",
"*",
"kwargs",
")"
]
| Shortcut for ``request('POST', resource)``.
Use this to make POST request since it will also encode ``data`` to
'application/json' format. | [
"Shortcut",
"for",
"request",
"(",
"POST",
"resource",
")",
".",
"Use",
"this",
"to",
"make",
"POST",
"request",
"since",
"it",
"will",
"also",
"encode",
"data",
"to",
"application",
"/",
"json",
"format",
"."
]
| train | https://github.com/cenkalti/github-flask/blob/9f58d61b7d328cef857edbb5c64a5d3f716367cb/flask_github.py#L287-L295 |
ethereum/pyrlp | rlp/lazy.py | decode_lazy | def decode_lazy(rlp, sedes=None, **sedes_kwargs):
"""Decode an RLP encoded object in a lazy fashion.
If the encoded object is a bytestring, this function acts similarly to
:func:`rlp.decode`. If it is a list however, a :class:`LazyList` is
returned instead. This object will decode the string lazily, avoiding
both horizontal and vertical traversing as much as possible.
The way `sedes` is applied depends on the decoded object: If it is a string
`sedes` deserializes it as a whole; if it is a list, each element is
deserialized individually. In both cases, `sedes_kwargs` are passed on.
Note that, if a deserializer is used, only "horizontal" but not
"vertical lazyness" can be preserved.
:param rlp: the RLP string to decode
:param sedes: an object implementing a method ``deserialize(code)`` which
is used as described above, or ``None`` if no
deserialization should be performed
:param \*\*sedes_kwargs: additional keyword arguments that will be passed
to the deserializers
:returns: either the already decoded and deserialized object (if encoded as
a string) or an instance of :class:`rlp.LazyList`
"""
item, end = consume_item_lazy(rlp, 0)
if end != len(rlp):
raise DecodingError('RLP length prefix announced wrong length', rlp)
if isinstance(item, LazyList):
item.sedes = sedes
item.sedes_kwargs = sedes_kwargs
return item
elif sedes:
return sedes.deserialize(item, **sedes_kwargs)
else:
return item | python | def decode_lazy(rlp, sedes=None, **sedes_kwargs):
item, end = consume_item_lazy(rlp, 0)
if end != len(rlp):
raise DecodingError('RLP length prefix announced wrong length', rlp)
if isinstance(item, LazyList):
item.sedes = sedes
item.sedes_kwargs = sedes_kwargs
return item
elif sedes:
return sedes.deserialize(item, **sedes_kwargs)
else:
return item | [
"def",
"decode_lazy",
"(",
"rlp",
",",
"sedes",
"=",
"None",
",",
"*",
"*",
"sedes_kwargs",
")",
":",
"item",
",",
"end",
"=",
"consume_item_lazy",
"(",
"rlp",
",",
"0",
")",
"if",
"end",
"!=",
"len",
"(",
"rlp",
")",
":",
"raise",
"DecodingError",
"(",
"'RLP length prefix announced wrong length'",
",",
"rlp",
")",
"if",
"isinstance",
"(",
"item",
",",
"LazyList",
")",
":",
"item",
".",
"sedes",
"=",
"sedes",
"item",
".",
"sedes_kwargs",
"=",
"sedes_kwargs",
"return",
"item",
"elif",
"sedes",
":",
"return",
"sedes",
".",
"deserialize",
"(",
"item",
",",
"*",
"*",
"sedes_kwargs",
")",
"else",
":",
"return",
"item"
]
| Decode an RLP encoded object in a lazy fashion.
If the encoded object is a bytestring, this function acts similarly to
:func:`rlp.decode`. If it is a list however, a :class:`LazyList` is
returned instead. This object will decode the string lazily, avoiding
both horizontal and vertical traversing as much as possible.
The way `sedes` is applied depends on the decoded object: If it is a string
`sedes` deserializes it as a whole; if it is a list, each element is
deserialized individually. In both cases, `sedes_kwargs` are passed on.
Note that, if a deserializer is used, only "horizontal" but not
"vertical lazyness" can be preserved.
:param rlp: the RLP string to decode
:param sedes: an object implementing a method ``deserialize(code)`` which
is used as described above, or ``None`` if no
deserialization should be performed
:param \*\*sedes_kwargs: additional keyword arguments that will be passed
to the deserializers
:returns: either the already decoded and deserialized object (if encoded as
a string) or an instance of :class:`rlp.LazyList` | [
"Decode",
"an",
"RLP",
"encoded",
"object",
"in",
"a",
"lazy",
"fashion",
"."
]
| train | https://github.com/ethereum/pyrlp/blob/bb898f8056da3973204c699621350bf9565e43df/rlp/lazy.py#L8-L41 |
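A small sketch of the horizontal laziness described above: only the branch that is actually indexed gets decoded.

```python
# Sketch: lazily decode a nested list and touch a single branch.
import rlp
from rlp.sedes import big_endian_int

encoded = rlp.encode([1, 2, [3, [4, 5]]])
lazy = rlp.decode_lazy(encoded)   # a LazyList; elements decoded on access
inner = lazy[2][1]                # decodes only the path [2][1]
assert big_endian_int.deserialize(inner[0]) == 4
```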
ethereum/pyrlp | rlp/lazy.py | consume_item_lazy | def consume_item_lazy(rlp, start):
"""Read an item from an RLP string lazily.
If the length prefix announces a string, the string is read; if it
announces a list, a :class:`LazyList` is created.
:param rlp: the rlp string to read from
:param start: the position at which to start reading
:returns: a tuple ``(item, end)`` where ``item`` is the read string or a
:class:`LazyList` and ``end`` is the position of the first
unprocessed byte.
"""
p, t, l, s = consume_length_prefix(rlp, start)
if t is bytes:
item, _, end = consume_payload(rlp, p, s, bytes, l)
return item, end
else:
assert t is list
return LazyList(rlp, s, s + l), s + l | python | def consume_item_lazy(rlp, start):
p, t, l, s = consume_length_prefix(rlp, start)
if t is bytes:
item, _, end = consume_payload(rlp, p, s, bytes, l)
return item, end
else:
assert t is list
return LazyList(rlp, s, s + l), s + l | [
"def",
"consume_item_lazy",
"(",
"rlp",
",",
"start",
")",
":",
"p",
",",
"t",
",",
"l",
",",
"s",
"=",
"consume_length_prefix",
"(",
"rlp",
",",
"start",
")",
"if",
"t",
"is",
"bytes",
":",
"item",
",",
"_",
",",
"end",
"=",
"consume_payload",
"(",
"rlp",
",",
"p",
",",
"s",
",",
"bytes",
",",
"l",
")",
"return",
"item",
",",
"end",
"else",
":",
"assert",
"t",
"is",
"list",
"return",
"LazyList",
"(",
"rlp",
",",
"s",
",",
"s",
"+",
"l",
")",
",",
"s",
"+",
"l"
]
| Read an item from an RLP string lazily.
If the length prefix announces a string, the string is read; if it
announces a list, a :class:`LazyList` is created.
:param rlp: the rlp string to read from
:param start: the position at which to start reading
:returns: a tuple ``(item, end)`` where ``item`` is the read string or a
:class:`LazyList` and ``end`` is the position of the first
unprocessed byte. | [
"Read",
"an",
"item",
"from",
"an",
"RLP",
"string",
"lazily",
"."
]
| train | https://github.com/ethereum/pyrlp/blob/bb898f8056da3973204c699621350bf9565e43df/rlp/lazy.py#L44-L62 |
ethereum/pyrlp | rlp/lazy.py | peek | def peek(rlp, index, sedes=None):
"""Get a specific element from an rlp encoded nested list.
This function uses :func:`rlp.decode_lazy` and, thus, decodes only the
necessary parts of the string.
Usage example::
>>> import rlp
>>> rlpdata = rlp.encode([1, 2, [3, [4, 5]]])
>>> rlp.peek(rlpdata, 0, rlp.sedes.big_endian_int)
1
>>> rlp.peek(rlpdata, [2, 0], rlp.sedes.big_endian_int)
3
:param rlp: the rlp string
:param index: the index of the element to peek at (can be a list for
nested data)
:param sedes: a sedes used to deserialize the peeked at object, or `None`
if no deserialization should be performed
:raises: :exc:`IndexError` if `index` is invalid (out of range or too many
levels)
"""
ll = decode_lazy(rlp)
if not isinstance(index, Iterable):
index = [index]
for i in index:
if isinstance(ll, Atomic):
raise IndexError('Too many indices given')
ll = ll[i]
if sedes:
return sedes.deserialize(ll)
else:
return ll | python | def peek(rlp, index, sedes=None):
ll = decode_lazy(rlp)
if not isinstance(index, Iterable):
index = [index]
for i in index:
if isinstance(ll, Atomic):
raise IndexError('Too many indices given')
ll = ll[i]
if sedes:
return sedes.deserialize(ll)
else:
return ll | [
"def",
"peek",
"(",
"rlp",
",",
"index",
",",
"sedes",
"=",
"None",
")",
":",
"ll",
"=",
"decode_lazy",
"(",
"rlp",
")",
"if",
"not",
"isinstance",
"(",
"index",
",",
"Iterable",
")",
":",
"index",
"=",
"[",
"index",
"]",
"for",
"i",
"in",
"index",
":",
"if",
"isinstance",
"(",
"ll",
",",
"Atomic",
")",
":",
"raise",
"IndexError",
"(",
"'Too many indices given'",
")",
"ll",
"=",
"ll",
"[",
"i",
"]",
"if",
"sedes",
":",
"return",
"sedes",
".",
"deserialize",
"(",
"ll",
")",
"else",
":",
"return",
"ll"
]
| Get a specific element from an rlp encoded nested list.
This function uses :func:`rlp.decode_lazy` and, thus, decodes only the
necessary parts of the string.
Usage example::
>>> import rlp
>>> rlpdata = rlp.encode([1, 2, [3, [4, 5]]])
>>> rlp.peek(rlpdata, 0, rlp.sedes.big_endian_int)
1
>>> rlp.peek(rlpdata, [2, 0], rlp.sedes.big_endian_int)
3
:param rlp: the rlp string
:param index: the index of the element to peek at (can be a list for
nested data)
:param sedes: a sedes used to deserialize the peeked at object, or `None`
if no deserialization should be performed
:raises: :exc:`IndexError` if `index` is invalid (out of range or too many
levels) | [
"Get",
"a",
"specific",
"element",
"from",
"an",
"rlp",
"encoded",
"nested",
"list",
"."
]
| train | https://github.com/ethereum/pyrlp/blob/bb898f8056da3973204c699621350bf9565e43df/rlp/lazy.py#L138-L171 |
ethereum/pyrlp | rlp/sedes/text.py | Text.fixed_length | def fixed_length(cls, l, allow_empty=False):
"""Create a sedes for text data with exactly `l` encoded characters."""
return cls(l, l, allow_empty=allow_empty) | python | def fixed_length(cls, l, allow_empty=False):
return cls(l, l, allow_empty=allow_empty) | [
"def",
"fixed_length",
"(",
"cls",
",",
"l",
",",
"allow_empty",
"=",
"False",
")",
":",
"return",
"cls",
"(",
"l",
",",
"l",
",",
"allow_empty",
"=",
"allow_empty",
")"
]
| Create a sedes for text data with exactly `l` encoded characters. | [
"Create",
"a",
"sedes",
"for",
"text",
"data",
"with",
"exactly",
"l",
"encoded",
"characters",
"."
]
| train | https://github.com/ethereum/pyrlp/blob/bb898f8056da3973204c699621350bf9565e43df/rlp/sedes/text.py#L24-L26 |
ethereum/pyrlp | rlp/sedes/serializable.py | _eq | def _eq(left, right):
"""
Equality comparison that allows for equality between tuple and list types
with equivalent elements.
"""
if isinstance(left, (tuple, list)) and isinstance(right, (tuple, list)):
return len(left) == len(right) and all(_eq(*pair) for pair in zip(left, right))
else:
return left == right | python | def _eq(left, right):
if isinstance(left, (tuple, list)) and isinstance(right, (tuple, list)):
return len(left) == len(right) and all(_eq(*pair) for pair in zip(left, right))
else:
return left == right | [
"def",
"_eq",
"(",
"left",
",",
"right",
")",
":",
"if",
"isinstance",
"(",
"left",
",",
"(",
"tuple",
",",
"list",
")",
")",
"and",
"isinstance",
"(",
"right",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"return",
"len",
"(",
"left",
")",
"==",
"len",
"(",
"right",
")",
"and",
"all",
"(",
"_eq",
"(",
"*",
"pair",
")",
"for",
"pair",
"in",
"zip",
"(",
"left",
",",
"right",
")",
")",
"else",
":",
"return",
"left",
"==",
"right"
]
| Equality comparison that allows for equality between tuple and list types
with equivalent elements. | [
"Equality",
"comparison",
"that",
"allows",
"for",
"equality",
"between",
"tuple",
"and",
"list",
"types",
"with",
"equivalent",
"elements",
"."
]
| train | https://github.com/ethereum/pyrlp/blob/bb898f8056da3973204c699621350bf9565e43df/rlp/sedes/serializable.py#L82-L90 |
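A quick illustration of the recursive tuple/list equivalence; `_eq` is a private helper, so the import is shown only for demonstration:

```python
# Sketch: _eq compares sequences element-wise regardless of tuple vs list.
from rlp.sedes.serializable import _eq  # private helper; illustrative import

assert _eq([1, (2, 3)], (1, [2, 3]))  # mixed nesting still compares equal
assert not _eq([1], [1, 2])           # length mismatch fails immediately
```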
ethereum/pyrlp | rlp/sedes/lists.py | is_sequence | def is_sequence(obj):
"""Check if `obj` is a sequence, but not a string or bytes."""
return isinstance(obj, Sequence) and not (
isinstance(obj, str) or BinaryClass.is_valid_type(obj)) | python | def is_sequence(obj):
return isinstance(obj, Sequence) and not (
isinstance(obj, str) or BinaryClass.is_valid_type(obj)) | [
"def",
"is_sequence",
"(",
"obj",
")",
":",
"return",
"isinstance",
"(",
"obj",
",",
"Sequence",
")",
"and",
"not",
"(",
"isinstance",
"(",
"obj",
",",
"str",
")",
"or",
"BinaryClass",
".",
"is_valid_type",
"(",
"obj",
")",
")"
]
| Check if `obj` is a sequence, but not a string or bytes. | [
"Check",
"if",
"obj",
"is",
"a",
"sequence",
"but",
"not",
"a",
"string",
"or",
"bytes",
"."
]
| train | https://github.com/ethereum/pyrlp/blob/bb898f8056da3973204c699621350bf9565e43df/rlp/sedes/lists.py#L32-L35 |
ethereum/pyrlp | rlp/codec.py | encode | def encode(obj, sedes=None, infer_serializer=True, cache=True):
"""Encode a Python object in RLP format.
By default, the object is serialized in a suitable way first (using
:func:`rlp.infer_sedes`) and then encoded. Serialization can be explicitly
suppressed by setting `infer_serializer` to ``False`` and not passing an
alternative as `sedes`.
If `obj` has an attribute :attr:`_cached_rlp` (as, notably,
:class:`rlp.Serializable`) and its value is not `None`, this value is
returned bypassing serialization and encoding, unless `sedes` is given (as
the cache is assumed to refer to the standard serialization which can be
replaced by specifying `sedes`).
If `obj` is a :class:`rlp.Serializable` and `cache` is true, the result of
the encoding will be stored in :attr:`_cached_rlp` if it is empty.
:param sedes: an object implementing a function ``serialize(obj)`` which will be used to
serialize ``obj`` before encoding, or ``None`` to use the inferred one (if any)
:param infer_serializer: if ``True`` an appropriate serializer will be selected using
:func:`rlp.infer_sedes` to serialize `obj` before encoding
:param cache: cache the return value in `obj._cached_rlp` if possible
(default `True`)
:returns: the RLP encoded item
:raises: :exc:`rlp.EncodingError` in the rather unlikely case that the item is too big to
encode (will not happen)
:raises: :exc:`rlp.SerializationError` if the serialization fails
"""
if isinstance(obj, Serializable):
cached_rlp = obj._cached_rlp
if sedes is None and cached_rlp:
return cached_rlp
else:
really_cache = (
cache and
sedes is None
)
else:
really_cache = False
if sedes:
item = sedes.serialize(obj)
elif infer_serializer:
item = infer_sedes(obj).serialize(obj)
else:
item = obj
result = encode_raw(item)
if really_cache:
obj._cached_rlp = result
return result | python | def encode(obj, sedes=None, infer_serializer=True, cache=True):
if isinstance(obj, Serializable):
cached_rlp = obj._cached_rlp
if sedes is None and cached_rlp:
return cached_rlp
else:
really_cache = (
cache and
sedes is None
)
else:
really_cache = False
if sedes:
item = sedes.serialize(obj)
elif infer_serializer:
item = infer_sedes(obj).serialize(obj)
else:
item = obj
result = encode_raw(item)
if really_cache:
obj._cached_rlp = result
return result | [
"def",
"encode",
"(",
"obj",
",",
"sedes",
"=",
"None",
",",
"infer_serializer",
"=",
"True",
",",
"cache",
"=",
"True",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"Serializable",
")",
":",
"cached_rlp",
"=",
"obj",
".",
"_cached_rlp",
"if",
"sedes",
"is",
"None",
"and",
"cached_rlp",
":",
"return",
"cached_rlp",
"else",
":",
"really_cache",
"=",
"(",
"cache",
"and",
"sedes",
"is",
"None",
")",
"else",
":",
"really_cache",
"=",
"False",
"if",
"sedes",
":",
"item",
"=",
"sedes",
".",
"serialize",
"(",
"obj",
")",
"elif",
"infer_serializer",
":",
"item",
"=",
"infer_sedes",
"(",
"obj",
")",
".",
"serialize",
"(",
"obj",
")",
"else",
":",
"item",
"=",
"obj",
"result",
"=",
"encode_raw",
"(",
"item",
")",
"if",
"really_cache",
":",
"obj",
".",
"_cached_rlp",
"=",
"result",
"return",
"result"
]
| Encode a Python object in RLP format.
By default, the object is serialized in a suitable way first (using
:func:`rlp.infer_sedes`) and then encoded. Serialization can be explicitly
suppressed by setting `infer_serializer` to ``False`` and not passing an
alternative as `sedes`.
If `obj` has an attribute :attr:`_cached_rlp` (as, notably,
:class:`rlp.Serializable`) and its value is not `None`, this value is
returned bypassing serialization and encoding, unless `sedes` is given (as
the cache is assumed to refer to the standard serialization which can be
replaced by specifying `sedes`).
If `obj` is a :class:`rlp.Serializable` and `cache` is true, the result of
the encoding will be stored in :attr:`_cached_rlp` if it is empty.
:param sedes: an object implementing a function ``serialize(obj)`` which will be used to
serialize ``obj`` before encoding, or ``None`` to use the inferred one (if any)
:param infer_serializer: if ``True`` an appropriate serializer will be selected using
:func:`rlp.infer_sedes` to serialize `obj` before encoding
:param cache: cache the return value in `obj._cached_rlp` if possible
(default `True`)
:returns: the RLP encoded item
:raises: :exc:`rlp.EncodingError` in the rather unlikely case that the item is too big to
encode (will not happen)
:raises: :exc:`rlp.SerializationError` if the serialization fails | [
"Encode",
"a",
"Python",
"object",
"in",
"RLP",
"format",
"."
]
| train | https://github.com/ethereum/pyrlp/blob/bb898f8056da3973204c699621350bf9565e43df/rlp/codec.py#L20-L70 |
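A round-trip sketch with an explicit sedes; in this era of pyrlp a `List` sedes deserializes to a tuple, which is exactly why the `_eq` helper above tolerates tuple/list mixes:

```python
# Sketch: encode and decode with an explicit List sedes.
import rlp
from rlp.sedes import List, big_endian_int, binary

sedes = List([big_endian_int, binary])
encoded = rlp.encode([1024, b"abc"], sedes=sedes)
assert rlp.decode(encoded, sedes=sedes) == (1024, b"abc")  # tuple on decode
```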
ethereum/pyrlp | rlp/codec.py | encode_raw | def encode_raw(item):
"""RLP encode (a nested sequence of) :class:`Atomic`s."""
if isinstance(item, Atomic):
if len(item) == 1 and item[0] < 128:
return item
payload = item
prefix_offset = 128 # string
elif not isinstance(item, str) and isinstance(item, collections.Sequence):
payload = b''.join(encode_raw(x) for x in item)
prefix_offset = 192 # list
else:
msg = 'Cannot encode object of type {0}'.format(type(item).__name__)
raise EncodingError(msg, item)
try:
prefix = length_prefix(len(payload), prefix_offset)
except ValueError:
raise EncodingError('Item too big to encode', item)
return prefix + payload | python | def encode_raw(item):
if isinstance(item, Atomic):
if len(item) == 1 and item[0] < 128:
return item
payload = item
prefix_offset = 128
elif not isinstance(item, str) and isinstance(item, collections.Sequence):
payload = b''.join(encode_raw(x) for x in item)
prefix_offset = 192
else:
msg = 'Cannot encode object of type {0}'.format(type(item).__name__)
raise EncodingError(msg, item)
try:
prefix = length_prefix(len(payload), prefix_offset)
except ValueError:
raise EncodingError('Item too big to encode', item)
return prefix + payload | [
"def",
"encode_raw",
"(",
"item",
")",
":",
"if",
"isinstance",
"(",
"item",
",",
"Atomic",
")",
":",
"if",
"len",
"(",
"item",
")",
"==",
"1",
"and",
"item",
"[",
"0",
"]",
"<",
"128",
":",
"return",
"item",
"payload",
"=",
"item",
"prefix_offset",
"=",
"128",
"# string",
"elif",
"not",
"isinstance",
"(",
"item",
",",
"str",
")",
"and",
"isinstance",
"(",
"item",
",",
"collections",
".",
"Sequence",
")",
":",
"payload",
"=",
"b''",
".",
"join",
"(",
"encode_raw",
"(",
"x",
")",
"for",
"x",
"in",
"item",
")",
"prefix_offset",
"=",
"192",
"# list",
"else",
":",
"msg",
"=",
"'Cannot encode object of type {0}'",
".",
"format",
"(",
"type",
"(",
"item",
")",
".",
"__name__",
")",
"raise",
"EncodingError",
"(",
"msg",
",",
"item",
")",
"try",
":",
"prefix",
"=",
"length_prefix",
"(",
"len",
"(",
"payload",
")",
",",
"prefix_offset",
")",
"except",
"ValueError",
":",
"raise",
"EncodingError",
"(",
"'Item too big to encode'",
",",
"item",
")",
"return",
"prefix",
"+",
"payload"
]
| RLP encode (a nested sequence of) :class:`Atomic`s. | [
"RLP",
"encode",
"(",
"a",
"nested",
"sequence",
"of",
")",
":",
"class",
":",
"Atomic",
"s",
"."
]
| train | https://github.com/ethereum/pyrlp/blob/bb898f8056da3973204c699621350bf9565e43df/rlp/codec.py#L73-L92 |
ethereum/pyrlp | rlp/codec.py | length_prefix | def length_prefix(length, offset):
"""Construct the prefix to lists or strings denoting their length.
:param length: the length of the item in bytes
:param offset: ``0x80`` when encoding raw bytes, ``0xc0`` when encoding a
list
"""
if length < 56:
return ALL_BYTES[offset + length]
elif length < LONG_LENGTH:
length_string = int_to_big_endian(length)
return ALL_BYTES[offset + 56 - 1 + len(length_string)] + length_string
else:
raise ValueError('Length greater than 256**8') | python | def length_prefix(length, offset):
if length < 56:
return ALL_BYTES[offset + length]
elif length < LONG_LENGTH:
length_string = int_to_big_endian(length)
return ALL_BYTES[offset + 56 - 1 + len(length_string)] + length_string
else:
raise ValueError('Length greater than 256**8') | [
"def",
"length_prefix",
"(",
"length",
",",
"offset",
")",
":",
"if",
"length",
"<",
"56",
":",
"return",
"ALL_BYTES",
"[",
"offset",
"+",
"length",
"]",
"elif",
"length",
"<",
"LONG_LENGTH",
":",
"length_string",
"=",
"int_to_big_endian",
"(",
"length",
")",
"return",
"ALL_BYTES",
"[",
"offset",
"+",
"56",
"-",
"1",
"+",
"len",
"(",
"length_string",
")",
"]",
"+",
"length_string",
"else",
":",
"raise",
"ValueError",
"(",
"'Length greater than 256**8'",
")"
]
| Construct the prefix to lists or strings denoting their length.
:param length: the length of the item in bytes
:param offset: ``0x80`` when encoding raw bytes, ``0xc0`` when encoding a
list | [
"Construct",
"the",
"prefix",
"to",
"lists",
"or",
"strings",
"denoting",
"their",
"length",
"."
]
| train | https://github.com/ethereum/pyrlp/blob/bb898f8056da3973204c699621350bf9565e43df/rlp/codec.py#L98-L111 |
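Worked prefix values for the two offsets, assuming `length_prefix` is importable from `rlp.codec` as defined in this file:

```python
# Sketch: the short and long forms of RLP length prefixes.
from rlp.codec import length_prefix  # assumed import path

assert length_prefix(3, 0x80) == b"\x83"   # short string: 0x80 + length
assert length_prefix(3, 0xC0) == b"\xc3"   # short list:   0xc0 + length
# 1024 needs two length bytes (b"\x04\x00"), so the marker byte is
# 0x80 + 55 + 2 = 0xb9, followed by the big-endian length itself.
assert length_prefix(1024, 0x80) == b"\xb9\x04\x00"
```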
ethereum/pyrlp | rlp/codec.py | consume_length_prefix | def consume_length_prefix(rlp, start):
"""Read a length prefix from an RLP string.
:param rlp: the rlp byte string to read from
:param start: the position at which to start reading
:returns: a tuple ``(prefix, type, length, end)``, where ``type`` is either ``str``
or ``list`` depending on the type of the following payload,
``length`` is the length of the payload in bytes, and ``end`` is
the position of the first payload byte in the rlp string
"""
b0 = rlp[start]
if b0 < 128: # single byte
return (b'', bytes, 1, start)
elif b0 < SHORT_STRING: # short string
if b0 - 128 == 1 and rlp[start + 1] < 128:
raise DecodingError('Encoded as short string although single byte was possible', rlp)
return (rlp[start:start + 1], bytes, b0 - 128, start + 1)
elif b0 < 192: # long string
ll = b0 - 183 # - (128 + 56 - 1)
if rlp[start + 1:start + 2] == b'\x00':
raise DecodingError('Length starts with zero bytes', rlp)
len_prefix = rlp[start + 1:start + 1 + ll]
l = big_endian_to_int(len_prefix) # noqa: E741
if l < 56:
raise DecodingError('Long string prefix used for short string', rlp)
return (rlp[start:start + 1] + len_prefix, bytes, l, start + 1 + ll)
elif b0 < 192 + 56: # short list
return (rlp[start:start + 1], list, b0 - 192, start + 1)
else: # long list
ll = b0 - 192 - 56 + 1
if rlp[start + 1:start + 2] == b'\x00':
raise DecodingError('Length starts with zero bytes', rlp)
len_prefix = rlp[start + 1:start + 1 + ll]
l = big_endian_to_int(len_prefix) # noqa: E741
if l < 56:
raise DecodingError('Long list prefix used for short list', rlp)
return (rlp[start:start + 1] + len_prefix, list, l, start + 1 + ll) | python | def consume_length_prefix(rlp, start):
b0 = rlp[start]
if b0 < 128:
return (b'', bytes, 1, start)
elif b0 < SHORT_STRING:
if b0 - 128 == 1 and rlp[start + 1] < 128:
raise DecodingError('Encoded as short string although single byte was possible', rlp)
return (rlp[start:start + 1], bytes, b0 - 128, start + 1)
elif b0 < 192:
ll = b0 - 183
if rlp[start + 1:start + 2] == b'\x00':
raise DecodingError('Length starts with zero bytes', rlp)
len_prefix = rlp[start + 1:start + 1 + ll]
l = big_endian_to_int(len_prefix)
if l < 56:
raise DecodingError('Long string prefix used for short string', rlp)
return (rlp[start:start + 1] + len_prefix, bytes, l, start + 1 + ll)
elif b0 < 192 + 56:
return (rlp[start:start + 1], list, b0 - 192, start + 1)
else:
ll = b0 - 192 - 56 + 1
if rlp[start + 1:start + 2] == b'\x00':
raise DecodingError('Length starts with zero bytes', rlp)
len_prefix = rlp[start + 1:start + 1 + ll]
l = big_endian_to_int(len_prefix)
if l < 56:
raise DecodingError('Long list prefix used for short list', rlp)
return (rlp[start:start + 1] + len_prefix, list, l, start + 1 + ll) | [
"def",
"consume_length_prefix",
"(",
"rlp",
",",
"start",
")",
":",
"b0",
"=",
"rlp",
"[",
"start",
"]",
"if",
"b0",
"<",
"128",
":",
"# single byte",
"return",
"(",
"b''",
",",
"bytes",
",",
"1",
",",
"start",
")",
"elif",
"b0",
"<",
"SHORT_STRING",
":",
"# short string",
"if",
"b0",
"-",
"128",
"==",
"1",
"and",
"rlp",
"[",
"start",
"+",
"1",
"]",
"<",
"128",
":",
"raise",
"DecodingError",
"(",
"'Encoded as short string although single byte was possible'",
",",
"rlp",
")",
"return",
"(",
"rlp",
"[",
"start",
":",
"start",
"+",
"1",
"]",
",",
"bytes",
",",
"b0",
"-",
"128",
",",
"start",
"+",
"1",
")",
"elif",
"b0",
"<",
"192",
":",
"# long string",
"ll",
"=",
"b0",
"-",
"183",
"# - (128 + 56 - 1)",
"if",
"rlp",
"[",
"start",
"+",
"1",
":",
"start",
"+",
"2",
"]",
"==",
"b'\\x00'",
":",
"raise",
"DecodingError",
"(",
"'Length starts with zero bytes'",
",",
"rlp",
")",
"len_prefix",
"=",
"rlp",
"[",
"start",
"+",
"1",
":",
"start",
"+",
"1",
"+",
"ll",
"]",
"l",
"=",
"big_endian_to_int",
"(",
"len_prefix",
")",
"# noqa: E741",
"if",
"l",
"<",
"56",
":",
"raise",
"DecodingError",
"(",
"'Long string prefix used for short string'",
",",
"rlp",
")",
"return",
"(",
"rlp",
"[",
"start",
":",
"start",
"+",
"1",
"]",
"+",
"len_prefix",
",",
"bytes",
",",
"l",
",",
"start",
"+",
"1",
"+",
"ll",
")",
"elif",
"b0",
"<",
"192",
"+",
"56",
":",
"# short list",
"return",
"(",
"rlp",
"[",
"start",
":",
"start",
"+",
"1",
"]",
",",
"list",
",",
"b0",
"-",
"192",
",",
"start",
"+",
"1",
")",
"else",
":",
"# long list",
"ll",
"=",
"b0",
"-",
"192",
"-",
"56",
"+",
"1",
"if",
"rlp",
"[",
"start",
"+",
"1",
":",
"start",
"+",
"2",
"]",
"==",
"b'\\x00'",
":",
"raise",
"DecodingError",
"(",
"'Length starts with zero bytes'",
",",
"rlp",
")",
"len_prefix",
"=",
"rlp",
"[",
"start",
"+",
"1",
":",
"start",
"+",
"1",
"+",
"ll",
"]",
"l",
"=",
"big_endian_to_int",
"(",
"len_prefix",
")",
"# noqa: E741",
"if",
"l",
"<",
"56",
":",
"raise",
"DecodingError",
"(",
"'Long list prefix used for short list'",
",",
"rlp",
")",
"return",
"(",
"rlp",
"[",
"start",
":",
"start",
"+",
"1",
"]",
"+",
"len_prefix",
",",
"list",
",",
"l",
",",
"start",
"+",
"1",
"+",
"ll",
")"
]
| Read a length prefix from an RLP string.
:param rlp: the rlp byte string to read from
:param start: the position at which to start reading
:returns: a tuple ``(prefix, type, length, end)``, where ``type`` is either ``str``
or ``list`` depending on the type of the following payload,
``length`` is the length of the payload in bytes, and ``end`` is
the position of the first payload byte in the rlp string | [
"Read",
"a",
"length",
"prefix",
"from",
"an",
"RLP",
"string",
"."
]
| train | https://github.com/ethereum/pyrlp/blob/bb898f8056da3973204c699621350bf9565e43df/rlp/codec.py#L117-L153 |
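The inverse direction, again assuming the helper is importable from `rlp.codec`:

```python
# Sketch: read back the prefix of an 80-byte (long-form) string.
import rlp
from rlp.codec import consume_length_prefix  # assumed import path

encoded = rlp.encode(b"x" * 80)   # long string: prefix is b"\xb8\x50"
prefix, typ, length, start = consume_length_prefix(encoded, 0)
assert (typ, length, start) == (bytes, 80, 2)
assert encoded[start:start + length] == b"x" * 80
```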
ethereum/pyrlp | rlp/codec.py | consume_payload | def consume_payload(rlp, prefix, start, type_, length):
"""Read the payload of an item from an RLP string.
:param rlp: the rlp string to read from
:param type_: the type of the payload (``bytes`` or ``list``)
:param start: the position at which to start reading
:param length: the length of the payload in bytes
:returns: a tuple ``(item, per_item_rlp, end)``, where ``item`` is
the read item, per_item_rlp is a list containing the RLP
encoding of each item and ``end`` is the position of the
first unprocessed byte
"""
if type_ is bytes:
item = rlp[start: start + length]
return (item, [prefix + item], start + length)
elif type_ is list:
items = []
per_item_rlp = []
list_rlp = prefix
next_item_start = start
end = next_item_start + length
while next_item_start < end:
p, t, l, s = consume_length_prefix(rlp, next_item_start)
item, item_rlp, next_item_start = consume_payload(rlp, p, s, t, l)
per_item_rlp.append(item_rlp)
# When the item returned above is a single element, item_rlp will also contain a
# single element, but when it's a list, the first element will be the RLP of the
# whole List, which is what we want here.
list_rlp += item_rlp[0]
items.append(item)
per_item_rlp.insert(0, list_rlp)
if next_item_start > end:
raise DecodingError('List length prefix announced a too small '
'length', rlp)
return (items, per_item_rlp, next_item_start)
else:
raise TypeError('Type must be either list or bytes') | python | def consume_payload(rlp, prefix, start, type_, length):
if type_ is bytes:
item = rlp[start: start + length]
return (item, [prefix + item], start + length)
elif type_ is list:
items = []
per_item_rlp = []
list_rlp = prefix
next_item_start = start
end = next_item_start + length
while next_item_start < end:
p, t, l, s = consume_length_prefix(rlp, next_item_start)
item, item_rlp, next_item_start = consume_payload(rlp, p, s, t, l)
per_item_rlp.append(item_rlp)
list_rlp += item_rlp[0]
items.append(item)
per_item_rlp.insert(0, list_rlp)
if next_item_start > end:
raise DecodingError('List length prefix announced a too small '
'length', rlp)
return (items, per_item_rlp, next_item_start)
else:
raise TypeError('Type must be either list or bytes') | [
"def",
"consume_payload",
"(",
"rlp",
",",
"prefix",
",",
"start",
",",
"type_",
",",
"length",
")",
":",
"if",
"type_",
"is",
"bytes",
":",
"item",
"=",
"rlp",
"[",
"start",
":",
"start",
"+",
"length",
"]",
"return",
"(",
"item",
",",
"[",
"prefix",
"+",
"item",
"]",
",",
"start",
"+",
"length",
")",
"elif",
"type_",
"is",
"list",
":",
"items",
"=",
"[",
"]",
"per_item_rlp",
"=",
"[",
"]",
"list_rlp",
"=",
"prefix",
"next_item_start",
"=",
"start",
"end",
"=",
"next_item_start",
"+",
"length",
"while",
"next_item_start",
"<",
"end",
":",
"p",
",",
"t",
",",
"l",
",",
"s",
"=",
"consume_length_prefix",
"(",
"rlp",
",",
"next_item_start",
")",
"item",
",",
"item_rlp",
",",
"next_item_start",
"=",
"consume_payload",
"(",
"rlp",
",",
"p",
",",
"s",
",",
"t",
",",
"l",
")",
"per_item_rlp",
".",
"append",
"(",
"item_rlp",
")",
"# When the item returned above is a single element, item_rlp will also contain a",
"# single element, but when it's a list, the first element will be the RLP of the",
"# whole List, which is what we want here.",
"list_rlp",
"+=",
"item_rlp",
"[",
"0",
"]",
"items",
".",
"append",
"(",
"item",
")",
"per_item_rlp",
".",
"insert",
"(",
"0",
",",
"list_rlp",
")",
"if",
"next_item_start",
">",
"end",
":",
"raise",
"DecodingError",
"(",
"'List length prefix announced a too small '",
"'length'",
",",
"rlp",
")",
"return",
"(",
"items",
",",
"per_item_rlp",
",",
"next_item_start",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'Type must be either list or bytes'",
")"
]
| Read the payload of an item from an RLP string.
:param rlp: the rlp string to read from
:param type_: the type of the payload (``bytes`` or ``list``)
:param start: the position at which to start reading
:param length: the length of the payload in bytes
:returns: a tuple ``(item, per_item_rlp, end)``, where ``item`` is
the read item, per_item_rlp is a list containing the RLP
encoding of each item and ``end`` is the position of the
first unprocessed byte | [
"Read",
"the",
"payload",
"of",
"an",
"item",
"from",
"an",
"RLP",
"string",
"."
]
| train | https://github.com/ethereum/pyrlp/blob/bb898f8056da3973204c699621350bf9565e43df/rlp/codec.py#L156-L192 |
ethereum/pyrlp | rlp/codec.py | consume_item | def consume_item(rlp, start):
"""Read an item from an RLP string.
:param rlp: the rlp string to read from
:param start: the position at which to start reading
:returns: a tuple ``(item, per_item_rlp, end)``, where ``item`` is
the read item, per_item_rlp is a list containing the RLP
encoding of each item and ``end`` is the position of the
first unprocessed byte
"""
p, t, l, s = consume_length_prefix(rlp, start)
return consume_payload(rlp, p, s, t, l) | python | def consume_item(rlp, start):
p, t, l, s = consume_length_prefix(rlp, start)
return consume_payload(rlp, p, s, t, l) | [
"def",
"consume_item",
"(",
"rlp",
",",
"start",
")",
":",
"p",
",",
"t",
",",
"l",
",",
"s",
"=",
"consume_length_prefix",
"(",
"rlp",
",",
"start",
")",
"return",
"consume_payload",
"(",
"rlp",
",",
"p",
",",
"s",
",",
"t",
",",
"l",
")"
]
| Read an item from an RLP string.
:param rlp: the rlp string to read from
:param start: the position at which to start reading
:returns: a tuple ``(item, per_item_rlp, end)``, where ``item`` is
the read item, per_item_rlp is a list containing the RLP
encoding of each item and ``end`` is the position of the
first unprocessed byte | [
"Read",
"an",
"item",
"from",
"an",
"RLP",
"string",
"."
]
| train | https://github.com/ethereum/pyrlp/blob/bb898f8056da3973204c699621350bf9565e43df/rlp/codec.py#L195-L206 |
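A sketch of the `(item, per_item_rlp, end)` shape that `consume_item` returns; import path assumed as above:

```python
# Sketch: the first per_item_rlp entry is the encoding of the whole list.
import rlp
from rlp.codec import consume_item  # assumed import path

encoded = rlp.encode([b"a", [b"b"]])
item, per_item_rlp, end = consume_item(encoded, 0)
assert item == [b"a", [b"b"]] and end == len(encoded)
assert per_item_rlp[0] == encoded
```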
ethereum/pyrlp | rlp/codec.py | decode | def decode(rlp, sedes=None, strict=True, recursive_cache=False, **kwargs):
"""Decode an RLP encoded object.
If the deserialized result `obj` has an attribute :attr:`_cached_rlp` (e.g. if `sedes` is a
subclass of :class:`rlp.Serializable`) it will be set to `rlp`, which will improve performance
on subsequent :func:`rlp.encode` calls. Bear in mind however that `obj` needs to make sure that
this value is updated whenever one of its fields changes or prevent such changes entirely
(:class:`rlp.sedes.Serializable` does the latter).
:param sedes: an object implementing a function ``deserialize(code)`` which will be applied
after decoding, or ``None`` if no deserialization should be performed
:param \*\*kwargs: additional keyword arguments that will be passed to the deserializer
:param strict: if false inputs that are longer than necessary don't cause an exception
:returns: the decoded and maybe deserialized Python object
:raises: :exc:`rlp.DecodingError` if the input string does not end after the root item and
`strict` is true
:raises: :exc:`rlp.DeserializationError` if the deserialization fails
"""
if not is_bytes(rlp):
raise DecodingError('Can only decode RLP bytes, got type %s' % type(rlp).__name__, rlp)
try:
item, per_item_rlp, end = consume_item(rlp, 0)
except IndexError:
raise DecodingError('RLP string too short', rlp)
if end != len(rlp) and strict:
msg = 'RLP string ends with {} superfluous bytes'.format(len(rlp) - end)
raise DecodingError(msg, rlp)
if sedes:
obj = sedes.deserialize(item, **kwargs)
if is_sequence(obj) or hasattr(obj, '_cached_rlp'):
_apply_rlp_cache(obj, per_item_rlp, recursive_cache)
return obj
else:
return item | python | def decode(rlp, sedes=None, strict=True, recursive_cache=False, **kwargs):
if not is_bytes(rlp):
raise DecodingError('Can only decode RLP bytes, got type %s' % type(rlp).__name__, rlp)
try:
item, per_item_rlp, end = consume_item(rlp, 0)
except IndexError:
raise DecodingError('RLP string too short', rlp)
if end != len(rlp) and strict:
msg = 'RLP string ends with {} superfluous bytes'.format(len(rlp) - end)
raise DecodingError(msg, rlp)
if sedes:
obj = sedes.deserialize(item, **kwargs)
if is_sequence(obj) or hasattr(obj, '_cached_rlp'):
_apply_rlp_cache(obj, per_item_rlp, recursive_cache)
return obj
else:
return item | [
"def",
"decode",
"(",
"rlp",
",",
"sedes",
"=",
"None",
",",
"strict",
"=",
"True",
",",
"recursive_cache",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"is_bytes",
"(",
"rlp",
")",
":",
"raise",
"DecodingError",
"(",
"'Can only decode RLP bytes, got type %s'",
"%",
"type",
"(",
"rlp",
")",
".",
"__name__",
",",
"rlp",
")",
"try",
":",
"item",
",",
"per_item_rlp",
",",
"end",
"=",
"consume_item",
"(",
"rlp",
",",
"0",
")",
"except",
"IndexError",
":",
"raise",
"DecodingError",
"(",
"'RLP string too short'",
",",
"rlp",
")",
"if",
"end",
"!=",
"len",
"(",
"rlp",
")",
"and",
"strict",
":",
"msg",
"=",
"'RLP string ends with {} superfluous bytes'",
".",
"format",
"(",
"len",
"(",
"rlp",
")",
"-",
"end",
")",
"raise",
"DecodingError",
"(",
"msg",
",",
"rlp",
")",
"if",
"sedes",
":",
"obj",
"=",
"sedes",
".",
"deserialize",
"(",
"item",
",",
"*",
"*",
"kwargs",
")",
"if",
"is_sequence",
"(",
"obj",
")",
"or",
"hasattr",
"(",
"obj",
",",
"'_cached_rlp'",
")",
":",
"_apply_rlp_cache",
"(",
"obj",
",",
"per_item_rlp",
",",
"recursive_cache",
")",
"return",
"obj",
"else",
":",
"return",
"item"
]
| Decode an RLP encoded object.
If the deserialized result `obj` has an attribute :attr:`_cached_rlp` (e.g. if `sedes` is a
subclass of :class:`rlp.Serializable`) it will be set to `rlp`, which will improve performance
on subsequent :func:`rlp.encode` calls. Bear in mind however that `obj` needs to make sure that
this value is updated whenever one of its fields changes or prevent such changes entirely
(:class:`rlp.sedes.Serializable` does the latter).
:param sedes: an object implementing a function ``deserialize(code)`` which will be applied
after decoding, or ``None`` if no deserialization should be performed
:param \*\*kwargs: additional keyword arguments that will be passed to the deserializer
:param strict: if false, inputs that are longer than necessary do not cause an exception
:returns: the decoded and maybe deserialized Python object
:raises: :exc:`rlp.DecodingError` if the input string does not end after the root item and
`strict` is true
:raises: :exc:`rlp.DeserializationError` if the deserialization fails | [
"Decode",
"an",
"RLP",
"encoded",
"object",
"."
]
| train | https://github.com/ethereum/pyrlp/blob/bb898f8056da3973204c699621350bf9565e43df/rlp/codec.py#L209-L242 |
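A usage sketch for decode: round-tripping through a sedes yields deserialized Python values, while omitting it returns the raw nested byte strings. The exact sequence type List.deserialize returns can vary across pyrlp versions, so the first comment is indicative only.

    import rlp
    from rlp.sedes import List, big_endian_int, binary

    sedes = List([binary, big_endian_int])
    encoded = rlp.encode([b'alice', 42], sedes=sedes)
    print(rlp.decode(encoded, sedes=sedes))  # a sequence equal to (b'alice', 42)
    print(rlp.decode(encoded))               # [b'alice', b'*']  (42 == 0x2a == b'*')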
ethereum/pyrlp | rlp/codec.py | infer_sedes | def infer_sedes(obj):
"""Try to find a sedes objects suitable for a given Python object.
The sedes objects considered are `obj`'s class, `big_endian_int` and
`binary`. If `obj` is a sequence, a :class:`rlp.sedes.List` will be
constructed recursively.
:param obj: the python object for which to find a sedes object
:raises: :exc:`TypeError` if no appropriate sedes could be found
"""
if is_sedes(obj.__class__):
return obj.__class__
elif not isinstance(obj, bool) and isinstance(obj, int) and obj >= 0:
return big_endian_int
elif BinaryClass.is_valid_type(obj):
return binary
elif not isinstance(obj, str) and isinstance(obj, collections.Sequence):
return List(map(infer_sedes, obj))
elif isinstance(obj, bool):
return boolean
elif isinstance(obj, str):
return text
msg = 'Did not find sedes handling type {}'.format(type(obj).__name__)
raise TypeError(msg) | python | def infer_sedes(obj):
if is_sedes(obj.__class__):
return obj.__class__
elif not isinstance(obj, bool) and isinstance(obj, int) and obj >= 0:
return big_endian_int
elif BinaryClass.is_valid_type(obj):
return binary
elif not isinstance(obj, str) and isinstance(obj, collections.Sequence):
return List(map(infer_sedes, obj))
elif isinstance(obj, bool):
return boolean
elif isinstance(obj, str):
return text
msg = 'Did not find sedes handling type {}'.format(type(obj).__name__)
raise TypeError(msg) | [
"def",
"infer_sedes",
"(",
"obj",
")",
":",
"if",
"is_sedes",
"(",
"obj",
".",
"__class__",
")",
":",
"return",
"obj",
".",
"__class__",
"elif",
"not",
"isinstance",
"(",
"obj",
",",
"bool",
")",
"and",
"isinstance",
"(",
"obj",
",",
"int",
")",
"and",
"obj",
">=",
"0",
":",
"return",
"big_endian_int",
"elif",
"BinaryClass",
".",
"is_valid_type",
"(",
"obj",
")",
":",
"return",
"binary",
"elif",
"not",
"isinstance",
"(",
"obj",
",",
"str",
")",
"and",
"isinstance",
"(",
"obj",
",",
"collections",
".",
"Sequence",
")",
":",
"return",
"List",
"(",
"map",
"(",
"infer_sedes",
",",
"obj",
")",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"bool",
")",
":",
"return",
"boolean",
"elif",
"isinstance",
"(",
"obj",
",",
"str",
")",
":",
"return",
"text",
"msg",
"=",
"'Did not find sedes handling type {}'",
".",
"format",
"(",
"type",
"(",
"obj",
")",
".",
"__name__",
")",
"raise",
"TypeError",
"(",
"msg",
")"
]
| Try to find a sedes object suitable for a given Python object.
The sedes objects considered are `obj`'s class, `big_endian_int` and
`binary`. If `obj` is a sequence, a :class:`rlp.sedes.List` will be
constructed recursively.
:param obj: the python object for which to find a sedes object
:raises: :exc:`TypeError` if no appropriate sedes could be found | [
"Try",
"to",
"find",
"a",
"sedes",
"objects",
"suitable",
"for",
"a",
"given",
"Python",
"object",
"."
]
| train | https://github.com/ethereum/pyrlp/blob/bb898f8056da3973204c699621350bf9565e43df/rlp/codec.py#L261-L284 |
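A sketch of the inference rules above. Note that this version imports Sequence from collections directly, which Python 3.10+ moved to collections.abc, so running it unmodified needs an older interpreter.

    from rlp.codec import infer_sedes
    from rlp.sedes import big_endian_int, binary

    print(infer_sedes(42) is big_endian_int)  # True: non-bool, non-negative int
    print(infer_sedes(b'abc') is binary)      # True: bytes-like value
    print(infer_sedes([1, b'a']))             # a List built recursively
    try:
        infer_sedes(1.5)
    except TypeError as exc:
        print(exc)  # Did not find sedes handling type float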
graphite-project/carbonate | carbonate/config.py | Config.destinations | def destinations(self, cluster='main'):
"""Return a list of destinations for a cluster."""
if not self.config.has_section(cluster):
raise SystemExit("Cluster '%s' not defined in %s"
% (cluster, self.config_file))
destinations = self.config.get(cluster, 'destinations')
return destinations.replace(' ', '').split(',') | python | def destinations(self, cluster='main'):
if not self.config.has_section(cluster):
raise SystemExit("Cluster '%s' not defined in %s"
% (cluster, self.config_file))
destinations = self.config.get(cluster, 'destinations')
return destinations.replace(' ', '').split(',') | [
"def",
"destinations",
"(",
"self",
",",
"cluster",
"=",
"'main'",
")",
":",
"if",
"not",
"self",
".",
"config",
".",
"has_section",
"(",
"cluster",
")",
":",
"raise",
"SystemExit",
"(",
"\"Cluster '%s' not defined in %s\"",
"%",
"(",
"cluster",
",",
"self",
".",
"config_file",
")",
")",
"destinations",
"=",
"self",
".",
"config",
".",
"get",
"(",
"cluster",
",",
"'destinations'",
")",
"return",
"destinations",
".",
"replace",
"(",
"' '",
",",
"''",
")",
".",
"split",
"(",
"','",
")"
]
| Return a list of destinations for a cluster. | [
"Return",
"a",
"list",
"of",
"destinations",
"for",
"a",
"cluster",
"."
]
| train | https://github.com/graphite-project/carbonate/blob/b876a85b321fbd7c18a6721bed2e7807b79b4929/carbonate/config.py#L21-L27 |
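A usage sketch covering this getter together with the replication_factor, ssh_user, and hashing_type getters in the following records. It assumes Config is constructed with the path of an ini-style file (consistent with how self.config_file is reported in the error message above). Given a hypothetical /etc/carbon/carbonate.conf:

    [main]
    destinations = 192.168.1.1:2004:a, 192.168.1.2:2004:b
    replication_factor = 2
    ssh_user = carbon

the getters then read:

    from carbonate.config import Config

    c = Config('/etc/carbon/carbonate.conf')
    print(c.destinations())        # ['192.168.1.1:2004:a', '192.168.1.2:2004:b']
    print(c.replication_factor())  # 2
    print(c.ssh_user())            # 'carbon'
    print(c.hashing_type())        # 'carbon_ch' (the default when unset)
    # c.destinations('other') would raise SystemExit: cluster not defined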
graphite-project/carbonate | carbonate/config.py | Config.replication_factor | def replication_factor(self, cluster='main'):
"""Return the replication factor for a cluster as an integer."""
if not self.config.has_section(cluster):
raise SystemExit("Cluster '%s' not defined in %s"
% (cluster, self.config_file))
return int(self.config.get(cluster, 'replication_factor')) | python | def replication_factor(self, cluster='main'):
if not self.config.has_section(cluster):
raise SystemExit("Cluster '%s' not defined in %s"
% (cluster, self.config_file))
return int(self.config.get(cluster, 'replication_factor')) | [
"def",
"replication_factor",
"(",
"self",
",",
"cluster",
"=",
"'main'",
")",
":",
"if",
"not",
"self",
".",
"config",
".",
"has_section",
"(",
"cluster",
")",
":",
"raise",
"SystemExit",
"(",
"\"Cluster '%s' not defined in %s\"",
"%",
"(",
"cluster",
",",
"self",
".",
"config_file",
")",
")",
"return",
"int",
"(",
"self",
".",
"config",
".",
"get",
"(",
"cluster",
",",
"'replication_factor'",
")",
")"
]
| Return the replication factor for a cluster as an integer. | [
"Return",
"the",
"replication",
"factor",
"for",
"a",
"cluster",
"as",
"an",
"integer",
"."
]
| train | https://github.com/graphite-project/carbonate/blob/b876a85b321fbd7c18a6721bed2e7807b79b4929/carbonate/config.py#L29-L34 |
graphite-project/carbonate | carbonate/config.py | Config.ssh_user | def ssh_user(self, cluster='main'):
"""Return the ssh user for a cluster or current user if undefined."""
if not self.config.has_section(cluster):
raise SystemExit("Cluster '%s' not defined in %s"
% (cluster, self.config_file))
try:
return self.config.get(cluster, 'ssh_user')
except NoOptionError:
return pwd.getpwuid(os.getuid()).pw_name | python | def ssh_user(self, cluster='main'):
if not self.config.has_section(cluster):
raise SystemExit("Cluster '%s' not defined in %s"
% (cluster, self.config_file))
try:
return self.config.get(cluster, 'ssh_user')
except NoOptionError:
return pwd.getpwuid(os.getuid()).pw_name | [
"def",
"ssh_user",
"(",
"self",
",",
"cluster",
"=",
"'main'",
")",
":",
"if",
"not",
"self",
".",
"config",
".",
"has_section",
"(",
"cluster",
")",
":",
"raise",
"SystemExit",
"(",
"\"Cluster '%s' not defined in %s\"",
"%",
"(",
"cluster",
",",
"self",
".",
"config_file",
")",
")",
"try",
":",
"return",
"self",
".",
"config",
".",
"get",
"(",
"cluster",
",",
"'ssh_user'",
")",
"except",
"NoOptionError",
":",
"return",
"pwd",
".",
"getpwuid",
"(",
"os",
".",
"getuid",
"(",
")",
")",
".",
"pw_name"
]
| Return the ssh user for a cluster or current user if undefined. | [
"Return",
"the",
"ssh",
"user",
"for",
"a",
"cluster",
"or",
"current",
"user",
"if",
"undefined",
"."
]
| train | https://github.com/graphite-project/carbonate/blob/b876a85b321fbd7c18a6721bed2e7807b79b4929/carbonate/config.py#L36-L44 |
graphite-project/carbonate | carbonate/config.py | Config.whisper_lock_writes | def whisper_lock_writes(self, cluster='main'):
"""Lock whisper files during carbon-sync."""
if not self.config.has_section(cluster):
raise SystemExit("Cluster '%s' not defined in %s"
% (cluster, self.config_file))
try:
return bool(self.config.get(cluster, 'whisper_lock_writes'))
except NoOptionError:
return False | python | def whisper_lock_writes(self, cluster='main'):
if not self.config.has_section(cluster):
raise SystemExit("Cluster '%s' not defined in %s"
% (cluster, self.config_file))
try:
return bool(self.config.get(cluster, 'whisper_lock_writes'))
except NoOptionError:
return False | [
"def",
"whisper_lock_writes",
"(",
"self",
",",
"cluster",
"=",
"'main'",
")",
":",
"if",
"not",
"self",
".",
"config",
".",
"has_section",
"(",
"cluster",
")",
":",
"raise",
"SystemExit",
"(",
"\"Cluster '%s' not defined in %s\"",
"%",
"(",
"cluster",
",",
"self",
".",
"config_file",
")",
")",
"try",
":",
"return",
"bool",
"(",
"self",
".",
"config",
".",
"get",
"(",
"cluster",
",",
"'whisper_lock_writes'",
")",
")",
"except",
"NoOptionError",
":",
"return",
"False"
]
| Lock whisper files during carbon-sync. | [
"Lock",
"whisper",
"files",
"during",
"carbon",
"-",
"sync",
"."
]
| train | https://github.com/graphite-project/carbonate/blob/b876a85b321fbd7c18a6721bed2e7807b79b4929/carbonate/config.py#L46-L54 |
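One caveat worth flagging: ConfigParser.get() returns a string, and bool() of any non-empty string is True, so as written any value in the file, including 'false' or '0', enables locking. A quick demonstration, with getboolean() as the usual fix:

    print(bool('false'))  # True
    print(bool('0'))      # True
    print(bool(''))       # False: only an empty value reads as disabled
    # ConfigParser.getboolean() would parse 'false'/'no'/'0' as False instead:
    # self.config.getboolean(cluster, 'whisper_lock_writes')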
graphite-project/carbonate | carbonate/config.py | Config.hashing_type | def hashing_type(self, cluster='main'):
"""Hashing type of cluster."""
if not self.config.has_section(cluster):
raise SystemExit("Cluster '%s' not defined in %s"
% (cluster, self.config_file))
hashing_type = 'carbon_ch'
try:
return self.config.get(cluster, 'hashing_type')
except NoOptionError:
return hashing_type | python | def hashing_type(self, cluster='main'):
if not self.config.has_section(cluster):
raise SystemExit("Cluster '%s' not defined in %s"
% (cluster, self.config_file))
hashing_type = 'carbon_ch'
try:
return self.config.get(cluster, 'hashing_type')
except NoOptionError:
return hashing_type | [
"def",
"hashing_type",
"(",
"self",
",",
"cluster",
"=",
"'main'",
")",
":",
"if",
"not",
"self",
".",
"config",
".",
"has_section",
"(",
"cluster",
")",
":",
"raise",
"SystemExit",
"(",
"\"Cluster '%s' not defined in %s\"",
"%",
"(",
"cluster",
",",
"self",
".",
"config_file",
")",
")",
"hashing_type",
"=",
"'carbon_ch'",
"try",
":",
"return",
"self",
".",
"config",
".",
"get",
"(",
"cluster",
",",
"'hashing_type'",
")",
"except",
"NoOptionError",
":",
"return",
"hashing_type"
]
| Hashing type of cluster. | [
"Hashing",
"type",
"of",
"cluster",
"."
]
| train | https://github.com/graphite-project/carbonate/blob/b876a85b321fbd7c18a6721bed2e7807b79b4929/carbonate/config.py#L56-L65 |
graphite-project/carbonate | carbonate/fill.py | fill_archives | def fill_archives(src, dst, startFrom, endAt=0, overwrite=False,
lock_writes=False):
"""
Fills gaps in dst using data from src.
src is the path as a string
dst is the path as a string
startFrom is the latest timestamp (archives are read backward)
endAt is the earliest timestamp (archives are read backward).
if absent, we take the earliest timestamp in the archive
overwrite will write all non-null points from src to dst.
lock whisper files during writes if lock_writes is true
"""
if lock_writes is False:
whisper.LOCK = False
elif whisper.CAN_LOCK and lock_writes is True:
whisper.LOCK = True
header = whisper.info(dst)
archives = header['archives']
archives = sorted(archives, key=lambda t: t['retention'])
for archive in archives:
fromTime = max(endAt, time.time() - archive['retention'])
if fromTime >= startFrom:
continue
(timeInfo, values) = whisper.fetch(dst, fromTime, untilTime=startFrom)
(start, end, step) = timeInfo
gapstart = None
for value in values:
has_value = bool(value and not overwrite)
if not has_value and not gapstart:
gapstart = start
elif has_value and gapstart:
if (start - gapstart) >= archive['secondsPerPoint']:
fill(src, dst, gapstart - step, start)
gapstart = None
start += step
# fill if this gap continues to the end
if gapstart:
fill(src, dst, gapstart - step, end - step)
# The next archive only needs to be filled up to the latest point
# in time we updated.
startFrom = fromTime | python | def fill_archives(src, dst, startFrom, endAt=0, overwrite=False,
lock_writes=False):
if lock_writes is False:
whisper.LOCK = False
elif whisper.CAN_LOCK and lock_writes is True:
whisper.LOCK = True
header = whisper.info(dst)
archives = header['archives']
archives = sorted(archives, key=lambda t: t['retention'])
for archive in archives:
fromTime = max(endAt, time.time() - archive['retention'])
if fromTime >= startFrom:
continue
(timeInfo, values) = whisper.fetch(dst, fromTime, untilTime=startFrom)
(start, end, step) = timeInfo
gapstart = None
for value in values:
has_value = bool(value and not overwrite)
if not has_value and not gapstart:
gapstart = start
elif has_value and gapstart:
if (start - gapstart) >= archive['secondsPerPoint']:
fill(src, dst, gapstart - step, start)
gapstart = None
start += step
if gapstart:
fill(src, dst, gapstart - step, end - step)
startFrom = fromTime | [
"def",
"fill_archives",
"(",
"src",
",",
"dst",
",",
"startFrom",
",",
"endAt",
"=",
"0",
",",
"overwrite",
"=",
"False",
",",
"lock_writes",
"=",
"False",
")",
":",
"if",
"lock_writes",
"is",
"False",
":",
"whisper",
".",
"LOCK",
"=",
"False",
"elif",
"whisper",
".",
"CAN_LOCK",
"and",
"lock_writes",
"is",
"True",
":",
"whisper",
".",
"LOCK",
"=",
"True",
"header",
"=",
"whisper",
".",
"info",
"(",
"dst",
")",
"archives",
"=",
"header",
"[",
"'archives'",
"]",
"archives",
"=",
"sorted",
"(",
"archives",
",",
"key",
"=",
"lambda",
"t",
":",
"t",
"[",
"'retention'",
"]",
")",
"for",
"archive",
"in",
"archives",
":",
"fromTime",
"=",
"max",
"(",
"endAt",
",",
"time",
".",
"time",
"(",
")",
"-",
"archive",
"[",
"'retention'",
"]",
")",
"if",
"fromTime",
">=",
"startFrom",
":",
"continue",
"(",
"timeInfo",
",",
"values",
")",
"=",
"whisper",
".",
"fetch",
"(",
"dst",
",",
"fromTime",
",",
"untilTime",
"=",
"startFrom",
")",
"(",
"start",
",",
"end",
",",
"step",
")",
"=",
"timeInfo",
"gapstart",
"=",
"None",
"for",
"value",
"in",
"values",
":",
"has_value",
"=",
"bool",
"(",
"value",
"and",
"not",
"overwrite",
")",
"if",
"not",
"has_value",
"and",
"not",
"gapstart",
":",
"gapstart",
"=",
"start",
"elif",
"has_value",
"and",
"gapstart",
":",
"if",
"(",
"start",
"-",
"gapstart",
")",
">=",
"archive",
"[",
"'secondsPerPoint'",
"]",
":",
"fill",
"(",
"src",
",",
"dst",
",",
"gapstart",
"-",
"step",
",",
"start",
")",
"gapstart",
"=",
"None",
"start",
"+=",
"step",
"# fill if this gap continues to the end",
"if",
"gapstart",
":",
"fill",
"(",
"src",
",",
"dst",
",",
"gapstart",
"-",
"step",
",",
"end",
"-",
"step",
")",
"# The next archive only needs to be filled up to the latest point",
"# in time we updated.",
"startFrom",
"=",
"fromTime"
]
| Fills gaps in dst using data from src.
src is the path as a string
dst is the path as a string
startFrom is the latest timestamp (archives are read backward)
endAt is the earliest timestamp (archives are read backward).
if absent, we take the earliest timestamp in the archive
overwrite will write all non-null points from src to dst.
lock whisper files during writes if lock_writes is true
"Fills",
"gaps",
"in",
"dst",
"using",
"data",
"from",
"src",
"."
]
| train | https://github.com/graphite-project/carbonate/blob/b876a85b321fbd7c18a6721bed2e7807b79b4929/carbonate/fill.py#L88-L133 |
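A minimal calling sketch; the paths are illustrative, both whisper files are assumed to exist with compatible retention schemas, and the whisper package must be importable.

    import time
    from carbonate.fill import fill_archives

    # Backfill gaps in dst.wsp from src.wsp, scanning from "now" back through
    # every archive; overwrite=True would copy all non-null source points.
    fill_archives('/data/src.wsp', '/data/dst.wsp',
                  startFrom=time.time(), endAt=0,
                  overwrite=False, lock_writes=False)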
graphite-project/carbonate | carbonate/stale.py | data | def data(path, hours, offset=0):
"""
Does the metric at ``path`` have any whisper data newer than ``hours``?
If ``offset`` is nonzero, view the ``hours`` prior to ``offset`` hours
ago instead of from right now.
"""
now = time.time()
end = now - _to_sec(offset) # Will default to now
start = end - _to_sec(hours)
_data = whisper.fetch(path, start, end)
return all(x is None for x in _data[-1]) | python | def data(path, hours, offset=0):
now = time.time()
end = now - _to_sec(offset)
start = end - _to_sec(hours)
_data = whisper.fetch(path, start, end)
return all(x is None for x in _data[-1]) | [
"def",
"data",
"(",
"path",
",",
"hours",
",",
"offset",
"=",
"0",
")",
":",
"now",
"=",
"time",
".",
"time",
"(",
")",
"end",
"=",
"now",
"-",
"_to_sec",
"(",
"offset",
")",
"# Will default to now",
"start",
"=",
"end",
"-",
"_to_sec",
"(",
"hours",
")",
"_data",
"=",
"whisper",
".",
"fetch",
"(",
"path",
",",
"start",
",",
"end",
")",
"return",
"all",
"(",
"x",
"is",
"None",
"for",
"x",
"in",
"_data",
"[",
"-",
"1",
"]",
")"
]
| Does the metric at ``path`` have any whisper data newer than ``hours``?
If ``offset`` is nonzero, view the ``hours`` prior to ``offset`` hours
ago instead of from right now.
"Does",
"the",
"metric",
"at",
"path",
"have",
"any",
"whisper",
"data",
"newer",
"than",
"hours",
"?"
]
| train | https://github.com/graphite-project/carbonate/blob/b876a85b321fbd7c18a6721bed2e7807b79b4929/carbonate/stale.py#L11-L22 |
graphite-project/carbonate | carbonate/stale.py | stat | def stat(path, hours, offset=None):
"""
Has the metric file at ``path`` been modified since ``hours`` ago?
.. note::
``offset`` is only for compatibility with ``data()`` and is ignored.
"""
return os.stat(path).st_mtime < (time.time() - _to_sec(hours)) | python | def stat(path, hours, offset=None):
return os.stat(path).st_mtime < (time.time() - _to_sec(hours)) | [
"def",
"stat",
"(",
"path",
",",
"hours",
",",
"offset",
"=",
"None",
")",
":",
"return",
"os",
".",
"stat",
"(",
"path",
")",
".",
"st_mtime",
"<",
"(",
"time",
".",
"time",
"(",
")",
"-",
"_to_sec",
"(",
"hours",
")",
")"
]
| Has the metric file at ``path`` been modified since ``hours`` ago?
.. note::
``offset`` is only for compatibility with ``data()`` and is ignored. | [
"Has",
"the",
"metric",
"file",
"at",
"path",
"been",
"modified",
"since",
"hours",
"ago?"
]
| train | https://github.com/graphite-project/carbonate/blob/b876a85b321fbd7c18a6721bed2e7807b79b4929/carbonate/stale.py#L25-L32 |
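A sketch exercising both staleness checks on one metric (the path is illustrative): data() inspects the stored points, which is precise but slower, while stat() only consults the file modification time.

    from carbonate import stale

    path = '/data/whisper/app/requests.wsp'
    if stale.data(path, hours=24):
        print('no datapoints written in the last 24 hours')
    if stale.stat(path, hours=24):
        print('file not modified in the last 24 hours')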
holgern/pyedflib | util/refguide_check.py | short_path | def short_path(path, cwd=None):
"""
Return relative or absolute path name, whichever is shortest.
"""
if not isinstance(path, str):
return path
if cwd is None:
cwd = os.getcwd()
abspath = os.path.abspath(path)
relpath = os.path.relpath(path, cwd)
if len(abspath) <= len(relpath):
return abspath
return relpath | python | def short_path(path, cwd=None):
if not isinstance(path, str):
return path
if cwd is None:
cwd = os.getcwd()
abspath = os.path.abspath(path)
relpath = os.path.relpath(path, cwd)
if len(abspath) <= len(relpath):
return abspath
return relpath | [
"def",
"short_path",
"(",
"path",
",",
"cwd",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"path",
",",
"str",
")",
":",
"return",
"path",
"if",
"cwd",
"is",
"None",
":",
"cwd",
"=",
"os",
".",
"getcwd",
"(",
")",
"abspath",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"path",
")",
"relpath",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"path",
",",
"cwd",
")",
"if",
"len",
"(",
"abspath",
")",
"<=",
"len",
"(",
"relpath",
")",
":",
"return",
"abspath",
"return",
"relpath"
]
| Return relative or absolute path name, whichever is shortest. | [
"Return",
"relative",
"or",
"absolute",
"path",
"name",
"whichever",
"is",
"shortest",
"."
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/util/refguide_check.py#L74-L86 |
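A quick sketch, assuming the function is in scope (it lives in the util/refguide_check.py script rather than an installed package):

    import os

    print(short_path(os.path.join(os.getcwd(), 'setup.py')))  # 'setup.py'
    print(short_path(42))                    # non-strings pass through unchanged
    print(short_path('/etc/hosts', '/tmp'))  # whichever spelling is shorter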
holgern/pyedflib | util/refguide_check.py | get_all_dict | def get_all_dict(module):
"""Return a copy of the __all__ dict with irrelevant items removed."""
if hasattr(module, "__all__"):
all_dict = copy.deepcopy(module.__all__)
else:
all_dict = copy.deepcopy(dir(module))
all_dict = [name for name in all_dict
if not name.startswith("_")]
for name in ['absolute_import', 'division', 'print_function']:
try:
all_dict.remove(name)
except ValueError:
pass
# Modules are almost always private; real submodules need a separate
# run of refguide_check.
all_dict = [name for name in all_dict
if not inspect.ismodule(getattr(module, name, None))]
deprecated = []
not_deprecated = []
for name in all_dict:
f = getattr(module, name, None)
if callable(f) and is_deprecated(f):
deprecated.append(name)
else:
not_deprecated.append(name)
others = set(dir(module)).difference(set(deprecated)).difference(set(not_deprecated))
return not_deprecated, deprecated, others | python | def get_all_dict(module):
if hasattr(module, "__all__"):
all_dict = copy.deepcopy(module.__all__)
else:
all_dict = copy.deepcopy(dir(module))
all_dict = [name for name in all_dict
if not name.startswith("_")]
for name in ['absolute_import', 'division', 'print_function']:
try:
all_dict.remove(name)
except ValueError:
pass
all_dict = [name for name in all_dict
if not inspect.ismodule(getattr(module, name, None))]
deprecated = []
not_deprecated = []
for name in all_dict:
f = getattr(module, name, None)
if callable(f) and is_deprecated(f):
deprecated.append(name)
else:
not_deprecated.append(name)
others = set(dir(module)).difference(set(deprecated)).difference(set(not_deprecated))
return not_deprecated, deprecated, others | [
"def",
"get_all_dict",
"(",
"module",
")",
":",
"if",
"hasattr",
"(",
"module",
",",
"\"__all__\"",
")",
":",
"all_dict",
"=",
"copy",
".",
"deepcopy",
"(",
"module",
".",
"__all__",
")",
"else",
":",
"all_dict",
"=",
"copy",
".",
"deepcopy",
"(",
"dir",
"(",
"module",
")",
")",
"all_dict",
"=",
"[",
"name",
"for",
"name",
"in",
"all_dict",
"if",
"not",
"name",
".",
"startswith",
"(",
"\"_\"",
")",
"]",
"for",
"name",
"in",
"[",
"'absolute_import'",
",",
"'division'",
",",
"'print_function'",
"]",
":",
"try",
":",
"all_dict",
".",
"remove",
"(",
"name",
")",
"except",
"ValueError",
":",
"pass",
"# Modules are almost always private; real submodules need a separate",
"# run of refguide_check.",
"all_dict",
"=",
"[",
"name",
"for",
"name",
"in",
"all_dict",
"if",
"not",
"inspect",
".",
"ismodule",
"(",
"getattr",
"(",
"module",
",",
"name",
",",
"None",
")",
")",
"]",
"deprecated",
"=",
"[",
"]",
"not_deprecated",
"=",
"[",
"]",
"for",
"name",
"in",
"all_dict",
":",
"f",
"=",
"getattr",
"(",
"module",
",",
"name",
",",
"None",
")",
"if",
"callable",
"(",
"f",
")",
"and",
"is_deprecated",
"(",
"f",
")",
":",
"deprecated",
".",
"append",
"(",
"name",
")",
"else",
":",
"not_deprecated",
".",
"append",
"(",
"name",
")",
"others",
"=",
"set",
"(",
"dir",
"(",
"module",
")",
")",
".",
"difference",
"(",
"set",
"(",
"deprecated",
")",
")",
".",
"difference",
"(",
"set",
"(",
"not_deprecated",
")",
")",
"return",
"not_deprecated",
",",
"deprecated",
",",
"others"
]
| Return a copy of the __all__ dict with irrelevant items removed. | [
"Return",
"a",
"copy",
"of",
"the",
"__all__",
"dict",
"with",
"irrelevant",
"items",
"removed",
"."
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/util/refguide_check.py#L127-L157 |
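A toy module run through the helper; get_all_dict and its is_deprecated dependency from the same script are assumed to be in scope.

    import types

    mod = types.ModuleType('demo')
    mod.foo = lambda: None
    mod.__all__ = ['foo', 'print_function']  # the compat name is filtered out

    not_deprecated, deprecated, others = get_all_dict(mod)
    print(not_deprecated)  # ['foo']
    print(deprecated)      # []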
holgern/pyedflib | util/refguide_check.py | compare | def compare(all_dict, others, names, module_name):
"""Return sets of objects only in __all__, refguide, or completely missing."""
only_all = set()
for name in all_dict:
if name not in names:
only_all.add(name)
only_ref = set()
missing = set()
for name in names:
if name not in all_dict:
for pat in REFGUIDE_ALL_SKIPLIST:
if re.match(pat, module_name + '.' + name):
if name not in others:
missing.add(name)
break
else:
only_ref.add(name)
return only_all, only_ref, missing | python | def compare(all_dict, others, names, module_name):
only_all = set()
for name in all_dict:
if name not in names:
only_all.add(name)
only_ref = set()
missing = set()
for name in names:
if name not in all_dict:
for pat in REFGUIDE_ALL_SKIPLIST:
if re.match(pat, module_name + '.' + name):
if name not in others:
missing.add(name)
break
else:
only_ref.add(name)
return only_all, only_ref, missing | [
"def",
"compare",
"(",
"all_dict",
",",
"others",
",",
"names",
",",
"module_name",
")",
":",
"only_all",
"=",
"set",
"(",
")",
"for",
"name",
"in",
"all_dict",
":",
"if",
"name",
"not",
"in",
"names",
":",
"only_all",
".",
"add",
"(",
"name",
")",
"only_ref",
"=",
"set",
"(",
")",
"missing",
"=",
"set",
"(",
")",
"for",
"name",
"in",
"names",
":",
"if",
"name",
"not",
"in",
"all_dict",
":",
"for",
"pat",
"in",
"REFGUIDE_ALL_SKIPLIST",
":",
"if",
"re",
".",
"match",
"(",
"pat",
",",
"module_name",
"+",
"'.'",
"+",
"name",
")",
":",
"if",
"name",
"not",
"in",
"others",
":",
"missing",
".",
"add",
"(",
"name",
")",
"break",
"else",
":",
"only_ref",
".",
"add",
"(",
"name",
")",
"return",
"only_all",
",",
"only_ref",
",",
"missing"
]
| Return sets of objects only in __all__, refguide, or completely missing. | [
"Return",
"sets",
"of",
"objects",
"only",
"in",
"__all__",
"refguide",
"or",
"completely",
"missing",
"."
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/util/refguide_check.py#L160-L179 |
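A sketch of the three buckets; compare and the module-level REFGUIDE_ALL_SKIPLIST from this script are assumed in scope.

    only_all, only_ref, missing = compare(
        all_dict=['a', 'b'],   # names exported via __all__
        others=set(),          # module attributes outside __all__
        names=['b', 'c'],      # names mentioned in the refguide
        module_name='pyedflib')
    print(only_all)  # {'a'}: exported but not documented
    print(only_ref)  # {'c'}, unless a skiplist pattern matches 'pyedflib.c'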
holgern/pyedflib | util/refguide_check.py | check_rest | def check_rest(module, names, dots=True):
"""
Check reStructuredText formatting of docstrings
Returns: [(name, success_flag, output), ...]
"""
try:
skip_types = (dict, str, unicode, float, int)
except NameError:
# python 3
skip_types = (dict, str, float, int)
results = []
if module.__name__[6:] not in OTHER_MODULE_DOCS:
results += [(module.__name__,) +
validate_rst_syntax(inspect.getdoc(module),
module.__name__, dots=dots)]
for name in names:
full_name = module.__name__ + '.' + name
obj = getattr(module, name, None)
if obj is None:
results.append((full_name, False, "%s has no docstring" % (full_name,)))
continue
elif isinstance(obj, skip_types):
continue
if inspect.ismodule(obj):
text = inspect.getdoc(obj)
else:
try:
text = str(get_doc_object(obj))
except:
import traceback
results.append((full_name, False,
"Error in docstring format!\n" +
traceback.format_exc()))
continue
m = re.search("([\x00-\x09\x0b-\x1f])", text)
if m:
msg = ("Docstring contains a non-printable character %r! "
"Maybe forgot r\"\"\"?" % (m.group(1),))
results.append((full_name, False, msg))
continue
try:
src_file = short_path(inspect.getsourcefile(obj))
except TypeError:
src_file = None
if src_file:
file_full_name = src_file + ':' + full_name
else:
file_full_name = full_name
results.append((full_name,) +
validate_rst_syntax(text, file_full_name, dots=dots))
return results | python | def check_rest(module, names, dots=True):
try:
skip_types = (dict, str, unicode, float, int)
except NameError:
skip_types = (dict, str, float, int)
results = []
if module.__name__[6:] not in OTHER_MODULE_DOCS:
results += [(module.__name__,) +
validate_rst_syntax(inspect.getdoc(module),
module.__name__, dots=dots)]
for name in names:
full_name = module.__name__ + '.' + name
obj = getattr(module, name, None)
if obj is None:
results.append((full_name, False, "%s has no docstring" % (full_name,)))
continue
elif isinstance(obj, skip_types):
continue
if inspect.ismodule(obj):
text = inspect.getdoc(obj)
else:
try:
text = str(get_doc_object(obj))
except:
import traceback
results.append((full_name, False,
"Error in docstring format!\n" +
traceback.format_exc()))
continue
m = re.search("([\x00-\x09\x0b-\x1f])", text)
if m:
msg = ("Docstring contains a non-printable character %r! "
"Maybe forgot r\"\"\"?" % (m.group(1),))
results.append((full_name, False, msg))
continue
try:
src_file = short_path(inspect.getsourcefile(obj))
except TypeError:
src_file = None
if src_file:
file_full_name = src_file + ':' + full_name
else:
file_full_name = full_name
results.append((full_name,) +
validate_rst_syntax(text, file_full_name, dots=dots))
return results | [
"def",
"check_rest",
"(",
"module",
",",
"names",
",",
"dots",
"=",
"True",
")",
":",
"try",
":",
"skip_types",
"=",
"(",
"dict",
",",
"str",
",",
"unicode",
",",
"float",
",",
"int",
")",
"except",
"NameError",
":",
"# python 3",
"skip_types",
"=",
"(",
"dict",
",",
"str",
",",
"float",
",",
"int",
")",
"results",
"=",
"[",
"]",
"if",
"module",
".",
"__name__",
"[",
"6",
":",
"]",
"not",
"in",
"OTHER_MODULE_DOCS",
":",
"results",
"+=",
"[",
"(",
"module",
".",
"__name__",
",",
")",
"+",
"validate_rst_syntax",
"(",
"inspect",
".",
"getdoc",
"(",
"module",
")",
",",
"module",
".",
"__name__",
",",
"dots",
"=",
"dots",
")",
"]",
"for",
"name",
"in",
"names",
":",
"full_name",
"=",
"module",
".",
"__name__",
"+",
"'.'",
"+",
"name",
"obj",
"=",
"getattr",
"(",
"module",
",",
"name",
",",
"None",
")",
"if",
"obj",
"is",
"None",
":",
"results",
".",
"append",
"(",
"(",
"full_name",
",",
"False",
",",
"\"%s has no docstring\"",
"%",
"(",
"full_name",
",",
")",
")",
")",
"continue",
"elif",
"isinstance",
"(",
"obj",
",",
"skip_types",
")",
":",
"continue",
"if",
"inspect",
".",
"ismodule",
"(",
"obj",
")",
":",
"text",
"=",
"inspect",
".",
"getdoc",
"(",
"obj",
")",
"else",
":",
"try",
":",
"text",
"=",
"str",
"(",
"get_doc_object",
"(",
"obj",
")",
")",
"except",
":",
"import",
"traceback",
"results",
".",
"append",
"(",
"(",
"full_name",
",",
"False",
",",
"\"Error in docstring format!\\n\"",
"+",
"traceback",
".",
"format_exc",
"(",
")",
")",
")",
"continue",
"m",
"=",
"re",
".",
"search",
"(",
"\"([\\x00-\\x09\\x0b-\\x1f])\"",
",",
"text",
")",
"if",
"m",
":",
"msg",
"=",
"(",
"\"Docstring contains a non-printable character %r! \"",
"\"Maybe forgot r\\\"\\\"\\\"?\"",
"%",
"(",
"m",
".",
"group",
"(",
"1",
")",
",",
")",
")",
"results",
".",
"append",
"(",
"(",
"full_name",
",",
"False",
",",
"msg",
")",
")",
"continue",
"try",
":",
"src_file",
"=",
"short_path",
"(",
"inspect",
".",
"getsourcefile",
"(",
"obj",
")",
")",
"except",
"TypeError",
":",
"src_file",
"=",
"None",
"if",
"src_file",
":",
"file_full_name",
"=",
"src_file",
"+",
"':'",
"+",
"full_name",
"else",
":",
"file_full_name",
"=",
"full_name",
"results",
".",
"append",
"(",
"(",
"full_name",
",",
")",
"+",
"validate_rst_syntax",
"(",
"text",
",",
"file_full_name",
",",
"dots",
"=",
"dots",
")",
")",
"return",
"results"
]
| Check reStructuredText formatting of docstrings
Returns: [(name, success_flag, output), ...] | [
"Check",
"reStructuredText",
"formatting",
"of",
"docstrings"
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/util/refguide_check.py#L310-L372 |
holgern/pyedflib | pyedflib/edfwriter.py | EdfWriter.update_header | def update_header(self):
"""
Updates header to edffile struct
"""
set_technician(self.handle, du(self.technician))
set_recording_additional(self.handle, du(self.recording_additional))
set_patientname(self.handle, du(self.patient_name))
set_patientcode(self.handle, du(self.patient_code))
set_patient_additional(self.handle, du(self.patient_additional))
set_equipment(self.handle, du(self.equipment))
set_admincode(self.handle, du(self.admincode))
if isinstance(self.gender, int):
set_gender(self.handle, self.gender)
elif self.gender == "Male":
set_gender(self.handle, 0)
elif self.gender == "Female":
set_gender(self.handle, 1)
set_datarecord_duration(self.handle, self.duration)
set_number_of_annotation_signals(self.handle, self.number_of_annotations)
set_startdatetime(self.handle, self.recording_start_time.year, self.recording_start_time.month,
self.recording_start_time.day, self.recording_start_time.hour,
self.recording_start_time.minute, self.recording_start_time.second)
if isstr(self.birthdate):
if self.birthdate != '':
birthday = datetime.strptime(self.birthdate, '%d %b %Y').date()
set_birthdate(self.handle, birthday.year, birthday.month, birthday.day)
else:
set_birthdate(self.handle, self.birthdate.year, self.birthdate.month, self.birthdate.day)
for i in np.arange(self.n_channels):
set_samplefrequency(self.handle, i, self.channels[i]['sample_rate'])
set_physical_maximum(self.handle, i, self.channels[i]['physical_max'])
set_physical_minimum(self.handle, i, self.channels[i]['physical_min'])
set_digital_maximum(self.handle, i, self.channels[i]['digital_max'])
set_digital_minimum(self.handle, i, self.channels[i]['digital_min'])
set_label(self.handle, i, du(self.channels[i]['label']))
set_physical_dimension(self.handle, i, du(self.channels[i]['dimension']))
set_transducer(self.handle, i, du(self.channels[i]['transducer']))
set_prefilter(self.handle, i, du(self.channels[i]['prefilter'])) | python | def update_header(self):
set_technician(self.handle, du(self.technician))
set_recording_additional(self.handle, du(self.recording_additional))
set_patientname(self.handle, du(self.patient_name))
set_patientcode(self.handle, du(self.patient_code))
set_patient_additional(self.handle, du(self.patient_additional))
set_equipment(self.handle, du(self.equipment))
set_admincode(self.handle, du(self.admincode))
if isinstance(self.gender, int):
set_gender(self.handle, self.gender)
elif self.gender == "Male":
set_gender(self.handle, 0)
elif self.gender == "Female":
set_gender(self.handle, 1)
set_datarecord_duration(self.handle, self.duration)
set_number_of_annotation_signals(self.handle, self.number_of_annotations)
set_startdatetime(self.handle, self.recording_start_time.year, self.recording_start_time.month,
self.recording_start_time.day, self.recording_start_time.hour,
self.recording_start_time.minute, self.recording_start_time.second)
if isstr(self.birthdate):
if self.birthdate != '':
birthday = datetime.strptime(self.birthdate, '%d %b %Y').date()
set_birthdate(self.handle, birthday.year, birthday.month, birthday.day)
else:
set_birthdate(self.handle, self.birthdate.year, self.birthdate.month, self.birthdate.day)
for i in np.arange(self.n_channels):
set_samplefrequency(self.handle, i, self.channels[i]['sample_rate'])
set_physical_maximum(self.handle, i, self.channels[i]['physical_max'])
set_physical_minimum(self.handle, i, self.channels[i]['physical_min'])
set_digital_maximum(self.handle, i, self.channels[i]['digital_max'])
set_digital_minimum(self.handle, i, self.channels[i]['digital_min'])
set_label(self.handle, i, du(self.channels[i]['label']))
set_physical_dimension(self.handle, i, du(self.channels[i]['dimension']))
set_transducer(self.handle, i, du(self.channels[i]['transducer']))
set_prefilter(self.handle, i, du(self.channels[i]['prefilter'])) | [
"def",
"update_header",
"(",
"self",
")",
":",
"set_technician",
"(",
"self",
".",
"handle",
",",
"du",
"(",
"self",
".",
"technician",
")",
")",
"set_recording_additional",
"(",
"self",
".",
"handle",
",",
"du",
"(",
"self",
".",
"recording_additional",
")",
")",
"set_patientname",
"(",
"self",
".",
"handle",
",",
"du",
"(",
"self",
".",
"patient_name",
")",
")",
"set_patientcode",
"(",
"self",
".",
"handle",
",",
"du",
"(",
"self",
".",
"patient_code",
")",
")",
"set_patient_additional",
"(",
"self",
".",
"handle",
",",
"du",
"(",
"self",
".",
"patient_additional",
")",
")",
"set_equipment",
"(",
"self",
".",
"handle",
",",
"du",
"(",
"self",
".",
"equipment",
")",
")",
"set_admincode",
"(",
"self",
".",
"handle",
",",
"du",
"(",
"self",
".",
"admincode",
")",
")",
"if",
"isinstance",
"(",
"self",
".",
"gender",
",",
"int",
")",
":",
"set_gender",
"(",
"self",
".",
"handle",
",",
"self",
".",
"gender",
")",
"elif",
"self",
".",
"gender",
"==",
"\"Male\"",
":",
"set_gender",
"(",
"self",
".",
"handle",
",",
"0",
")",
"elif",
"self",
".",
"gender",
"==",
"\"Female\"",
":",
"set_gender",
"(",
"self",
".",
"handle",
",",
"1",
")",
"set_datarecord_duration",
"(",
"self",
".",
"handle",
",",
"self",
".",
"duration",
")",
"set_number_of_annotation_signals",
"(",
"self",
".",
"handle",
",",
"self",
".",
"number_of_annotations",
")",
"set_startdatetime",
"(",
"self",
".",
"handle",
",",
"self",
".",
"recording_start_time",
".",
"year",
",",
"self",
".",
"recording_start_time",
".",
"month",
",",
"self",
".",
"recording_start_time",
".",
"day",
",",
"self",
".",
"recording_start_time",
".",
"hour",
",",
"self",
".",
"recording_start_time",
".",
"minute",
",",
"self",
".",
"recording_start_time",
".",
"second",
")",
"if",
"isstr",
"(",
"self",
".",
"birthdate",
")",
":",
"if",
"self",
".",
"birthdate",
"!=",
"''",
":",
"birthday",
"=",
"datetime",
".",
"strptime",
"(",
"self",
".",
"birthdate",
",",
"'%d %b %Y'",
")",
".",
"date",
"(",
")",
"set_birthdate",
"(",
"self",
".",
"handle",
",",
"birthday",
".",
"year",
",",
"birthday",
".",
"month",
",",
"birthday",
".",
"day",
")",
"else",
":",
"set_birthdate",
"(",
"self",
".",
"handle",
",",
"self",
".",
"birthdate",
".",
"year",
",",
"self",
".",
"birthdate",
".",
"month",
",",
"self",
".",
"birthdate",
".",
"day",
")",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"self",
".",
"n_channels",
")",
":",
"set_samplefrequency",
"(",
"self",
".",
"handle",
",",
"i",
",",
"self",
".",
"channels",
"[",
"i",
"]",
"[",
"'sample_rate'",
"]",
")",
"set_physical_maximum",
"(",
"self",
".",
"handle",
",",
"i",
",",
"self",
".",
"channels",
"[",
"i",
"]",
"[",
"'physical_max'",
"]",
")",
"set_physical_minimum",
"(",
"self",
".",
"handle",
",",
"i",
",",
"self",
".",
"channels",
"[",
"i",
"]",
"[",
"'physical_min'",
"]",
")",
"set_digital_maximum",
"(",
"self",
".",
"handle",
",",
"i",
",",
"self",
".",
"channels",
"[",
"i",
"]",
"[",
"'digital_max'",
"]",
")",
"set_digital_minimum",
"(",
"self",
".",
"handle",
",",
"i",
",",
"self",
".",
"channels",
"[",
"i",
"]",
"[",
"'digital_min'",
"]",
")",
"set_label",
"(",
"self",
".",
"handle",
",",
"i",
",",
"du",
"(",
"self",
".",
"channels",
"[",
"i",
"]",
"[",
"'label'",
"]",
")",
")",
"set_physical_dimension",
"(",
"self",
".",
"handle",
",",
"i",
",",
"du",
"(",
"self",
".",
"channels",
"[",
"i",
"]",
"[",
"'dimension'",
"]",
")",
")",
"set_transducer",
"(",
"self",
".",
"handle",
",",
"i",
",",
"du",
"(",
"self",
".",
"channels",
"[",
"i",
"]",
"[",
"'transducer'",
"]",
")",
")",
"set_prefilter",
"(",
"self",
".",
"handle",
",",
"i",
",",
"du",
"(",
"self",
".",
"channels",
"[",
"i",
"]",
"[",
"'prefilter'",
"]",
")",
")"
]
| Updates header to edffile struct | [
"Updates",
"header",
"to",
"edffile",
"struct"
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfwriter.py#L137-L175 |
holgern/pyedflib | pyedflib/edfwriter.py | EdfWriter.setHeader | def setHeader(self, fileHeader):
"""
Sets the file header
"""
self.technician = fileHeader["technician"]
self.recording_additional = fileHeader["recording_additional"]
self.patient_name = fileHeader["patientname"]
self.patient_additional = fileHeader["patient_additional"]
self.patient_code = fileHeader["patientcode"]
self.equipment = fileHeader["equipment"]
self.admincode = fileHeader["admincode"]
self.gender = fileHeader["gender"]
self.recording_start_time = fileHeader["startdate"]
self.birthdate = fileHeader["birthdate"]
self.update_header() | python | def setHeader(self, fileHeader):
self.technician = fileHeader["technician"]
self.recording_additional = fileHeader["recording_additional"]
self.patient_name = fileHeader["patientname"]
self.patient_additional = fileHeader["patient_additional"]
self.patient_code = fileHeader["patientcode"]
self.equipment = fileHeader["equipment"]
self.admincode = fileHeader["admincode"]
self.gender = fileHeader["gender"]
self.recording_start_time = fileHeader["startdate"]
self.birthdate = fileHeader["birthdate"]
self.update_header() | [
"def",
"setHeader",
"(",
"self",
",",
"fileHeader",
")",
":",
"self",
".",
"technician",
"=",
"fileHeader",
"[",
"\"technician\"",
"]",
"self",
".",
"recording_additional",
"=",
"fileHeader",
"[",
"\"recording_additional\"",
"]",
"self",
".",
"patient_name",
"=",
"fileHeader",
"[",
"\"patientname\"",
"]",
"self",
".",
"patient_additional",
"=",
"fileHeader",
"[",
"\"patient_additional\"",
"]",
"self",
".",
"patient_code",
"=",
"fileHeader",
"[",
"\"patientcode\"",
"]",
"self",
".",
"equipment",
"=",
"fileHeader",
"[",
"\"equipment\"",
"]",
"self",
".",
"admincode",
"=",
"fileHeader",
"[",
"\"admincode\"",
"]",
"self",
".",
"gender",
"=",
"fileHeader",
"[",
"\"gender\"",
"]",
"self",
".",
"recording_start_time",
"=",
"fileHeader",
"[",
"\"startdate\"",
"]",
"self",
".",
"birthdate",
"=",
"fileHeader",
"[",
"\"birthdate\"",
"]",
"self",
".",
"update_header",
"(",
")"
]
| Sets the file header | [
"Sets",
"the",
"file",
"header"
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfwriter.py#L177-L191 |
holgern/pyedflib | pyedflib/edfwriter.py | EdfWriter.setSignalHeader | def setSignalHeader(self, edfsignal, channel_info):
"""
Sets the parameter for signal edfsignal.
channel_info should be a dict with
these values:
'label' : channel label (string, <= 16 characters, must be unique)
'dimension' : physical dimension (e.g., mV) (string, <= 8 characters)
'sample_rate' : sample frequency in hertz (int)
'physical_max' : maximum physical value (float)
'physical_min' : minimum physical value (float)
'digital_max' : maximum digital value (int, -2**15 <= x < 2**15)
'digital_min' : minimum digital value (int, -2**15 <= x < 2**15)
"""
if edfsignal < 0 or edfsignal > self.n_channels:
raise ChannelDoesNotExist(edfsignal)
self.channels[edfsignal] = channel_info
self.update_header() | python | def setSignalHeader(self, edfsignal, channel_info):
if edfsignal < 0 or edfsignal > self.n_channels:
raise ChannelDoesNotExist(edfsignal)
self.channels[edfsignal] = channel_info
self.update_header() | [
"def",
"setSignalHeader",
"(",
"self",
",",
"edfsignal",
",",
"channel_info",
")",
":",
"if",
"edfsignal",
"<",
"0",
"or",
"edfsignal",
">",
"self",
".",
"n_channels",
":",
"raise",
"ChannelDoesNotExist",
"(",
"edfsignal",
")",
"self",
".",
"channels",
"[",
"edfsignal",
"]",
"=",
"channel_info",
"self",
".",
"update_header",
"(",
")"
]
| Sets the parameter for signal edfsignal.
channel_info should be a dict with
these values:
'label' : channel label (string, <= 16 characters, must be unique)
'dimension' : physical dimension (e.g., mV) (string, <= 8 characters)
'sample_rate' : sample frequency in hertz (int)
'physical_max' : maximum physical value (float)
'physical_min' : minimum physical value (float)
'digital_max' : maximum digital value (int, -2**15 <= x < 2**15)
'digital_min' : minimum digital value (int, -2**15 <= x < 2**15) | [
"Sets",
"the",
"parameter",
"for",
"signal",
"edfsignal",
"."
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfwriter.py#L193-L211 |
holgern/pyedflib | pyedflib/edfwriter.py | EdfWriter.setSignalHeaders | def setSignalHeaders(self, signalHeaders):
"""
Sets the parameter for all signals
Parameters
----------
signalHeaders : array_like
containing dict with
'label' : str
channel label (string, <= 16 characters, must be unique)
'dimension' : str
physical dimension (e.g., mV) (string, <= 8 characters)
'sample_rate' : int
sample frequency in hertz
'physical_max' : float
maximum physical value
'physical_min' : float
minimum physical value
'digital_max' : int
maximum digital value (-2**15 <= x < 2**15)
'digital_min' : int
minimum digital value (-2**15 <= x < 2**15)
"""
for edfsignal in np.arange(self.n_channels):
self.channels[edfsignal] = signalHeaders[edfsignal]
self.update_header() | python | def setSignalHeaders(self, signalHeaders):
for edfsignal in np.arange(self.n_channels):
self.channels[edfsignal] = signalHeaders[edfsignal]
self.update_header() | [
"def",
"setSignalHeaders",
"(",
"self",
",",
"signalHeaders",
")",
":",
"for",
"edfsignal",
"in",
"np",
".",
"arange",
"(",
"self",
".",
"n_channels",
")",
":",
"self",
".",
"channels",
"[",
"edfsignal",
"]",
"=",
"signalHeaders",
"[",
"edfsignal",
"]",
"self",
".",
"update_header",
"(",
")"
]
| Sets the parameter for all signals
Parameters
----------
signalHeaders : array_like
containing dict with
'label' : str
channel label (string, <= 16 characters, must be unique)
'dimension' : str
physical dimension (e.g., mV) (string, <= 8 characters)
'sample_rate' : int
sample frequency in hertz
'physical_max' : float
maximum physical value
'physical_min' : float
minimum physical value
'digital_max' : int
maximum digital value (-2**15 <= x < 2**15)
'digital_min' : int
minimum digital value (-2**15 <= x < 2**15) | [
"Sets",
"the",
"parameter",
"for",
"all",
"signals"
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfwriter.py#L213-L238 |
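An end-to-end writer sketch tying the header setters together. The EdfWriter constructor signature, the FILETYPE_EDFPLUS constant, and writeSamples() are assumed from pyedflib's public API; note that update_header above also reads 'transducer' and 'prefilter' from every channel dict, so those keys are supplied even though setSignalHeader's docstring does not list them.

    import numpy as np
    import pyedflib

    n_channels = 2
    w = pyedflib.EdfWriter('demo.edf', n_channels,
                           file_type=pyedflib.FILETYPE_EDFPLUS)
    w.setSignalHeaders([{
        'label': 'ch%d' % i, 'dimension': 'uV', 'sample_rate': 100,
        'physical_max': 100.0, 'physical_min': -100.0,
        'digital_max': 32767, 'digital_min': -32768,
        'transducer': '', 'prefilter': ''} for i in range(n_channels)])
    w.writeSamples([np.random.uniform(-100, 100, 100)
                    for _ in range(n_channels)])
    w.close()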
holgern/pyedflib | pyedflib/edfwriter.py | EdfWriter.set_number_of_annotation_signals | def set_number_of_annotation_signals(self, number_of_annotations):
"""
Sets the number of annotation signals. The default value is 1
This function is optional and can be called only after opening a file in writemode
and before the first sample write action
Normally you don't need to change the default value. Only when the number of annotations
you want to write is more than the number of seconds of the duration of the recording, you can use
this function to increase the storage space for annotations
Minimum is 1, maximum is 64
Parameters
----------
number_of_annotations : integer
Sets the number of annotation signals
"""
number_of_annotations = max((min((int(number_of_annotations), 64)), 1))
self.number_of_annotations = number_of_annotations
self.update_header() | python | def set_number_of_annotation_signals(self, number_of_annotations):
number_of_annotations = max((min((int(number_of_annotations), 64)), 1))
self.number_of_annotations = number_of_annotations
self.update_header() | [
"def",
"set_number_of_annotation_signals",
"(",
"self",
",",
"number_of_annotations",
")",
":",
"number_of_annotations",
"=",
"max",
"(",
"(",
"min",
"(",
"(",
"int",
"(",
"number_of_annotations",
")",
",",
"64",
")",
")",
",",
"1",
")",
")",
"self",
".",
"number_of_annotations",
"=",
"number_of_annotations",
"self",
".",
"update_header",
"(",
")"
]
| Sets the number of annotation signals. The default value is 1
This function is optional and can be called only after opening a file in writemode
and before the first sample write action
Normally you don't need to change the default value. Only when the number of annotations
you want to write is more than the number of seconds of the duration of the recording, you can use
this function to increase the storage space for annotations
Minimum is 1, maximum is 64
Parameters
----------
number_of_annotations : integer
Sets the number of annotation signals | [
"Sets",
"the",
"number",
"of",
"annotation",
"signals",
".",
"The",
"default",
"value",
"is",
"1",
"This",
"function",
"is",
"optional",
"and",
"can",
"be",
"called",
"only",
"after",
"opening",
"a",
"file",
"in",
"writemode",
"and",
"before",
"the",
"first",
"sample",
"write",
"action",
"Normally",
"you",
"don",
"t",
"need",
"to",
"change",
"the",
"default",
"value",
".",
"Only",
"when",
"the",
"number",
"of",
"annotations",
"you",
"want",
"to",
"write",
"is",
"more",
"than",
"the",
"number",
"of",
"seconds",
"of",
"the",
"duration",
"of",
"the",
"recording",
"you",
"can",
"use",
"this",
"function",
"to",
"increase",
"the",
"storage",
"space",
"for",
"annotations",
"Minimum",
"is",
"1",
"maximum",
"is",
"64"
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfwriter.py#L364-L381 |
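Continuing the writer sketch above; values outside 1..64 are clamped rather than rejected:

    w.set_number_of_annotation_signals(4)    # stored as 4
    w.set_number_of_annotation_signals(999)  # clamped to 64
    w.set_number_of_annotation_signals(0)    # clamped to 1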
holgern/pyedflib | pyedflib/edfwriter.py | EdfWriter.setStartdatetime | def setStartdatetime(self, recording_start_time):
"""
Sets the recording start time
Parameters
----------
recording_start_time: datetime object
Sets the recording start time
"""
if isinstance(recording_start_time,datetime):
self.recording_start_time = recording_start_time
else:
self.recording_start_time = datetime.strptime(recording_start_time,"%d %b %Y %H:%M:%S")
self.update_header() | python | def setStartdatetime(self, recording_start_time):
if isinstance(recording_start_time,datetime):
self.recording_start_time = recording_start_time
else:
self.recording_start_time = datetime.strptime(recording_start_time,"%d %b %Y %H:%M:%S")
self.update_header() | [
"def",
"setStartdatetime",
"(",
"self",
",",
"recording_start_time",
")",
":",
"if",
"isinstance",
"(",
"recording_start_time",
",",
"datetime",
")",
":",
"self",
".",
"recording_start_time",
"=",
"recording_start_time",
"else",
":",
"self",
".",
"recording_start_time",
"=",
"datetime",
".",
"strptime",
"(",
"recording_start_time",
",",
"\"%d %b %Y %H:%M:%S\"",
")",
"self",
".",
"update_header",
"(",
")"
]
| Sets the recording start time
Parameters
----------
recording_start_time: datetime object
Sets the recording start time
"Sets",
"the",
"recording",
"start",
"Time"
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfwriter.py#L383-L396 |
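Both accepted spellings, continuing the same writer sketch; the string form must match the '%d %b %Y %H:%M:%S' pattern used by strptime above:

    from datetime import datetime

    w.setStartdatetime(datetime(2024, 1, 2, 3, 4, 5))
    w.setStartdatetime('02 Jan 2024 03:04:05')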
holgern/pyedflib | pyedflib/edfwriter.py | EdfWriter.setSamplefrequency | def setSamplefrequency(self, edfsignal, samplefrequency):
"""
Sets the samplefrequency of signal edfsignal.
Notes
-----
This function is required for every signal and can be called only after opening a file in writemode and before the first sample write action.
"""
if edfsignal < 0 or edfsignal > self.n_channels:
raise ChannelDoesNotExist(edfsignal)
self.channels[edfsignal]['sample_rate'] = samplefrequency
self.update_header() | python | def setSamplefrequency(self, edfsignal, samplefrequency):
if edfsignal < 0 or edfsignal > self.n_channels:
raise ChannelDoesNotExist(edfsignal)
self.channels[edfsignal]['sample_rate'] = samplefrequency
self.update_header() | [
"def",
"setSamplefrequency",
"(",
"self",
",",
"edfsignal",
",",
"samplefrequency",
")",
":",
"if",
"edfsignal",
"<",
"0",
"or",
"edfsignal",
">",
"self",
".",
"n_channels",
":",
"raise",
"ChannelDoesNotExist",
"(",
"edfsignal",
")",
"self",
".",
"channels",
"[",
"edfsignal",
"]",
"[",
"'sample_rate'",
"]",
"=",
"samplefrequency",
"self",
".",
"update_header",
"(",
")"
]
| Sets the samplefrequency of signal edfsignal.
Notes
-----
This function is required for every signal and can be called only after opening a file in writemode and before the first sample write action. | [
"Sets",
"the",
"samplefrequency",
"of",
"signal",
"edfsignal",
"."
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfwriter.py#L421-L432 |
holgern/pyedflib | pyedflib/edfwriter.py | EdfWriter.setPhysicalMaximum | def setPhysicalMaximum(self, edfsignal, physical_maximum):
"""
Sets the physical_maximum of signal edfsignal.
Parameters
----------
edfsignal: int
signal number
physical_maximum: float
Sets the physical maximum
Notes
-----
This function is required for every signal and can be called only after opening a file in writemode and before the first sample write action.
"""
if edfsignal < 0 or edfsignal > self.n_channels:
raise ChannelDoesNotExist(edfsignal)
self.channels[edfsignal]['physical_max'] = physical_maximum
self.update_header() | python | def setPhysicalMaximum(self, edfsignal, physical_maximum):
if edfsignal < 0 or edfsignal > self.n_channels:
raise ChannelDoesNotExist(edfsignal)
self.channels[edfsignal]['physical_max'] = physical_maximum
self.update_header() | [
"def",
"setPhysicalMaximum",
"(",
"self",
",",
"edfsignal",
",",
"physical_maximum",
")",
":",
"if",
"edfsignal",
"<",
"0",
"or",
"edfsignal",
">",
"self",
".",
"n_channels",
":",
"raise",
"ChannelDoesNotExist",
"(",
"edfsignal",
")",
"self",
".",
"channels",
"[",
"edfsignal",
"]",
"[",
"'physical_max'",
"]",
"=",
"physical_maximum",
"self",
".",
"update_header",
"(",
")"
]
| Sets the physical_maximum of signal edfsignal.
Parameters
----------
edfsignal: int
signal number
physical_maximum: float
Sets the physical maximum
Notes
-----
This function is required for every signal and can be called only after opening a file in writemode and before the first sample write action. | [
"Sets",
"the",
"physical_maximum",
"of",
"signal",
"edfsignal",
"."
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfwriter.py#L434-L452 |
holgern/pyedflib | pyedflib/edfwriter.py | EdfWriter.setPhysicalMinimum | def setPhysicalMinimum(self, edfsignal, physical_minimum):
"""
Sets the physical_minimum of signal edfsignal.
Parameters
----------
edfsignal: int
signal number
physical_minimum: float
Sets the physical minimum
Notes
-----
This function is required for every signal and can be called only after opening a file in writemode and before the first sample write action.
"""
if (edfsignal < 0 or edfsignal > self.n_channels):
raise ChannelDoesNotExist(edfsignal)
self.channels[edfsignal]['physical_min'] = physical_minimum
self.update_header() | python | def setPhysicalMinimum(self, edfsignal, physical_minimum):
if (edfsignal < 0 or edfsignal > self.n_channels):
raise ChannelDoesNotExist(edfsignal)
self.channels[edfsignal]['physical_min'] = physical_minimum
self.update_header() | [
"def",
"setPhysicalMinimum",
"(",
"self",
",",
"edfsignal",
",",
"physical_minimum",
")",
":",
"if",
"(",
"edfsignal",
"<",
"0",
"or",
"edfsignal",
">",
"self",
".",
"n_channels",
")",
":",
"raise",
"ChannelDoesNotExist",
"(",
"edfsignal",
")",
"self",
".",
"channels",
"[",
"edfsignal",
"]",
"[",
"'physical_min'",
"]",
"=",
"physical_minimum",
"self",
".",
"update_header",
"(",
")"
]
| Sets the physical_minimum of signal edfsignal.
Parameters
----------
edfsignal: int
signal number
physical_minimum: float
Sets the physical minimum
Notes
-----
This function is required for every signal and can be called only after opening a file in writemode and before the first sample write action. | [
"Sets",
"the",
"physical_minimum",
"of",
"signal",
"edfsignal",
"."
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfwriter.py#L454-L472 |
holgern/pyedflib | pyedflib/edfwriter.py | EdfWriter.setDigitalMaximum | def setDigitalMaximum(self, edfsignal, digital_maximum):
"""
Sets the digital_maximum of signal edfsignal.
Usually, the value 32767 is used for EDF+ and 8388607 for BDF+.
Parameters
----------
edfsignal : int
signal number
digital_maximum : int
Sets the maximum digital value
Notes
-----
This function is optional and can be called only after opening a file in writemode and before the first sample write action.
"""
if (edfsignal < 0 or edfsignal > self.n_channels):
raise ChannelDoesNotExist(edfsignal)
self.channels[edfsignal]['digital_max'] = digital_maximum
self.update_header() | python | def setDigitalMaximum(self, edfsignal, digital_maximum):
if (edfsignal < 0 or edfsignal > self.n_channels):
raise ChannelDoesNotExist(edfsignal)
self.channels[edfsignal]['digital_max'] = digital_maximum
self.update_header() | [
"def",
"setDigitalMaximum",
"(",
"self",
",",
"edfsignal",
",",
"digital_maximum",
")",
":",
"if",
"(",
"edfsignal",
"<",
"0",
"or",
"edfsignal",
">",
"self",
".",
"n_channels",
")",
":",
"raise",
"ChannelDoesNotExist",
"(",
"edfsignal",
")",
"self",
".",
"channels",
"[",
"edfsignal",
"]",
"[",
"'digital_max'",
"]",
"=",
"digital_maximum",
"self",
".",
"update_header",
"(",
")"
]
| Sets the maximum digital value of signal edfsignal.
Usually, the value 32767 is used for EDF+ and 8388607 for BDF+.
Parameters
----------
edfsignal : int
signal number
digital_maximum : int
Sets the maximum digital value
Notes
-----
This function is optional and can be called only after opening a file in writemode and before the first sample write action. | [
"Sets",
"the",
"samplefrequency",
"of",
"signal",
"edfsignal",
".",
"Usually",
"the",
"value",
"32767",
"is",
"used",
"for",
"EDF",
"+",
"and",
"8388607",
"for",
"BDF",
"+",
"."
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfwriter.py#L474-L493 |
holgern/pyedflib | pyedflib/edfwriter.py | EdfWriter.setDigitalMinimum | def setDigitalMinimum(self, edfsignal, digital_minimum):
"""
Sets the minimum digital value of signal edfsignal.
Usually, the value -32768 is used for EDF+ and -8388608 for BDF+. Usually this will be (-(digital_maximum + 1)).
Parameters
----------
edfsignal : int
signal number
digital_minimum : int
Sets the minimum digital value
Notes
-----
This function is optional and can be called only after opening a file in writemode and before the first sample write action.
"""
if (edfsignal < 0 or edfsignal > self.n_channels):
raise ChannelDoesNotExist(edfsignal)
self.channels[edfsignal]['digital_min'] = digital_minimum
self.update_header() | python | def setDigitalMinimum(self, edfsignal, digital_minimum):
if (edfsignal < 0 or edfsignal > self.n_channels):
raise ChannelDoesNotExist(edfsignal)
self.channels[edfsignal]['digital_min'] = digital_minimum
self.update_header() | [
"def",
"setDigitalMinimum",
"(",
"self",
",",
"edfsignal",
",",
"digital_minimum",
")",
":",
"if",
"(",
"edfsignal",
"<",
"0",
"or",
"edfsignal",
">",
"self",
".",
"n_channels",
")",
":",
"raise",
"ChannelDoesNotExist",
"(",
"edfsignal",
")",
"self",
".",
"channels",
"[",
"edfsignal",
"]",
"[",
"'digital_min'",
"]",
"=",
"digital_minimum",
"self",
".",
"update_header",
"(",
")"
]
| Sets the minimum digital value of signal edfsignal.
Usually, the value -32768 is used for EDF+ and -8388608 for BDF+. Usually this will be (-(digital_maximum + 1)).
Parameters
----------
edfsignal : int
signal number
digital_minimum : int
Sets the minimum digital value
Notes
-----
This function is optional and can be called only after opening a file in writemode and before the first sample write action. | [
"Sets",
"the",
"minimum",
"digital",
"value",
"of",
"signal",
"edfsignal",
".",
"Usually",
"the",
"value",
"-",
"32768",
"is",
"used",
"for",
"EDF",
"+",
"and",
"-",
"8388608",
"for",
"BDF",
"+",
".",
"Usually",
"this",
"will",
"be",
"(",
"-",
"(",
"digital_maximum",
"+",
"1",
"))",
"."
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfwriter.py#L495-L514 |
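The digital and physical extrema set by the last four records together define a linear scaling between stored ADC counts and physical units. A sketch of that mapping, assuming the default EDF+ digital extrema mentioned in the docstrings above and an illustrative ±1000 uV physical range:

digital_min, digital_max = -32768, 32767      # EDF+ defaults per the docstrings
physical_min, physical_max = -1000.0, 1000.0  # assumed physical range in uV

def to_physical(d):
    # standard EDF linear mapping from a raw digital value to physical units
    span = physical_max - physical_min
    return physical_min + (d - digital_min) * span / (digital_max - digital_min)

assert to_physical(digital_min) == physical_min   # -1000.0
assert to_physical(digital_max) == physical_max   #  1000.0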
holgern/pyedflib | pyedflib/edfwriter.py | EdfWriter.setLabel | def setLabel(self, edfsignal, label):
"""
Sets the label (name) of signal edfsignal ("FP1", "SaO2", etc.).
Parameters
----------
edfsignal : int
signal number on which the label should be changed
label : str
signal label
Notes
-----
This function is recommended for every signal and can be called only after opening a file in writemode and before the first sample write action.
"""
if (edfsignal < 0 or edfsignal > self.n_channels):
raise ChannelDoesNotExist(edfsignal)
self.channels[edfsignal]['label'] = label
self.update_header() | python | def setLabel(self, edfsignal, label):
if (edfsignal < 0 or edfsignal > self.n_channels):
raise ChannelDoesNotExist(edfsignal)
self.channels[edfsignal]['label'] = label
self.update_header() | [
"def",
"setLabel",
"(",
"self",
",",
"edfsignal",
",",
"label",
")",
":",
"if",
"(",
"edfsignal",
"<",
"0",
"or",
"edfsignal",
">",
"self",
".",
"n_channels",
")",
":",
"raise",
"ChannelDoesNotExist",
"(",
"edfsignal",
")",
"self",
".",
"channels",
"[",
"edfsignal",
"]",
"[",
"'label'",
"]",
"=",
"label",
"self",
".",
"update_header",
"(",
")"
]
| Sets the label (name) of signal edfsignal ("FP1", "SaO2", etc.).
Parameters
----------
edfsignal : int
signal number on which the label should be changed
label : str
signal label
Notes
-----
This function is recommended for every signal and can be called only after opening a file in writemode and before the first sample write action. | [
"Sets",
"the",
"label",
"(",
"name",
")",
"of",
"signal",
"edfsignal",
"(",
"FP1",
"SaO2",
"etc",
".",
")",
"."
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfwriter.py#L516-L534 |
holgern/pyedflib | pyedflib/edfwriter.py | EdfWriter.setPhysicalDimension | def setPhysicalDimension(self, edfsignal, physical_dimension):
"""
Sets the physical dimension of signal edfsignal ("uV", "BPM", "mA", "Degr.", etc.)
:param edfsignal: int
:param physical_dimension: str
Notes
-----
This function is recommended for every signal and can be called only after opening a file in writemode and before the first sample write action.
"""
if edfsignal < 0 or edfsignal > self.n_channels:
raise ChannelDoesNotExist(edfsignal)
self.channels[edfsignal]['dimension'] = physical_dimension
self.update_header() | python | def setPhysicalDimension(self, edfsignal, physical_dimension):
if edfsignal < 0 or edfsignal > self.n_channels:
raise ChannelDoesNotExist(edfsignal)
self.channels[edfsignal]['dimension'] = physical_dimension
self.update_header() | [
"def",
"setPhysicalDimension",
"(",
"self",
",",
"edfsignal",
",",
"physical_dimension",
")",
":",
"if",
"edfsignal",
"<",
"0",
"or",
"edfsignal",
">",
"self",
".",
"n_channels",
":",
"raise",
"ChannelDoesNotExist",
"(",
"edfsignal",
")",
"self",
".",
"channels",
"[",
"edfsignal",
"]",
"[",
"'dimension'",
"]",
"=",
"physical_dimension",
"self",
".",
"update_header",
"(",
")"
]
| Sets the physical dimension of signal edfsignal ("uV", "BPM", "mA", "Degr.", etc.)
:param edfsignal: int
:param physical_dimension: str
Notes
-----
This function is recommended for every signal and can be called only after opening a file in writemode and before the first sample write action. | [
"Sets",
"the",
"physical",
"dimension",
"of",
"signal",
"edfsignal",
"(",
"uV",
"BPM",
"mA",
"Degr",
".",
"etc",
".",
")"
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfwriter.py#L536-L550 |
holgern/pyedflib | pyedflib/edfwriter.py | EdfWriter.setTransducer | def setTransducer(self, edfsignal, transducer):
"""
Sets the transducer of signal edfsignal
:param edfsignal: int
:param transducer: str
Notes
-----
This function is optional for every signal and can be called only after opening a file in writemode and before the first sample write action.
"""
if (edfsignal < 0 or edfsignal > self.n_channels):
raise ChannelDoesNotExist(edfsignal)
self.channels[edfsignal]['transducer'] = transducer
self.update_header() | python | def setTransducer(self, edfsignal, transducer):
if (edfsignal < 0 or edfsignal > self.n_channels):
raise ChannelDoesNotExist(edfsignal)
self.channels[edfsignal]['transducer'] = transducer
self.update_header() | [
"def",
"setTransducer",
"(",
"self",
",",
"edfsignal",
",",
"transducer",
")",
":",
"if",
"(",
"edfsignal",
"<",
"0",
"or",
"edfsignal",
">",
"self",
".",
"n_channels",
")",
":",
"raise",
"ChannelDoesNotExist",
"(",
"edfsignal",
")",
"self",
".",
"channels",
"[",
"edfsignal",
"]",
"[",
"'transducer'",
"]",
"=",
"transducer",
"self",
".",
"update_header",
"(",
")"
]
| Sets the transducer of signal edfsignal
:param edfsignal: int
:param transducer: str
Notes
-----
This function is optional for every signal and can be called only after opening a file in writemode and before the first sample write action. | [
"Sets",
"the",
"transducer",
"of",
"signal",
"edfsignal"
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfwriter.py#L552-L566 |
holgern/pyedflib | pyedflib/edfwriter.py | EdfWriter.setPrefilter | def setPrefilter(self, edfsignal, prefilter):
"""
Sets the prefilter of signal edfsignal ("HP:0.1Hz", "LP:75Hz N:50Hz", etc.)
:param edfsignal: int
:param prefilter: str
Notes
-----
This function is optional for every signal and can be called only after opening a file in writemode and before the first sample write action.
"""
if edfsignal < 0 or edfsignal > self.n_channels:
raise ChannelDoesNotExist(edfsignal)
self.channels[edfsignal]['prefilter'] = prefilter
self.update_header() | python | def setPrefilter(self, edfsignal, prefilter):
if edfsignal < 0 or edfsignal > self.n_channels:
raise ChannelDoesNotExist(edfsignal)
self.channels[edfsignal]['prefilter'] = prefilter
self.update_header() | [
"def",
"setPrefilter",
"(",
"self",
",",
"edfsignal",
",",
"prefilter",
")",
":",
"if",
"edfsignal",
"<",
"0",
"or",
"edfsignal",
">",
"self",
".",
"n_channels",
":",
"raise",
"ChannelDoesNotExist",
"(",
"edfsignal",
")",
"self",
".",
"channels",
"[",
"edfsignal",
"]",
"[",
"'prefilter'",
"]",
"=",
"prefilter",
"self",
".",
"update_header",
"(",
")"
]
| Sets the prefilter of signal edfsignal ("HP:0.1Hz", "LP:75Hz N:50Hz", etc.)
:param edfsignal: int
:param prefilter: str
Notes
-----
This function is optional for every signal and can be called only after opening a file in writemode and before the first sample write action. | [
"Sets",
"the",
"prefilter",
"of",
"signal",
"edfsignal",
"(",
"HP",
":",
"0",
".",
"1Hz",
"LP",
":",
"75Hz",
"N",
":",
"50Hz",
"etc",
".",
")"
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfwriter.py#L568-L582 |
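The per-channel text setters from the preceding records are typically chained before the first sample write. A sketch; the file name and texts are illustrative, and setSamplefrequency is assumed to exist on the same writer API even though it is not among the records shown here:

import pyedflib

w = pyedflib.EdfWriter('example.edf', 1, file_type=pyedflib.FILETYPE_EDFPLUS)
w.setLabel(0, 'FP1')                          # signal name
w.setPhysicalDimension(0, 'uV')               # unit string
w.setTransducer(0, 'AgAgCl cup electrodes')   # sensor description
w.setPrefilter(0, 'HP:0.1Hz LP:75Hz N:50Hz')  # filter description
w.setSamplefrequency(0, 256)                  # assumed setter, not in these records
# ... physical/digital extrema as in the earlier sketches, then sample writes ...
w.close()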
holgern/pyedflib | pyedflib/edfwriter.py | EdfWriter.writeSamples | def writeSamples(self, data_list, digital = False):
"""
Writes physical samples (uV, mA, Ohm) from data belonging to all signals
The physical samples will be converted to digital samples using the values
of physical maximum, physical minimum, digital maximum and digital minimum.
If the samplefrequency of all signals is equal, then the data could be
saved into a matrix with the size (N, signals). If the samplefrequencies
are different, the data has to be passed as a list. Each list entry contains
a vector with the data of one signal.
If digital is True, digital signals (as directly from the ADC) will be expected.
(e.g. int16 from 0 to 2048)
All parameters must already be written into the bdf/edf-file.
"""
if (len(data_list) != len(self.channels)):
raise WrongInputSize(len(data_list))
if digital:
if any([not np.issubdtype(a.dtype, np.integer) for a in data_list]):
raise TypeError('Digital = True requires all signals in int')
ind = []
notAtEnd = True
for i in np.arange(len(data_list)):
ind.append(0)
sampleLength = 0
sampleRates = np.zeros(len(data_list), dtype=np.int)
for i in np.arange(len(data_list)):
sampleRates[i] = self.channels[i]['sample_rate']
if (np.size(data_list[i]) < ind[i] + self.channels[i]['sample_rate']):
notAtEnd = False
sampleLength += self.channels[i]['sample_rate']
dataOfOneSecond = np.array([], dtype=np.int if digital else None)
while notAtEnd:
# dataOfOneSecondInd = 0
del dataOfOneSecond
dataOfOneSecond = np.array([], dtype=np.int if digital else None)
for i in np.arange(len(data_list)):
# dataOfOneSecond[dataOfOneSecondInd:dataOfOneSecondInd+self.channels[i]['sample_rate']] = data_list[i].ravel()[int(ind[i]):int(ind[i]+self.channels[i]['sample_rate'])]
dataOfOneSecond = np.append(dataOfOneSecond,data_list[i].ravel()[int(ind[i]):int(ind[i]+sampleRates[i])])
# self.writePhysicalSamples(data_list[i].ravel()[int(ind[i]):int(ind[i]+self.channels[i]['sample_rate'])])
ind[i] += sampleRates[i]
# dataOfOneSecondInd += sampleRates[i]
if digital:
self.blockWriteDigitalSamples(dataOfOneSecond)
else:
self.blockWritePhysicalSamples(dataOfOneSecond)
for i in np.arange(len(data_list)):
if (np.size(data_list[i]) < ind[i] + sampleRates[i]):
notAtEnd = False
# dataOfOneSecondInd = 0
for i in np.arange(len(data_list)):
lastSamples = np.zeros(sampleRates[i], dtype=np.int if digital else None)
lastSampleInd = int(np.max(data_list[i].shape) - ind[i])
lastSampleInd = int(np.min((lastSampleInd,sampleRates[i])))
if lastSampleInd > 0:
lastSamples[:lastSampleInd] = data_list[i].ravel()[-lastSampleInd:]
# dataOfOneSecond[dataOfOneSecondInd:dataOfOneSecondInd+self.channels[i]['sample_rate']] = lastSamples
# dataOfOneSecondInd += self.channels[i]['sample_rate']
if digital:
self.writeDigitalSamples(lastSamples)
else:
self.writePhysicalSamples(lastSamples) | python | def writeSamples(self, data_list, digital = False):
if (len(data_list) != len(self.channels)):
raise WrongInputSize(len(data_list))
if digital:
if any([not np.issubdtype(a.dtype, np.integer) for a in data_list]):
raise TypeError('Digital = True requires all signals in int')
ind = []
notAtEnd = True
for i in np.arange(len(data_list)):
ind.append(0)
sampleLength = 0
sampleRates = np.zeros(len(data_list), dtype=np.int)
for i in np.arange(len(data_list)):
sampleRates[i] = self.channels[i]['sample_rate']
if (np.size(data_list[i]) < ind[i] + self.channels[i]['sample_rate']):
notAtEnd = False
sampleLength += self.channels[i]['sample_rate']
dataOfOneSecond = np.array([], dtype=np.int if digital else None)
while notAtEnd:
del dataOfOneSecond
dataOfOneSecond = np.array([], dtype=np.int if digital else None)
for i in np.arange(len(data_list)):
dataOfOneSecond = np.append(dataOfOneSecond,data_list[i].ravel()[int(ind[i]):int(ind[i]+sampleRates[i])])
ind[i] += sampleRates[i]
if digital:
self.blockWriteDigitalSamples(dataOfOneSecond)
else:
self.blockWritePhysicalSamples(dataOfOneSecond)
for i in np.arange(len(data_list)):
if (np.size(data_list[i]) < ind[i] + sampleRates[i]):
notAtEnd = False
for i in np.arange(len(data_list)):
lastSamples = np.zeros(sampleRates[i], dtype=np.int if digital else None)
lastSampleInd = int(np.max(data_list[i].shape) - ind[i])
lastSampleInd = int(np.min((lastSampleInd,sampleRates[i])))
if lastSampleInd > 0:
lastSamples[:lastSampleInd] = data_list[i].ravel()[-lastSampleInd:]
if digital:
self.writeDigitalSamples(lastSamples)
else:
self.writePhysicalSamples(lastSamples) | [
"def",
"writeSamples",
"(",
"self",
",",
"data_list",
",",
"digital",
"=",
"False",
")",
":",
"if",
"(",
"len",
"(",
"data_list",
")",
"!=",
"len",
"(",
"self",
".",
"channels",
")",
")",
":",
"raise",
"WrongInputSize",
"(",
"len",
"(",
"data_list",
")",
")",
"if",
"digital",
":",
"if",
"any",
"(",
"[",
"not",
"np",
".",
"issubdtype",
"(",
"a",
".",
"dtype",
",",
"np",
".",
"integer",
")",
"for",
"a",
"in",
"data_list",
"]",
")",
":",
"raise",
"TypeError",
"(",
"'Digital = True requires all signals in int'",
")",
"ind",
"=",
"[",
"]",
"notAtEnd",
"=",
"True",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"len",
"(",
"data_list",
")",
")",
":",
"ind",
".",
"append",
"(",
"0",
")",
"sampleLength",
"=",
"0",
"sampleRates",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"data_list",
")",
",",
"dtype",
"=",
"np",
".",
"int",
")",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"len",
"(",
"data_list",
")",
")",
":",
"sampleRates",
"[",
"i",
"]",
"=",
"self",
".",
"channels",
"[",
"i",
"]",
"[",
"'sample_rate'",
"]",
"if",
"(",
"np",
".",
"size",
"(",
"data_list",
"[",
"i",
"]",
")",
"<",
"ind",
"[",
"i",
"]",
"+",
"self",
".",
"channels",
"[",
"i",
"]",
"[",
"'sample_rate'",
"]",
")",
":",
"notAtEnd",
"=",
"False",
"sampleLength",
"+=",
"self",
".",
"channels",
"[",
"i",
"]",
"[",
"'sample_rate'",
"]",
"dataOfOneSecond",
"=",
"np",
".",
"array",
"(",
"[",
"]",
",",
"dtype",
"=",
"np",
".",
"int",
"if",
"digital",
"else",
"None",
")",
"while",
"notAtEnd",
":",
"# dataOfOneSecondInd = 0",
"del",
"dataOfOneSecond",
"dataOfOneSecond",
"=",
"np",
".",
"array",
"(",
"[",
"]",
",",
"dtype",
"=",
"np",
".",
"int",
"if",
"digital",
"else",
"None",
")",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"len",
"(",
"data_list",
")",
")",
":",
"# dataOfOneSecond[dataOfOneSecondInd:dataOfOneSecondInd+self.channels[i]['sample_rate']] = data_list[i].ravel()[int(ind[i]):int(ind[i]+self.channels[i]['sample_rate'])]",
"dataOfOneSecond",
"=",
"np",
".",
"append",
"(",
"dataOfOneSecond",
",",
"data_list",
"[",
"i",
"]",
".",
"ravel",
"(",
")",
"[",
"int",
"(",
"ind",
"[",
"i",
"]",
")",
":",
"int",
"(",
"ind",
"[",
"i",
"]",
"+",
"sampleRates",
"[",
"i",
"]",
")",
"]",
")",
"# self.writePhysicalSamples(data_list[i].ravel()[int(ind[i]):int(ind[i]+self.channels[i]['sample_rate'])])",
"ind",
"[",
"i",
"]",
"+=",
"sampleRates",
"[",
"i",
"]",
"# dataOfOneSecondInd += sampleRates[i]",
"if",
"digital",
":",
"self",
".",
"blockWriteDigitalSamples",
"(",
"dataOfOneSecond",
")",
"else",
":",
"self",
".",
"blockWritePhysicalSamples",
"(",
"dataOfOneSecond",
")",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"len",
"(",
"data_list",
")",
")",
":",
"if",
"(",
"np",
".",
"size",
"(",
"data_list",
"[",
"i",
"]",
")",
"<",
"ind",
"[",
"i",
"]",
"+",
"sampleRates",
"[",
"i",
"]",
")",
":",
"notAtEnd",
"=",
"False",
"# dataOfOneSecondInd = 0",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"len",
"(",
"data_list",
")",
")",
":",
"lastSamples",
"=",
"np",
".",
"zeros",
"(",
"sampleRates",
"[",
"i",
"]",
",",
"dtype",
"=",
"np",
".",
"int",
"if",
"digital",
"else",
"None",
")",
"lastSampleInd",
"=",
"int",
"(",
"np",
".",
"max",
"(",
"data_list",
"[",
"i",
"]",
".",
"shape",
")",
"-",
"ind",
"[",
"i",
"]",
")",
"lastSampleInd",
"=",
"int",
"(",
"np",
".",
"min",
"(",
"(",
"lastSampleInd",
",",
"sampleRates",
"[",
"i",
"]",
")",
")",
")",
"if",
"lastSampleInd",
">",
"0",
":",
"lastSamples",
"[",
":",
"lastSampleInd",
"]",
"=",
"data_list",
"[",
"i",
"]",
".",
"ravel",
"(",
")",
"[",
"-",
"lastSampleInd",
":",
"]",
"# dataOfOneSecond[dataOfOneSecondInd:dataOfOneSecondInd+self.channels[i]['sample_rate']] = lastSamples",
"# dataOfOneSecondInd += self.channels[i]['sample_rate']",
"if",
"digital",
":",
"self",
".",
"writeDigitalSamples",
"(",
"lastSamples",
")",
"else",
":",
"self",
".",
"writePhysicalSamples",
"(",
"lastSamples",
")"
]
| Writes physical samples (uV, mA, Ohm) from data belonging to all signals
The physical samples will be converted to digital samples using the values
of physical maximum, physical minimum, digital maximum and digital minimum.
If the samplefrequency of all signals is equal, then the data could be
saved into a matrix with the size (N, signals). If the samplefrequencies
are different, the data has to be passed as a list. Each list entry contains
a vector with the data of one signal.
If digital is True, digital signals (as directly from the ADC) will be expected.
(e.g. int16 from 0 to 2048)
All parameters must already be written into the bdf/edf-file.
"Writes",
"physical",
"samples",
"(",
"uV",
"mA",
"Ohm",
")",
"from",
"data",
"belonging",
"to",
"all",
"signals",
"The",
"physical",
"samples",
"will",
"be",
"converted",
"to",
"digital",
"samples",
"using",
"the",
"values",
"of",
"physical",
"maximum",
"physical",
"minimum",
"digital",
"maximum",
"and",
"digital",
"minimum",
".",
"if",
"the",
"samplefrequency",
"of",
"all",
"signals",
"are",
"equal",
"then",
"the",
"data",
"could",
"be",
"saved",
"into",
"a",
"matrix",
"with",
"the",
"size",
"(",
"N",
"signals",
")",
"If",
"the",
"samplefrequency",
"is",
"different",
"then",
"sample_freq",
"is",
"a",
"vector",
"containing",
"all",
"the",
"different",
"samplefrequencys",
".",
"The",
"data",
"is",
"saved",
"as",
"list",
".",
"Each",
"list",
"entry",
"contains",
"a",
"vector",
"with",
"the",
"data",
"of",
"one",
"signal",
".",
"If",
"digital",
"is",
"True",
"digital",
"signals",
"(",
"as",
"directly",
"from",
"the",
"ADC",
")",
"will",
"be",
"expected",
".",
"(",
"e",
".",
"g",
".",
"int16",
"from",
"0",
"to",
"2048",
")"
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfwriter.py#L639-L711 |
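A sketch of writeSamples with two channels at different sample rates, passing one vector per channel as the docstring describes. The file name, the header values, and the setSamplefrequency call are illustrative assumptions:

import numpy as np
import pyedflib

w = pyedflib.EdfWriter('example.edf', 2, file_type=pyedflib.FILETYPE_EDFPLUS)
for ch, (label, fs) in enumerate([('sine 1 Hz', 200), ('ramp', 100)]):
    w.setLabel(ch, label)
    w.setSamplefrequency(ch, fs)          # assumed setter, not in these records
    w.setPhysicalMaximum(ch, 1.0)
    w.setPhysicalMinimum(ch, -1.0)
    w.setDigitalMaximum(ch, 32767)
    w.setDigitalMinimum(ch, -32768)
t = np.arange(0, 10, 1.0 / 200)            # 10 s at 200 Hz
data = [np.sin(2 * np.pi * t),             # channel 0: 2000 samples at 200 Hz
        np.linspace(-1.0, 1.0, 10 * 100)]  # channel 1: 1000 samples at 100 Hz
w.writeSamples(data)   # physical floats; digital=True would require int arrays
w.close()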
holgern/pyedflib | pyedflib/edfwriter.py | EdfWriter.writeAnnotation | def writeAnnotation(self, onset_in_seconds, duration_in_seconds, description, str_format='utf-8'):
"""
Writes an annotation/event to the file
"""
if str_format == 'utf-8':
if duration_in_seconds >= 0:
return write_annotation_utf8(self.handle, np.round(onset_in_seconds*10000).astype(int), np.round(duration_in_seconds*10000).astype(int), du(description))
else:
return write_annotation_utf8(self.handle, np.round(onset_in_seconds*10000).astype(int), -1, du(description))
else:
if duration_in_seconds >= 0:
return write_annotation_latin1(self.handle, np.round(onset_in_seconds*10000).astype(int), np.round(duration_in_seconds*10000).astype(int), u(description).encode('latin1'))
else:
return write_annotation_latin1(self.handle, np.round(onset_in_seconds*10000).astype(int), -1, u(description).encode('latin1')) | python | def writeAnnotation(self, onset_in_seconds, duration_in_seconds, description, str_format='utf-8'):
if str_format == 'utf-8':
if duration_in_seconds >= 0:
return write_annotation_utf8(self.handle, np.round(onset_in_seconds*10000).astype(int), np.round(duration_in_seconds*10000).astype(int), du(description))
else:
return write_annotation_utf8(self.handle, np.round(onset_in_seconds*10000).astype(int), -1, du(description))
else:
if duration_in_seconds >= 0:
return write_annotation_latin1(self.handle, np.round(onset_in_seconds*10000).astype(int), np.round(duration_in_seconds*10000).astype(int), u(description).encode('latin1'))
else:
return write_annotation_latin1(self.handle, np.round(onset_in_seconds*10000).astype(int), -1, u(description).encode('latin1')) | [
"def",
"writeAnnotation",
"(",
"self",
",",
"onset_in_seconds",
",",
"duration_in_seconds",
",",
"description",
",",
"str_format",
"=",
"'utf-8'",
")",
":",
"if",
"str_format",
"==",
"'utf-8'",
":",
"if",
"duration_in_seconds",
">=",
"0",
":",
"return",
"write_annotation_utf8",
"(",
"self",
".",
"handle",
",",
"np",
".",
"round",
"(",
"onset_in_seconds",
"*",
"10000",
")",
".",
"astype",
"(",
"int",
")",
",",
"np",
".",
"round",
"(",
"duration_in_seconds",
"*",
"10000",
")",
".",
"astype",
"(",
"int",
")",
",",
"du",
"(",
"description",
")",
")",
"else",
":",
"return",
"write_annotation_utf8",
"(",
"self",
".",
"handle",
",",
"np",
".",
"round",
"(",
"onset_in_seconds",
"*",
"10000",
")",
".",
"astype",
"(",
"int",
")",
",",
"-",
"1",
",",
"du",
"(",
"description",
")",
")",
"else",
":",
"if",
"duration_in_seconds",
">=",
"0",
":",
"return",
"write_annotation_latin1",
"(",
"self",
".",
"handle",
",",
"np",
".",
"round",
"(",
"onset_in_seconds",
"*",
"10000",
")",
".",
"astype",
"(",
"int",
")",
",",
"np",
".",
"round",
"(",
"duration_in_seconds",
"*",
"10000",
")",
".",
"astype",
"(",
"int",
")",
",",
"u",
"(",
"description",
")",
".",
"encode",
"(",
"'latin1'",
")",
")",
"else",
":",
"return",
"write_annotation_latin1",
"(",
"self",
".",
"handle",
",",
"np",
".",
"round",
"(",
"onset_in_seconds",
"*",
"10000",
")",
".",
"astype",
"(",
"int",
")",
",",
"-",
"1",
",",
"u",
"(",
"description",
")",
".",
"encode",
"(",
"'latin1'",
")",
")"
]
| Writes an annotation/event to the file | [
"Writes",
"an",
"annotation",
"/",
"event",
"to",
"the",
"file"
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfwriter.py#L714-L727 |
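A sketch of the two annotation paths above. Per the code, the onset and duration given in seconds are scaled by 10000 (units of 100 microseconds) before reaching the C layer, and a negative duration is stored as -1, meaning no duration. The file name is illustrative, and channel setup is assumed to happen as in the earlier sketches:

import pyedflib

w = pyedflib.EdfWriter('example.edf', 1, file_type=pyedflib.FILETYPE_EDFPLUS)
# ... channel headers and sample writes as in the earlier sketches ...
w.writeAnnotation(1.25, 0.5, 'stimulus onset')      # 0.5 s event at t = 1.25 s
w.writeAnnotation(5.0, -1, 'instantaneous marker')  # negative duration: none stored
w.close()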
holgern/pyedflib | pyedflib/edfreader.py | EdfReader.readAnnotations | def readAnnotations(self):
"""
Annotations from an edf-file
Parameters
----------
None
"""
annot = self.read_annotation()
annot = np.array(annot)
if (annot.shape[0] == 0):
return np.array([]), np.array([]), np.array([])
ann_time = self._get_float(annot[:, 0])
ann_text = annot[:, 2]
ann_text_out = ["" for x in range(len(annot[:, 1]))]
for i in np.arange(len(annot[:, 1])):
ann_text_out[i] = self._convert_string(ann_text[i])
if annot[i, 1] == '':
annot[i, 1] = '-1'
ann_duration = self._get_float(annot[:, 1])
return ann_time/10000000, ann_duration, np.array(ann_text_out) | python | def readAnnotations(self):
annot = self.read_annotation()
annot = np.array(annot)
if (annot.shape[0] == 0):
return np.array([]), np.array([]), np.array([])
ann_time = self._get_float(annot[:, 0])
ann_text = annot[:, 2]
ann_text_out = ["" for x in range(len(annot[:, 1]))]
for i in np.arange(len(annot[:, 1])):
ann_text_out[i] = self._convert_string(ann_text[i])
if annot[i, 1] == '':
annot[i, 1] = '-1'
ann_duration = self._get_float(annot[:, 1])
return ann_time/10000000, ann_duration, np.array(ann_text_out) | [
"def",
"readAnnotations",
"(",
"self",
")",
":",
"annot",
"=",
"self",
".",
"read_annotation",
"(",
")",
"annot",
"=",
"np",
".",
"array",
"(",
"annot",
")",
"if",
"(",
"annot",
".",
"shape",
"[",
"0",
"]",
"==",
"0",
")",
":",
"return",
"np",
".",
"array",
"(",
"[",
"]",
")",
",",
"np",
".",
"array",
"(",
"[",
"]",
")",
",",
"np",
".",
"array",
"(",
"[",
"]",
")",
"ann_time",
"=",
"self",
".",
"_get_float",
"(",
"annot",
"[",
":",
",",
"0",
"]",
")",
"ann_text",
"=",
"annot",
"[",
":",
",",
"2",
"]",
"ann_text_out",
"=",
"[",
"\"\"",
"for",
"x",
"in",
"range",
"(",
"len",
"(",
"annot",
"[",
":",
",",
"1",
"]",
")",
")",
"]",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"len",
"(",
"annot",
"[",
":",
",",
"1",
"]",
")",
")",
":",
"ann_text_out",
"[",
"i",
"]",
"=",
"self",
".",
"_convert_string",
"(",
"ann_text",
"[",
"i",
"]",
")",
"if",
"annot",
"[",
"i",
",",
"1",
"]",
"==",
"''",
":",
"annot",
"[",
"i",
",",
"1",
"]",
"=",
"'-1'",
"ann_duration",
"=",
"self",
".",
"_get_float",
"(",
"annot",
"[",
":",
",",
"1",
"]",
")",
"return",
"ann_time",
"/",
"10000000",
",",
"ann_duration",
",",
"np",
".",
"array",
"(",
"ann_text_out",
")"
]
| Annotations from an edf-file
Parameters
----------
None | [
"Annotations",
"from",
"a",
"edf",
"-",
"file"
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfreader.py#L44-L64 |
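A sketch of consuming the three parallel arrays returned by readAnnotations, using the bundled test file that the doctests in the surrounding records rely on:

import pyedflib

f = pyedflib.data.test_generator()
onsets, durations, descriptions = f.readAnnotations()
for onset, duration, text in zip(onsets, durations, descriptions):
    print('%8.3f s  %6.3f s  %s' % (onset, duration, text))
f._close()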
holgern/pyedflib | pyedflib/edfreader.py | EdfReader.getHeader | def getHeader(self):
"""
Returns the file header as dict
Parameters
----------
None
"""
return {"technician": self.getTechnician(), "recording_additional": self.getRecordingAdditional(),
"patientname": self.getPatientName(), "patient_additional": self.getPatientAdditional(),
"patientcode": self.getPatientCode(), "equipment": self.getEquipment(),
"admincode": self.getAdmincode(), "gender": self.getGender(), "startdate": self.getStartdatetime(),
"birthdate": self.getBirthdate()} | python | def getHeader(self):
return {"technician": self.getTechnician(), "recording_additional": self.getRecordingAdditional(),
"patientname": self.getPatientName(), "patient_additional": self.getPatientAdditional(),
"patientcode": self.getPatientCode(), "equipment": self.getEquipment(),
"admincode": self.getAdmincode(), "gender": self.getGender(), "startdate": self.getStartdatetime(),
"birthdate": self.getBirthdate()} | [
"def",
"getHeader",
"(",
"self",
")",
":",
"return",
"{",
"\"technician\"",
":",
"self",
".",
"getTechnician",
"(",
")",
",",
"\"recording_additional\"",
":",
"self",
".",
"getRecordingAdditional",
"(",
")",
",",
"\"patientname\"",
":",
"self",
".",
"getPatientName",
"(",
")",
",",
"\"patient_additional\"",
":",
"self",
".",
"getPatientAdditional",
"(",
")",
",",
"\"patientcode\"",
":",
"self",
".",
"getPatientCode",
"(",
")",
",",
"\"equipment\"",
":",
"self",
".",
"getEquipment",
"(",
")",
",",
"\"admincode\"",
":",
"self",
".",
"getAdmincode",
"(",
")",
",",
"\"gender\"",
":",
"self",
".",
"getGender",
"(",
")",
",",
"\"startdate\"",
":",
"self",
".",
"getStartdatetime",
"(",
")",
",",
"\"birthdate\"",
":",
"self",
".",
"getBirthdate",
"(",
")",
"}"
]
| Returns the file header as dict
Parameters
----------
None | [
"Returns",
"the",
"file",
"header",
"as",
"dict"
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfreader.py#L92-L104 |
holgern/pyedflib | pyedflib/edfreader.py | EdfReader.getSignalHeader | def getSignalHeader(self, chn):
"""
Returns the header of one signal as dicts
Parameters
----------
None
"""
return {'label': self.getLabel(chn),
'dimension': self.getPhysicalDimension(chn),
'sample_rate': self.getSampleFrequency(chn),
'physical_max':self.getPhysicalMaximum(chn),
'physical_min': self.getPhysicalMinimum(chn),
'digital_max': self.getDigitalMaximum(chn),
'digital_min': self.getDigitalMinimum(chn),
'prefilter':self.getPrefilter(chn),
'transducer': self.getTransducer(chn)} | python | def getSignalHeader(self, chn):
return {'label': self.getLabel(chn),
'dimension': self.getPhysicalDimension(chn),
'sample_rate': self.getSampleFrequency(chn),
'physical_max':self.getPhysicalMaximum(chn),
'physical_min': self.getPhysicalMinimum(chn),
'digital_max': self.getDigitalMaximum(chn),
'digital_min': self.getDigitalMinimum(chn),
'prefilter':self.getPrefilter(chn),
'transducer': self.getTransducer(chn)} | [
"def",
"getSignalHeader",
"(",
"self",
",",
"chn",
")",
":",
"return",
"{",
"'label'",
":",
"self",
".",
"getLabel",
"(",
"chn",
")",
",",
"'dimension'",
":",
"self",
".",
"getPhysicalDimension",
"(",
"chn",
")",
",",
"'sample_rate'",
":",
"self",
".",
"getSampleFrequency",
"(",
"chn",
")",
",",
"'physical_max'",
":",
"self",
".",
"getPhysicalMaximum",
"(",
"chn",
")",
",",
"'physical_min'",
":",
"self",
".",
"getPhysicalMinimum",
"(",
"chn",
")",
",",
"'digital_max'",
":",
"self",
".",
"getDigitalMaximum",
"(",
"chn",
")",
",",
"'digital_min'",
":",
"self",
".",
"getDigitalMinimum",
"(",
"chn",
")",
",",
"'prefilter'",
":",
"self",
".",
"getPrefilter",
"(",
"chn",
")",
",",
"'transducer'",
":",
"self",
".",
"getTransducer",
"(",
"chn",
")",
"}"
]
| Returns the header of one signal as dicts
Parameters
----------
chn : int
channel number
"Returns",
"the",
"header",
"of",
"one",
"signal",
"as",
"dicts"
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfreader.py#L106-L122 |
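A sketch combining getHeader and getSignalHeader to inspect the file-level and channel-level metadata of the bundled test file:

import pyedflib

f = pyedflib.data.test_generator()
print(f.getHeader()['startdate'])        # datetime(2011, 4, 4, 12, 57, 2)
for chn in range(f.signals_in_file):
    hdr = f.getSignalHeader(chn)
    print(hdr['label'], hdr['sample_rate'], hdr['dimension'])
f._close()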
holgern/pyedflib | pyedflib/edfreader.py | EdfReader.getSignalHeaders | def getSignalHeaders(self):
"""
Returns the header of all signals as array of dicts
Parameters
----------
None
"""
signalHeader = []
for chn in np.arange(self.signals_in_file):
signalHeader.append(self.getSignalHeader(chn))
return signalHeader | python | def getSignalHeaders(self):
signalHeader = []
for chn in np.arange(self.signals_in_file):
signalHeader.append(self.getSignalHeader(chn))
return signalHeader | [
"def",
"getSignalHeaders",
"(",
"self",
")",
":",
"signalHeader",
"=",
"[",
"]",
"for",
"chn",
"in",
"np",
".",
"arange",
"(",
"self",
".",
"signals_in_file",
")",
":",
"signalHeader",
".",
"append",
"(",
"self",
".",
"getSignalHeader",
"(",
"chn",
")",
")",
"return",
"signalHeader"
]
| Returns the header of all signals as array of dicts
Parameters
----------
None | [
"Returns",
"the",
"header",
"of",
"all",
"signals",
"as",
"array",
"of",
"dicts"
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfreader.py#L124-L135 |
holgern/pyedflib | pyedflib/edfreader.py | EdfReader.getStartdatetime | def getStartdatetime(self):
"""
Returns the date and starttime as datetime object
Parameters
----------
None
Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> f.getStartdatetime()
datetime.datetime(2011, 4, 4, 12, 57, 2)
>>> f._close()
>>> del f
"""
return datetime(self.startdate_year, self.startdate_month, self.startdate_day,
self.starttime_hour, self.starttime_minute, self.starttime_second) | python | def getStartdatetime(self):
return datetime(self.startdate_year, self.startdate_month, self.startdate_day,
self.starttime_hour, self.starttime_minute, self.starttime_second) | [
"def",
"getStartdatetime",
"(",
"self",
")",
":",
"return",
"datetime",
"(",
"self",
".",
"startdate_year",
",",
"self",
".",
"startdate_month",
",",
"self",
".",
"startdate_day",
",",
"self",
".",
"starttime_hour",
",",
"self",
".",
"starttime_minute",
",",
"self",
".",
"starttime_second",
")"
]
| Returns the date and starttime as datetime object
Parameters
----------
None
Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> f.getStartdatetime()
datetime.datetime(2011, 4, 4, 12, 57, 2)
>>> f._close()
>>> del f | [
"Returns",
"the",
"date",
"and",
"starttime",
"as",
"datetime",
"object"
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfreader.py#L317-L336 |
holgern/pyedflib | pyedflib/edfreader.py | EdfReader.getBirthdate | def getBirthdate(self, string=True):
"""
Returns the birthdate as string object
Parameters
----------
None
Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> f.getBirthdate()=='30 jun 1969'
True
>>> f._close()
>>> del f
"""
if string:
return self._convert_string(self.birthdate.rstrip())
else:
return datetime.strptime(self._convert_string(self.birthdate.rstrip()), "%d %b %Y") | python | def getBirthdate(self, string=True):
if string:
return self._convert_string(self.birthdate.rstrip())
else:
return datetime.strptime(self._convert_string(self.birthdate.rstrip()), "%d %b %Y") | [
"def",
"getBirthdate",
"(",
"self",
",",
"string",
"=",
"True",
")",
":",
"if",
"string",
":",
"return",
"self",
".",
"_convert_string",
"(",
"self",
".",
"birthdate",
".",
"rstrip",
"(",
")",
")",
"else",
":",
"return",
"datetime",
".",
"strptime",
"(",
"self",
".",
"_convert_string",
"(",
"self",
".",
"birthdate",
".",
"rstrip",
"(",
")",
")",
",",
"\"%d %b %Y\"",
")"
]
| Returns the birthdate as string object
Parameters
----------
string : bool
if True (default), the birthdate is returned as a string, otherwise as a datetime object
Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> f.getBirthdate()=='30 jun 1969'
True
>>> f._close()
>>> del f | [
"Returns",
"the",
"birthdate",
"as",
"string",
"object"
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfreader.py#L338-L360 |
holgern/pyedflib | pyedflib/edfreader.py | EdfReader.getSampleFrequencies | def getSampleFrequencies(self):
"""
Returns samplefrequencies of all signals.
Parameters
----------
None
Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> all(f.getSampleFrequencies()==200.0)
True
>>> f._close()
>>> del f
"""
return np.array([round(self.samplefrequency(chn))
for chn in np.arange(self.signals_in_file)]) | python | def getSampleFrequencies(self):
return np.array([round(self.samplefrequency(chn))
for chn in np.arange(self.signals_in_file)]) | [
"def",
"getSampleFrequencies",
"(",
"self",
")",
":",
"return",
"np",
".",
"array",
"(",
"[",
"round",
"(",
"self",
".",
"samplefrequency",
"(",
"chn",
")",
")",
"for",
"chn",
"in",
"np",
".",
"arange",
"(",
"self",
".",
"signals_in_file",
")",
"]",
")"
]
| Returns samplefrequencies of all signals.
Parameters
----------
None
Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> all(f.getSampleFrequencies()==200.0)
True
>>> f._close()
>>> del f | [
"Returns",
"samplefrequencies",
"of",
"all",
"signals",
"."
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfreader.py#L362-L381 |
holgern/pyedflib | pyedflib/edfreader.py | EdfReader.getSampleFrequency | def getSampleFrequency(self,chn):
"""
Returns the samplefrequency of signal edfsignal.
Parameters
----------
chn : int
channel number
Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> f.getSampleFrequency(0)==200.0
True
>>> f._close()
>>> del f
"""
if 0 <= chn < self.signals_in_file:
return round(self.samplefrequency(chn))
else:
return 0 | python | def getSampleFrequency(self,chn):
if 0 <= chn < self.signals_in_file:
return round(self.samplefrequency(chn))
else:
return 0 | [
"def",
"getSampleFrequency",
"(",
"self",
",",
"chn",
")",
":",
"if",
"0",
"<=",
"chn",
"<",
"self",
".",
"signals_in_file",
":",
"return",
"round",
"(",
"self",
".",
"samplefrequency",
"(",
"chn",
")",
")",
"else",
":",
"return",
"0"
]
| Returns the samplefrequency of signal edfsignal.
Parameters
----------
chn : int
channel number
Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> f.getSampleFrequency(0)==200.0
True
>>> f._close()
>>> del f | [
"Returns",
"the",
"samplefrequency",
"of",
"signal",
"edfsignal",
"."
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfreader.py#L383-L405 |
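A sketch deriving a channel's duration from its sample frequency and sample count; getNSamples belongs to the same reader API and also appears inside the readSignal record further below:

import pyedflib

f = pyedflib.data.test_generator()
chn = 0
fs = f.getSampleFrequency(chn)               # 200.0 for the test file
n = f.getNSamples()[chn]                     # 120000 samples for the test file
print('duration: %.1f s' % (n / float(fs)))  # 600.0 s
f._close()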
holgern/pyedflib | pyedflib/edfreader.py | EdfReader.getSignalLabels | def getSignalLabels(self):
"""
Returns all labels (name) ("FP1", "SaO2", etc.).
Parameters
----------
None
Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> f.getSignalLabels()==['squarewave', 'ramp', 'pulse', 'noise', 'sine 1 Hz', 'sine 8 Hz', 'sine 8.1777 Hz', 'sine 8.5 Hz', 'sine 15 Hz', 'sine 17 Hz', 'sine 50 Hz']
True
>>> f._close()
>>> del f
"""
return [self._convert_string(self.signal_label(chn).strip())
for chn in np.arange(self.signals_in_file)] | python | def getSignalLabels(self):
return [self._convert_string(self.signal_label(chn).strip())
for chn in np.arange(self.signals_in_file)] | [
"def",
"getSignalLabels",
"(",
"self",
")",
":",
"return",
"[",
"self",
".",
"_convert_string",
"(",
"self",
".",
"signal_label",
"(",
"chn",
")",
".",
"strip",
"(",
")",
")",
"for",
"chn",
"in",
"np",
".",
"arange",
"(",
"self",
".",
"signals_in_file",
")",
"]"
]
| Returns all labels (name) ("FP1", "SaO2", etc.).
Parameters
----------
None
Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> f.getSignalLabels()==['squarewave', 'ramp', 'pulse', 'noise', 'sine 1 Hz', 'sine 8 Hz', 'sine 8.1777 Hz', 'sine 8.5 Hz', 'sine 15 Hz', 'sine 17 Hz', 'sine 50 Hz']
True
>>> f._close()
>>> del f | [
"Returns",
"all",
"labels",
"(",
"name",
")",
"(",
"FP1",
"SaO2",
"etc",
".",
")",
"."
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfreader.py#L407-L426 |
holgern/pyedflib | pyedflib/edfreader.py | EdfReader.getLabel | def getLabel(self,chn):
"""
Returns the label (name) of signal chn ("FP1", "SaO2", etc.).
Parameters
----------
chn : int
channel number
Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> f.getLabel(0)=='squarewave'
True
>>> f._close()
>>> del f
"""
if 0 <= chn < self.signals_in_file:
return self._convert_string(self.signal_label(chn).rstrip())
else:
return self._convert_string('') | python | def getLabel(self,chn):
if 0 <= chn < self.signals_in_file:
return self._convert_string(self.signal_label(chn).rstrip())
else:
return self._convert_string('') | [
"def",
"getLabel",
"(",
"self",
",",
"chn",
")",
":",
"if",
"0",
"<=",
"chn",
"<",
"self",
".",
"signals_in_file",
":",
"return",
"self",
".",
"_convert_string",
"(",
"self",
".",
"signal_label",
"(",
"chn",
")",
".",
"rstrip",
"(",
")",
")",
"else",
":",
"return",
"self",
".",
"_convert_string",
"(",
"''",
")"
]
| Returns the label (name) of signal chn ("FP1", "SaO2", etc.).
Parameters
----------
chn : int
channel number
Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> f.getLabel(0)=='squarewave'
True
>>> f._close()
>>> del f | [
"Returns",
"the",
"label",
"(",
"name",
")",
"of",
"signal",
"chn",
"(",
"FP1",
"SaO2",
"etc",
".",
")",
"."
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfreader.py#L428-L450 |
holgern/pyedflib | pyedflib/edfreader.py | EdfReader.getPrefilter | def getPrefilter(self,chn):
"""
Returns the prefilter of signal chn ("HP:0.1Hz", "LP:75Hz N:50Hz", etc.)
Parameters
----------
chn : int
channel number
Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> f.getPrefilter(0)==''
True
>>> f._close()
>>> del f
"""
if 0 <= chn < self.signals_in_file:
return self._convert_string(self.prefilter(chn).rstrip())
else:
return self._convert_string('') | python | def getPrefilter(self,chn):
if 0 <= chn < self.signals_in_file:
return self._convert_string(self.prefilter(chn).rstrip())
else:
return self._convert_string('') | [
"def",
"getPrefilter",
"(",
"self",
",",
"chn",
")",
":",
"if",
"0",
"<=",
"chn",
"<",
"self",
".",
"signals_in_file",
":",
"return",
"self",
".",
"_convert_string",
"(",
"self",
".",
"prefilter",
"(",
"chn",
")",
".",
"rstrip",
"(",
")",
")",
"else",
":",
"return",
"self",
".",
"_convert_string",
"(",
"''",
")"
]
| Returns the prefilter of signal chn ("HP:0.1Hz", "LP:75Hz N:50Hz", etc.)
Parameters
----------
chn : int
channel number
Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> f.getPrefilter(0)==''
True
>>> f._close()
>>> del f | [
"Returns",
"the",
"prefilter",
"of",
"signal",
"chn",
"(",
"HP",
":",
"0",
".",
"1Hz",
"LP",
":",
"75Hz",
"N",
":",
"50Hz",
"etc",
".",
")"
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfreader.py#L452-L474 |
holgern/pyedflib | pyedflib/edfreader.py | EdfReader.getPhysicalMaximum | def getPhysicalMaximum(self,chn=None):
"""
Returns the maximum physical value of signal edfsignal.
Parameters
----------
chn : int
channel number
Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> f.getPhysicalMaximum(0)==1000.0
True
>>> f._close()
>>> del f
"""
if chn is not None:
if 0 <= chn < self.signals_in_file:
return self.physical_max(chn)
else:
return 0
else:
physMax = np.zeros(self.signals_in_file)
for i in np.arange(self.signals_in_file):
physMax[i] = self.physical_max(i)
return physMax | python | def getPhysicalMaximum(self,chn=None):
if chn is not None:
if 0 <= chn < self.signals_in_file:
return self.physical_max(chn)
else:
return 0
else:
physMax = np.zeros(self.signals_in_file)
for i in np.arange(self.signals_in_file):
physMax[i] = self.physical_max(i)
return physMax | [
"def",
"getPhysicalMaximum",
"(",
"self",
",",
"chn",
"=",
"None",
")",
":",
"if",
"chn",
"is",
"not",
"None",
":",
"if",
"0",
"<=",
"chn",
"<",
"self",
".",
"signals_in_file",
":",
"return",
"self",
".",
"physical_max",
"(",
"chn",
")",
"else",
":",
"return",
"0",
"else",
":",
"physMax",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"signals_in_file",
")",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"self",
".",
"signals_in_file",
")",
":",
"physMax",
"[",
"i",
"]",
"=",
"self",
".",
"physical_max",
"(",
"i",
")",
"return",
"physMax"
]
| Returns the maximum physical value of signal edfsignal.
Parameters
----------
chn : int
channel number
Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> f.getPhysicalMaximum(0)==1000.0
True
>>> f._close()
>>> del f | [
"Returns",
"the",
"maximum",
"physical",
"value",
"of",
"signal",
"edfsignal",
"."
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfreader.py#L476-L504 |
holgern/pyedflib | pyedflib/edfreader.py | EdfReader.getPhysicalMinimum | def getPhysicalMinimum(self,chn=None):
"""
Returns the minimum physical value of signal edfsignal.
Parameters
----------
chn : int
channel number
Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> f.getPhysicalMinimum(0)==-1000.0
True
>>> f._close()
>>> del f
"""
if chn is not None:
if 0 <= chn < self.signals_in_file:
return self.physical_min(chn)
else:
return 0
else:
physMin = np.zeros(self.signals_in_file)
for i in np.arange(self.signals_in_file):
physMin[i] = self.physical_min(i)
return physMin | python | def getPhysicalMinimum(self,chn=None):
if chn is not None:
if 0 <= chn < self.signals_in_file:
return self.physical_min(chn)
else:
return 0
else:
physMin = np.zeros(self.signals_in_file)
for i in np.arange(self.signals_in_file):
physMin[i] = self.physical_min(i)
return physMin | [
"def",
"getPhysicalMinimum",
"(",
"self",
",",
"chn",
"=",
"None",
")",
":",
"if",
"chn",
"is",
"not",
"None",
":",
"if",
"0",
"<=",
"chn",
"<",
"self",
".",
"signals_in_file",
":",
"return",
"self",
".",
"physical_min",
"(",
"chn",
")",
"else",
":",
"return",
"0",
"else",
":",
"physMin",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"signals_in_file",
")",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"self",
".",
"signals_in_file",
")",
":",
"physMin",
"[",
"i",
"]",
"=",
"self",
".",
"physical_min",
"(",
"i",
")",
"return",
"physMin"
]
| Returns the minimum physical value of signal edfsignal.
Parameters
----------
chn : int
channel number
Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> f.getPhysicalMinimum(0)==-1000.0
True
>>> f._close()
>>> del f | [
"Returns",
"the",
"minimum",
"physical",
"value",
"of",
"signal",
"edfsignal",
"."
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfreader.py#L506-L534 |
holgern/pyedflib | pyedflib/edfreader.py | EdfReader.getDigitalMaximum | def getDigitalMaximum(self, chn=None):
"""
Returns the maximum digital value of signal edfsignal.
Parameters
----------
chn : int
channel number
Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> f.getDigitalMaximum(0)
32767
>>> f._close()
>>> del f
"""
if chn is not None:
if 0 <= chn < self.signals_in_file:
return self.digital_max(chn)
else:
return 0
else:
digMax = np.zeros(self.signals_in_file)
for i in np.arange(self.signals_in_file):
digMax[i] = self.digital_max(i)
return digMax | python | def getDigitalMaximum(self, chn=None):
if chn is not None:
if 0 <= chn < self.signals_in_file:
return self.digital_max(chn)
else:
return 0
else:
digMax = np.zeros(self.signals_in_file)
for i in np.arange(self.signals_in_file):
digMax[i] = self.digital_max(i)
return digMax | [
"def",
"getDigitalMaximum",
"(",
"self",
",",
"chn",
"=",
"None",
")",
":",
"if",
"chn",
"is",
"not",
"None",
":",
"if",
"0",
"<=",
"chn",
"<",
"self",
".",
"signals_in_file",
":",
"return",
"self",
".",
"digital_max",
"(",
"chn",
")",
"else",
":",
"return",
"0",
"else",
":",
"digMax",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"signals_in_file",
")",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"self",
".",
"signals_in_file",
")",
":",
"digMax",
"[",
"i",
"]",
"=",
"self",
".",
"digital_max",
"(",
"i",
")",
"return",
"digMax"
]
| Returns the maximum digital value of signal edfsignal.
Parameters
----------
chn : int
channel number
Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> f.getDigitalMaximum(0)
32767
>>> f._close()
>>> del f | [
"Returns",
"the",
"maximum",
"digital",
"value",
"of",
"signal",
"edfsignal",
"."
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfreader.py#L536-L564 |
holgern/pyedflib | pyedflib/edfreader.py | EdfReader.getDigitalMinimum | def getDigitalMinimum(self, chn=None):
"""
Returns the minimum digital value of signal edfsignal.
Parameters
----------
chn : int
channel number
Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> f.getDigitalMinimum(0)
-32768
>>> f._close()
>>> del f
"""
if chn is not None:
if 0 <= chn < self.signals_in_file:
return self.digital_min(chn)
else:
return 0
else:
digMin = np.zeros(self.signals_in_file)
for i in np.arange(self.signals_in_file):
digMin[i] = self.digital_min(i)
return digMin | python | def getDigitalMinimum(self, chn=None):
if chn is not None:
if 0 <= chn < self.signals_in_file:
return self.digital_min(chn)
else:
return 0
else:
digMin = np.zeros(self.signals_in_file)
for i in np.arange(self.signals_in_file):
digMin[i] = self.digital_min(i)
return digMin | [
"def",
"getDigitalMinimum",
"(",
"self",
",",
"chn",
"=",
"None",
")",
":",
"if",
"chn",
"is",
"not",
"None",
":",
"if",
"0",
"<=",
"chn",
"<",
"self",
".",
"signals_in_file",
":",
"return",
"self",
".",
"digital_min",
"(",
"chn",
")",
"else",
":",
"return",
"0",
"else",
":",
"digMin",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"signals_in_file",
")",
"for",
"i",
"in",
"np",
".",
"arange",
"(",
"self",
".",
"signals_in_file",
")",
":",
"digMin",
"[",
"i",
"]",
"=",
"self",
".",
"digital_min",
"(",
"i",
")",
"return",
"digMin"
]
| Returns the minimum digital value of signal edfsignal.
Parameters
----------
chn : int
channel number
Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> f.getDigitalMinimum(0)
-32768
>>> f._close()
>>> del f | [
"Returns",
"the",
"minimum",
"digital",
"value",
"of",
"signal",
"edfsignal",
"."
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfreader.py#L566-L594 |
holgern/pyedflib | pyedflib/edfreader.py | EdfReader.getTransducer | def getTransducer(self, chn):
"""
Returns the transducer of signal chn ("AgAgCl cup electrodes", etc.).
Parameters
----------
chn : int
channel number
Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> f.getTransducer(0)==''
True
>>> f._close()
>>> del f
"""
if 0 <= chn < self.signals_in_file:
return self._convert_string(self.transducer(chn).rstrip())
else:
return self._convert_string('') | python | def getTransducer(self, chn):
if 0 <= chn < self.signals_in_file:
return self._convert_string(self.transducer(chn).rstrip())
else:
return self._convert_string('') | [
"def",
"getTransducer",
"(",
"self",
",",
"chn",
")",
":",
"if",
"0",
"<=",
"chn",
"<",
"self",
".",
"signals_in_file",
":",
"return",
"self",
".",
"_convert_string",
"(",
"self",
".",
"transducer",
"(",
"chn",
")",
".",
"rstrip",
"(",
")",
")",
"else",
":",
"return",
"self",
".",
"_convert_string",
"(",
"''",
")"
]
| Returns the transducer of signal chn ("AgAgCl cup electrodes", etc.).
Parameters
----------
chn : int
channel number
Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> f.getTransducer(0)==''
True
>>> f._close()
>>> del f | [
"Returns",
"the",
"transducer",
"of",
"signal",
"chn",
"(",
"AgAgCl",
"cup",
"electrodes",
"etc",
".",
")",
"."
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfreader.py#L596-L618 |
holgern/pyedflib | pyedflib/edfreader.py | EdfReader.getPhysicalDimension | def getPhysicalDimension(self, chn):
"""
Returns the physical dimension of signal edfsignal ("uV", "BPM", "mA", "Degr.", etc.)
Parameters
----------
chn : int
channel number
Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> f.getPhysicalDimension(0)=='uV'
True
>>> f._close()
>>> del f
"""
if 0 <= chn < self.signals_in_file:
return self._convert_string(self.physical_dimension(chn).rstrip())
else:
return self._convert_string('') | python | def getPhysicalDimension(self, chn):
if 0 <= chn < self.signals_in_file:
return self._convert_string(self.physical_dimension(chn).rstrip())
else:
return self._convert_string('') | [
"def",
"getPhysicalDimension",
"(",
"self",
",",
"chn",
")",
":",
"if",
"0",
"<=",
"chn",
"<",
"self",
".",
"signals_in_file",
":",
"return",
"self",
".",
"_convert_string",
"(",
"self",
".",
"physical_dimension",
"(",
"chn",
")",
".",
"rstrip",
"(",
")",
")",
"else",
":",
"return",
"self",
".",
"_convert_string",
"(",
"''",
")"
]
| Returns the physical dimension of signal edfsignal ("uV", "BPM", "mA", "Degr.", etc.)
Parameters
----------
chn : int
channel number
Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> f.getPhysicalDimension(0)=='uV'
True
>>> f._close()
>>> del f | [
"Returns",
"the",
"physical",
"dimension",
"of",
"signal",
"edfsignal",
"(",
"uV",
"BPM",
"mA",
"Degr",
".",
"etc",
".",
")"
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfreader.py#L620-L642 |
holgern/pyedflib | pyedflib/edfreader.py | EdfReader.readSignal | def readSignal(self, chn, start=0, n=None):
"""
Returns the physical data of signal chn. When start and n are set, a subset is returned
Parameters
----------
chn : int
channel number
start : int
start pointer (default is 0)
n : int
length of data to read (default is None, in which case the complete data of the channel are returned)
Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> x = f.readSignal(0,0,1000)
>>> int(x.shape[0])
1000
>>> x2 = f.readSignal(0)
>>> int(x2.shape[0])
120000
>>> f._close()
>>> del f
"""
if start < 0:
return np.array([])
if n is not None and n < 0:
return np.array([])
nsamples = self.getNSamples()
if chn < len(nsamples):
if n is None:
n = nsamples[chn]
elif n > nsamples[chn]:
return np.array([])
x = np.zeros(n, dtype=np.float64)
self.readsignal(chn, start, n, x)
return x
else:
return np.array([]) | python | def readSignal(self, chn, start=0, n=None):
if start < 0:
return np.array([])
if n is not None and n < 0:
return np.array([])
nsamples = self.getNSamples()
if chn < len(nsamples):
if n is None:
n = nsamples[chn]
elif n > nsamples[chn]:
return np.array([])
x = np.zeros(n, dtype=np.float64)
self.readsignal(chn, start, n, x)
return x
else:
return np.array([]) | [
"def",
"readSignal",
"(",
"self",
",",
"chn",
",",
"start",
"=",
"0",
",",
"n",
"=",
"None",
")",
":",
"if",
"start",
"<",
"0",
":",
"return",
"np",
".",
"array",
"(",
"[",
"]",
")",
"if",
"n",
"is",
"not",
"None",
"and",
"n",
"<",
"0",
":",
"return",
"np",
".",
"array",
"(",
"[",
"]",
")",
"nsamples",
"=",
"self",
".",
"getNSamples",
"(",
")",
"if",
"chn",
"<",
"len",
"(",
"nsamples",
")",
":",
"if",
"n",
"is",
"None",
":",
"n",
"=",
"nsamples",
"[",
"chn",
"]",
"elif",
"n",
">",
"nsamples",
"[",
"chn",
"]",
":",
"return",
"np",
".",
"array",
"(",
"[",
"]",
")",
"x",
"=",
"np",
".",
"zeros",
"(",
"n",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"self",
".",
"readsignal",
"(",
"chn",
",",
"start",
",",
"n",
",",
"x",
")",
"return",
"x",
"else",
":",
"return",
"np",
".",
"array",
"(",
"[",
"]",
")"
]
| Returns the physical data of signal chn. When start and n are set, a subset is returned
Parameters
----------
chn : int
channel number
start : int
start pointer (default is 0)
n : int
length of data to read (default is None, in which case the complete data of the channel are returned)
Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> x = f.readSignal(0,0,1000)
>>> int(x.shape[0])
1000
>>> x2 = f.readSignal(0)
>>> int(x2.shape[0])
120000
>>> f._close()
>>> del f | [
"Returns",
"the",
"physical",
"data",
"of",
"signal",
"chn",
".",
"When",
"start",
"and",
"n",
"is",
"set",
"a",
"subset",
"is",
"returned"
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfreader.py#L644-L685 |
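A sketch of a windowed read: the start pointer and length are given in samples, so second-based offsets must be converted with the channel's sample frequency:

import pyedflib

f = pyedflib.data.test_generator()
chn = 0
fs = int(f.getSampleFrequency(chn))   # 200 Hz for the test file
start, n = 10 * fs, 5 * fs            # a 5 s window starting at t = 10 s
window = f.readSignal(chn, start, n)
print(window.shape)                   # (1000,)
f._close()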
holgern/pyedflib | pyedflib/edfreader.py | EdfReader.file_info_long | def file_info_long(self):
"""
Returns information about the opened EDF/BDF file
"""
self.file_info()
for ii in np.arange(self.signals_in_file):
print("label:", self.getSignalLabels()[ii], "fs:",
self.getSampleFrequencies()[ii], "nsamples",
self.getNSamples()[ii]) | python | def file_info_long(self):
self.file_info()
for ii in np.arange(self.signals_in_file):
print("label:", self.getSignalLabels()[ii], "fs:",
self.getSampleFrequencies()[ii], "nsamples",
self.getNSamples()[ii]) | [
"def",
"file_info_long",
"(",
"self",
")",
":",
"self",
".",
"file_info",
"(",
")",
"for",
"ii",
"in",
"np",
".",
"arange",
"(",
"self",
".",
"signals_in_file",
")",
":",
"print",
"(",
"\"label:\"",
",",
"self",
".",
"getSignalLabels",
"(",
")",
"[",
"ii",
"]",
",",
"\"fs:\"",
",",
"self",
".",
"getSampleFrequencies",
"(",
")",
"[",
"ii",
"]",
",",
"\"nsamples\"",
",",
"self",
".",
"getNSamples",
"(",
")",
"[",
"ii",
"]",
")"
]
| Returns information about the opened EDF/BDF file | [
"Returns",
"information",
"about",
"the",
"opened",
"EDF",
"/",
"BDF",
"file"
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfreader.py#L691-L699 |
holgern/pyedflib | demo/stacklineplot.py | stackplot | def stackplot(marray, seconds=None, start_time=None, ylabels=None):
"""
will plot a stack of traces one above the other assuming
marray.shape = numRows, numSamples
"""
tarray = np.transpose(marray)
stackplot_t(tarray, seconds=seconds, start_time=start_time, ylabels=ylabels)
plt.show() | python | def stackplot(marray, seconds=None, start_time=None, ylabels=None):
tarray = np.transpose(marray)
stackplot_t(tarray, seconds=seconds, start_time=start_time, ylabels=ylabels)
plt.show() | [
"def",
"stackplot",
"(",
"marray",
",",
"seconds",
"=",
"None",
",",
"start_time",
"=",
"None",
",",
"ylabels",
"=",
"None",
")",
":",
"tarray",
"=",
"np",
".",
"transpose",
"(",
"marray",
")",
"stackplot_t",
"(",
"tarray",
",",
"seconds",
"=",
"seconds",
",",
"start_time",
"=",
"start_time",
",",
"ylabels",
"=",
"ylabels",
")",
"plt",
".",
"show",
"(",
")"
]
| will plot a stack of traces one above the other assuming
marray.shape = numRows, numSamples | [
"will",
"plot",
"a",
"stack",
"of",
"traces",
"one",
"above",
"the",
"other",
"assuming",
"marray",
".",
"shape",
"=",
"numRows",
"numSamples"
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/demo/stacklineplot.py#L10-L17 |
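stackplot expects channels-by-samples input, transposes it, delegates to stackplot_t, and calls plt.show() itself. A sketch with synthetic data (the channel labels are illustrative):

import numpy as np

data = np.random.randn(4, 1000)   # numRows=4 channels, numSamples=1000
stackplot(data, seconds=4.0, start_time=0.0,
          ylabels=['ch%d' % i for i in range(4)])   # opens a matplotlib window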
holgern/pyedflib | demo/stacklineplot.py | stackplot_t | def stackplot_t(tarray, seconds=None, start_time=None, ylabels=None):
"""
will plot a stack of traces one above the other assuming
tarray.shape = numSamples, numRows
"""
data = tarray
numSamples, numRows = tarray.shape
# data = np.random.randn(numSamples,numRows) # test data
# data.shape = numSamples, numRows
if seconds:
t = seconds * np.arange(numSamples, dtype=float)/numSamples
# import pdb
# pdb.set_trace()
if start_time:
t = t+start_time
xlm = (start_time, start_time+seconds)
else:
xlm = (0,seconds)
else:
t = np.arange(numSamples, dtype=float)
xlm = (0,numSamples)
ticklocs = []
ax = plt.subplot(111)
plt.xlim(*xlm)
# xticks(np.linspace(xlm, 10))
dmin = data.min()
dmax = data.max()
dr = (dmax - dmin)*0.7 # Crowd them a bit.
y0 = dmin
y1 = (numRows-1) * dr + dmax
plt.ylim(y0, y1)
segs = []
for i in range(numRows):
segs.append(np.hstack((t[:,np.newaxis], data[:,i,np.newaxis])))
# print "segs[-1].shape:", segs[-1].shape
ticklocs.append(i*dr)
offsets = np.zeros((numRows,2), dtype=float)
offsets[:,1] = ticklocs
lines = LineCollection(segs, offsets=offsets,
transOffset=None,
)
ax.add_collection(lines)
# set the yticks to use axes coords on the y axis
ax.set_yticks(ticklocs)
# ax.set_yticklabels(['PG3', 'PG5', 'PG7', 'PG9'])
if not ylabels:
ylabels = ["%d" % ii for ii in range(numRows)]
ax.set_yticklabels(ylabels)
plt.xlabel('time (s)') | python | def stackplot_t(tarray, seconds=None, start_time=None, ylabels=None):
data = tarray
numSamples, numRows = tarray.shape
if seconds:
t = seconds * np.arange(numSamples, dtype=float)/numSamples
if start_time:
t = t+start_time
xlm = (start_time, start_time+seconds)
else:
xlm = (0,seconds)
else:
t = np.arange(numSamples, dtype=float)
xlm = (0,numSamples)
ticklocs = []
ax = plt.subplot(111)
plt.xlim(*xlm)
dmin = data.min()
dmax = data.max()
dr = (dmax - dmin)*0.7
y0 = dmin
y1 = (numRows-1) * dr + dmax
plt.ylim(y0, y1)
segs = []
for i in range(numRows):
segs.append(np.hstack((t[:,np.newaxis], data[:,i,np.newaxis])))
ticklocs.append(i*dr)
offsets = np.zeros((numRows,2), dtype=float)
offsets[:,1] = ticklocs
lines = LineCollection(segs, offsets=offsets,
transOffset=None,
)
ax.add_collection(lines)
ax.set_yticks(ticklocs)
if not ylabels:
ylabels = ["%d" % ii for ii in range(numRows)]
ax.set_yticklabels(ylabels)
plt.xlabel('time (s)') | [
"def",
"stackplot_t",
"(",
"tarray",
",",
"seconds",
"=",
"None",
",",
"start_time",
"=",
"None",
",",
"ylabels",
"=",
"None",
")",
":",
"data",
"=",
"tarray",
"numSamples",
",",
"numRows",
"=",
"tarray",
".",
"shape",
"# data = np.random.randn(numSamples,numRows) # test data",
"# data.shape = numSamples, numRows",
"if",
"seconds",
":",
"t",
"=",
"seconds",
"*",
"np",
".",
"arange",
"(",
"numSamples",
",",
"dtype",
"=",
"float",
")",
"/",
"numSamples",
"# import pdb",
"# pdb.set_trace()",
"if",
"start_time",
":",
"t",
"=",
"t",
"+",
"start_time",
"xlm",
"=",
"(",
"start_time",
",",
"start_time",
"+",
"seconds",
")",
"else",
":",
"xlm",
"=",
"(",
"0",
",",
"seconds",
")",
"else",
":",
"t",
"=",
"np",
".",
"arange",
"(",
"numSamples",
",",
"dtype",
"=",
"float",
")",
"xlm",
"=",
"(",
"0",
",",
"numSamples",
")",
"ticklocs",
"=",
"[",
"]",
"ax",
"=",
"plt",
".",
"subplot",
"(",
"111",
")",
"plt",
".",
"xlim",
"(",
"*",
"xlm",
")",
"# xticks(np.linspace(xlm, 10))",
"dmin",
"=",
"data",
".",
"min",
"(",
")",
"dmax",
"=",
"data",
".",
"max",
"(",
")",
"dr",
"=",
"(",
"dmax",
"-",
"dmin",
")",
"*",
"0.7",
"# Crowd them a bit.",
"y0",
"=",
"dmin",
"y1",
"=",
"(",
"numRows",
"-",
"1",
")",
"*",
"dr",
"+",
"dmax",
"plt",
".",
"ylim",
"(",
"y0",
",",
"y1",
")",
"segs",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"numRows",
")",
":",
"segs",
".",
"append",
"(",
"np",
".",
"hstack",
"(",
"(",
"t",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
",",
"data",
"[",
":",
",",
"i",
",",
"np",
".",
"newaxis",
"]",
")",
")",
")",
"# print \"segs[-1].shape:\", segs[-1].shape",
"ticklocs",
".",
"append",
"(",
"i",
"*",
"dr",
")",
"offsets",
"=",
"np",
".",
"zeros",
"(",
"(",
"numRows",
",",
"2",
")",
",",
"dtype",
"=",
"float",
")",
"offsets",
"[",
":",
",",
"1",
"]",
"=",
"ticklocs",
"lines",
"=",
"LineCollection",
"(",
"segs",
",",
"offsets",
"=",
"offsets",
",",
"transOffset",
"=",
"None",
",",
")",
"ax",
".",
"add_collection",
"(",
"lines",
")",
"# set the yticks to use axes coords on the y axis",
"ax",
".",
"set_yticks",
"(",
"ticklocs",
")",
"# ax.set_yticklabels(['PG3', 'PG5', 'PG7', 'PG9'])",
"# if not plt.ylabels:",
"plt",
".",
"ylabels",
"=",
"[",
"\"%d\"",
"%",
"ii",
"for",
"ii",
"in",
"range",
"(",
"numRows",
")",
"]",
"ax",
".",
"set_yticklabels",
"(",
"ylabels",
")",
"plt",
".",
"xlabel",
"(",
"'time (s)'",
")"
]
| will plot a stack of traces one above the other assuming
tarray.shape = numSamples, numRows | [
"will",
"plot",
"a",
"stack",
"of",
"traces",
"one",
"above",
"the",
"other",
"assuming",
"tarray",
".",
"shape",
"=",
"numSamples",
"numRows"
]
| train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/demo/stacklineplot.py#L20-L76 |
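stackplot_t takes samples-by-channels input and spaces the traces by dr = 0.7 * (dmax - dmin), drawing them as a single LineCollection with per-trace vertical offsets. Unlike stackplot, it does not call plt.show(), so the caller must. Sketch:

import numpy as np
import matplotlib.pyplot as plt

tdata = np.random.randn(1000, 4)   # numSamples=1000, numRows=4
stackplot_t(tdata, seconds=4.0, ylabels=['PG3', 'PG5', 'PG7', 'PG9'])
plt.show()   # stackplot_t leaves showing the figure to the caller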
ansibleplaybookbundle/ansible-playbook-bundle | src/apb/cli.py | subcmd_list_parser | def subcmd_list_parser(subcmd):
""" list subcommand """
subcmd.add_argument(
'--broker',
action='store',
dest='broker',
help=u'Route to the Ansible Service Broker'
)
subcmd.add_argument(
'--secure',
action='store_true',
dest='verify',
help=u'Verify SSL connection to Ansible Service Broker',
default=False
)
subcmd.add_argument(
'--ca-path',
action='store',
dest='cert',
help=u'CA cert to use for verifying SSL connection to Ansible Service Broker',
default=None
)
subcmd.add_argument(
'--verbose',
'-v',
action='store_true',
dest='verbose',
help=u'Output verbose spec information from Ansible Service Broker',
default=False
)
subcmd.add_argument(
'--output',
'-o',
action='store',
dest='output',
help=u'Specify verbose output format in yaml (default) or json',
default='optional',
choices=['yaml', 'json']
)
subcmd.add_argument(
'--username',
'-u',
action='store',
default=None,
dest='basic_auth_username',
help=u'Specify the basic auth username to be used'
)
subcmd.add_argument(
'--password',
'-p',
action='store',
default=None,
dest='basic_auth_password',
help=u'Specify the basic auth password to be used'
)
return | python | def subcmd_list_parser(subcmd):
subcmd.add_argument(
'--broker',
action='store',
dest='broker',
help=u'Route to the Ansible Service Broker'
)
subcmd.add_argument(
'--secure',
action='store_true',
dest='verify',
help=u'Verify SSL connection to Ansible Service Broker',
default=False
)
subcmd.add_argument(
'--ca-path',
action='store',
dest='cert',
help=u'CA cert to use for verifying SSL connection to Ansible Service Broker',
default=None
)
subcmd.add_argument(
'--verbose',
'-v',
action='store_true',
dest='verbose',
help=u'Output verbose spec information from Ansible Service Broker',
default=False
)
subcmd.add_argument(
'--output',
'-o',
action='store',
dest='output',
help=u'Specify verbose output format in yaml (default) or json',
default='optional',
choices=['yaml', 'json']
)
subcmd.add_argument(
'--username',
'-u',
action='store',
default=None,
dest='basic_auth_username',
help=u'Specify the basic auth username to be used'
)
subcmd.add_argument(
'--password',
'-p',
action='store',
default=None,
dest='basic_auth_password',
help=u'Specify the basic auth password to be used'
)
return | [
"def",
"subcmd_list_parser",
"(",
"subcmd",
")",
":",
"subcmd",
".",
"add_argument",
"(",
"'--broker'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'broker'",
",",
"help",
"=",
"u'Route to the Ansible Service Broker'",
")",
"subcmd",
".",
"add_argument",
"(",
"'--secure'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'verify'",
",",
"help",
"=",
"u'Verify SSL connection to Ansible Service Broker'",
",",
"default",
"=",
"False",
")",
"subcmd",
".",
"add_argument",
"(",
"'--ca-path'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'cert'",
",",
"help",
"=",
"u'CA cert to use for verifying SSL connection to Ansible Service Broker'",
",",
"default",
"=",
"None",
")",
"subcmd",
".",
"add_argument",
"(",
"'--verbose'",
",",
"'-v'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'verbose'",
",",
"help",
"=",
"u'Output verbose spec information from Ansible Service Broker'",
",",
"default",
"=",
"False",
")",
"subcmd",
".",
"add_argument",
"(",
"'--output'",
",",
"'-o'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'output'",
",",
"help",
"=",
"u'Specify verbose output format in yaml (default) or json'",
",",
"default",
"=",
"'optional'",
",",
"choices",
"=",
"[",
"'yaml'",
",",
"'json'",
"]",
")",
"subcmd",
".",
"add_argument",
"(",
"'--username'",
",",
"'-u'",
",",
"action",
"=",
"'store'",
",",
"default",
"=",
"None",
",",
"dest",
"=",
"'basic_auth_username'",
",",
"help",
"=",
"u'Specify the basic auth username to be used'",
")",
"subcmd",
".",
"add_argument",
"(",
"'--password'",
",",
"'-p'",
",",
"action",
"=",
"'store'",
",",
"default",
"=",
"None",
",",
"dest",
"=",
"'basic_auth_password'",
",",
"help",
"=",
"u'Specify the basic auth password to be used'",
")",
"return"
]
| list subcommand | [
"list",
"subcommand"
]
| train | https://github.com/ansibleplaybookbundle/ansible-playbook-bundle/blob/585694be9b417f1a88354cbfe286bfd68c2c9494/src/apb/cli.py#L30-L85 |
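Each subcmd_*_parser function in this module only attaches flags to a subparser; the wiring to argparse happens in main() further down. A standalone sketch of that wiring for the list subcommand (the help text here is illustrative):

import argparse

parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='subcommand')
subcmd_list_parser(subparsers.add_parser('list', help='List APBs in the broker'))

args = parser.parse_args(['list', '--broker', 'http://broker.example.com', '-v'])
print(args.broker, args.verbose)   # -> http://broker.example.com True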
ansibleplaybookbundle/ansible-playbook-bundle | src/apb/cli.py | subcmd_build_parser | def subcmd_build_parser(subcmd):
""" build subcommand """
subcmd.add_argument(
'--tag',
action='store',
dest='tag',
help=u'Tag of APB to build (i.e. mysql-apb or docker.io/username/mysql-apb)'
)
subcmd.add_argument(
'--dockerfile',
'-f',
action='store',
dest='dockerfile',
help=u'Name of Dockerfile to build with'
)
return | python | def subcmd_build_parser(subcmd):
subcmd.add_argument(
'--tag',
action='store',
dest='tag',
help=u'Tag of APB to build (i.e. mysql-apb or docker.io/username/mysql-apb)'
)
subcmd.add_argument(
'--dockerfile',
'-f',
action='store',
dest='dockerfile',
help=u'Name of Dockerfile to build with'
)
return | [
"def",
"subcmd_build_parser",
"(",
"subcmd",
")",
":",
"subcmd",
".",
"add_argument",
"(",
"'--tag'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'tag'",
",",
"help",
"=",
"u'Tag of APB to build (ie. mysql-apb or docker.io/username/mysql-apb)'",
")",
"subcmd",
".",
"add_argument",
"(",
"'--dockerfile'",
",",
"'-f'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'dockerfile'",
",",
"help",
"=",
"u'Name of Dockerfile to build with'",
")",
"return"
]
| build subcommand | [
"build",
"subcommand"
]
| train | https://github.com/ansibleplaybookbundle/ansible-playbook-bundle/blob/585694be9b417f1a88354cbfe286bfd68c2c9494/src/apb/cli.py#L88-L105 |
ansibleplaybookbundle/ansible-playbook-bundle | src/apb/cli.py | subcmd_init_parser | def subcmd_init_parser(subcmd):
""" init subcommand """
subcmd.add_argument(
'tag',
action='store',
help=u'Tag (org/name) or name of APB to initialize'
)
subcmd.add_argument(
'--force',
action='store_true',
dest='force',
help=u'Force re-init on current directory',
default=False
)
subcmd.add_argument(
'--dockerhost',
action='store',
help=u'set the dockerhost for this project',
default="docker.io"
)
subcmd.add_argument(
'--async',
action='store',
dest='async',
help=u'Specify asynchronous operation on application.',
default='optional',
choices=['required', 'optional', 'unsupported']
)
subcmd.add_argument(
'--bindable',
action='store_true',
dest='bindable',
help=u'Make application bindable on the spec.',
default=False
)
subcmd.add_argument(
'--dep',
'-d',
action='append',
dest='dependencies',
help=u'Add image dependency to APB spec'
)
for opt in SKIP_OPTIONS:
subcmd.add_argument(
'--skip-%s' % opt,
action='store_true',
dest='skip-%s' % opt,
help=u'Specify which playbooks to not generate by default.',
default=False
)
return | python | def subcmd_init_parser(subcmd):
subcmd.add_argument(
'tag',
action='store',
help=u'Tag (org/name) or name of APB to initialize'
)
subcmd.add_argument(
'--force',
action='store_true',
dest='force',
help=u'Force re-init on current directory',
default=False
)
subcmd.add_argument(
'--dockerhost',
action='store',
help=u'set the dockerhost for this project',
default="docker.io"
)
subcmd.add_argument(
'--async',
action='store',
dest='async',
help=u'Specify asynchronous operation on application.',
default='optional',
choices=['required', 'optional', 'unsupported']
)
subcmd.add_argument(
'--bindable',
action='store_true',
dest='bindable',
help=u'Make application bindable on the spec.',
default=False
)
subcmd.add_argument(
'--dep',
'-d',
action='append',
dest='dependencies',
help=u'Add image dependency to APB spec'
)
for opt in SKIP_OPTIONS:
subcmd.add_argument(
'--skip-%s' % opt,
action='store_true',
dest='skip-%s' % opt,
help=u'Specify which playbooks to not generate by default.',
default=False
)
return | [
"def",
"subcmd_init_parser",
"(",
"subcmd",
")",
":",
"subcmd",
".",
"add_argument",
"(",
"'tag'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"u'Tag (org/name) or name of APB to initialize'",
")",
"subcmd",
".",
"add_argument",
"(",
"'--force'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'force'",
",",
"help",
"=",
"u'Force re-init on current directory'",
",",
"default",
"=",
"False",
")",
"subcmd",
".",
"add_argument",
"(",
"'--dockerhost'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"u'set the dockerhost for this project'",
",",
"default",
"=",
"\"docker.io\"",
")",
"subcmd",
".",
"add_argument",
"(",
"'--async'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'async'",
",",
"help",
"=",
"u'Specify asynchronous operation on application.'",
",",
"default",
"=",
"'optional'",
",",
"choices",
"=",
"[",
"'required'",
",",
"'optional'",
",",
"'unsupported'",
"]",
")",
"subcmd",
".",
"add_argument",
"(",
"'--bindable'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'bindable'",
",",
"help",
"=",
"u'Make application bindable on the spec.'",
",",
"default",
"=",
"False",
")",
"subcmd",
".",
"add_argument",
"(",
"'--dep'",
",",
"'-d'",
",",
"action",
"=",
"'append'",
",",
"dest",
"=",
"'dependencies'",
",",
"help",
"=",
"u'Add image dependency to APB spec'",
")",
"for",
"opt",
"in",
"SKIP_OPTIONS",
":",
"subcmd",
".",
"add_argument",
"(",
"'--skip-%s'",
"%",
"opt",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'skip-%s'",
"%",
"opt",
",",
"help",
"=",
"u'Specify which playbooks to not generate by default.'",
",",
"default",
"=",
"False",
")",
"return"
]
| init subcommand | [
"init",
"subcommand"
]
| train | https://github.com/ansibleplaybookbundle/ansible-playbook-bundle/blob/585694be9b417f1a88354cbfe286bfd68c2c9494/src/apb/cli.py#L108-L165 |
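The loop at the end of subcmd_init_parser generates one --skip-<name> flag per entry in SKIP_OPTIONS, which is defined elsewhere in the module and not captured in this record (the value below is an assumption for illustration). Because dest contains a hyphen, the parsed value is not reachable via normal attribute access and must be read through vars() or getattr():

# Assumed value -- the real list lives elsewhere in src/apb/cli.py:
SKIP_OPTIONS = ['provision', 'deprovision', 'bind', 'unbind']

import argparse
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='subcommand')
subcmd_init_parser(subparsers.add_parser('init'))

args = parser.parse_args(['init', 'org/my-apb', '--skip-provision'])
print(vars(args)['skip-provision'])   # True; the hyphen makes args.skip-provision unusable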
ansibleplaybookbundle/ansible-playbook-bundle | src/apb/cli.py | subcmd_prepare_parser | def subcmd_prepare_parser(subcmd):
""" prepare subcommand """
subcmd.add_argument(
'--provider',
action='store',
dest='provider',
help=u'Targeted cluster type',
choices=['openshift', 'kubernetes'],
default='openshift'
)
subcmd.add_argument(
'--dockerfile',
'-f',
action='store',
dest='dockerfile',
help=u'Name of Dockerfile to build with'
)
return | python | def subcmd_prepare_parser(subcmd):
subcmd.add_argument(
'--provider',
action='store',
dest='provider',
help=u'Targeted cluster type',
choices=['openshift', 'kubernetes'],
default='openshift'
)
subcmd.add_argument(
'--dockerfile',
'-f',
action='store',
dest='dockerfile',
help=u'Name of Dockerfile to build with'
)
return | [
"def",
"subcmd_prepare_parser",
"(",
"subcmd",
")",
":",
"subcmd",
".",
"add_argument",
"(",
"'--provider'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'provider'",
",",
"help",
"=",
"u'Targeted cluster type'",
",",
"choices",
"=",
"[",
"'openshift'",
",",
"'kubernetes'",
"]",
",",
"default",
"=",
"'openshift'",
")",
"subcmd",
".",
"add_argument",
"(",
"'--dockerfile'",
",",
"'-f'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'dockerfile'",
",",
"help",
"=",
"u'Name of Dockerfile to build with'",
")",
"return"
]
| prepare subcommand | [
"prepare",
"subcommand"
]
| train | https://github.com/ansibleplaybookbundle/ansible-playbook-bundle/blob/585694be9b417f1a88354cbfe286bfd68c2c9494/src/apb/cli.py#L168-L187 |
ansibleplaybookbundle/ansible-playbook-bundle | src/apb/cli.py | subcmd_push_parser | def subcmd_push_parser(subcmd):
""" push subcommand """
subcmd.add_argument(
'--broker',
action='store',
dest='broker',
help=u'Route to the Ansible Service Broker'
)
subcmd.add_argument(
'--registry-service-name',
action='store',
dest='reg_svc_name',
help=u'Name of service for internal OpenShift registry',
default=u'docker-registry'
)
subcmd.add_argument(
'--registry-namespace',
action='store',
dest='reg_namespace',
help=u'Namespace of internal OpenShift registry',
default=u'default'
)
subcmd.add_argument(
'--namespace',
action='store',
dest='namespace',
help=u'Namespace to push APB in OpenShift registry',
default=u'openshift'
)
subcmd.add_argument(
'--registry-route',
action='store',
dest='reg_route',
help=u'Route of internal OpenShift registry'
)
subcmd.add_argument(
'--dockerfile',
'-f',
action='store',
dest='dockerfile',
help=u'Dockerfile to build internal registry image with',
default=u'Dockerfile'
)
subcmd.add_argument(
'--secure',
action='store_true',
dest='verify',
help=u'Verify SSL connection to Ansible Service Broker',
default=False
)
subcmd.add_argument(
'--ca-path',
action='store',
dest='cert',
help=u'CA cert to use for verifying SSL connection to Ansible Service Broker',
default=None
)
subcmd.add_argument(
'--username',
'-u',
action='store',
default=None,
dest='basic_auth_username',
help=u'Specify the basic auth username to be used'
)
subcmd.add_argument(
'--password',
'-p',
action='store',
default=None,
dest='basic_auth_password',
help=u'Specify the basic auth password to be used'
)
subcmd.add_argument(
'--no-relist',
action='store_true',
dest='no_relist',
help=u'Do not relist the catalog after pushing an apb to the broker',
default=False
)
subcmd.add_argument(
'--broker-name',
action='store',
dest='broker_name',
help=u'Name of the ServiceBroker k8s resource',
default=u'ansible-service-broker'
)
subcmd.add_argument(
'--push-to-broker',
action='store_true',
dest='broker_push',
help=u'Use Broker development endpoint at /v2/apb/',
default=False
)
return | python | def subcmd_push_parser(subcmd):
subcmd.add_argument(
'--broker',
action='store',
dest='broker',
help=u'Route to the Ansible Service Broker'
)
subcmd.add_argument(
'--registry-service-name',
action='store',
dest='reg_svc_name',
help=u'Name of service for internal OpenShift registry',
default=u'docker-registry'
)
subcmd.add_argument(
'--registry-namespace',
action='store',
dest='reg_namespace',
help=u'Namespace of internal OpenShift registry',
default=u'default'
)
subcmd.add_argument(
'--namespace',
action='store',
dest='namespace',
help=u'Namespace to push APB in OpenShift registry',
default=u'openshift'
)
subcmd.add_argument(
'--registry-route',
action='store',
dest='reg_route',
help=u'Route of internal OpenShift registry'
)
subcmd.add_argument(
'--dockerfile',
'-f',
action='store',
dest='dockerfile',
help=u'Dockerfile to build internal registry image with',
default=u'Dockerfile'
)
subcmd.add_argument(
'--secure',
action='store_true',
dest='verify',
help=u'Verify SSL connection to Ansible Service Broker',
default=False
)
subcmd.add_argument(
'--ca-path',
action='store',
dest='cert',
help=u'CA cert to use for verifying SSL connection to Ansible Service Broker',
default=None
)
subcmd.add_argument(
'--username',
'-u',
action='store',
default=None,
dest='basic_auth_username',
help=u'Specify the basic auth username to be used'
)
subcmd.add_argument(
'--password',
'-p',
action='store',
default=None,
dest='basic_auth_password',
help=u'Specify the basic auth password to be used'
)
subcmd.add_argument(
'--no-relist',
action='store_true',
dest='no_relist',
help=u'Do not relist the catalog after pushing an apb to the broker',
default=False
)
subcmd.add_argument(
'--broker-name',
action='store',
dest='broker_name',
help=u'Name of the ServiceBroker k8s resource',
default=u'ansible-service-broker'
)
subcmd.add_argument(
'--push-to-broker',
action='store_true',
dest='broker_push',
help=u'Use Broker development endpoint at /v2/apb/',
default=False
)
return | [
"def",
"subcmd_push_parser",
"(",
"subcmd",
")",
":",
"subcmd",
".",
"add_argument",
"(",
"'--broker'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'broker'",
",",
"help",
"=",
"u'Route to the Ansible Service Broker'",
")",
"subcmd",
".",
"add_argument",
"(",
"'--registry-service-name'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'reg_svc_name'",
",",
"help",
"=",
"u'Name of service for internal OpenShift registry'",
",",
"default",
"=",
"u'docker-registry'",
")",
"subcmd",
".",
"add_argument",
"(",
"'--registry-namespace'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'reg_namespace'",
",",
"help",
"=",
"u'Namespace of internal OpenShift registry'",
",",
"default",
"=",
"u'default'",
")",
"subcmd",
".",
"add_argument",
"(",
"'--namespace'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'namespace'",
",",
"help",
"=",
"u'Namespace to push APB in OpenShift registry'",
",",
"default",
"=",
"u'openshift'",
")",
"subcmd",
".",
"add_argument",
"(",
"'--registry-route'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'reg_route'",
",",
"help",
"=",
"u'Route of internal OpenShift registry'",
")",
"subcmd",
".",
"add_argument",
"(",
"'--dockerfile'",
",",
"'-f'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'dockerfile'",
",",
"help",
"=",
"u'Dockerfile to build internal registry image with'",
",",
"default",
"=",
"u'Dockerfile'",
")",
"subcmd",
".",
"add_argument",
"(",
"'--secure'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'verify'",
",",
"help",
"=",
"u'Verify SSL connection to Ansible Service Broker'",
",",
"default",
"=",
"False",
")",
"subcmd",
".",
"add_argument",
"(",
"'--ca-path'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'cert'",
",",
"help",
"=",
"u'CA cert to use for verifying SSL connection to Ansible Service Broker'",
",",
"default",
"=",
"None",
")",
"subcmd",
".",
"add_argument",
"(",
"'--username'",
",",
"'-u'",
",",
"action",
"=",
"'store'",
",",
"default",
"=",
"None",
",",
"dest",
"=",
"'basic_auth_username'",
",",
"help",
"=",
"u'Specify the basic auth username to be used'",
")",
"subcmd",
".",
"add_argument",
"(",
"'--password'",
",",
"'-p'",
",",
"action",
"=",
"'store'",
",",
"default",
"=",
"None",
",",
"dest",
"=",
"'basic_auth_password'",
",",
"help",
"=",
"u'Specify the basic auth password to be used'",
")",
"subcmd",
".",
"add_argument",
"(",
"'--no-relist'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'no_relist'",
",",
"help",
"=",
"u'Do not relist the catalog after pushing an apb to the broker'",
",",
"default",
"=",
"False",
")",
"subcmd",
".",
"add_argument",
"(",
"'--broker-name'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'broker_name'",
",",
"help",
"=",
"u'Name of the ServiceBroker k8s resource'",
",",
"default",
"=",
"u'ansible-service-broker'",
")",
"subcmd",
".",
"add_argument",
"(",
"'--push-to-broker'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'broker_push'",
",",
"help",
"=",
"u'Use Broker development endpoint at /v2/apb/'",
",",
"default",
"=",
"False",
")",
"return"
]
| push subcommand | [
"push",
"subcommand"
]
| train | https://github.com/ansibleplaybookbundle/ansible-playbook-bundle/blob/585694be9b417f1a88354cbfe286bfd68c2c9494/src/apb/cli.py#L190-L284 |
ansibleplaybookbundle/ansible-playbook-bundle | src/apb/cli.py | subcmd_remove_parser | def subcmd_remove_parser(subcmd):
""" remove subcommand """
subcmd.add_argument(
'--broker',
action='store',
dest='broker',
help=u'Route to the Ansible Service Broker'
)
subcmd.add_argument(
'--local', '-l',
action='store_true',
dest='local',
help=u'Remove image from internal OpenShift registry',
default=False
)
subcmd.add_argument(
'--all',
action='store_true',
dest='all',
help=u'Remove all stored APBs',
default=False
)
subcmd.add_argument(
'--id',
action='store',
dest='id',
help=u'ID of APB to remove'
)
subcmd.add_argument(
'--secure',
action='store_true',
dest='verify',
help=u'Verify SSL connection to Ansible Service Broker',
default=False
)
subcmd.add_argument(
'--ca-path',
action='store',
dest='cert',
help=u'CA cert to use for verifying SSL connection to Ansible Service Broker',
default=None
)
subcmd.add_argument(
'--username',
'-u',
action='store',
default=None,
dest='basic_auth_username',
help=u'Specify the basic auth username to be used'
)
subcmd.add_argument(
'--password',
'-p',
action='store',
default=None,
dest='basic_auth_password',
help=u'Specify the basic auth password to be used'
)
subcmd.add_argument(
'--no-relist',
action='store_true',
dest='no_relist',
help=u'Do not relist the catalog after pushing an apb to the broker',
default=False
)
subcmd.add_argument(
'--broker-name',
action='store',
dest='broker_name',
help=u'Name of the ServiceBroker k8s resource',
default=u'ansible-service-broker'
)
return | python | def subcmd_remove_parser(subcmd):
subcmd.add_argument(
'--broker',
action='store',
dest='broker',
help=u'Route to the Ansible Service Broker'
)
subcmd.add_argument(
'--local', '-l',
action='store_true',
dest='local',
help=u'Remove image from internal OpenShift registry',
default=False
)
subcmd.add_argument(
'--all',
action='store_true',
dest='all',
help=u'Remove all stored APBs',
default=False
)
subcmd.add_argument(
'--id',
action='store',
dest='id',
help=u'ID of APB to remove'
)
subcmd.add_argument(
'--secure',
action='store_true',
dest='verify',
help=u'Verify SSL connection to Ansible Service Broker',
default=False
)
subcmd.add_argument(
'--ca-path',
action='store',
dest='cert',
help=u'CA cert to use for verifying SSL connection to Ansible Service Broker',
default=None
)
subcmd.add_argument(
'--username',
'-u',
action='store',
default=None,
dest='basic_auth_username',
help=u'Specify the basic auth username to be used'
)
subcmd.add_argument(
'--password',
'-p',
action='store',
default=None,
dest='basic_auth_password',
help=u'Specify the basic auth password to be used'
)
subcmd.add_argument(
'--no-relist',
action='store_true',
dest='no_relist',
help=u'Do not relist the catalog after pushing an apb to the broker',
default=False
)
subcmd.add_argument(
'--broker-name',
action='store',
dest='broker_name',
help=u'Name of the ServiceBroker k8s resource',
default=u'ansible-service-broker'
)
return | [
"def",
"subcmd_remove_parser",
"(",
"subcmd",
")",
":",
"subcmd",
".",
"add_argument",
"(",
"'--broker'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'broker'",
",",
"help",
"=",
"u'Route to the Ansible Service Broker'",
")",
"subcmd",
".",
"add_argument",
"(",
"'--local'",
",",
"'-l'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'local'",
",",
"help",
"=",
"u'Remove image from internal OpenShift registry'",
",",
"default",
"=",
"False",
")",
"subcmd",
".",
"add_argument",
"(",
"'--all'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'all'",
",",
"help",
"=",
"u'Remove all stored APBs'",
",",
"default",
"=",
"False",
")",
"subcmd",
".",
"add_argument",
"(",
"'--id'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'id'",
",",
"help",
"=",
"u'ID of APB to remove'",
")",
"subcmd",
".",
"add_argument",
"(",
"'--secure'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'verify'",
",",
"help",
"=",
"u'Verify SSL connection to Ansible Service Broker'",
",",
"default",
"=",
"False",
")",
"subcmd",
".",
"add_argument",
"(",
"'--ca-path'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'cert'",
",",
"help",
"=",
"u'CA cert to use for verifying SSL connection to Ansible Service Broker'",
",",
"default",
"=",
"None",
")",
"subcmd",
".",
"add_argument",
"(",
"'--username'",
",",
"'-u'",
",",
"action",
"=",
"'store'",
",",
"default",
"=",
"None",
",",
"dest",
"=",
"'basic_auth_username'",
",",
"help",
"=",
"u'Specify the basic auth username to be used'",
")",
"subcmd",
".",
"add_argument",
"(",
"'--password'",
",",
"'-p'",
",",
"action",
"=",
"'store'",
",",
"default",
"=",
"None",
",",
"dest",
"=",
"'basic_auth_password'",
",",
"help",
"=",
"u'Specify the basic auth password to be used'",
")",
"subcmd",
".",
"add_argument",
"(",
"'--no-relist'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'no_relist'",
",",
"help",
"=",
"u'Do not relist the catalog after pushing an apb to the broker'",
",",
"default",
"=",
"False",
")",
"subcmd",
".",
"add_argument",
"(",
"'--broker-name'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'broker_name'",
",",
"help",
"=",
"u'Name of the ServiceBroker k8s resource'",
",",
"default",
"=",
"u'ansible-service-broker'",
")",
"return"
]
| remove subcommand | [
"remove",
"subcommand"
]
| train | https://github.com/ansibleplaybookbundle/ansible-playbook-bundle/blob/585694be9b417f1a88354cbfe286bfd68c2c9494/src/apb/cli.py#L287-L359 |
ansibleplaybookbundle/ansible-playbook-bundle | src/apb/cli.py | subcmd_bootstrap_parser | def subcmd_bootstrap_parser(subcmd):
""" bootstrap subcommand """
subcmd.add_argument(
'--broker',
action='store',
dest='broker',
help=u'Route to the Ansible Service Broker'
)
subcmd.add_argument(
'--secure',
action='store_true',
dest='verify',
help=u'Verify SSL connection to Ansible Service Broker',
default=False
)
subcmd.add_argument(
'--ca-path',
action='store',
dest='cert',
help=u'CA cert to use for verifying SSL connection to Ansible Service Broker',
default=None
)
subcmd.add_argument(
'--no-relist',
action='store_true',
dest='no_relist',
help=u'Do not relist the catalog after bootstrapping the broker',
default=False
)
subcmd.add_argument(
'--username',
'-u',
action='store',
default=None,
dest='basic_auth_username',
help=u'Specify the basic auth username to be used'
)
subcmd.add_argument(
'--password',
'-p',
action='store',
default=None,
dest='basic_auth_password',
help=u'Specify the basic auth password to be used'
)
subcmd.add_argument(
'--broker-name',
action='store',
dest='broker_name',
help=u'Name of the ServiceBroker k8s resource',
default=u'ansible-service-broker'
)
return | python | def subcmd_bootstrap_parser(subcmd):
subcmd.add_argument(
'--broker',
action='store',
dest='broker',
help=u'Route to the Ansible Service Broker'
)
subcmd.add_argument(
'--secure',
action='store_true',
dest='verify',
help=u'Verify SSL connection to Ansible Service Broker',
default=False
)
subcmd.add_argument(
'--ca-path',
action='store',
dest='cert',
help=u'CA cert to use for verifying SSL connection to Ansible Service Broker',
default=None
)
subcmd.add_argument(
'--no-relist',
action='store_true',
dest='no_relist',
help=u'Do not relist the catalog after bootstrapping the broker',
default=False
)
subcmd.add_argument(
'--username',
'-u',
action='store',
default=None,
dest='basic_auth_username',
help=u'Specify the basic auth username to be used'
)
subcmd.add_argument(
'--password',
'-p',
action='store',
default=None,
dest='basic_auth_password',
help=u'Specify the basic auth password to be used'
)
subcmd.add_argument(
'--broker-name',
action='store',
dest='broker_name',
help=u'Name of the ServiceBroker k8s resource',
default=u'ansible-service-broker'
)
return | [
"def",
"subcmd_bootstrap_parser",
"(",
"subcmd",
")",
":",
"subcmd",
".",
"add_argument",
"(",
"'--broker'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'broker'",
",",
"help",
"=",
"u'Route to the Ansible Service Broker'",
")",
"subcmd",
".",
"add_argument",
"(",
"'--secure'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'verify'",
",",
"help",
"=",
"u'Verify SSL connection to Ansible Service Broker'",
",",
"default",
"=",
"False",
")",
"subcmd",
".",
"add_argument",
"(",
"'--ca-path'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'cert'",
",",
"help",
"=",
"u'CA cert to use for verifying SSL connection to Ansible Service Broker'",
",",
"default",
"=",
"None",
")",
"subcmd",
".",
"add_argument",
"(",
"'--no-relist'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'no_relist'",
",",
"help",
"=",
"u'Do not relist the catalog after bootstrapping the broker'",
",",
"default",
"=",
"False",
")",
"subcmd",
".",
"add_argument",
"(",
"'--username'",
",",
"'-u'",
",",
"action",
"=",
"'store'",
",",
"default",
"=",
"None",
",",
"dest",
"=",
"'basic_auth_username'",
",",
"help",
"=",
"u'Specify the basic auth username to be used'",
")",
"subcmd",
".",
"add_argument",
"(",
"'--password'",
",",
"'-p'",
",",
"action",
"=",
"'store'",
",",
"default",
"=",
"None",
",",
"dest",
"=",
"'basic_auth_password'",
",",
"help",
"=",
"u'Specify the basic auth password to be used'",
")",
"subcmd",
".",
"add_argument",
"(",
"'--broker-name'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'broker_name'",
",",
"help",
"=",
"u'Name of the ServiceBroker k8s resource'",
",",
"default",
"=",
"u'ansible-service-broker'",
")",
"return"
]
| bootstrap subcommand | [
"bootstrap",
"subcommand"
]
| train | https://github.com/ansibleplaybookbundle/ansible-playbook-bundle/blob/585694be9b417f1a88354cbfe286bfd68c2c9494/src/apb/cli.py#L362-L414 |
ansibleplaybookbundle/ansible-playbook-bundle | src/apb/cli.py | subcmd_run_parser | def subcmd_run_parser(subcmd):
""" provision subcommand """
subcmd.add_argument(
'--project',
action='store',
dest='project',
required=True,
help=u'Project where the APB should be run'
)
subcmd.add_argument(
'--action',
action='store',
dest='action',
required=False,
help=u'The action to perform when running the APB',
default='provision'
)
subcmd.add_argument(
'--registry-service-name',
action='store',
dest='reg_svc_name',
help=u'Name of service for internal OpenShift registry',
default=u'docker-registry'
)
subcmd.add_argument(
'--registry-namespace',
action='store',
dest='reg_namespace',
help=u'Namespace of internal OpenShift registry',
default=u'default'
)
subcmd.add_argument(
'--namespace',
action='store',
dest='namespace',
help=u'Namespace to push APB in OpenShift registry',
default=u'openshift'
)
subcmd.add_argument(
'--registry-route',
action='store',
dest='reg_route',
help=u'Route of internal OpenShift registry'
)
subcmd.add_argument(
'--dockerfile',
'-f',
action='store',
dest='dockerfile',
help=u'Dockerfile to build internal registry image with',
default=u'Dockerfile'
)
return | python | def subcmd_run_parser(subcmd):
subcmd.add_argument(
'--project',
action='store',
dest='project',
required=True,
help=u'Project where the APB should be run'
)
subcmd.add_argument(
'--action',
action='store',
dest='action',
required=False,
help=u'The action to perform when running the APB',
default='provision'
)
subcmd.add_argument(
'--registry-service-name',
action='store',
dest='reg_svc_name',
help=u'Name of service for internal OpenShift registry',
default=u'docker-registry'
)
subcmd.add_argument(
'--registry-namespace',
action='store',
dest='reg_namespace',
help=u'Namespace of internal OpenShift registry',
default=u'default'
)
subcmd.add_argument(
'--namespace',
action='store',
dest='namespace',
help=u'Namespace to push APB in OpenShift registry',
default=u'openshift'
)
subcmd.add_argument(
'--registry-route',
action='store',
dest='reg_route',
help=u'Route of internal OpenShift registry'
)
subcmd.add_argument(
'--dockerfile',
'-f',
action='store',
dest='dockerfile',
help=u'Dockerfile to build internal registry image with',
default=u'Dockerfile'
)
return | [
"def",
"subcmd_run_parser",
"(",
"subcmd",
")",
":",
"subcmd",
".",
"add_argument",
"(",
"'--project'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'project'",
",",
"required",
"=",
"True",
",",
"help",
"=",
"u'Project where the APB should be run'",
")",
"subcmd",
".",
"add_argument",
"(",
"'--action'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'action'",
",",
"required",
"=",
"False",
",",
"help",
"=",
"u'The action to perform when running the APB'",
",",
"default",
"=",
"'provision'",
")",
"subcmd",
".",
"add_argument",
"(",
"'--registry-service-name'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'reg_svc_name'",
",",
"help",
"=",
"u'Name of service for internal OpenShift registry'",
",",
"default",
"=",
"u'docker-registry'",
")",
"subcmd",
".",
"add_argument",
"(",
"'--registry-namespace'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'reg_namespace'",
",",
"help",
"=",
"u'Namespace of internal OpenShift registry'",
",",
"default",
"=",
"u'default'",
")",
"subcmd",
".",
"add_argument",
"(",
"'--namespace'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'namespace'",
",",
"help",
"=",
"u'Namespace to push APB in OpenShift registry'",
",",
"default",
"=",
"u'openshift'",
")",
"subcmd",
".",
"add_argument",
"(",
"'--registry-route'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'reg_route'",
",",
"help",
"=",
"u'Route of internal OpenShift registry'",
")",
"subcmd",
".",
"add_argument",
"(",
"'--dockerfile'",
",",
"'-f'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'dockerfile'",
",",
"help",
"=",
"u'Dockerfile to build internal registry image with'",
",",
"default",
"=",
"u'Dockerfile'",
")",
"return"
]
| run subcommand | [
"provision",
"subcommand"
]
| train | https://github.com/ansibleplaybookbundle/ansible-playbook-bundle/blob/585694be9b417f1a88354cbfe286bfd68c2c9494/src/apb/cli.py#L461-L513 |
ansibleplaybookbundle/ansible-playbook-bundle | src/apb/cli.py | subcmd_relist_parser | def subcmd_relist_parser(subcmd):
""" relist subcommand """
subcmd.add_argument(
'--broker-name',
action='store',
dest='broker_name',
help=u'Name of the ServiceBroker k8s resource',
default=u'ansible-service-broker'
)
subcmd.add_argument(
'--secure',
action='store_true',
dest='verify',
help=u'Verify SSL connection to Ansible Service Broker',
default=False
)
subcmd.add_argument(
'--ca-path',
action='store',
dest='cert',
help=u'CA cert to use for verifying SSL connection to Ansible Service Broker',
default=None
)
subcmd.add_argument(
'--username',
'-u',
action='store',
default=None,
dest='basic_auth_username',
help=u'Specify the basic auth username to be used'
)
subcmd.add_argument(
'--password',
'-p',
action='store',
default=None,
dest='basic_auth_password',
help=u'Specify the basic auth password to be used'
)
return | python | def subcmd_relist_parser(subcmd):
subcmd.add_argument(
'--broker-name',
action='store',
dest='broker_name',
help=u'Name of the ServiceBroker k8s resource',
default=u'ansible-service-broker'
)
subcmd.add_argument(
'--secure',
action='store_true',
dest='verify',
help=u'Verify SSL connection to Ansible Service Broker',
default=False
)
subcmd.add_argument(
'--ca-path',
action='store',
dest='cert',
help=u'CA cert to use for verifying SSL connection to Ansible Service Broker',
default=None
)
subcmd.add_argument(
'--username',
'-u',
action='store',
default=None,
dest='basic_auth_username',
help=u'Specify the basic auth username to be used'
)
subcmd.add_argument(
'--password',
'-p',
action='store',
default=None,
dest='basic_auth_password',
help=u'Specify the basic auth password to be used'
)
return | [
"def",
"subcmd_relist_parser",
"(",
"subcmd",
")",
":",
"subcmd",
".",
"add_argument",
"(",
"'--broker-name'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'broker_name'",
",",
"help",
"=",
"u'Name of the ServiceBroker k8s resource'",
",",
"default",
"=",
"u'ansible-service-broker'",
")",
"subcmd",
".",
"add_argument",
"(",
"'--secure'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'verify'",
",",
"help",
"=",
"u'Verify SSL connection to Ansible Service Broker'",
",",
"default",
"=",
"False",
")",
"subcmd",
".",
"add_argument",
"(",
"'--ca-path'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'cert'",
",",
"help",
"=",
"u'CA cert to use for verifying SSL connection to Ansible Service Broker'",
",",
"default",
"=",
"None",
")",
"subcmd",
".",
"add_argument",
"(",
"'--username'",
",",
"'-u'",
",",
"action",
"=",
"'store'",
",",
"default",
"=",
"None",
",",
"dest",
"=",
"'basic_auth_username'",
",",
"help",
"=",
"u'Specify the basic auth username to be used'",
")",
"subcmd",
".",
"add_argument",
"(",
"'--password'",
",",
"'-p'",
",",
"action",
"=",
"'store'",
",",
"default",
"=",
"None",
",",
"dest",
"=",
"'basic_auth_password'",
",",
"help",
"=",
"u'Specify the basic auth password to be used'",
")",
"return"
]
| relist subcommand | [
"relist",
"subcommand"
]
| train | https://github.com/ansibleplaybookbundle/ansible-playbook-bundle/blob/585694be9b417f1a88354cbfe286bfd68c2c9494/src/apb/cli.py#L524-L563 |
ansibleplaybookbundle/ansible-playbook-bundle | src/apb/cli.py | main | def main():
""" main """
# BZ 1581651 - Override the ArgumentParser to disable argument abbreviations.
parser = OverrideArgumentParser(
description=u'APB tooling for '
u'assisting in building and packaging APBs.'
)
parser.add_argument(
'--debug',
action='store_true',
dest='debug',
help=u'Enable debug output',
default=False
)
# TODO: Modify project to accept relative paths
parser.add_argument(
'--project',
'-p',
action='store',
dest='base_path',
help=u'Specify a path to your project. Defaults to CWD.',
default=os.getcwd()
)
parser.add_argument(
'--token',
action='store',
dest='auth_token',
help=u'Specify OpenShift auth token to be used',
default=None
)
subparsers = parser.add_subparsers(title='subcommand', dest='subcommand')
subparsers.required = True
for subcommand in AVAILABLE_COMMANDS:
subparser = subparsers.add_parser(
subcommand, help=AVAILABLE_COMMANDS[subcommand]
)
globals()['subcmd_%s_parser' % subcommand](subparser)
args = parser.parse_args()
if args.subcommand == 'help':
parser.print_help()
sys.exit(0)
if args.subcommand == 'version':
version = pkg_resources.require("apb")[0].version
print("Version: apb-%s" % version)
sys.exit(0)
try:
getattr(apb.engine,
u'cmdrun_{}'.format(args.subcommand))(**vars(args))
except Exception as e:
print("Exception occurred! %s" % e)
sys.exit(1) | python | def main():
parser = OverrideArgumentParser(
description=u'APB tooling for '
u'assisting in building and packaging APBs.'
)
parser.add_argument(
'--debug',
action='store_true',
dest='debug',
help=u'Enable debug output',
default=False
)
parser.add_argument(
'--project',
'-p',
action='store',
dest='base_path',
help=u'Specify a path to your project. Defaults to CWD.',
default=os.getcwd()
)
parser.add_argument(
'--token',
action='store',
dest='auth_token',
help=u'Specify OpenShift auth token to be used',
default=None
)
subparsers = parser.add_subparsers(title='subcommand', dest='subcommand')
subparsers.required = True
for subcommand in AVAILABLE_COMMANDS:
subparser = subparsers.add_parser(
subcommand, help=AVAILABLE_COMMANDS[subcommand]
)
globals()['subcmd_%s_parser' % subcommand](subparser)
args = parser.parse_args()
if args.subcommand == 'help':
parser.print_help()
sys.exit(0)
if args.subcommand == 'version':
version = pkg_resources.require("apb")[0].version
print("Version: apb-%s" % version)
sys.exit(0)
try:
getattr(apb.engine,
u'cmdrun_{}'.format(args.subcommand))(**vars(args))
except Exception as e:
print("Exception occurred! %s" % e)
sys.exit(1) | [
"def",
"main",
"(",
")",
":",
"# BZ 1581651 - Override the ArgumentParser to disable argument abbreviations.",
"parser",
"=",
"OverrideArgumentParser",
"(",
"description",
"=",
"u'APB tooling for '",
"u'assisting in building and packaging APBs.'",
")",
"parser",
".",
"add_argument",
"(",
"'--debug'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'debug'",
",",
"help",
"=",
"u'Enable debug output'",
",",
"default",
"=",
"False",
")",
"# TODO: Modify project to accept relative paths",
"parser",
".",
"add_argument",
"(",
"'--project'",
",",
"'-p'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'base_path'",
",",
"help",
"=",
"u'Specify a path to your project. Defaults to CWD.'",
",",
"default",
"=",
"os",
".",
"getcwd",
"(",
")",
")",
"parser",
".",
"add_argument",
"(",
"'--token'",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'auth_token'",
",",
"help",
"=",
"u'Specify OpenShift auth token to be used'",
",",
"default",
"=",
"None",
")",
"subparsers",
"=",
"parser",
".",
"add_subparsers",
"(",
"title",
"=",
"'subcommand'",
",",
"dest",
"=",
"'subcommand'",
")",
"subparsers",
".",
"required",
"=",
"True",
"for",
"subcommand",
"in",
"AVAILABLE_COMMANDS",
":",
"subparser",
"=",
"subparsers",
".",
"add_parser",
"(",
"subcommand",
",",
"help",
"=",
"AVAILABLE_COMMANDS",
"[",
"subcommand",
"]",
")",
"globals",
"(",
")",
"[",
"'subcmd_%s_parser'",
"%",
"subcommand",
"]",
"(",
"subparser",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"if",
"args",
".",
"subcommand",
"==",
"'help'",
":",
"parser",
".",
"print_help",
"(",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"if",
"args",
".",
"subcommand",
"==",
"'version'",
":",
"version",
"=",
"pkg_resources",
".",
"require",
"(",
"\"apb\"",
")",
"[",
"0",
"]",
".",
"version",
"print",
"(",
"\"Version: apb-%s\"",
"%",
"version",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"try",
":",
"getattr",
"(",
"apb",
".",
"engine",
",",
"u'cmdrun_{}'",
".",
"format",
"(",
"args",
".",
"subcommand",
")",
")",
"(",
"*",
"*",
"vars",
"(",
"args",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"\"Exception occurred! %s\"",
"%",
"e",
")",
"sys",
".",
"exit",
"(",
"1",
")"
]
| main | [
"main"
]
| train | https://github.com/ansibleplaybookbundle/ansible-playbook-bundle/blob/585694be9b417f1a88354cbfe286bfd68c2c9494/src/apb/cli.py#L686-L745 |
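main() relies on two string-based dispatch tricks: globals()['subcmd_%s_parser' % name] to find the parser builders defined above, and getattr(apb.engine, 'cmdrun_<subcommand>') to find the handler. The same pattern in isolation, with stand-in names:

class FakeEngine:   # stand-in for the apb.engine module
    @staticmethod
    def cmdrun_list(**kwargs):
        print('listing with', kwargs)

def dispatch(engine, subcommand, **kwargs):
    # Same lookup main() performs; raises AttributeError if no handler exists
    getattr(engine, 'cmdrun_{}'.format(subcommand))(**kwargs)

dispatch(FakeEngine, 'list', broker='http://broker.example.com')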
jrialland/python-astar | src/astar/__init__.py | find_path | def find_path(start, goal, neighbors_fnct, reversePath=False, heuristic_cost_estimate_fnct=lambda a, b: Infinite, distance_between_fnct=lambda a, b: 1.0, is_goal_reached_fnct=lambda a, b: a == b):
"""A non-class version of the path finding algorithm"""
class FindPath(AStar):
def heuristic_cost_estimate(self, current, goal):
return heuristic_cost_estimate_fnct(current, goal)
def distance_between(self, n1, n2):
return distance_between_fnct(n1, n2)
def neighbors(self, node):
return neighbors_fnct(node)
def is_goal_reached(self, current, goal):
return is_goal_reached_fnct(current, goal)
return FindPath().astar(start, goal, reversePath) | python | def find_path(start, goal, neighbors_fnct, reversePath=False, heuristic_cost_estimate_fnct=lambda a, b: Infinite, distance_between_fnct=lambda a, b: 1.0, is_goal_reached_fnct=lambda a, b: a == b):
class FindPath(AStar):
def heuristic_cost_estimate(self, current, goal):
return heuristic_cost_estimate_fnct(current, goal)
def distance_between(self, n1, n2):
return distance_between_fnct(n1, n2)
def neighbors(self, node):
return neighbors_fnct(node)
def is_goal_reached(self, current, goal):
return is_goal_reached_fnct(current, goal)
return FindPath().astar(start, goal, reversePath) | [
"def",
"find_path",
"(",
"start",
",",
"goal",
",",
"neighbors_fnct",
",",
"reversePath",
"=",
"False",
",",
"heuristic_cost_estimate_fnct",
"=",
"lambda",
"a",
",",
"b",
":",
"Infinite",
",",
"distance_between_fnct",
"=",
"lambda",
"a",
",",
"b",
":",
"1.0",
",",
"is_goal_reached_fnct",
"=",
"lambda",
"a",
",",
"b",
":",
"a",
"==",
"b",
")",
":",
"class",
"FindPath",
"(",
"AStar",
")",
":",
"def",
"heuristic_cost_estimate",
"(",
"self",
",",
"current",
",",
"goal",
")",
":",
"return",
"heuristic_cost_estimate_fnct",
"(",
"current",
",",
"goal",
")",
"def",
"distance_between",
"(",
"self",
",",
"n1",
",",
"n2",
")",
":",
"return",
"distance_between_fnct",
"(",
"n1",
",",
"n2",
")",
"def",
"neighbors",
"(",
"self",
",",
"node",
")",
":",
"return",
"neighbors_fnct",
"(",
"node",
")",
"def",
"is_goal_reached",
"(",
"self",
",",
"current",
",",
"goal",
")",
":",
"return",
"is_goal_reached_fnct",
"(",
"current",
",",
"goal",
")",
"return",
"FindPath",
"(",
")",
".",
"astar",
"(",
"start",
",",
"goal",
",",
"reversePath",
")"
]
| A non-class version of the path finding algorithm | [
"A",
"non",
"-",
"class",
"version",
"of",
"the",
"path",
"finding",
"algorithm"
]
| train | https://github.com/jrialland/python-astar/blob/7a3f5b33bedd03bd09792fe0d5b6fe28d50f9514/src/astar/__init__.py#L109-L124 |
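find_path only requires a neighbors function; the other callbacks have workable defaults (unit edge cost, equality as the goal test). A sketch on a 4-connected 10x10 grid with a Euclidean heuristic; astar() yields the node sequence, or None when the goal is unreachable:

import math

def neighbors(node):
    x, y = node
    return [(x + dx, y + dy) for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1))
            if 0 <= x + dx < 10 and 0 <= y + dy < 10]

path = find_path((0, 0), (3, 3), neighbors,
                 heuristic_cost_estimate_fnct=lambda a, b: math.hypot(b[0] - a[0], b[1] - a[1]))
print(list(path) if path is not None else 'unreachable')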
frictionlessdata/goodtables-py | goodtables/registry.py | preset | def preset(name):
"""https://github.com/frictionlessdata/goodtables-py#custom-presets
"""
def decorator(func):
registry.register_preset(func, name)
return func
return decorator | python | def preset(name):
def decorator(func):
registry.register_preset(func, name)
return func
return decorator | [
"def",
"preset",
"(",
"name",
")",
":",
"def",
"decorator",
"(",
"func",
")",
":",
"registry",
".",
"register_preset",
"(",
"func",
",",
"name",
")",
"return",
"func",
"return",
"decorator"
]
| https://github.com/frictionlessdata/goodtables-py#custom-presets | [
"https",
":",
"//",
"github",
".",
"com",
"/",
"frictionlessdata",
"/",
"goodtables",
"-",
"py#custom",
"-",
"presets"
]
| train | https://github.com/frictionlessdata/goodtables-py/blob/3e7d6891d2f4e342dfafbe0e951e204ccc252a44/goodtables/registry.py#L17-L23 |
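A hedged sketch of registering a custom preset. The decorator usage follows the signature above; the return contract (a warnings list plus table descriptors for the inspector) is taken from the README linked in the docstring, not from this record:

from goodtables import validate, preset

@preset('my-preset')
def my_preset(source, **options):
    warnings = []   # non-fatal notes about the source
    tables = []     # table descriptors; see the linked README for the exact shape
    # ... populate warnings/tables from `source` ...
    return warnings, tables

report = validate('data.csv', preset='my-preset')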
frictionlessdata/goodtables-py | goodtables/registry.py | check | def check(name, type=None, context=None, position=None):
"""https://github.com/frictionlessdata/goodtables-py#custom-checks
"""
def decorator(func):
registry.register_check(func, name, type, context, position)
return func
return decorator | python | def check(name, type=None, context=None, position=None):
def decorator(func):
registry.register_check(func, name, type, context, position)
return func
return decorator | [
"def",
"check",
"(",
"name",
",",
"type",
"=",
"None",
",",
"context",
"=",
"None",
",",
"position",
"=",
"None",
")",
":",
"def",
"decorator",
"(",
"func",
")",
":",
"registry",
".",
"register_check",
"(",
"func",
",",
"name",
",",
"type",
",",
"context",
",",
"position",
")",
"return",
"func",
"return",
"decorator"
]
| https://github.com/frictionlessdata/goodtables-py#custom-checks | [
"https",
":",
"//",
"github",
".",
"com",
"/",
"frictionlessdata",
"/",
"goodtables",
"-",
"py#custom",
"-",
"checks"
]
| train | https://github.com/frictionlessdata/goodtables-py/blob/3e7d6891d2f4e342dfafbe0e951e204ccc252a44/goodtables/registry.py#L26-L32 |
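Likewise for custom checks: the registry call records the metadata (name, type, context, position) but not the check callable's contract, so the body below is a placeholder; consult the README linked in the docstring for the exact signature:

from goodtables import check

@check('my-check', type='custom', context='body')
def my_check(cells):   # signature assumed; see the linked README
    errors = []
    # ... inspect `cells` and append error dicts ...
    return errors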
frictionlessdata/goodtables-py | goodtables/validate.py | validate | def validate(source, **options):
"""Validates a source file and returns a report.
Args:
source (Union[str, Dict, List[Dict], IO]): The source to be validated.
It can be a local file path, URL, dict, list of dicts, or a
file-like object. If it's a list of dicts and the `preset` is
"nested", each of the dict key's will be used as if it was passed
as a keyword argument to this method.
The file can be a CSV, XLS, JSON, and any other format supported by
`tabulator`_.
Keyword Args:
checks (List[str]): List of checks names to be enabled. They can be
individual check names (e.g. `blank-headers`), or check types (e.g.
`structure`).
skip_checks (List[str]): List of checks names to be skipped. They can
be individual check names (e.g. `blank-headers`), or check types
(e.g. `structure`).
infer_schema (bool): Infer schema if one wasn't passed as an argument.
infer_fields (bool): Infer schema for columns not present in the received schema.
order_fields (bool): Order source columns based on schema fields order.
This is useful when you don't want to validate that the data
columns' order is the same as the schema's.
error_limit (int): Stop validation if the number of errors per table
exceeds this value.
table_limit (int): Maximum number of tables to validate.
row_limit (int): Maximum number of rows to validate.
preset (str): Dataset type could be `table` (default), `datapackage`,
`nested` or custom. Usually, the preset can be inferred from the
source, so you don't need to define it.
Any (Any): Any additional arguments not defined here will be passed on,
depending on the chosen `preset`. If the `preset` is `table`, the
extra arguments will be passed on to `tabulator`_, if it is
`datapackage`, they will be passed on to the `datapackage`_
constructor.
# Table preset
schema (Union[str, Dict, IO]): The Table Schema for the
source.
headers (Union[int, List[str]]): Either the row number that contains
the headers, or a list with them. If the row number is given, ?????
scheme (str): The scheme used to access the source (e.g. `file`,
`http`). This is usually inferred correctly from the source. See
the `tabulator`_ documentation for the list of supported schemes.
format (str): Format of the source data (`csv`, `datapackage`, ...).
This is usually inferred correctly from the source. See the
`tabulator`_ documentation for the list of supported formats.
encoding (str): Encoding of the source.
skip_rows (Union[int, List[Union[int, str]]]): Row numbers or a
string. Rows beginning with the string will be ignored (e.g. '#',
'//').
Raises:
GoodtablesException: Raised on any non-tabular error.
Returns:
dict: The validation report.
.. _tabulator:
https://github.com/frictionlessdata/tabulator-py
.. _tabulator_schemes:
https://github.com/frictionlessdata/tabulator-py
.. _datapackage:
https://github.com/frictionlessdata/datapackage-py
"""
source, options, inspector_settings = _parse_arguments(source, **options)
# Validate
inspector = Inspector(**inspector_settings)
report = inspector.inspect(source, **options)
return report | python | def validate(source, **options):
source, options, inspector_settings = _parse_arguments(source, **options)
inspector = Inspector(**inspector_settings)
report = inspector.inspect(source, **options)
return report | [
"def",
"validate",
"(",
"source",
",",
"*",
"*",
"options",
")",
":",
"source",
",",
"options",
",",
"inspector_settings",
"=",
"_parse_arguments",
"(",
"source",
",",
"*",
"*",
"options",
")",
"# Validate",
"inspector",
"=",
"Inspector",
"(",
"*",
"*",
"inspector_settings",
")",
"report",
"=",
"inspector",
".",
"inspect",
"(",
"source",
",",
"*",
"*",
"options",
")",
"return",
"report"
]
| Validates a source file and returns a report.
Args:
source (Union[str, Dict, List[Dict], IO]): The source to be validated.
It can be a local file path, URL, dict, list of dicts, or a
file-like object. If it's a list of dicts and the `preset` is
"nested", each of the dict key's will be used as if it was passed
as a keyword argument to this method.
The file can be a CSV, XLS, JSON, and any other format supported by
`tabulator`_.
Keyword Args:
checks (List[str]): List of checks names to be enabled. They can be
individual check names (e.g. `blank-headers`), or check types (e.g.
`structure`).
skip_checks (List[str]): List of checks names to be skipped. They can
be individual check names (e.g. `blank-headers`), or check types
(e.g. `structure`).
infer_schema (bool): Infer schema if one wasn't passed as an argument.
infer_fields (bool): Infer schema for columns not present in the received schema.
order_fields (bool): Order source columns based on schema fields order.
This is useful when you don't want to validate that the data
columns' order is the same as the schema's.
error_limit (int): Stop validation if the number of errors per table
exceeds this value.
table_limit (int): Maximum number of tables to validate.
row_limit (int): Maximum number of rows to validate.
preset (str): Dataset type could be `table` (default), `datapackage`,
`nested` or custom. Usually, the preset can be inferred from the
source, so you don't need to define it.
Any (Any): Any additional arguments not defined here will be passed on,
depending on the chosen `preset`. If the `preset` is `table`, the
extra arguments will be passed on to `tabulator`_, if it is
`datapackage`, they will be passed on to the `datapackage`_
constructor.
# Table preset
schema (Union[str, Dict, IO]): The Table Schema for the
source.
headers (Union[int, List[str]]): Either the row number that contains
the headers, or a list with them. If the row number is given, ?????
scheme (str): The scheme used to access the source (e.g. `file`,
`http`). This is usually inferred correctly from the source. See
the `tabulator`_ documentation for the list of supported schemes.
format (str): Format of the source data (`csv`, `datapackage`, ...).
This is usually inferred correctly from the source. See the
`tabulator`_ documentation for the list of supported formats.
encoding (str): Encoding of the source.
skip_rows (Union[int, List[Union[int, str]]]): Row numbers or a
string. Rows beginning with the string will be ignored (e.g. '#',
'//').
Raises:
GoodtablesException: Raised on any non-tabular error.
Returns:
dict: The validation report.
.. _tabulator:
https://github.com/frictionlessdata/tabulator-py
.. _tabulator_schemes:
https://github.com/frictionlessdata/tabulator-py
.. _datapackage:
https://github.com/frictionlessdata/datapackage-py | [
"Validates",
"a",
"source",
"file",
"and",
"returns",
"a",
"report",
"."
]
| train | https://github.com/frictionlessdata/goodtables-py/blob/3e7d6891d2f4e342dfafbe0e951e204ccc252a44/goodtables/validate.py#L13-L87 |
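A minimal usage sketch of the `validate` API documented above. The CSV path and the per-error keys `code`/`message` are assumptions for illustration; `valid`, `error-count`, `tables`, and `errors` match the report composed by `Inspector.inspect` below.

```python
# Hedged sketch, not from the source: 'data.csv' and the error
# fields 'code'/'message' are assumptions.
from goodtables import validate  # assumed package-level export

report = validate('data.csv', row_limit=1000, order_fields=True)
print(report['valid'], report['error-count'])  # keys from the inspect() report
for table in report['tables']:
    for error in table['errors']:
        print(error.get('code'), error.get('message'))
```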
frictionlessdata/goodtables-py | goodtables/validate.py | init_datapackage | def init_datapackage(resource_paths):
"""Create tabular data package with resources.
It will also infer the tabular resources' schemas.
Args:
resource_paths (List[str]): Paths to the data package resources.
Returns:
datapackage.Package: The data package.
"""
dp = datapackage.Package({
'name': 'change-me',
'schema': 'tabular-data-package',
})
for path in resource_paths:
dp.infer(path)
return dp | python | def init_datapackage(resource_paths):
dp = datapackage.Package({
'name': 'change-me',
'schema': 'tabular-data-package',
})
for path in resource_paths:
dp.infer(path)
return dp | [
"def",
"init_datapackage",
"(",
"resource_paths",
")",
":",
"dp",
"=",
"datapackage",
".",
"Package",
"(",
"{",
"'name'",
":",
"'change-me'",
",",
"'schema'",
":",
"'tabular-data-package'",
",",
"}",
")",
"for",
"path",
"in",
"resource_paths",
":",
"dp",
".",
"infer",
"(",
"path",
")",
"return",
"dp"
]
| Create tabular data package with resources.
It will also infer the tabular resources' schemas.
Args:
resource_paths (List[str]): Paths to the data package resources.
Returns:
datapackage.Package: The data package. | [
"Create",
"tabular",
"data",
"package",
"with",
"resources",
"."
]
| train | https://github.com/frictionlessdata/goodtables-py/blob/3e7d6891d2f4e342dfafbe0e951e204ccc252a44/goodtables/validate.py#L90-L109 |
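A short usage sketch; the resource paths are hypothetical, and `dp.valid`/`dp.descriptor` are the same `datapackage.Package` attributes the CLI `init` command below relies on.

```python
import goodtables  # the CLI below calls goodtables.init_datapackage directly

dp = goodtables.init_datapackage(['data/users.csv', 'data/orders.csv'])  # hypothetical paths
print(dp.valid)       # the same attribute the CLI command passes to exit()
print(dp.descriptor)  # descriptor with schemas inferred via dp.infer()
```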
frictionlessdata/goodtables-py | goodtables/cli.py | init | def init(paths, output, **kwargs):
"""Init data package from list of files.
It will also infer tabular data's schemas from their contents.
"""
dp = goodtables.init_datapackage(paths)
click.secho(
json_module.dumps(dp.descriptor, indent=4),
file=output
)
exit(dp.valid) | python | def init(paths, output, **kwargs):
dp = goodtables.init_datapackage(paths)
click.secho(
json_module.dumps(dp.descriptor, indent=4),
file=output
)
exit(dp.valid) | [
"def",
"init",
"(",
"paths",
",",
"output",
",",
"*",
"*",
"kwargs",
")",
":",
"dp",
"=",
"goodtables",
".",
"init_datapackage",
"(",
"paths",
")",
"click",
".",
"secho",
"(",
"json_module",
".",
"dumps",
"(",
"dp",
".",
"descriptor",
",",
"indent",
"=",
"4",
")",
",",
"file",
"=",
"output",
")",
"exit",
"(",
"dp",
".",
"valid",
")"
]
| Init data package from list of files.
It will also infer tabular data's schemas from their contents. | [
"Init",
"data",
"package",
"from",
"list",
"of",
"files",
"."
]
| train | https://github.com/frictionlessdata/goodtables-py/blob/3e7d6891d2f4e342dfafbe0e951e204ccc252a44/goodtables/cli.py#L121-L133 |
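A hedged sketch of exercising the command with click's test runner; the import path and the option-free invocation are assumptions (the real command may take extra options, e.g. for the output file).

```python
# Hedged sketch: import path and defaults are assumptions.
from click.testing import CliRunner
from goodtables.cli import init

runner = CliRunner()
result = runner.invoke(init, ['data/users.csv'])  # hypothetical CSV path
print(result.output)  # the inferred data package descriptor as indented JSON
```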
frictionlessdata/goodtables-py | goodtables/inspector.py | _clean_empty | def _clean_empty(d):
"""Remove None values from a dict."""
if not isinstance(d, (dict, list)):
return d
if isinstance(d, list):
return [v for v in (_clean_empty(v) for v in d) if v is not None]
return {
k: v for k, v in
((k, _clean_empty(v)) for k, v in d.items())
if v is not None
} | python | def _clean_empty(d):
if not isinstance(d, (dict, list)):
return d
if isinstance(d, list):
return [v for v in (_clean_empty(v) for v in d) if v is not None]
return {
k: v for k, v in
((k, _clean_empty(v)) for k, v in d.items())
if v is not None
} | [
"def",
"_clean_empty",
"(",
"d",
")",
":",
"if",
"not",
"isinstance",
"(",
"d",
",",
"(",
"dict",
",",
"list",
")",
")",
":",
"return",
"d",
"if",
"isinstance",
"(",
"d",
",",
"list",
")",
":",
"return",
"[",
"v",
"for",
"v",
"in",
"(",
"_clean_empty",
"(",
"v",
")",
"for",
"v",
"in",
"d",
")",
"if",
"v",
"is",
"not",
"None",
"]",
"return",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"(",
"(",
"k",
",",
"_clean_empty",
"(",
"v",
")",
")",
"for",
"k",
",",
"v",
"in",
"d",
".",
"items",
"(",
")",
")",
"if",
"v",
"is",
"not",
"None",
"}"
]
| Remove None values from a dict. | [
"Remove",
"None",
"values",
"from",
"a",
"dict",
"."
]
| train | https://github.com/frictionlessdata/goodtables-py/blob/3e7d6891d2f4e342dfafbe0e951e204ccc252a44/goodtables/inspector.py#L330-L340 |
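To illustrate the recursion: `None` values are dropped at any depth, while empty containers and other falsy values survive. Importing the module-private helper is for demonstration only.

```python
from goodtables.inspector import _clean_empty  # module-private helper

report = {
    'valid': True,
    'preset': None,                              # dropped: value is None
    'tables': [{'errors': [], 'scheme': None}],  # inner None dropped, empty list kept
}
print(_clean_empty(report))
# -> {'valid': True, 'tables': [{'errors': []}]}
```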
frictionlessdata/goodtables-py | goodtables/inspector.py | Inspector.inspect | def inspect(self, source, preset=None, **options):
"""https://github.com/frictionlessdata/goodtables-py#inspector
"""
# Start timer
start = datetime.datetime.now()
# Prepare preset
preset = self.__get_source_preset(source, preset)
if preset == 'nested':
options['presets'] = self.__presets
for s in source:
if s.get('preset') is None:
s['preset'] = self.__get_source_preset(s['source'])
# Prepare tables
preset_func = self.__get_preset(preset)['func']
warnings, tables = preset_func(source, **options)
if len(tables) > self.__table_limit:
warnings.append(
'Dataset inspection has reached %s table(s) limit' %
(self.__table_limit))
tables = tables[:self.__table_limit]
# Collect table reports
table_reports = []
if tables:
tasks = []
pool = ThreadPool(processes=len(tables))
try:
for table in tables:
tasks.append(pool.apply_async(self.__inspect_table, (table,)))
for task in tasks:
table_warnings, table_report = task.get()
warnings.extend(table_warnings)
table_reports.append(table_report)
finally:
pool.terminate()
# Stop timer
stop = datetime.datetime.now()
# Compose report
report = {
'time': round((stop - start).total_seconds(), 3),
'valid': all(item['valid'] for item in table_reports),
'error-count': sum(len(item['errors']) for item in table_reports),
'table-count': len(tables),
'tables': table_reports,
'warnings': warnings,
'preset': preset,
}
return report | python | def inspect(self, source, preset=None, **options):
start = datetime.datetime.now()
preset = self.__get_source_preset(source, preset)
if preset == 'nested':
options['presets'] = self.__presets
for s in source:
if s.get('preset') is None:
s['preset'] = self.__get_source_preset(s['source'])
preset_func = self.__get_preset(preset)['func']
warnings, tables = preset_func(source, **options)
if len(tables) > self.__table_limit:
warnings.append(
'Dataset inspection has reached %s table(s) limit' %
(self.__table_limit))
tables = tables[:self.__table_limit]
table_reports = []
if tables:
tasks = []
pool = ThreadPool(processes=len(tables))
try:
for table in tables:
tasks.append(pool.apply_async(self.__inspect_table, (table,)))
for task in tasks:
table_warnings, table_report = task.get()
warnings.extend(table_warnings)
table_reports.append(table_report)
finally:
pool.terminate()
stop = datetime.datetime.now()
report = {
'time': round((stop - start).total_seconds(), 3),
'valid': all(item['valid'] for item in table_reports),
'error-count': sum(len(item['errors']) for item in table_reports),
'table-count': len(tables),
'tables': table_reports,
'warnings': warnings,
'preset': preset,
}
return report | [
"def",
"inspect",
"(",
"self",
",",
"source",
",",
"preset",
"=",
"None",
",",
"*",
"*",
"options",
")",
":",
"# Start timer",
"start",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"# Prepare preset",
"preset",
"=",
"self",
".",
"__get_source_preset",
"(",
"source",
",",
"preset",
")",
"if",
"preset",
"==",
"'nested'",
":",
"options",
"[",
"'presets'",
"]",
"=",
"self",
".",
"__presets",
"for",
"s",
"in",
"source",
":",
"if",
"s",
".",
"get",
"(",
"'preset'",
")",
"is",
"None",
":",
"s",
"[",
"'preset'",
"]",
"=",
"self",
".",
"__get_source_preset",
"(",
"s",
"[",
"'source'",
"]",
")",
"# Prepare tables",
"preset_func",
"=",
"self",
".",
"__get_preset",
"(",
"preset",
")",
"[",
"'func'",
"]",
"warnings",
",",
"tables",
"=",
"preset_func",
"(",
"source",
",",
"*",
"*",
"options",
")",
"if",
"len",
"(",
"tables",
")",
">",
"self",
".",
"__table_limit",
":",
"warnings",
".",
"append",
"(",
"'Dataset inspection has reached %s table(s) limit'",
"%",
"(",
"self",
".",
"__table_limit",
")",
")",
"tables",
"=",
"tables",
"[",
":",
"self",
".",
"__table_limit",
"]",
"# Collect table reports",
"table_reports",
"=",
"[",
"]",
"if",
"tables",
":",
"tasks",
"=",
"[",
"]",
"pool",
"=",
"ThreadPool",
"(",
"processes",
"=",
"len",
"(",
"tables",
")",
")",
"try",
":",
"for",
"table",
"in",
"tables",
":",
"tasks",
".",
"append",
"(",
"pool",
".",
"apply_async",
"(",
"self",
".",
"__inspect_table",
",",
"(",
"table",
",",
")",
")",
")",
"for",
"task",
"in",
"tasks",
":",
"table_warnings",
",",
"table_report",
"=",
"task",
".",
"get",
"(",
")",
"warnings",
".",
"extend",
"(",
"table_warnings",
")",
"table_reports",
".",
"append",
"(",
"table_report",
")",
"finally",
":",
"pool",
".",
"terminate",
"(",
")",
"# Stop timer",
"stop",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"# Compose report",
"report",
"=",
"{",
"'time'",
":",
"round",
"(",
"(",
"stop",
"-",
"start",
")",
".",
"total_seconds",
"(",
")",
",",
"3",
")",
",",
"'valid'",
":",
"all",
"(",
"item",
"[",
"'valid'",
"]",
"for",
"item",
"in",
"table_reports",
")",
",",
"'error-count'",
":",
"sum",
"(",
"len",
"(",
"item",
"[",
"'errors'",
"]",
")",
"for",
"item",
"in",
"table_reports",
")",
",",
"'table-count'",
":",
"len",
"(",
"tables",
")",
",",
"'tables'",
":",
"table_reports",
",",
"'warnings'",
":",
"warnings",
",",
"'preset'",
":",
"preset",
",",
"}",
"return",
"report"
]
| https://github.com/frictionlessdata/goodtables-py#inspector | [
"https",
":",
"//",
"github",
".",
"com",
"/",
"frictionlessdata",
"/",
"goodtables",
"-",
"py#inspector"
]
| train | https://github.com/frictionlessdata/goodtables-py/blob/3e7d6891d2f4e342dfafbe0e951e204ccc252a44/goodtables/inspector.py#L51-L104 |
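A hedged sketch of calling the inspector with the `nested` preset handled above. The package-level `Inspector` import and the `table_limit` constructor keyword are inferred (the latter from `self.__table_limit`); the CSV paths are hypothetical. The printed keys come straight from the report dict composed at the end of `inspect`.

```python
from goodtables import Inspector  # assumed package-level export

inspector = Inspector(table_limit=5)  # keyword inferred from self.__table_limit
report = inspector.inspect(
    [{'source': 'data/one.csv'}, {'source': 'data/two.csv'}],  # nested-preset dicts
    preset='nested',
)
print(report['valid'], report['table-count'], report['time'])
for warning in report['warnings']:
    print('warning:', warning)
```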
frictionlessdata/goodtables-py | goodtables/cells.py | create_cells | def create_cells(headers, schema_fields, values=None, row_number=None):
"""Create list of cells from headers, fields and values.
Args:
headers (List[str]): The headers values.
schema_fields (List[tableschema.field.Field]): The tableschema
fields.
values (List[Any], optional): The cells values. If not specified,
the created cells will have the same values as their
corresponding headers. This is useful for specifying headers
cells.
If the list has any `None` values, as is the case on empty
cells, the resulting Cell will have an empty string value. If
the `values` list has a different length than the `headers`,
the resulting Cell will have value `None`.
row_number (int, optional): The row number.
Returns:
List[dict]: List of cells.
"""
fillvalue = '_fillvalue'
is_header_row = (values is None)
cells = []
iterator = zip_longest(headers, schema_fields, values or [], fillvalue=fillvalue)
for column_number, (header, field, value) in enumerate(iterator, start=1):
if header == fillvalue:
header = None
elif is_header_row:
value = header
if field == fillvalue:
field = None
if value == fillvalue:
value = None
elif value is None:
value = ''
cell = create_cell(header, value, field, column_number, row_number)
cells.append(cell)
return cells | python | def create_cells(headers, schema_fields, values=None, row_number=None):
fillvalue = '_fillvalue'
is_header_row = (values is None)
cells = []
iterator = zip_longest(headers, schema_fields, values or [], fillvalue=fillvalue)
for column_number, (header, field, value) in enumerate(iterator, start=1):
if header == fillvalue:
header = None
elif is_header_row:
value = header
if field == fillvalue:
field = None
if value == fillvalue:
value = None
elif value is None:
value = ''
cell = create_cell(header, value, field, column_number, row_number)
cells.append(cell)
return cells | [
"def",
"create_cells",
"(",
"headers",
",",
"schema_fields",
",",
"values",
"=",
"None",
",",
"row_number",
"=",
"None",
")",
":",
"fillvalue",
"=",
"'_fillvalue'",
"is_header_row",
"=",
"(",
"values",
"is",
"None",
")",
"cells",
"=",
"[",
"]",
"iterator",
"=",
"zip_longest",
"(",
"headers",
",",
"schema_fields",
",",
"values",
"or",
"[",
"]",
",",
"fillvalue",
"=",
"fillvalue",
")",
"for",
"column_number",
",",
"(",
"header",
",",
"field",
",",
"value",
")",
"in",
"enumerate",
"(",
"iterator",
",",
"start",
"=",
"1",
")",
":",
"if",
"header",
"==",
"fillvalue",
":",
"header",
"=",
"None",
"elif",
"is_header_row",
":",
"value",
"=",
"header",
"if",
"field",
"==",
"fillvalue",
":",
"field",
"=",
"None",
"if",
"value",
"==",
"fillvalue",
":",
"value",
"=",
"None",
"elif",
"value",
"is",
"None",
":",
"value",
"=",
"''",
"cell",
"=",
"create_cell",
"(",
"header",
",",
"value",
",",
"field",
",",
"column_number",
",",
"row_number",
")",
"cells",
".",
"append",
"(",
"cell",
")",
"return",
"cells"
]
| Create list of cells from headers, fields and values.
Args:
headers (List[str]): The headers values.
schema_fields (List[tableschema.field.Field]): The tableschema
fields.
values (List[Any], optional): The cells values. If not specified,
the created cells will have the same values as their
corresponding headers. This is useful for specifying headers
cells.
If the list has any `None` values, as is the case on empty
cells, the resulting Cell will have an empty string value. If
the `values` list has a different length than the `headers`,
the resulting Cell will have value `None`.
row_number (int, optional): The row number.
Returns:
List[dict]: List of cells. | [
"Create",
"list",
"of",
"cells",
"from",
"headers",
"fields",
"and",
"values",
"."
]
| train | https://github.com/frictionlessdata/goodtables-py/blob/3e7d6891d2f4e342dfafbe0e951e204ccc252a44/goodtables/cells.py#L4-L44 |
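A small sketch of the padding rules documented above: header cells take their values from the headers, explicit `None` values become empty strings, and a row shorter than the headers is padded with `None`.

```python
from goodtables.cells import create_cells

headers = ['id', 'name', 'age']
fields = [None, None, None]  # no table schema fields in this sketch

header_cells = create_cells(headers, fields)  # header row: values default to the headers
row_cells = create_cells(headers, fields, values=['1', None], row_number=2)
# Resulting cell values: '1' (given), '' (None becomes an empty string),
# and None for the third cell, because the row is shorter than the headers.
```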
unixfreak0037/officeparser | officeparser.py | CompoundBinaryFile.__impl_read_chain | def __impl_read_chain(self, start, read_sector_f, read_fat_f):
"""Returns the entire contents of a chain starting at the given sector."""
sector = start
check = [ sector ] # keep a list of sectors we've already read
buffer = StringIO()
while sector != ENDOFCHAIN:
buffer.write(read_sector_f(sector))
next = read_fat_f(sector)
if next in check:
logging.error('infinite loop detected at {0} to {1} starting at {2}'.format(
                sector, next, start))
return buffer.getvalue()
check.append(next)
sector = next
return buffer.getvalue() | python | def __impl_read_chain(self, start, read_sector_f, read_fat_f):
sector = start
check = [ sector ]
buffer = StringIO()
while sector != ENDOFCHAIN:
buffer.write(read_sector_f(sector))
next = read_fat_f(sector)
if next in check:
logging.error('infinite loop detected at {0} to {1} starting at {2}'.format(
                sector, next, start))
return buffer.getvalue()
check.append(next)
sector = next
return buffer.getvalue() | [
"def",
"__impl_read_chain",
"(",
"self",
",",
"start",
",",
"read_sector_f",
",",
"read_fat_f",
")",
":",
"sector",
"=",
"start",
"check",
"=",
"[",
"sector",
"]",
"# keep a list of sectors we've already read",
"buffer",
"=",
"StringIO",
"(",
")",
"while",
"sector",
"!=",
"ENDOFCHAIN",
":",
"buffer",
".",
"write",
"(",
"read_sector_f",
"(",
"sector",
")",
")",
"next",
"=",
"read_fat_f",
"(",
"sector",
")",
"if",
"next",
"in",
"check",
":",
"logging",
".",
"error",
"(",
"'infinite loop detected at {0} to {1} starting at {2}'",
".",
"format",
"(",
"sector",
",",
"next",
",",
"sector_start",
")",
")",
"return",
"buffer",
".",
"getvalue",
"(",
")",
"check",
".",
"append",
"(",
"next",
")",
"sector",
"=",
"next",
"return",
"buffer",
".",
"getvalue",
"(",
")"
]
| Returns the entire contents of a chain starting at the given sector. | [
"Returns",
"the",
"entire",
"contents",
"of",
"a",
"chain",
"starting",
"at",
"the",
"given",
"sector",
"."
]
| train | https://github.com/unixfreak0037/officeparser/blob/42c2d40372fe271f2039ca1adc145d2aef8c9545/officeparser.py#L247-L261 |
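A standalone sketch of the same FAT-chain walk, independent of the class. The `ENDOFCHAIN` value follows the Compound File Binary spec and is assumed to match the module's constant; a set replaces the original's list scan for the cycle check, keeping each lookup O(1).

```python
from io import BytesIO

ENDOFCHAIN = 0xFFFFFFFE  # CFB sentinel; assumed to match the module's constant

def read_chain(start, read_sector, read_fat):
    """Follow a FAT chain from `start`, concatenating sector payloads."""
    sector, seen, buf = start, {start}, BytesIO()
    while sector != ENDOFCHAIN:
        buf.write(read_sector(sector))
        sector = read_fat(sector)
        if sector in seen:  # cycle: the chain links back onto itself
            break
        seen.add(sector)
    return buf.getvalue()

# Toy FAT: sector 0 -> 1 -> end; each sector holds 4 bytes.
fat = {0: 1, 1: ENDOFCHAIN}
data = {0: b'abcd', 1: b'efgh'}
assert read_chain(0, data.__getitem__, fat.__getitem__) == b'abcdefgh'
```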