id (int32, 0-252k) | repo (string, 7-55 chars) | path (string, 4-127 chars) | func_name (string, 1-88 chars) | original_string (string, 75-19.8k chars) | language (1 value: python) | code (string, 75-19.8k chars) | code_tokens (sequence) | docstring (string, 3-17.3k chars) | docstring_tokens (sequence) | sha (string, 40 chars) | url (string, 87-242 chars) |
---|---|---|---|---|---|---|---|---|---|---|---|
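Each row pairs one Python function with its docstring, both tokenizations, and provenance (the commit `sha` and a GitHub `url`). As a quick way to inspect rows with this schema, a sketch using the Hugging Face `datasets` library follows; the dataset identifier is a placeholder, not the corpus's real name.

```python
from datasets import load_dataset

# "user/this-corpus" is a hypothetical identifier; point it at wherever
# this table's dataset is actually published.
ds = load_dataset("user/this-corpus", split="train")

row = ds[0]
print(row["func_name"], "from", row["repo"])   # function and source repo
print(row["docstring"][:80])                   # start of the paired docstring
print(len(row["code_tokens"]), "code tokens")  # tokenized code length
```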
250,700 | edeposit/edeposit.amqp.harvester | src/edeposit/amqp/harvester/scrappers/cpress_cz.py | _parse_from_table | def _parse_from_table(html_chunk, what):
"""
Go through table data in `html_chunk` and try to locate content of the
neighbor cell of the cell containing `what`.
Returns:
str: Table data or None.
"""
ean_tag = html_chunk.find("tr", fn=must_contain("th", what, "td"))
if not ean_tag:
return None
return get_first_content(ean_tag[0].find("td")) | python | def _parse_from_table(html_chunk, what):
"""
Go through table data in `html_chunk` and try to locate content of the
neighbor cell of the cell containing `what`.
Returns:
str: Table data or None.
"""
ean_tag = html_chunk.find("tr", fn=must_contain("th", what, "td"))
if not ean_tag:
return None
return get_first_content(ean_tag[0].find("td")) | [
"def",
"_parse_from_table",
"(",
"html_chunk",
",",
"what",
")",
":",
"ean_tag",
"=",
"html_chunk",
".",
"find",
"(",
"\"tr\"",
",",
"fn",
"=",
"must_contain",
"(",
"\"th\"",
",",
"what",
",",
"\"td\"",
")",
")",
"if",
"not",
"ean_tag",
":",
"return",
"None",
"return",
"get_first_content",
"(",
"ean_tag",
"[",
"0",
"]",
".",
"find",
"(",
"\"td\"",
")",
")"
] | Go through table data in `html_chunk` and try to locate content of the
neighbor cell of the cell containing `what`.
Returns:
str: Table data or None. | [
"Go",
"thru",
"table",
"data",
"in",
"html_chunk",
"and",
"try",
"to",
"locate",
"content",
"of",
"the",
"neighbor",
"cell",
"of",
"the",
"cell",
"containing",
"what",
"."
] | 38cb87ccdf6bf2f550a98460d0a329c4b9dc8e2e | https://github.com/edeposit/edeposit.amqp.harvester/blob/38cb87ccdf6bf2f550a98460d0a329c4b9dc8e2e/src/edeposit/amqp/harvester/scrappers/cpress_cz.py#L150-L163 |
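A minimal usage sketch for `_parse_from_table`, assuming `must_contain` and `get_first_content` are the helper functions imported elsewhere in `cpress_cz.py` (their semantics are inferred from this call site):

```python
import dhtmlparser

# Hypothetical product-detail fragment: the scraper reads the <td> that
# neighbors the <th> whose text matches `what`.
dom = dhtmlparser.parseString(
    "<table><tr><th>EAN:</th><td>9788025112345</td></tr></table>"
)
print(_parse_from_table(dom, "EAN:"))  # expected: "9788025112345"
```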
250,701 | edeposit/edeposit.amqp.harvester | src/edeposit/amqp/harvester/scrappers/cpress_cz.py | get_publications | def get_publications():
"""
Get list of publications offered by cpress.cz.
Returns:
list: List of :class:`.Publication` objects.
"""
data = DOWNER.download(URL)
dom = dhtmlparser.parseString(
handle_encodnig(data)
)
book_list = dom.find("div", {"class": "polozka"})
books = []
for book in book_list:
books.append(
_process_book(book)
)
return books | python | def get_publications():
"""
Get list of publications offered by cpress.cz.
Returns:
list: List of :class:`.Publication` objects.
"""
data = DOWNER.download(URL)
dom = dhtmlparser.parseString(
handle_encodnig(data)
)
book_list = dom.find("div", {"class": "polozka"})
books = []
for book in book_list:
books.append(
_process_book(book)
)
return books | [
"def",
"get_publications",
"(",
")",
":",
"data",
"=",
"DOWNER",
".",
"download",
"(",
"URL",
")",
"dom",
"=",
"dhtmlparser",
".",
"parseString",
"(",
"handle_encodnig",
"(",
"data",
")",
")",
"book_list",
"=",
"dom",
".",
"find",
"(",
"\"div\"",
",",
"{",
"\"class\"",
":",
"\"polozka\"",
"}",
")",
"books",
"=",
"[",
"]",
"for",
"book",
"in",
"book_list",
":",
"books",
".",
"append",
"(",
"_process_book",
"(",
"book",
")",
")",
"return",
"books"
] | Get list of publications offered by cpress.cz.
Returns:
list: List of :class:`.Publication` objects. | [
"Get",
"list",
"of",
"publication",
"offered",
"by",
"cpress",
".",
"cz",
"."
] | 38cb87ccdf6bf2f550a98460d0a329c4b9dc8e2e | https://github.com/edeposit/edeposit.amqp.harvester/blob/38cb87ccdf6bf2f550a98460d0a329c4b9dc8e2e/src/edeposit/amqp/harvester/scrappers/cpress_cz.py#L267-L287 |
250,702 | b3j0f/conf | b3j0f/conf/model/conf.py | Configuration.resolve | def resolve(
self, configurable=None, scope=None, safe=None, besteffort=None
):
"""Resolve all parameters.
:param Configurable configurable: configurable to use for foreign
parameter resolution.
:param dict scope: variables to use for parameter expression evaluation.
:param bool safe: safe execution (remove builtins functions).
:raises: Parameter.Error for any raised exception.
"""
if scope is None:
scope = self.scope
if safe is None:
safe = self.safe
if besteffort is None:
besteffort = self.besteffort
for category in self.values():
for param in category.values():
param.resolve(
configurable=configurable, conf=self,
scope=scope, safe=safe, besteffort=besteffort
) | python | def resolve(
self, configurable=None, scope=None, safe=None, besteffort=None
):
"""Resolve all parameters.
:param Configurable configurable: configurable to use for foreign
parameter resolution.
:param dict scope: variables to use for parameter expression evaluation.
:param bool safe: safe execution (remove builtins functions).
:raises: Parameter.Error for any raised exception.
"""
if scope is None:
scope = self.scope
if safe is None:
safe = self.safe
if besteffort is None:
besteffort = self.besteffort
for category in self.values():
for param in category.values():
param.resolve(
configurable=configurable, conf=self,
scope=scope, safe=safe, besteffort=besteffort
) | [
"def",
"resolve",
"(",
"self",
",",
"configurable",
"=",
"None",
",",
"scope",
"=",
"None",
",",
"safe",
"=",
"None",
",",
"besteffort",
"=",
"None",
")",
":",
"if",
"scope",
"is",
"None",
":",
"scope",
"=",
"self",
".",
"scope",
"if",
"safe",
"is",
"None",
":",
"safe",
"=",
"self",
".",
"safe",
"if",
"besteffort",
"is",
"None",
":",
"besteffort",
"=",
"self",
".",
"besteffort",
"for",
"category",
"in",
"self",
".",
"values",
"(",
")",
":",
"for",
"param",
"in",
"category",
".",
"values",
"(",
")",
":",
"param",
".",
"resolve",
"(",
"configurable",
"=",
"configurable",
",",
"conf",
"=",
"self",
",",
"scope",
"=",
"scope",
",",
"safe",
"=",
"safe",
",",
"besteffort",
"=",
"besteffort",
")"
] | Resolve all parameters.
:param Configurable configurable: configurable to use for foreign
parameter resolution.
:param dict scope: variables to use for parameter expression evaluation.
:param bool safe: safe execution (remove builtins functions).
:raises: Parameter.Error for any raised exception. | [
"Resolve",
"all",
"parameters",
"."
] | 18dd6d5d6560f9b202793739e2330a2181163511 | https://github.com/b3j0f/conf/blob/18dd6d5d6560f9b202793739e2330a2181163511/b3j0f/conf/model/conf.py#L62-L90 |
250,703 | b3j0f/conf | b3j0f/conf/model/conf.py | Configuration.param | def param(self, pname, cname=None, history=0):
"""Get parameter from a category and history.
:param str pname: parameter name.
:param str cname: category name. Default is the last registered.
:param int history: historical param value from specific category or
final parameter value if cname is not given. For example, if history
equals 1 and cname is None, result is the value defined just before
the last parameter value if it exists. If cname is given, the result
is the parameter value defined before the category cname.
:rtype: Parameter
:raises: NameError if pname or cname do not exist."""
result = None
category = None
categories = [] # list of categories containing input parameter name
for cat in self.values():
if pname in cat:
categories.append(cat)
if cname == cat.name:
break
if cname is not None and (
not categories or categories[-1].name != cname
):
raise NameError('Category {0} does not exist.'.format(cname))
categories = categories[:max(1, len(categories) - history)]
for category in categories:
if pname in category:
if result is None:
result = category[pname].copy()
else:
result.update(category[pname])
return result | python | def param(self, pname, cname=None, history=0):
"""Get parameter from a category and history.
:param str pname: parameter name.
:param str cname: category name. Default is the last registered.
:param int history: historical param value from specific category or
final parameter value if cname is not given. For example, if history
equals 1 and cname is None, result is the value defined just before
the last parameter value if it exists. If cname is given, the result
is the parameter value defined before the category cname.
:rtype: Parameter
:raises: NameError if pname or cname do not exist."""
result = None
category = None
categories = [] # list of categories containing input parameter name
for cat in self.values():
if pname in cat:
categories.append(cat)
if cname == cat.name:
break
if cname is not None and (
not categories or categories[-1].name != cname
):
raise NameError('Category {0} does not exist.'.format(cname))
categories = categories[:max(1, len(categories) - history)]
for category in categories:
if pname in category:
if result is None:
result = category[pname].copy()
else:
result.update(category[pname])
return result | [
"def",
"param",
"(",
"self",
",",
"pname",
",",
"cname",
"=",
"None",
",",
"history",
"=",
"0",
")",
":",
"result",
"=",
"None",
"category",
"=",
"None",
"categories",
"=",
"[",
"]",
"# list of categories containing input parameter name",
"for",
"cat",
"in",
"self",
".",
"values",
"(",
")",
":",
"if",
"pname",
"in",
"cat",
":",
"categories",
".",
"append",
"(",
"cat",
")",
"if",
"cname",
"==",
"cat",
".",
"name",
":",
"break",
"if",
"cname",
"is",
"not",
"None",
"and",
"(",
"not",
"categories",
"or",
"categories",
"[",
"-",
"1",
"]",
".",
"name",
"!=",
"cname",
")",
":",
"raise",
"NameError",
"(",
"'Category {0} does not exist.'",
".",
"format",
"(",
"cname",
")",
")",
"categories",
"=",
"categories",
"[",
":",
"max",
"(",
"1",
",",
"len",
"(",
"categories",
")",
"-",
"history",
")",
"]",
"for",
"category",
"in",
"categories",
":",
"if",
"pname",
"in",
"category",
":",
"if",
"result",
"is",
"None",
":",
"result",
"=",
"category",
"[",
"pname",
"]",
".",
"copy",
"(",
")",
"else",
":",
"result",
".",
"update",
"(",
"category",
"[",
"pname",
"]",
")",
"return",
"result"
] | Get parameter from a category and history.
:param str pname: parameter name.
:param str cname: category name. Default is the last registered.
:param int history: historical param value from specific category or
final parameter value if cname is not given. For example, if history
equals 1 and cname is None, result is the value defined just before
the last parameter value if it exists. If cname is given, the result
is the parameter value defined before the category cname.
:rtype: Parameter
:raises: NameError if pname or cname do not exist. | [
"Get",
"parameter",
"from",
"a",
"category",
"and",
"history",
"."
] | 18dd6d5d6560f9b202793739e2330a2181163511 | https://github.com/b3j0f/conf/blob/18dd6d5d6560f9b202793739e2330a2181163511/b3j0f/conf/model/conf.py#L92-L135 |
250,704 | tomekwojcik/flask-htauth | flask_htauth/htpasswd.py | apache_md5crypt | def apache_md5crypt(password, salt, magic='$apr1$'):
"""
Calculates the Apache-style MD5 hash of a password
"""
password = password.encode('utf-8')
salt = salt.encode('utf-8')
magic = magic.encode('utf-8')
m = md5()
m.update(password + magic + salt)
mixin = md5(password + salt + password).digest()
for i in range(0, len(password)):
m.update(mixin[i % 16])
i = len(password)
while i:
if i & 1:
m.update('\x00')
else:
m.update(password[0])
i >>= 1
final = m.digest()
for i in range(1000):
m2 = md5()
if i & 1:
m2.update(password)
else:
m2.update(final)
if i % 3:
m2.update(salt)
if i % 7:
m2.update(password)
if i & 1:
m2.update(final)
else:
m2.update(password)
final = m2.digest()
itoa64 = './0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
rearranged = ''
seq = ((0, 6, 12), (1, 7, 13), (2, 8, 14), (3, 9, 15), (4, 10, 5))
for a, b, c in seq:
v = ord(final[a]) << 16 | ord(final[b]) << 8 | ord(final[c])
for i in range(4):
rearranged += itoa64[v & 0x3f]
v >>= 6
v = ord(final[11])
for i in range(2):
rearranged += itoa64[v & 0x3f]
v >>= 6
return magic + salt + '$' + rearranged | python | def apache_md5crypt(password, salt, magic='$apr1$'):
"""
Calculates the Apache-style MD5 hash of a password
"""
password = password.encode('utf-8')
salt = salt.encode('utf-8')
magic = magic.encode('utf-8')
m = md5()
m.update(password + magic + salt)
mixin = md5(password + salt + password).digest()
for i in range(0, len(password)):
m.update(mixin[i % 16])
i = len(password)
while i:
if i & 1:
m.update('\x00')
else:
m.update(password[0])
i >>= 1
final = m.digest()
for i in range(1000):
m2 = md5()
if i & 1:
m2.update(password)
else:
m2.update(final)
if i % 3:
m2.update(salt)
if i % 7:
m2.update(password)
if i & 1:
m2.update(final)
else:
m2.update(password)
final = m2.digest()
itoa64 = './0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
rearranged = ''
seq = ((0, 6, 12), (1, 7, 13), (2, 8, 14), (3, 9, 15), (4, 10, 5))
for a, b, c in seq:
v = ord(final[a]) << 16 | ord(final[b]) << 8 | ord(final[c])
for i in range(4):
rearranged += itoa64[v & 0x3f]
v >>= 6
v = ord(final[11])
for i in range(2):
rearranged += itoa64[v & 0x3f]
v >>= 6
return magic + salt + '$' + rearranged | [
"def",
"apache_md5crypt",
"(",
"password",
",",
"salt",
",",
"magic",
"=",
"'$apr1$'",
")",
":",
"password",
"=",
"password",
".",
"encode",
"(",
"'utf-8'",
")",
"salt",
"=",
"salt",
".",
"encode",
"(",
"'utf-8'",
")",
"magic",
"=",
"magic",
".",
"encode",
"(",
"'utf-8'",
")",
"m",
"=",
"md5",
"(",
")",
"m",
".",
"update",
"(",
"password",
"+",
"magic",
"+",
"salt",
")",
"mixin",
"=",
"md5",
"(",
"password",
"+",
"salt",
"+",
"password",
")",
".",
"digest",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"password",
")",
")",
":",
"m",
".",
"update",
"(",
"mixin",
"[",
"i",
"%",
"16",
"]",
")",
"i",
"=",
"len",
"(",
"password",
")",
"while",
"i",
":",
"if",
"i",
"&",
"1",
":",
"m",
".",
"update",
"(",
"'\\x00'",
")",
"else",
":",
"m",
".",
"update",
"(",
"password",
"[",
"0",
"]",
")",
"i",
">>=",
"1",
"final",
"=",
"m",
".",
"digest",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"1000",
")",
":",
"m2",
"=",
"md5",
"(",
")",
"if",
"i",
"&",
"1",
":",
"m2",
".",
"update",
"(",
"password",
")",
"else",
":",
"m2",
".",
"update",
"(",
"final",
")",
"if",
"i",
"%",
"3",
":",
"m2",
".",
"update",
"(",
"salt",
")",
"if",
"i",
"%",
"7",
":",
"m2",
".",
"update",
"(",
"password",
")",
"if",
"i",
"&",
"1",
":",
"m2",
".",
"update",
"(",
"final",
")",
"else",
":",
"m2",
".",
"update",
"(",
"password",
")",
"final",
"=",
"m2",
".",
"digest",
"(",
")",
"itoa64",
"=",
"'./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'",
"rearranged",
"=",
"''",
"seq",
"=",
"(",
"(",
"0",
",",
"6",
",",
"12",
")",
",",
"(",
"1",
",",
"7",
",",
"13",
")",
",",
"(",
"2",
",",
"8",
",",
"14",
")",
",",
"(",
"3",
",",
"9",
",",
"15",
")",
",",
"(",
"4",
",",
"10",
",",
"5",
")",
")",
"for",
"a",
",",
"b",
",",
"c",
"in",
"seq",
":",
"v",
"=",
"ord",
"(",
"final",
"[",
"a",
"]",
")",
"<<",
"16",
"|",
"ord",
"(",
"final",
"[",
"b",
"]",
")",
"<<",
"8",
"|",
"ord",
"(",
"final",
"[",
"c",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"4",
")",
":",
"rearranged",
"+=",
"itoa64",
"[",
"v",
"&",
"0x3f",
"]",
"v",
">>=",
"6",
"v",
"=",
"ord",
"(",
"final",
"[",
"11",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"2",
")",
":",
"rearranged",
"+=",
"itoa64",
"[",
"v",
"&",
"0x3f",
"]",
"v",
">>=",
"6",
"return",
"magic",
"+",
"salt",
"+",
"'$'",
"+",
"rearranged"
] | Calculates the Apache-style MD5 hash of a password | [
"Calculates",
"the",
"Apache",
"-",
"style",
"MD5",
"hash",
"of",
"a",
"password"
] | bb89bee3fa7d88de3147ae338048624e01de710b | https://github.com/tomekwojcik/flask-htauth/blob/bb89bee3fa7d88de3147ae338048624e01de710b/flask_htauth/htpasswd.py#L10-L70 |
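A usage sketch for checking a password against an `$apr1$` htpasswd entry. As written, the function is Python 2 only (it calls `ord()` on digest bytes and feeds `str` literals to `md5.update`); the stored hash below is a placeholder, not a real digest.

```python
from flask_htauth.htpasswd import apache_md5crypt  # Python 2 module

stored = '$apr1$mysalt$xxxxxxxxxxxxxxxxxxxxxx'  # placeholder htpasswd entry
salt = stored.split('$')[2]                      # field between magic and hash
candidate = apache_md5crypt('secret', salt)
print(candidate == stored)                       # True iff 'secret' matches
```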
250,705 | jut-io/jut-python-tools | jut/api/data_engine.py | get_juttle_data_url | def get_juttle_data_url(deployment_name,
token_manager=None,
app_url=defaults.APP_URL):
"""
return the juttle data url
"""
return get_data_url(deployment_name,
endpoint_type='juttle',
app_url=app_url,
token_manager=token_manager) | python | def get_juttle_data_url(deployment_name,
token_manager=None,
app_url=defaults.APP_URL):
"""
return the juttle data url
"""
return get_data_url(deployment_name,
endpoint_type='juttle',
app_url=app_url,
token_manager=token_manager) | [
"def",
"get_juttle_data_url",
"(",
"deployment_name",
",",
"token_manager",
"=",
"None",
",",
"app_url",
"=",
"defaults",
".",
"APP_URL",
")",
":",
"return",
"get_data_url",
"(",
"deployment_name",
",",
"endpoint_type",
"=",
"'juttle'",
",",
"app_url",
"=",
"app_url",
",",
"token_manager",
"=",
"token_manager",
")"
] | return the juttle data url | [
"return",
"the",
"juttle",
"data",
"url"
] | 65574d23f51a7bbced9bb25010d02da5ca5d906f | https://github.com/jut-io/jut-python-tools/blob/65574d23f51a7bbced9bb25010d02da5ca5d906f/jut/api/data_engine.py#L79-L89 |
250,706 | jut-io/jut-python-tools | jut/api/data_engine.py | get_import_data_url | def get_import_data_url(deployment_name,
token_manager=None,
app_url=defaults.APP_URL):
"""
return the import data url
"""
return get_data_url(deployment_name,
endpoint_type='http-import',
app_url=app_url,
token_manager=token_manager) | python | def get_import_data_url(deployment_name,
token_manager=None,
app_url=defaults.APP_URL):
"""
return the import data url
"""
return get_data_url(deployment_name,
endpoint_type='http-import',
app_url=app_url,
token_manager=token_manager) | [
"def",
"get_import_data_url",
"(",
"deployment_name",
",",
"token_manager",
"=",
"None",
",",
"app_url",
"=",
"defaults",
".",
"APP_URL",
")",
":",
"return",
"get_data_url",
"(",
"deployment_name",
",",
"endpoint_type",
"=",
"'http-import'",
",",
"app_url",
"=",
"app_url",
",",
"token_manager",
"=",
"token_manager",
")"
] | return the import data url | [
"return",
"the",
"import",
"data",
"url"
] | 65574d23f51a7bbced9bb25010d02da5ca5d906f | https://github.com/jut-io/jut-python-tools/blob/65574d23f51a7bbced9bb25010d02da5ca5d906f/jut/api/data_engine.py#L92-L102 |
250,707 | jut-io/jut-python-tools | jut/api/data_engine.py | __wss_connect | def __wss_connect(data_url,
token_manager,
job_id=None):
"""
Establish the websocket connection to the data engine. When job_id is
provided we're basically establishing a websocket to an existing
program that was already started using the jobs API
job_id: job id of a running program
"""
url = '%s/api/v1/juttle/channel' % data_url.replace('https://', 'wss://')
token_obj = {
"accessToken": token_manager.get_access_token()
}
if job_id != None:
token_obj['job_id'] = job_id
if is_debug_enabled():
debug("connecting to %s", url)
websocket = create_connection(url)
websocket.settimeout(10)
if is_debug_enabled():
debug("sent %s", json.dumps(token_obj))
websocket.send(json.dumps(token_obj))
return websocket | python | def __wss_connect(data_url,
token_manager,
job_id=None):
"""
Establish the websocket connection to the data engine. When job_id is
provided we're basically establishing a websocket to an existing
program that was already started using the jobs API
job_id: job id of a running program
"""
url = '%s/api/v1/juttle/channel' % data_url.replace('https://', 'wss://')
token_obj = {
"accessToken": token_manager.get_access_token()
}
if job_id != None:
token_obj['job_id'] = job_id
if is_debug_enabled():
debug("connecting to %s", url)
websocket = create_connection(url)
websocket.settimeout(10)
if is_debug_enabled():
debug("sent %s", json.dumps(token_obj))
websocket.send(json.dumps(token_obj))
return websocket | [
"def",
"__wss_connect",
"(",
"data_url",
",",
"token_manager",
",",
"job_id",
"=",
"None",
")",
":",
"url",
"=",
"'%s/api/v1/juttle/channel'",
"%",
"data_url",
".",
"replace",
"(",
"'https://'",
",",
"'wss://'",
")",
"token_obj",
"=",
"{",
"\"accessToken\"",
":",
"token_manager",
".",
"get_access_token",
"(",
")",
"}",
"if",
"job_id",
"!=",
"None",
":",
"token_obj",
"[",
"'job_id'",
"]",
"=",
"job_id",
"if",
"is_debug_enabled",
"(",
")",
":",
"debug",
"(",
"\"connecting to %s\"",
",",
"url",
")",
"websocket",
"=",
"create_connection",
"(",
"url",
")",
"websocket",
".",
"settimeout",
"(",
"10",
")",
"if",
"is_debug_enabled",
"(",
")",
":",
"debug",
"(",
"\"sent %s\"",
",",
"json",
".",
"dumps",
"(",
"token_obj",
")",
")",
"websocket",
".",
"send",
"(",
"json",
".",
"dumps",
"(",
"token_obj",
")",
")",
"return",
"websocket"
] | Establish the websocket connection to the data engine. When job_id is
provided we're basically establishing a websocket to an existing
program that was already started using the jobs API
job_id: job id of a running program | [
"Establish",
"the",
"websocket",
"connection",
"to",
"the",
"data",
"engine",
".",
"When",
"job_id",
"is",
"provided",
"we",
"re",
"basically",
"establishing",
"a",
"websocket",
"to",
"an",
"existing",
"program",
"that",
"was",
"already",
"started",
"using",
"the",
"jobs",
"API"
] | 65574d23f51a7bbced9bb25010d02da5ca5d906f | https://github.com/jut-io/jut-python-tools/blob/65574d23f51a7bbced9bb25010d02da5ca5d906f/jut/api/data_engine.py#L124-L153 |
250,708 | jut-io/jut-python-tools | jut/api/data_engine.py | connect_job | def connect_job(job_id,
deployment_name,
token_manager=None,
app_url=defaults.APP_URL,
persist=False,
websocket=None,
data_url=None):
"""
connect to a running Juttle program by job_id
"""
if data_url == None:
data_url = get_data_url_for_job(job_id,
deployment_name,
token_manager=token_manager,
app_url=app_url)
if websocket == None:
websocket = __wss_connect(data_url,
token_manager,
job_id=job_id)
pong = json.dumps({
'pong': True
})
if not persist:
job_finished = False
while not job_finished:
try:
data = websocket.recv()
if data:
payload = json.loads(data)
if is_debug_enabled():
printable_payload = dict(payload)
if 'points' in payload:
# don't want to print out all the outputs when in
# debug mode
del printable_payload['points']
printable_payload['points'] = 'NOT SHOWN'
debug('received %s' % json.dumps(printable_payload))
if 'ping' in payload.keys():
# ping/pong (ie heartbeat) mechanism
websocket.send(pong)
if is_debug_enabled():
debug('sent %s' % json.dumps(pong))
if 'job_end' in payload.keys() and payload['job_end'] == True:
job_finished = True
if token_manager.is_access_token_expired():
debug('refreshing access token')
token_obj = {
"accessToken": token_manager.get_access_token()
}
# refresh authentication token
websocket.send(json.dumps(token_obj))
if 'error' in payload:
if payload['error'] == 'NONEXISTENT-JOB':
raise JutException('Job "%s" no longer running' % job_id)
# return all channel messages
yield payload
else:
debug('payload was "%s", forcing websocket reconnect' % data)
raise IOError()
except IOError:
if is_debug_enabled():
traceback.print_exc()
#
# We'll retry for just under 30s since internally we stop
# running non persistent programs after 30s of not heartbeating
# with the client
#
retry = 1
while retry <= 5:
try:
debug('network error reconnecting to job %s, '
'try %s of 5' % (job_id, retry))
websocket = __wss_connect(data_url, token_manager, job_id=job_id)
break
except socket.error:
if is_debug_enabled():
traceback.print_exc()
retry += 1
time.sleep(5)
debug('network error reconnecting to job %s, '
'try %s of 5' % (job_id, retry))
websocket = __wss_connect(data_url, token_manager, job_id=job_id)
websocket.close() | python | def connect_job(job_id,
deployment_name,
token_manager=None,
app_url=defaults.APP_URL,
persist=False,
websocket=None,
data_url=None):
"""
connect to a running Juttle program by job_id
"""
if data_url == None:
data_url = get_data_url_for_job(job_id,
deployment_name,
token_manager=token_manager,
app_url=app_url)
if websocket == None:
websocket = __wss_connect(data_url,
token_manager,
job_id=job_id)
pong = json.dumps({
'pong': True
})
if not persist:
job_finished = False
while not job_finished:
try:
data = websocket.recv()
if data:
payload = json.loads(data)
if is_debug_enabled():
printable_payload = dict(payload)
if 'points' in payload:
# don't want to print out all the outputs when in
# debug mode
del printable_payload['points']
printable_payload['points'] = 'NOT SHOWN'
debug('received %s' % json.dumps(printable_payload))
if 'ping' in payload.keys():
# ping/pong (ie heartbeat) mechanism
websocket.send(pong)
if is_debug_enabled():
debug('sent %s' % json.dumps(pong))
if 'job_end' in payload.keys() and payload['job_end'] == True:
job_finished = True
if token_manager.is_access_token_expired():
debug('refreshing access token')
token_obj = {
"accessToken": token_manager.get_access_token()
}
# refresh authentication token
websocket.send(json.dumps(token_obj))
if 'error' in payload:
if payload['error'] == 'NONEXISTENT-JOB':
raise JutException('Job "%s" no longer running' % job_id)
# return all channel messages
yield payload
else:
debug('payload was "%s", forcing websocket reconnect' % data)
raise IOError()
except IOError:
if is_debug_enabled():
traceback.print_exc()
#
# We'll retry for just under 30s since internally we stop
# running non persistent programs after 30s of not heartbeating
# with the client
#
retry = 1
while retry <= 5:
try:
debug('network error reconnecting to job %s, '
'try %s of 5' % (job_id, retry))
websocket = __wss_connect(data_url, token_manager, job_id=job_id)
break
except socket.error:
if is_debug_enabled():
traceback.print_exc()
retry += 1
time.sleep(5)
debug('network error reconnecting to job %s, '
'try %s of 5' % (job_id, retry))
websocket = __wss_connect(data_url, token_manager, job_id=job_id)
websocket.close() | [
"def",
"connect_job",
"(",
"job_id",
",",
"deployment_name",
",",
"token_manager",
"=",
"None",
",",
"app_url",
"=",
"defaults",
".",
"APP_URL",
",",
"persist",
"=",
"False",
",",
"websocket",
"=",
"None",
",",
"data_url",
"=",
"None",
")",
":",
"if",
"data_url",
"==",
"None",
":",
"data_url",
"=",
"get_data_url_for_job",
"(",
"job_id",
",",
"deployment_name",
",",
"token_manager",
"=",
"token_manager",
",",
"app_url",
"=",
"app_url",
")",
"if",
"websocket",
"==",
"None",
":",
"websocket",
"=",
"__wss_connect",
"(",
"data_url",
",",
"token_manager",
",",
"job_id",
"=",
"job_id",
")",
"pong",
"=",
"json",
".",
"dumps",
"(",
"{",
"'pong'",
":",
"True",
"}",
")",
"if",
"not",
"persist",
":",
"job_finished",
"=",
"False",
"while",
"not",
"job_finished",
":",
"try",
":",
"data",
"=",
"websocket",
".",
"recv",
"(",
")",
"if",
"data",
":",
"payload",
"=",
"json",
".",
"loads",
"(",
"data",
")",
"if",
"is_debug_enabled",
"(",
")",
":",
"printable_payload",
"=",
"dict",
"(",
"payload",
")",
"if",
"'points'",
"in",
"payload",
":",
"# don't want to print out all the outputs when in",
"# debug mode",
"del",
"printable_payload",
"[",
"'points'",
"]",
"printable_payload",
"[",
"'points'",
"]",
"=",
"'NOT SHOWN'",
"debug",
"(",
"'received %s'",
"%",
"json",
".",
"dumps",
"(",
"printable_payload",
")",
")",
"if",
"'ping'",
"in",
"payload",
".",
"keys",
"(",
")",
":",
"# ping/pong (ie heartbeat) mechanism",
"websocket",
".",
"send",
"(",
"pong",
")",
"if",
"is_debug_enabled",
"(",
")",
":",
"debug",
"(",
"'sent %s'",
"%",
"json",
".",
"dumps",
"(",
"pong",
")",
")",
"if",
"'job_end'",
"in",
"payload",
".",
"keys",
"(",
")",
"and",
"payload",
"[",
"'job_end'",
"]",
"==",
"True",
":",
"job_finished",
"=",
"True",
"if",
"token_manager",
".",
"is_access_token_expired",
"(",
")",
":",
"debug",
"(",
"'refreshing access token'",
")",
"token_obj",
"=",
"{",
"\"accessToken\"",
":",
"token_manager",
".",
"get_access_token",
"(",
")",
"}",
"# refresh authentication token",
"websocket",
".",
"send",
"(",
"json",
".",
"dumps",
"(",
"token_obj",
")",
")",
"if",
"'error'",
"in",
"payload",
":",
"if",
"payload",
"[",
"'error'",
"]",
"==",
"'NONEXISTENT-JOB'",
":",
"raise",
"JutException",
"(",
"'Job \"%s\" no longer running'",
"%",
"job_id",
")",
"# return all channel messages",
"yield",
"payload",
"else",
":",
"debug",
"(",
"'payload was \"%s\", forcing websocket reconnect'",
"%",
"data",
")",
"raise",
"IOError",
"(",
")",
"except",
"IOError",
":",
"if",
"is_debug_enabled",
"(",
")",
":",
"traceback",
".",
"print_exc",
"(",
")",
"#",
"# We'll retry for just under 30s since internally we stop",
"# running non persistent programs after 30s of not heartbeating",
"# with the client",
"#",
"retry",
"=",
"1",
"while",
"retry",
"<=",
"5",
":",
"try",
":",
"debug",
"(",
"'network error reconnecting to job %s, '",
"'try %s of 5'",
"%",
"(",
"job_id",
",",
"retry",
")",
")",
"websocket",
"=",
"__wss_connect",
"(",
"data_url",
",",
"token_manager",
",",
"job_id",
"=",
"job_id",
")",
"break",
"except",
"socket",
".",
"error",
":",
"if",
"is_debug_enabled",
"(",
")",
":",
"traceback",
".",
"print_exc",
"(",
")",
"retry",
"+=",
"1",
"time",
".",
"sleep",
"(",
"5",
")",
"debug",
"(",
"'network error reconnecting to job %s, '",
"'try %s of 5'",
"%",
"(",
"job_id",
",",
"retry",
")",
")",
"websocket",
"=",
"__wss_connect",
"(",
"data_url",
",",
"token_manager",
",",
"job_id",
"=",
"job_id",
")",
"websocket",
".",
"close",
"(",
")"
] | connect to a running Juttle program by job_id | [
"connect",
"to",
"a",
"running",
"Juttle",
"program",
"by",
"job_id"
] | 65574d23f51a7bbced9bb25010d02da5ca5d906f | https://github.com/jut-io/jut-python-tools/blob/65574d23f51a7bbced9bb25010d02da5ca5d906f/jut/api/data_engine.py#L156-L260 |
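Consuming the generator might look like the sketch below; the job id and deployment name are placeholders, and `token_manager` is assumed to come from the package's auth module (construction not shown).

```python
token_manager = ...  # e.g. built via jut's auth module; omitted here

for payload in connect_job('b1946ac9', 'my-deployment',
                           token_manager=token_manager):
    if 'points' in payload:
        for point in payload['points']:
            print(point)  # each point is a JSON-decoded data record
```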
250,709 | jut-io/jut-python-tools | jut/api/data_engine.py | get_jobs | def get_jobs(deployment_name,
token_manager=None,
app_url=defaults.APP_URL):
"""
return list of currently running jobs
"""
headers = token_manager.get_access_token_headers()
data_urls = get_data_urls(deployment_name,
app_url=app_url,
token_manager=token_manager)
jobs = []
for data_url in data_urls:
url = '%s/api/v1/jobs' % data_url
response = requests.get(url, headers=headers)
if response.status_code == 200:
# saving the data_url for the specific job so you know where to
# connect if you want to interact with that job
these_jobs = response.json()['jobs']
for job in these_jobs:
job['data_url'] = data_url
jobs += these_jobs
else:
raise JutException('Error %s: %s' % (response.status_code, response.text))
return jobs | python | def get_jobs(deployment_name,
token_manager=None,
app_url=defaults.APP_URL):
"""
return list of currently running jobs
"""
headers = token_manager.get_access_token_headers()
data_urls = get_data_urls(deployment_name,
app_url=app_url,
token_manager=token_manager)
jobs = []
for data_url in data_urls:
url = '%s/api/v1/jobs' % data_url
response = requests.get(url, headers=headers)
if response.status_code == 200:
# saving the data_url for the specific job so you know where to
# connect if you want to interact with that job
these_jobs = response.json()['jobs']
for job in these_jobs:
job['data_url'] = data_url
jobs += these_jobs
else:
raise JutException('Error %s: %s' % (response.status_code, response.text))
return jobs | [
"def",
"get_jobs",
"(",
"deployment_name",
",",
"token_manager",
"=",
"None",
",",
"app_url",
"=",
"defaults",
".",
"APP_URL",
")",
":",
"headers",
"=",
"token_manager",
".",
"get_access_token_headers",
"(",
")",
"data_urls",
"=",
"get_data_urls",
"(",
"deployment_name",
",",
"app_url",
"=",
"app_url",
",",
"token_manager",
"=",
"token_manager",
")",
"jobs",
"=",
"[",
"]",
"for",
"data_url",
"in",
"data_urls",
":",
"url",
"=",
"'%s/api/v1/jobs'",
"%",
"data_url",
"response",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"headers",
"=",
"headers",
")",
"if",
"response",
".",
"status_code",
"==",
"200",
":",
"# saving the data_url for the specific job so you know where to",
"# connect if you want to interact with that job",
"these_jobs",
"=",
"response",
".",
"json",
"(",
")",
"[",
"'jobs'",
"]",
"for",
"job",
"in",
"these_jobs",
":",
"job",
"[",
"'data_url'",
"]",
"=",
"data_url",
"jobs",
"+=",
"these_jobs",
"else",
":",
"raise",
"JutException",
"(",
"'Error %s: %s'",
"%",
"(",
"response",
".",
"status_code",
",",
"response",
".",
"text",
")",
")",
"return",
"jobs"
] | return list of currently running jobs | [
"return",
"list",
"of",
"currently",
"running",
"jobs"
] | 65574d23f51a7bbced9bb25010d02da5ca5d906f | https://github.com/jut-io/jut-python-tools/blob/65574d23f51a7bbced9bb25010d02da5ca5d906f/jut/api/data_engine.py#L395-L425 |
250,710 | jut-io/jut-python-tools | jut/api/data_engine.py | get_job_details | def get_job_details(job_id,
deployment_name,
token_manager=None,
app_url=defaults.APP_URL):
"""
return job details for a specific job id
"""
jobs = get_jobs(deployment_name,
token_manager=token_manager,
app_url=app_url)
for job in jobs:
if job['id'] == job_id:
return job
raise JutException('Unable to find job with id "%s"' % job_id) | python | def get_job_details(job_id,
deployment_name,
token_manager=None,
app_url=defaults.APP_URL):
"""
return job details for a specific job id
"""
jobs = get_jobs(deployment_name,
token_manager=token_manager,
app_url=app_url)
for job in jobs:
if job['id'] == job_id:
return job
raise JutException('Unable to find job with id "%s"' % job_id) | [
"def",
"get_job_details",
"(",
"job_id",
",",
"deployment_name",
",",
"token_manager",
"=",
"None",
",",
"app_url",
"=",
"defaults",
".",
"APP_URL",
")",
":",
"jobs",
"=",
"get_jobs",
"(",
"deployment_name",
",",
"token_manager",
"=",
"token_manager",
",",
"app_url",
"=",
"app_url",
")",
"for",
"job",
"in",
"jobs",
":",
"if",
"job",
"[",
"'id'",
"]",
"==",
"job_id",
":",
"return",
"job",
"raise",
"JutException",
"(",
"'Unable to find job with id \"%s\"'",
"%",
"job_id",
")"
] | return job details for a specific job id | [
"return",
"job",
"details",
"for",
"a",
"specific",
"job",
"id"
] | 65574d23f51a7bbced9bb25010d02da5ca5d906f | https://github.com/jut-io/jut-python-tools/blob/65574d23f51a7bbced9bb25010d02da5ca5d906f/jut/api/data_engine.py#L428-L445 |
250,711 | jut-io/jut-python-tools | jut/api/data_engine.py | delete_job | def delete_job(job_id,
deployment_name,
token_manager=None,
app_url=defaults.APP_URL):
"""
delete a job with a specific job id
"""
headers = token_manager.get_access_token_headers()
data_url = get_data_url_for_job(job_id,
deployment_name,
token_manager=token_manager,
app_url=app_url)
url = '%s/api/v1/jobs/%s' % (data_url, job_id)
response = requests.delete(url, headers=headers)
if response.status_code != 200:
raise JutException('Error %s: %s' % (response.status_code, response.text)) | python | def delete_job(job_id,
deployment_name,
token_manager=None,
app_url=defaults.APP_URL):
"""
delete a job with a specific job id
"""
headers = token_manager.get_access_token_headers()
data_url = get_data_url_for_job(job_id,
deployment_name,
token_manager=token_manager,
app_url=app_url)
url = '%s/api/v1/jobs/%s' % (data_url, job_id)
response = requests.delete(url, headers=headers)
if response.status_code != 200:
raise JutException('Error %s: %s' % (response.status_code, response.text)) | [
"def",
"delete_job",
"(",
"job_id",
",",
"deployment_name",
",",
"token_manager",
"=",
"None",
",",
"app_url",
"=",
"defaults",
".",
"APP_URL",
")",
":",
"headers",
"=",
"token_manager",
".",
"get_access_token_headers",
"(",
")",
"data_url",
"=",
"get_data_url_for_job",
"(",
"job_id",
",",
"deployment_name",
",",
"token_manager",
"=",
"token_manager",
",",
"app_url",
"=",
"app_url",
")",
"url",
"=",
"'%s/api/v1/jobs/%s'",
"%",
"(",
"data_url",
",",
"job_id",
")",
"response",
"=",
"requests",
".",
"delete",
"(",
"url",
",",
"headers",
"=",
"headers",
")",
"if",
"response",
".",
"status_code",
"!=",
"200",
":",
"raise",
"JutException",
"(",
"'Error %s: %s'",
"%",
"(",
"response",
".",
"status_code",
",",
"response",
".",
"text",
")",
")"
] | delete a job with a specific job id | [
"delete",
"a",
"job",
"with",
"a",
"specific",
"job",
"id"
] | 65574d23f51a7bbced9bb25010d02da5ca5d906f | https://github.com/jut-io/jut-python-tools/blob/65574d23f51a7bbced9bb25010d02da5ca5d906f/jut/api/data_engine.py#L448-L466 |
250,712 | abalkin/tz | tzdata-pkg/zic/zic.py | lines | def lines(input):
"""Remove comments and empty lines"""
for raw_line in input:
line = raw_line.strip()
if line and not line.startswith('#'):
yield strip_comments(line) | python | def lines(input):
"""Remove comments and empty lines"""
for raw_line in input:
line = raw_line.strip()
if line and not line.startswith('#'):
yield strip_comments(line) | [
"def",
"lines",
"(",
"input",
")",
":",
"for",
"raw_line",
"in",
"input",
":",
"line",
"=",
"raw_line",
".",
"strip",
"(",
")",
"if",
"line",
"and",
"not",
"line",
".",
"startswith",
"(",
"'#'",
")",
":",
"yield",
"strip_comments",
"(",
"line",
")"
] | Remove comments and empty lines | [
"Remove",
"comments",
"and",
"empty",
"lines"
] | f25fca6afbf1abd46fd7aeb978282823c7dab5ab | https://github.com/abalkin/tz/blob/f25fca6afbf1abd46fd7aeb978282823c7dab5ab/tzdata-pkg/zic/zic.py#L20-L25 |
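The generator composes with any iterable of strings. A small sketch, assuming `strip_comments` (a sibling helper in `zic.py`) removes trailing `#` comments as its name suggests:

```python
raw = [
    "# tzdata rules for Kabul",
    "",
    "Zone  Asia/Kabul  4:28:12  -  LMT  1890  # local mean time",
]
for line in lines(raw):
    print(line)
# expected: "Zone  Asia/Kabul  4:28:12  -  LMT  1890" (comment stripped)
```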
250,713 | cirruscluster/cirruscluster | cirruscluster/ext/ansible/runner/connection_plugins/local.py | Connection.exec_command | def exec_command(self, cmd, tmp_path, sudo_user, sudoable=False, executable='/bin/sh'):
''' run a command on the local host '''
if not self.runner.sudo or not sudoable:
if executable:
local_cmd = [executable, '-c', cmd]
else:
local_cmd = cmd
else:
local_cmd, prompt = utils.make_sudo_cmd(sudo_user, executable, cmd)
vvv("EXEC %s" % (local_cmd), host=self.host)
p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring),
cwd=self.runner.basedir, executable=executable or None,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if self.runner.sudo and sudoable and self.runner.sudo_pass:
fcntl.fcntl(p.stdout, fcntl.F_SETFL,
fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
fcntl.fcntl(p.stderr, fcntl.F_SETFL,
fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
sudo_output = ''
while not sudo_output.endswith(prompt):
rfd, wfd, efd = select.select([p.stdout, p.stderr], [],
[p.stdout, p.stderr], self.runner.timeout)
if p.stdout in rfd:
chunk = p.stdout.read()
elif p.stderr in rfd:
chunk = p.stderr.read()
else:
stdout, stderr = p.communicate()
raise errors.AnsibleError('timeout waiting for sudo password prompt:\n' + sudo_output)
if not chunk:
stdout, stderr = p.communicate()
raise errors.AnsibleError('sudo output closed while waiting for password prompt:\n' + sudo_output)
sudo_output += chunk
p.stdin.write(self.runner.sudo_pass + '\n')
fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
stdout, stderr = p.communicate()
return (p.returncode, '', stdout, stderr) | python | def exec_command(self, cmd, tmp_path, sudo_user, sudoable=False, executable='/bin/sh'):
''' run a command on the local host '''
if not self.runner.sudo or not sudoable:
if executable:
local_cmd = [executable, '-c', cmd]
else:
local_cmd = cmd
else:
local_cmd, prompt = utils.make_sudo_cmd(sudo_user, executable, cmd)
vvv("EXEC %s" % (local_cmd), host=self.host)
p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring),
cwd=self.runner.basedir, executable=executable or None,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if self.runner.sudo and sudoable and self.runner.sudo_pass:
fcntl.fcntl(p.stdout, fcntl.F_SETFL,
fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
fcntl.fcntl(p.stderr, fcntl.F_SETFL,
fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
sudo_output = ''
while not sudo_output.endswith(prompt):
rfd, wfd, efd = select.select([p.stdout, p.stderr], [],
[p.stdout, p.stderr], self.runner.timeout)
if p.stdout in rfd:
chunk = p.stdout.read()
elif p.stderr in rfd:
chunk = p.stderr.read()
else:
stdout, stderr = p.communicate()
raise errors.AnsibleError('timeout waiting for sudo password prompt:\n' + sudo_output)
if not chunk:
stdout, stderr = p.communicate()
raise errors.AnsibleError('sudo output closed while waiting for password prompt:\n' + sudo_output)
sudo_output += chunk
p.stdin.write(self.runner.sudo_pass + '\n')
fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
stdout, stderr = p.communicate()
return (p.returncode, '', stdout, stderr) | [
"def",
"exec_command",
"(",
"self",
",",
"cmd",
",",
"tmp_path",
",",
"sudo_user",
",",
"sudoable",
"=",
"False",
",",
"executable",
"=",
"'/bin/sh'",
")",
":",
"if",
"not",
"self",
".",
"runner",
".",
"sudo",
"or",
"not",
"sudoable",
":",
"if",
"executable",
":",
"local_cmd",
"=",
"[",
"executable",
",",
"'-c'",
",",
"cmd",
"]",
"else",
":",
"local_cmd",
"=",
"cmd",
"else",
":",
"local_cmd",
",",
"prompt",
"=",
"utils",
".",
"make_sudo_cmd",
"(",
"sudo_user",
",",
"executable",
",",
"cmd",
")",
"vvv",
"(",
"\"EXEC %s\"",
"%",
"(",
"local_cmd",
")",
",",
"host",
"=",
"self",
".",
"host",
")",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"local_cmd",
",",
"shell",
"=",
"isinstance",
"(",
"local_cmd",
",",
"basestring",
")",
",",
"cwd",
"=",
"self",
".",
"runner",
".",
"basedir",
",",
"executable",
"=",
"executable",
"or",
"None",
",",
"stdin",
"=",
"subprocess",
".",
"PIPE",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
"if",
"self",
".",
"runner",
".",
"sudo",
"and",
"sudoable",
"and",
"self",
".",
"runner",
".",
"sudo_pass",
":",
"fcntl",
".",
"fcntl",
"(",
"p",
".",
"stdout",
",",
"fcntl",
".",
"F_SETFL",
",",
"fcntl",
".",
"fcntl",
"(",
"p",
".",
"stdout",
",",
"fcntl",
".",
"F_GETFL",
")",
"|",
"os",
".",
"O_NONBLOCK",
")",
"fcntl",
".",
"fcntl",
"(",
"p",
".",
"stderr",
",",
"fcntl",
".",
"F_SETFL",
",",
"fcntl",
".",
"fcntl",
"(",
"p",
".",
"stderr",
",",
"fcntl",
".",
"F_GETFL",
")",
"|",
"os",
".",
"O_NONBLOCK",
")",
"sudo_output",
"=",
"''",
"while",
"not",
"sudo_output",
".",
"endswith",
"(",
"prompt",
")",
":",
"rfd",
",",
"wfd",
",",
"efd",
"=",
"select",
".",
"select",
"(",
"[",
"p",
".",
"stdout",
",",
"p",
".",
"stderr",
"]",
",",
"[",
"]",
",",
"[",
"p",
".",
"stdout",
",",
"p",
".",
"stderr",
"]",
",",
"self",
".",
"runner",
".",
"timeout",
")",
"if",
"p",
".",
"stdout",
"in",
"rfd",
":",
"chunk",
"=",
"p",
".",
"stdout",
".",
"read",
"(",
")",
"elif",
"p",
".",
"stderr",
"in",
"rfd",
":",
"chunk",
"=",
"p",
".",
"stderr",
".",
"read",
"(",
")",
"else",
":",
"stdout",
",",
"stderr",
"=",
"p",
".",
"communicate",
"(",
")",
"raise",
"errors",
".",
"AnsibleError",
"(",
"'timeout waiting for sudo password prompt:\\n'",
"+",
"sudo_output",
")",
"if",
"not",
"chunk",
":",
"stdout",
",",
"stderr",
"=",
"p",
".",
"communicate",
"(",
")",
"raise",
"errors",
".",
"AnsibleError",
"(",
"'sudo output closed while waiting for password prompt:\\n'",
"+",
"sudo_output",
")",
"sudo_output",
"+=",
"chunk",
"p",
".",
"stdin",
".",
"write",
"(",
"self",
".",
"runner",
".",
"sudo_pass",
"+",
"'\\n'",
")",
"fcntl",
".",
"fcntl",
"(",
"p",
".",
"stdout",
",",
"fcntl",
".",
"F_SETFL",
",",
"fcntl",
".",
"fcntl",
"(",
"p",
".",
"stdout",
",",
"fcntl",
".",
"F_GETFL",
")",
"&",
"~",
"os",
".",
"O_NONBLOCK",
")",
"fcntl",
".",
"fcntl",
"(",
"p",
".",
"stderr",
",",
"fcntl",
".",
"F_SETFL",
",",
"fcntl",
".",
"fcntl",
"(",
"p",
".",
"stderr",
",",
"fcntl",
".",
"F_GETFL",
")",
"&",
"~",
"os",
".",
"O_NONBLOCK",
")",
"stdout",
",",
"stderr",
"=",
"p",
".",
"communicate",
"(",
")",
"return",
"(",
"p",
".",
"returncode",
",",
"''",
",",
"stdout",
",",
"stderr",
")"
] | run a command on the local host | [
"run",
"a",
"command",
"on",
"the",
"local",
"host"
] | 977409929dd81322d886425cdced10608117d5d7 | https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/runner/connection_plugins/local.py#L43-L85 |
250,714 | cirruscluster/cirruscluster | cirruscluster/ext/ansible/runner/connection_plugins/local.py | Connection.put_file | def put_file(self, in_path, out_path):
''' transfer a file from local to local '''
vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
if not os.path.exists(in_path):
raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
try:
shutil.copyfile(in_path, out_path)
except shutil.Error:
traceback.print_exc()
raise errors.AnsibleError("failed to copy: %s and %s are the same" % (in_path, out_path))
except IOError:
traceback.print_exc()
raise errors.AnsibleError("failed to transfer file to %s" % out_path) | python | def put_file(self, in_path, out_path):
''' transfer a file from local to local '''
vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
if not os.path.exists(in_path):
raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
try:
shutil.copyfile(in_path, out_path)
except shutil.Error:
traceback.print_exc()
raise errors.AnsibleError("failed to copy: %s and %s are the same" % (in_path, out_path))
except IOError:
traceback.print_exc()
raise errors.AnsibleError("failed to transfer file to %s" % out_path) | [
"def",
"put_file",
"(",
"self",
",",
"in_path",
",",
"out_path",
")",
":",
"vvv",
"(",
"\"PUT %s TO %s\"",
"%",
"(",
"in_path",
",",
"out_path",
")",
",",
"host",
"=",
"self",
".",
"host",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"in_path",
")",
":",
"raise",
"errors",
".",
"AnsibleFileNotFound",
"(",
"\"file or module does not exist: %s\"",
"%",
"in_path",
")",
"try",
":",
"shutil",
".",
"copyfile",
"(",
"in_path",
",",
"out_path",
")",
"except",
"shutil",
".",
"Error",
":",
"traceback",
".",
"print_exc",
"(",
")",
"raise",
"errors",
".",
"AnsibleError",
"(",
"\"failed to copy: %s and %s are the same\"",
"%",
"(",
"in_path",
",",
"out_path",
")",
")",
"except",
"IOError",
":",
"traceback",
".",
"print_exc",
"(",
")",
"raise",
"errors",
".",
"AnsibleError",
"(",
"\"failed to transfer file to %s\"",
"%",
"out_path",
")"
] | transfer a file from local to local | [
"transfer",
"a",
"file",
"from",
"local",
"to",
"local"
] | 977409929dd81322d886425cdced10608117d5d7 | https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/runner/connection_plugins/local.py#L87-L100 |
250,715 | lipixun/pymime | src/mime/tools/specxmlparser.py | getPythonVarName | def getPythonVarName(name):
"""Get the python variable name
"""
return SUB_REGEX.sub('', name.replace('+', '_').replace('-', '_').replace('.', '_').replace(' ', '').replace('/', '_')).upper() | python | def getPythonVarName(name):
"""Get the python variable name
"""
return SUB_REGEX.sub('', name.replace('+', '_').replace('-', '_').replace('.', '_').replace(' ', '').replace('/', '_')).upper() | [
"def",
"getPythonVarName",
"(",
"name",
")",
":",
"return",
"SUB_REGEX",
".",
"sub",
"(",
"''",
",",
"name",
".",
"replace",
"(",
"'+'",
",",
"'_'",
")",
".",
"replace",
"(",
"'-'",
",",
"'_'",
")",
".",
"replace",
"(",
"'.'",
",",
"'_'",
")",
".",
"replace",
"(",
"' '",
",",
"''",
")",
".",
"replace",
"(",
"'/'",
",",
"'_'",
")",
")",
".",
"upper",
"(",
")"
] | Get the python variable name | [
"Get",
"the",
"python",
"variable",
"name"
] | 4762cf2e51ba80c21d872f26b8e408b6a6863d26 | https://github.com/lipixun/pymime/blob/4762cf2e51ba80c21d872f26b8e408b6a6863d26/src/mime/tools/specxmlparser.py#L56-L59 |
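For example, assuming the module-level `SUB_REGEX` only strips characters not already handled by the chained `replace` calls:

```python
print(getPythonVarName('application/vnd.ms-excel'))  # -> APPLICATION_VND_MS_EXCEL
print(getPythonVarName('image/svg+xml'))             # -> IMAGE_SVG_XML
```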
250,716 | lipixun/pymime | src/mime/tools/specxmlparser.py | Parser.parse | def parse(self, text):
"""Parse the text content
"""
root = ET.fromstring(text)
for elm in root.findall('{http://www.iana.org/assignments}registry'):
for record in elm.findall('{http://www.iana.org/assignments}record'):
for fileElm in record.findall('{http://www.iana.org/assignments}file'):
if fileElm.get('type') == 'template':
mimeType = fileElm.text.strip()
yield mimeType
break | python | def parse(self, text):
"""Parse the text content
"""
root = ET.fromstring(text)
for elm in root.findall('{http://www.iana.org/assignments}registry'):
for record in elm.findall('{http://www.iana.org/assignments}record'):
for fileElm in record.findall('{http://www.iana.org/assignments}file'):
if fileElm.get('type') == 'template':
mimeType = fileElm.text.strip()
yield mimeType
break | [
"def",
"parse",
"(",
"self",
",",
"text",
")",
":",
"root",
"=",
"ET",
".",
"fromstring",
"(",
"text",
")",
"for",
"elm",
"in",
"root",
".",
"findall",
"(",
"'{http://www.iana.org/assignments}registry'",
")",
":",
"for",
"record",
"in",
"elm",
".",
"findall",
"(",
"'{http://www.iana.org/assignments}record'",
")",
":",
"for",
"fileElm",
"in",
"record",
".",
"findall",
"(",
"'{http://www.iana.org/assignments}file'",
")",
":",
"if",
"fileElm",
".",
"get",
"(",
"'type'",
")",
"==",
"'template'",
":",
"mimeType",
"=",
"fileElm",
".",
"text",
".",
"strip",
"(",
")",
"yield",
"mimeType",
"break"
] | Parse the text content | [
"Parse",
"the",
"text",
"content"
] | 4762cf2e51ba80c21d872f26b8e408b6a6863d26 | https://github.com/lipixun/pymime/blob/4762cf2e51ba80c21d872f26b8e408b6a6863d26/src/mime/tools/specxmlparser.py#L32-L42 |
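A self-contained sketch against a trimmed-down IANA media-types registry document, assuming `Parser()` takes no constructor arguments:

```python
registry_xml = """<?xml version="1.0"?>
<registry xmlns="http://www.iana.org/assignments">
  <registry>
    <record>
      <file type="template">application/json</file>
    </record>
  </registry>
</registry>"""

print(list(Parser().parse(registry_xml)))  # expected: ['application/json']
```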
250,717 | lipixun/pymime | src/mime/tools/specxmlparser.py | Parser.parsefile | def parsefile(self, filename):
"""Parse from the file
"""
with open(filename, 'rb') as fd:
return self.parse(fd.read()) | python | def parsefile(self, filename):
"""Parse from the file
"""
with open(filename, 'rb') as fd:
return self.parse(fd.read()) | [
"def",
"parsefile",
"(",
"self",
",",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'rb'",
")",
"as",
"fd",
":",
"return",
"self",
".",
"parse",
"(",
"fd",
".",
"read",
"(",
")",
")"
] | Parse from the file | [
"Parse",
"from",
"the",
"file"
] | 4762cf2e51ba80c21d872f26b8e408b6a6863d26 | https://github.com/lipixun/pymime/blob/4762cf2e51ba80c21d872f26b8e408b6a6863d26/src/mime/tools/specxmlparser.py#L44-L48 |
250,718 | FujiMakoto/IPS-Vagrant | ips_vagrant/scrapers/login.py | Login.check | def check(self):
"""
Check if we have an active login session set
@rtype: bool
"""
self.log.debug('Testing for a valid login session')
# If our cookie jar is empty, we obviously don't have a valid login session
if not len(self.cookiejar):
return False
# Test our login session and make sure it's still active
return requests.get(self.TEST_URL, cookies=self.cookiejar).status_code == 200 | python | def check(self):
"""
Check if we have an active login session set
@rtype: bool
"""
self.log.debug('Testing for a valid login session')
# If our cookie jar is empty, we obviously don't have a valid login session
if not len(self.cookiejar):
return False
# Test our login session and make sure it's still active
return requests.get(self.TEST_URL, cookies=self.cookiejar).status_code == 200 | [
"def",
"check",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'Testing for a valid login session'",
")",
"# If our cookie jar is empty, we obviously don't have a valid login session",
"if",
"not",
"len",
"(",
"self",
".",
"cookiejar",
")",
":",
"return",
"False",
"# Test our login session and make sure it's still active",
"return",
"requests",
".",
"get",
"(",
"self",
".",
"TEST_URL",
",",
"cookies",
"=",
"self",
".",
"cookiejar",
")",
".",
"status_code",
"==",
"200"
] | Check if we have an active login session set
@rtype: bool | [
"Check",
"if",
"we",
"have",
"an",
"active",
"login",
"session",
"set"
] | 7b1d6d095034dd8befb026d9315ecc6494d52269 | https://github.com/FujiMakoto/IPS-Vagrant/blob/7b1d6d095034dd8befb026d9315ecc6494d52269/ips_vagrant/scrapers/login.py#L35-L46 |
250,719 | FujiMakoto/IPS-Vagrant | ips_vagrant/scrapers/login.py | Login.process | def process(self, username, password, remember=True):
"""
Process a login request
@type username: str
@type password: str
@param remember: Save the login session to disk
@type remember: bool
@raise BadLoginException: Login request failed
@return: Session cookies
@rtype: cookielib.LWPCookieJar
"""
self.log.debug('Processing login request')
self.browser.open(self.LOGIN_URL)
self.log.info('Login page loaded: %s', self.browser.title())
self.browser.select_form(nr=0)
# Set the fields
self.log.debug('Username: %s', username)
self.log.debug('Password: %s', (password[0] + '*' * (len(password) - 2) + password[-1]))
self.log.debug('Remember: %s', remember)
self.browser.form[self.USERNAME_FIELD] = username
self.browser.form[self.PASSWORD_FIELD] = password
self.browser.find_control(self.REMEMBER_FIELD).items[0].selected = remember
# Submit the request
self.browser.submit()
self.log.debug('Response code: %s', self.browser.response().code)
self.log.debug('== Cookies ==')
for cookie in self.cookiejar:
self.log.debug(cookie)
self.cookies[cookie.name] = cookie.value
self.log.debug('== End Cookies ==')
# Make sure we successfully logged in
if self.LOGIN_COOKIE not in self.cookies:
raise BadLoginException('No login cookie returned, this probably means an invalid login was provided')
# Should we save our login session?
if remember:
self.log.info('Saving login session to disk')
self.cookiejar.save()
self.log.info('Login request successful')
return self.cookiejar | python | def process(self, username, password, remember=True):
"""
Process a login request
@type username: str
@type password: str
@param remember: Save the login session to disk
@type remember: bool
@raise BadLoginException: Login request failed
@return: Session cookies
@rtype: cookielib.LWPCookieJar
"""
self.log.debug('Processing login request')
self.browser.open(self.LOGIN_URL)
self.log.info('Login page loaded: %s', self.browser.title())
self.browser.select_form(nr=0)
# Set the fields
self.log.debug('Username: %s', username)
self.log.debug('Password: %s', (password[0] + '*' * (len(password) - 2) + password[-1]))
self.log.debug('Remember: %s', remember)
self.browser.form[self.USERNAME_FIELD] = username
self.browser.form[self.PASSWORD_FIELD] = password
self.browser.find_control(self.REMEMBER_FIELD).items[0].selected = remember
# Submit the request
self.browser.submit()
self.log.debug('Response code: %s', self.browser.response().code)
self.log.debug('== Cookies ==')
for cookie in self.cookiejar:
self.log.debug(cookie)
self.cookies[cookie.name] = cookie.value
self.log.debug('== End Cookies ==')
# Make sure we successfully logged in
if self.LOGIN_COOKIE not in self.cookies:
raise BadLoginException('No login cookie returned, this probably means an invalid login was provided')
# Should we save our login session?
if remember:
self.log.info('Saving login session to disk')
self.cookiejar.save()
self.log.info('Login request successful')
return self.cookiejar | [
"def",
"process",
"(",
"self",
",",
"username",
",",
"password",
",",
"remember",
"=",
"True",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'Processing login request'",
")",
"self",
".",
"browser",
".",
"open",
"(",
"self",
".",
"LOGIN_URL",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Login page loaded: %s'",
",",
"self",
".",
"browser",
".",
"title",
"(",
")",
")",
"self",
".",
"browser",
".",
"select_form",
"(",
"nr",
"=",
"0",
")",
"# Set the fields",
"self",
".",
"log",
".",
"debug",
"(",
"'Username: %s'",
",",
"username",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'Password: %s'",
",",
"(",
"password",
"[",
"0",
"]",
"+",
"'*'",
"*",
"(",
"len",
"(",
"password",
")",
"-",
"2",
")",
"+",
"password",
"[",
"-",
"1",
"]",
")",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'Remember: %s'",
",",
"remember",
")",
"self",
".",
"browser",
".",
"form",
"[",
"self",
".",
"USERNAME_FIELD",
"]",
"=",
"username",
"self",
".",
"browser",
".",
"form",
"[",
"self",
".",
"PASSWORD_FIELD",
"]",
"=",
"password",
"self",
".",
"browser",
".",
"find_control",
"(",
"self",
".",
"REMEMBER_FIELD",
")",
".",
"items",
"[",
"0",
"]",
".",
"selected",
"=",
"remember",
"# Submit the request",
"self",
".",
"browser",
".",
"submit",
"(",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'Response code: %s'",
",",
"self",
".",
"browser",
".",
"response",
"(",
")",
".",
"code",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'== Cookies =='",
")",
"for",
"cookie",
"in",
"self",
".",
"cookiejar",
":",
"self",
".",
"log",
".",
"debug",
"(",
"cookie",
")",
"self",
".",
"cookies",
"[",
"cookie",
".",
"name",
"]",
"=",
"cookie",
".",
"value",
"self",
".",
"log",
".",
"debug",
"(",
"'== End Cookies =='",
")",
"# Make sure we successfully logged in",
"if",
"self",
".",
"LOGIN_COOKIE",
"not",
"in",
"self",
".",
"cookies",
":",
"raise",
"BadLoginException",
"(",
"'No login cookie returned, this probably means an invalid login was provided'",
")",
"# Should we save our login session?",
"if",
"remember",
":",
"self",
".",
"log",
".",
"info",
"(",
"'Saving login session to disk'",
")",
"self",
".",
"cookiejar",
".",
"save",
"(",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Login request successful'",
")",
"return",
"self",
".",
"cookiejar"
] | Process a login request
@type username: str
@type password: str
@param remember: Save the login session to disk
@type remember: bool
@raise BadLoginException: Login request failed
@return: Session cookies
@rtype: cookielib.LWPCookieJar | [
"Process",
"a",
"login",
"request"
] | 7b1d6d095034dd8befb026d9315ecc6494d52269 | https://github.com/FujiMakoto/IPS-Vagrant/blob/7b1d6d095034dd8befb026d9315ecc6494d52269/ips_vagrant/scrapers/login.py#L48-L94 |
250,720 | WhereSoftwareGoesToDie/pymarquise | marquise/marquise_cffi.py | get_libmarquise_header | def get_libmarquise_header():
"""Read the libmarquise header to extract definitions."""
# Header file is packaged in the same place as the rest of the
# module.
header_path = os.path.join(os.path.dirname(__file__), "marquise.h")
with open(header_path) as header:
libmarquise_header_lines = header.readlines()
libmarquise_header_lines = [ line for line in libmarquise_header_lines if not line.startswith('#include ') and not line.startswith('#define ') ]
libmarquise_header_lines = [ line for line in libmarquise_header_lines if not line.startswith('#include ') ]
# We can't #include glib so FFI doesn't know what a GTree is. Leave it for
# later and let the C compiler resolve it when we call FFI.verify()
libmarquise_header_lines = [ line.replace("GTree *sd_hashes;", "...;") for line in libmarquise_header_lines ]
return ''.join(libmarquise_header_lines) | python | def get_libmarquise_header():
"""Read the libmarquise header to extract definitions."""
# Header file is packaged in the same place as the rest of the
# module.
header_path = os.path.join(os.path.dirname(__file__), "marquise.h")
with open(header_path) as header:
libmarquise_header_lines = header.readlines()
libmarquise_header_lines = [ line for line in libmarquise_header_lines if not line.startswith('#include ') and not line.startswith('#define ') ]
libmarquise_header_lines = [ line for line in libmarquise_header_lines if not line.startswith('#include ') ]
# We can't #include glib so FFI doesn't know what a GTree is. Leave it for
# later and let the C compiler resolve it when we call FFI.verify()
libmarquise_header_lines = [ line.replace("GTree *sd_hashes;", "...;") for line in libmarquise_header_lines ]
return ''.join(libmarquise_header_lines) | [
"def",
"get_libmarquise_header",
"(",
")",
":",
"# Header file is packaged in the same place as the rest of the",
"# module.",
"header_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"\"marquise.h\"",
")",
"with",
"open",
"(",
"header_path",
")",
"as",
"header",
":",
"libmarquise_header_lines",
"=",
"header",
".",
"readlines",
"(",
")",
"libmarquise_header_lines",
"=",
"[",
"line",
"for",
"line",
"in",
"libmarquise_header_lines",
"if",
"not",
"line",
".",
"startswith",
"(",
"'#include '",
")",
"and",
"not",
"line",
".",
"startswith",
"(",
"'#define '",
")",
"]",
"libmarquise_header_lines",
"=",
"[",
"line",
"for",
"line",
"in",
"libmarquise_header_lines",
"if",
"not",
"line",
".",
"startswith",
"(",
"'#include '",
")",
"]",
"# We can't #include glib so FFI doesn't know what a GTree is. Leave it for",
"# later and let the C compiler resolve it when we call FFI.verify()",
"libmarquise_header_lines",
"=",
"[",
"line",
".",
"replace",
"(",
"\"GTree *sd_hashes;\"",
",",
"\"...;\"",
")",
"for",
"line",
"in",
"libmarquise_header_lines",
"]",
"return",
"''",
".",
"join",
"(",
"libmarquise_header_lines",
")"
] | Read the libmarquise header to extract definitions. | [
"Read",
"the",
"libmarquise",
"header",
"to",
"extract",
"definitions",
"."
] | 67e52df70c50ed53ad315a64fea430a9567e2b1b | https://github.com/WhereSoftwareGoesToDie/pymarquise/blob/67e52df70c50ed53ad315a64fea430a9567e2b1b/marquise/marquise_cffi.py#L35-L48 |
250,721 | fred49/linshare-api | linshareapi/user/threads.py | Threads2.head | def head(self, uuid):
""" Get one thread."""
url = "%(base)s/%(uuid)s" % {
'base': self.local_base_url,
'uuid': uuid
}
return self.core.head(url) | python | def head(self, uuid):
""" Get one thread."""
url = "%(base)s/%(uuid)s" % {
'base': self.local_base_url,
'uuid': uuid
}
return self.core.head(url) | [
"def",
"head",
"(",
"self",
",",
"uuid",
")",
":",
"url",
"=",
"\"%(base)s/%(uuid)s\"",
"%",
"{",
"'base'",
":",
"self",
".",
"local_base_url",
",",
"'uuid'",
":",
"uuid",
"}",
"return",
"self",
".",
"core",
".",
"head",
"(",
"url",
")"
] | Get one thread. | [
"Get",
"one",
"thread",
"."
] | be646c25aa8ba3718abb6869c620b157d53d6e41 | https://github.com/fred49/linshare-api/blob/be646c25aa8ba3718abb6869c620b157d53d6e41/linshareapi/user/threads.py#L106-L112 |
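The URL construction in head() is plain %-style formatting with a dict. A standalone sketch with placeholder values (the base path and uuid are made up):

    url = "%(base)s/%(uuid)s" % {
        'base': 'threads',  # stands in for self.local_base_url
        'uuid': 'aa1f1c74-3481-47e4-b4a4-1f7ffd3a1c3c',  # hypothetical uuid
    }
    print(url)  # threads/aa1f1c74-3481-47e4-b4a4-1f7ffd3a1c3c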
250,722 | refinery29/chassis | chassis/util/encoders.py | ModelJSONEncoder.default | def default(self, obj): # pylint: disable=method-hidden
"""Use the default behavior unless the object to be encoded has a
`strftime` attribute."""
if hasattr(obj, 'strftime'):
return obj.strftime("%Y-%m-%dT%H:%M:%SZ")
elif hasattr(obj, 'get_public_dict'):
return obj.get_public_dict()
else:
return json.JSONEncoder.default(self, obj) | python | def default(self, obj): # pylint: disable=method-hidden
"""Use the default behavior unless the object to be encoded has a
`strftime` attribute."""
if hasattr(obj, 'strftime'):
return obj.strftime("%Y-%m-%dT%H:%M:%SZ")
elif hasattr(obj, 'get_public_dict'):
return obj.get_public_dict()
else:
return json.JSONEncoder.default(self, obj) | [
"def",
"default",
"(",
"self",
",",
"obj",
")",
":",
"# pylint: disable=method-hidden",
"if",
"hasattr",
"(",
"obj",
",",
"'strftime'",
")",
":",
"return",
"obj",
".",
"strftime",
"(",
"\"%Y-%m-%dT%H:%M:%SZ\"",
")",
"elif",
"hasattr",
"(",
"obj",
",",
"'get_public_dict'",
")",
":",
"return",
"obj",
".",
"get_public_dict",
"(",
")",
"else",
":",
"return",
"json",
".",
"JSONEncoder",
".",
"default",
"(",
"self",
",",
"obj",
")"
] | Use the default behavior unless the object to be encoded has a
`strftime` attribute. | [
"Use",
"the",
"default",
"behavior",
"unless",
"the",
"object",
"to",
"be",
"encoded",
"has",
"a",
"strftime",
"attribute",
"."
] | 1238d5214cbb8f3e1fe7c0dc2fa72f45bf085192 | https://github.com/refinery29/chassis/blob/1238d5214cbb8f3e1fe7c0dc2fa72f45bf085192/chassis/util/encoders.py#L9-L18 |
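A runnable usage sketch of the encoder above: any object with a strftime attribute (such as a datetime) serializes to the fixed UTC format. The class body is restated so the snippet stands alone:

    import datetime
    import json

    class ModelJSONEncoder(json.JSONEncoder):
        # restated from the entry above so this sketch runs standalone
        def default(self, obj):
            if hasattr(obj, 'strftime'):
                return obj.strftime("%Y-%m-%dT%H:%M:%SZ")
            elif hasattr(obj, 'get_public_dict'):
                return obj.get_public_dict()
            else:
                return json.JSONEncoder.default(self, obj)

    payload = {'created': datetime.datetime(2016, 5, 4, 12, 30, 0)}
    print(json.dumps(payload, cls=ModelJSONEncoder))
    # {"created": "2016-05-04T12:30:00Z"}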
250,723 | fedora-infra/fmn.rules | fmn/rules/taskotron.py | taskotron_task | def taskotron_task(config, message, task=None):
""" Particular taskotron task
With this rule, you can limit messages to only those of particular
`taskotron <https://taskotron.fedoraproject.org/>`_ task.
You can specify several tasks by separating them with a comma ',',
i.e.: ``dist.depcheck,dist.rpmlint``.
"""
# We only operate on taskotron messages, first off.
if not taskotron_result_new(config, message):
return False
if not task:
return False
tasks = [item.strip().lower() for item in task.split(',')]
return message['msg']['task'].get('name').lower() in tasks | python | def taskotron_task(config, message, task=None):
""" Particular taskotron task
With this rule, you can limit messages to only those of particular
`taskotron <https://taskotron.fedoraproject.org/>`_ task.
You can specify several tasks by separating them with a comma ',',
i.e.: ``dist.depcheck,dist.rpmlint``.
"""
# We only operate on taskotron messages, first off.
if not taskotron_result_new(config, message):
return False
if not task:
return False
tasks = [item.strip().lower() for item in task.split(',')]
return message['msg']['task'].get('name').lower() in tasks | [
"def",
"taskotron_task",
"(",
"config",
",",
"message",
",",
"task",
"=",
"None",
")",
":",
"# We only operate on taskotron messages, first off.",
"if",
"not",
"taskotron_result_new",
"(",
"config",
",",
"message",
")",
":",
"return",
"False",
"if",
"not",
"task",
":",
"return",
"False",
"tasks",
"=",
"[",
"item",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"for",
"item",
"in",
"task",
".",
"split",
"(",
"','",
")",
"]",
"return",
"message",
"[",
"'msg'",
"]",
"[",
"'task'",
"]",
".",
"get",
"(",
"'name'",
")",
".",
"lower",
"(",
")",
"in",
"tasks"
] | Particular taskotron task
With this rule, you can limit messages to only those of particular
`taskotron <https://taskotron.fedoraproject.org/>`_ task.
You can specify several tasks by separating them with a comma ',',
i.e.: ``dist.depcheck,dist.rpmlint``. | [
"Particular",
"taskotron",
"task"
] | f9ec790619fcc8b41803077c4dec094e5127fc24 | https://github.com/fedora-infra/fmn.rules/blob/f9ec790619fcc8b41803077c4dec094e5127fc24/fmn/rules/taskotron.py#L15-L33 |
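The comma-separated task filter in taskotron_task() reduces to a list comprehension plus a membership test. A standalone sketch with a hypothetical fedmsg body (the taskotron_result_new() gate is left out):

    task = 'dist.depcheck, dist.rpmlint'  # user-supplied filter string
    message = {'msg': {'task': {'name': 'dist.rpmlint'}}}  # hypothetical message
    tasks = [item.strip().lower() for item in task.split(',')]
    print(message['msg']['task'].get('name').lower() in tasks)  # True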
250,724 | fedora-infra/fmn.rules | fmn/rules/taskotron.py | taskotron_changed_outcome | def taskotron_changed_outcome(config, message):
""" Taskotron task outcome changed
With this rule, you can limit messages to only those task results
with changed outcomes. This is useful when an object (a build,
an update, etc) gets retested and either the object itself or the
environment changes and the task outcome is now different (e.g.
FAILED -> PASSED).
"""
# We only operate on taskotron messages, first off.
if not taskotron_result_new(config, message):
return False
outcome = message['msg']['result'].get('outcome')
prev_outcome = message['msg']['result'].get('prev_outcome')
return prev_outcome is not None and outcome != prev_outcome | python | def taskotron_changed_outcome(config, message):
""" Taskotron task outcome changed
With this rule, you can limit messages to only those task results
with changed outcomes. This is useful when an object (a build,
an update, etc) gets retested and either the object itself or the
environment changes and the task outcome is now different (e.g.
FAILED -> PASSED).
"""
# We only operate on taskotron messages, first off.
if not taskotron_result_new(config, message):
return False
outcome = message['msg']['result'].get('outcome')
prev_outcome = message['msg']['result'].get('prev_outcome')
return prev_outcome is not None and outcome != prev_outcome | [
"def",
"taskotron_changed_outcome",
"(",
"config",
",",
"message",
")",
":",
"# We only operate on taskotron messages, first off.",
"if",
"not",
"taskotron_result_new",
"(",
"config",
",",
"message",
")",
":",
"return",
"False",
"outcome",
"=",
"message",
"[",
"'msg'",
"]",
"[",
"'result'",
"]",
".",
"get",
"(",
"'outcome'",
")",
"prev_outcome",
"=",
"message",
"[",
"'msg'",
"]",
"[",
"'result'",
"]",
".",
"get",
"(",
"'prev_outcome'",
")",
"return",
"prev_outcome",
"is",
"not",
"None",
"and",
"outcome",
"!=",
"prev_outcome"
] | Taskotron task outcome changed
With this rule, you can limit messages to only those task results
with changed outcomes. This is useful when an object (a build,
an update, etc) gets retested and either the object itself or the
environment changes and the task outcome is now different (e.g.
FAILED -> PASSED). | [
"Taskotron",
"task",
"outcome",
"changed"
] | f9ec790619fcc8b41803077c4dec094e5127fc24 | https://github.com/fedora-infra/fmn.rules/blob/f9ec790619fcc8b41803077c4dec094e5127fc24/fmn/rules/taskotron.py#L37-L54 |
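The changed-outcome rule is a comparison of two result fields. A standalone sketch with a hypothetical message body (again omitting the taskotron_result_new() gate):

    message = {'msg': {'result': {'outcome': 'PASSED', 'prev_outcome': 'FAILED'}}}
    outcome = message['msg']['result'].get('outcome')
    prev_outcome = message['msg']['result'].get('prev_outcome')
    print(prev_outcome is not None and outcome != prev_outcome)  # True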
250,725 | fedora-infra/fmn.rules | fmn/rules/taskotron.py | taskotron_task_outcome | def taskotron_task_outcome(config, message, outcome=None):
""" Particular taskotron task outcome
With this rule, you can limit messages to only those of particular
`taskotron <https://taskotron.fedoraproject.org/>`_ task outcome.
You can specify several outcomes by separating them with a comma ',',
i.e.: ``PASSED,FAILED``.
The full list of supported outcomes can be found in the libtaskotron
`documentation <https://docs.qadevel.cloud.fedoraproject.org/
libtaskotron/latest/resultyaml.html#minimal-version>`_.
"""
# We only operate on taskotron messages, first off.
if not taskotron_result_new(config, message):
return False
if not outcome:
return False
outcomes = [item.strip().lower() for item in outcome.split(',')]
return message['msg']['result'].get('outcome').lower() in outcomes | python | def taskotron_task_outcome(config, message, outcome=None):
""" Particular taskotron task outcome
With this rule, you can limit messages to only those of particular
`taskotron <https://taskotron.fedoraproject.org/>`_ task outcome.
You can specify several outcomes by separating them with a comma ',',
i.e.: ``PASSED,FAILED``.
The full list of supported outcomes can be found in the libtaskotron
`documentation <https://docs.qadevel.cloud.fedoraproject.org/
libtaskotron/latest/resultyaml.html#minimal-version>`_.
"""
# We only operate on taskotron messages, first off.
if not taskotron_result_new(config, message):
return False
if not outcome:
return False
outcomes = [item.strip().lower() for item in outcome.split(',')]
return message['msg']['result'].get('outcome').lower() in outcomes | [
"def",
"taskotron_task_outcome",
"(",
"config",
",",
"message",
",",
"outcome",
"=",
"None",
")",
":",
"# We only operate on taskotron messages, first off.",
"if",
"not",
"taskotron_result_new",
"(",
"config",
",",
"message",
")",
":",
"return",
"False",
"if",
"not",
"outcome",
":",
"return",
"False",
"outcomes",
"=",
"[",
"item",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"for",
"item",
"in",
"outcome",
".",
"split",
"(",
"','",
")",
"]",
"return",
"message",
"[",
"'msg'",
"]",
"[",
"'result'",
"]",
".",
"get",
"(",
"'outcome'",
")",
".",
"lower",
"(",
")",
"in",
"outcomes"
] | Particular taskotron task outcome
With this rule, you can limit messages to only those of particular
`taskotron <https://taskotron.fedoraproject.org/>`_ task outcome.
You can specify several outcomes by separating them with a comma ',',
i.e.: ``PASSED,FAILED``.
The full list of supported outcomes can be found in the libtaskotron
`documentation <https://docs.qadevel.cloud.fedoraproject.org/
libtaskotron/latest/resultyaml.html#minimal-version>`_. | [
"Particular",
"taskotron",
"task",
"outcome"
] | f9ec790619fcc8b41803077c4dec094e5127fc24 | https://github.com/fedora-infra/fmn.rules/blob/f9ec790619fcc8b41803077c4dec094e5127fc24/fmn/rules/taskotron.py#L58-L80 |
250,726 | fedora-infra/fmn.rules | fmn/rules/taskotron.py | taskotron_release_critical_task | def taskotron_release_critical_task(config, message):
""" Release-critical taskotron tasks
With this rule, you can limit messages to only those of
release-critical
`taskotron <https://taskotron.fedoraproject.org/>`_ task.
These are the tasks which are deemed extremely important
by the distribution, and their failure should be carefully
inspected. Currently these tasks are ``dist.depcheck`` and
``dist.upgradepath``.
"""
# We only operate on taskotron messages, first off.
if not taskotron_result_new(config, message):
return False
task = message['msg']['task'].get('name')
return task in ['dist.depcheck', 'dist.upgradepath'] | python | def taskotron_release_critical_task(config, message):
""" Release-critical taskotron tasks
With this rule, you can limit messages to only those of
release-critical
`taskotron <https://taskotron.fedoraproject.org/>`_ task.
These are the tasks which are deemed extremely important
by the distribution, and their failure should be carefully
inspected. Currently these tasks are ``dist.depcheck`` and
``dist.upgradepath``.
"""
# We only operate on taskotron messages, first off.
if not taskotron_result_new(config, message):
return False
task = message['msg']['task'].get('name')
return task in ['dist.depcheck', 'dist.upgradepath'] | [
"def",
"taskotron_release_critical_task",
"(",
"config",
",",
"message",
")",
":",
"# We only operate on taskotron messages, first off.",
"if",
"not",
"taskotron_result_new",
"(",
"config",
",",
"message",
")",
":",
"return",
"False",
"task",
"=",
"message",
"[",
"'msg'",
"]",
"[",
"'task'",
"]",
".",
"get",
"(",
"'name'",
")",
"return",
"task",
"in",
"[",
"'dist.depcheck'",
",",
"'dist.upgradepath'",
"]"
] | Release-critical taskotron tasks
With this rule, you can limit messages to only those of
release-critical
`taskotron <https://taskotron.fedoraproject.org/>`_ task.
These are the tasks which are deemed extremely important
by the distribution, and their failure should be carefully
inspected. Currently these tasks are ``dist.depcheck`` and
``dist.upgradepath``. | [
"Release",
"-",
"critical",
"taskotron",
"tasks"
] | f9ec790619fcc8b41803077c4dec094e5127fc24 | https://github.com/fedora-infra/fmn.rules/blob/f9ec790619fcc8b41803077c4dec094e5127fc24/fmn/rules/taskotron.py#L108-L127 |
250,727 | abhinav/reversible | reversible/tornado/core.py | execute | def execute(action, io_loop=None):
"""Execute the given action and return a Future with the result.
The ``forwards`` and/or ``backwards`` methods for the action may be
synchronous or asynchronous. If asynchronous, that method must return a
Future that will resolve to its result.
See :py:func:`reversible.execute` for more details on the behavior of
``execute``.
:param action:
The action to execute.
:param io_loop:
IOLoop through which asynchronous operations will be executed. If
omitted, the current IOLoop is used.
:returns:
A future containing the result of executing the action.
"""
if not io_loop:
io_loop = IOLoop.current()
output = Future()
def call():
try:
result = _execute(_TornadoAction(action, io_loop))
except Exception:
output.set_exc_info(sys.exc_info())
else:
output.set_result(result)
io_loop.add_callback(greenlet.greenlet(call).switch)
return output | python | def execute(action, io_loop=None):
"""Execute the given action and return a Future with the result.
The ``forwards`` and/or ``backwards`` methods for the action may be
synchronous or asynchronous. If asynchronous, that method must return a
Future that will resolve to its result.
See :py:func:`reversible.execute` for more details on the behavior of
``execute``.
:param action:
The action to execute.
:param io_loop:
IOLoop through which asynchronous operations will be executed. If
omitted, the current IOLoop is used.
:returns:
A future containing the result of executing the action.
"""
if not io_loop:
io_loop = IOLoop.current()
output = Future()
def call():
try:
result = _execute(_TornadoAction(action, io_loop))
except Exception:
output.set_exc_info(sys.exc_info())
else:
output.set_result(result)
io_loop.add_callback(greenlet.greenlet(call).switch)
return output | [
"def",
"execute",
"(",
"action",
",",
"io_loop",
"=",
"None",
")",
":",
"if",
"not",
"io_loop",
":",
"io_loop",
"=",
"IOLoop",
".",
"current",
"(",
")",
"output",
"=",
"Future",
"(",
")",
"def",
"call",
"(",
")",
":",
"try",
":",
"result",
"=",
"_execute",
"(",
"_TornadoAction",
"(",
"action",
",",
"io_loop",
")",
")",
"except",
"Exception",
":",
"output",
".",
"set_exc_info",
"(",
"sys",
".",
"exc_info",
"(",
")",
")",
"else",
":",
"output",
".",
"set_result",
"(",
"result",
")",
"io_loop",
".",
"add_callback",
"(",
"greenlet",
".",
"greenlet",
"(",
"call",
")",
".",
"switch",
")",
"return",
"output"
] | Execute the given action and return a Future with the result.
The ``forwards`` and/or ``backwards`` methods for the action may be
synchronous or asynchronous. If asynchronous, that method must return a
Future that will resolve to its result.
See :py:func:`reversible.execute` for more details on the behavior of
``execute``.
:param action:
The action to execute.
:param io_loop:
IOLoop through which asynchronous operations will be executed. If
omitted, the current IOLoop is used.
:returns:
A future containing the result of executing the action. | [
"Execute",
"the",
"given",
"action",
"and",
"return",
"a",
"Future",
"with",
"the",
"result",
"."
] | 7e28aaf0390f7d4b889c6ac14d7b340f8f314e89 | https://github.com/abhinav/reversible/blob/7e28aaf0390f7d4b889c6ac14d7b340f8f314e89/reversible/tornado/core.py#L79-L112 |
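A usage sketch for execute(): an action is any object exposing forwards()/backwards() methods, and the returned Future resolves on the IOLoop. The import path and the simple synchronous action here are assumptions, not verified against the library:

    from tornado.ioloop import IOLoop
    from reversible.tornado import execute  # assumed import path

    class CreateRecord(object):
        # minimal action with synchronous forwards/backwards
        def forwards(self):
            return 'record-id'

        def backwards(self):
            pass  # roll the creation back here

    result = IOLoop.current().run_sync(lambda: execute(CreateRecord()))
    print(result)  # record-id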
250,728 | KnowledgeLinks/rdfframework | rdfframework/datasets/rdfdatasets.py | RdfDataset.add_triple | def add_triple(self, sub, pred=None, obj=None, **kwargs):
""" Adds a triple to the dataset
args:
sub: The subject of the triple or dictionary containing a
triple
pred: Optional if supplied in sub, predicate of the triple
obj: Optional if supplied in sub, object of the triple
kwargs:
map: Optional, a dictionary mapping for a supplied dictionary
strip_orphans: Optional, remove triples that have an orphan
blanknode as the object
obj_method: if "list" then the object will be returned in the
form of a list
"""
self.__set_map__(**kwargs)
strip_orphans = kwargs.get("strip_orphans", False)
obj_method = kwargs.get("obj_method")
if isinstance(sub, DictClass) or isinstance(sub, dict):
pred = sub[self.pmap]
obj = sub[self.omap]
sub = sub[self.smap]
pred = pyrdf(pred)
obj = pyrdf(obj)
sub = pyrdf(sub)
# reference existing attr for bnodes and uris
if obj.type in self.relate_obj_types:
if strip_orphans and not self.get(obj):
return
obj = self.get(obj,obj)
try:
self[sub].add_property(pred, obj)
except KeyError:
self[sub] = RdfClassBase(sub, self, **kwargs)
self[sub].add_property(pred, obj) | python | def add_triple(self, sub, pred=None, obj=None, **kwargs):
""" Adds a triple to the dataset
args:
sub: The subject of the triple or dictionary containing a
triple
pred: Optional if supplied in sub, predicate of the triple
obj: Optional if supplied in sub, object of the triple
kwargs:
map: Optional, a dictionary mapping for a supplied dictionary
strip_orphans: Optional, remove triples that have an orphan
blanknode as the object
obj_method: if "list" then the object will be returned in the
form of a list
"""
self.__set_map__(**kwargs)
strip_orphans = kwargs.get("strip_orphans", False)
obj_method = kwargs.get("obj_method")
if isinstance(sub, DictClass) or isinstance(sub, dict):
pred = sub[self.pmap]
obj = sub[self.omap]
sub = sub[self.smap]
pred = pyrdf(pred)
obj = pyrdf(obj)
sub = pyrdf(sub)
# reference existing attr for bnodes and uris
if obj.type in self.relate_obj_types:
if strip_orphans and not self.get(obj):
return
obj = self.get(obj,obj)
try:
self[sub].add_property(pred, obj)
except KeyError:
self[sub] = RdfClassBase(sub, self, **kwargs)
self[sub].add_property(pred, obj) | [
"def",
"add_triple",
"(",
"self",
",",
"sub",
",",
"pred",
"=",
"None",
",",
"obj",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"__set_map__",
"(",
"*",
"*",
"kwargs",
")",
"strip_orphans",
"=",
"kwargs",
".",
"get",
"(",
"\"strip_orphans\"",
",",
"False",
")",
"obj_method",
"=",
"kwargs",
".",
"get",
"(",
"\"obj_method\"",
")",
"if",
"isinstance",
"(",
"sub",
",",
"DictClass",
")",
"or",
"isinstance",
"(",
"sub",
",",
"dict",
")",
":",
"pred",
"=",
"sub",
"[",
"self",
".",
"pmap",
"]",
"obj",
"=",
"sub",
"[",
"self",
".",
"omap",
"]",
"sub",
"=",
"sub",
"[",
"self",
".",
"smap",
"]",
"pred",
"=",
"pyrdf",
"(",
"pred",
")",
"obj",
"=",
"pyrdf",
"(",
"obj",
")",
"sub",
"=",
"pyrdf",
"(",
"sub",
")",
"# reference existing attr for bnodes and uris",
"if",
"obj",
".",
"type",
"in",
"self",
".",
"relate_obj_types",
":",
"if",
"strip_orphans",
"and",
"not",
"self",
".",
"get",
"(",
"obj",
")",
":",
"return",
"obj",
"=",
"self",
".",
"get",
"(",
"obj",
",",
"obj",
")",
"try",
":",
"self",
"[",
"sub",
"]",
".",
"add_property",
"(",
"pred",
",",
"obj",
")",
"except",
"KeyError",
":",
"self",
"[",
"sub",
"]",
"=",
"RdfClassBase",
"(",
"sub",
",",
"self",
",",
"*",
"*",
"kwargs",
")",
"self",
"[",
"sub",
"]",
".",
"add_property",
"(",
"pred",
",",
"obj",
")"
] | Adds a triple to the dataset
args:
sub: The subject of the triple or dictionary containing a
triple
pred: Optional if supplied in sub, predicate of the triple
obj: Optional if supplied in sub, object of the triple
kwargs:
map: Optional, a dictionary mapping for a supplied dictionary
strip_orphans: Optional, remove triples that have an orphan
blanknode as the object
obj_method: if "list" then the object will be returned in the
form of a list | [
"Adds",
"a",
"triple",
"to",
"the",
"dataset"
] | 9ec32dcc4bed51650a4b392cc5c15100fef7923a | https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/datasets/rdfdatasets.py#L108-L144 |
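A usage sketch for add_triple(), assuming RdfDataset can be constructed empty and that plain strings are coerced through pyrdf() as the code above shows. The import path follows the entry's path field but is not verified:

    from rdfframework.datasets import RdfDataset  # assumed import path

    ds = RdfDataset()
    # subject, predicate and object as plain strings
    ds.add_triple("http://example.org/item/1", "rdf:type", "schema:Book")
    ds.add_triple("http://example.org/item/1", "schema:name", "Moby Dick")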
250,729 | KnowledgeLinks/rdfframework | rdfframework/datasets/rdfdatasets.py | RdfDataset.load_data | def load_data(self, data, **kwargs):
"""
Bulk adds rdf data to the class
args:
data: the data to be loaded
kwargs:
strip_orphans: True or False - remove triples that have an
orphan blanknode as the object
obj_method: "list", or None: if "list" the object of a method
will be in the form of a list.
"""
self.__set_map__(**kwargs)
start = datetime.datetime.now()
log.debug("Dataload stated")
if isinstance(data, list):
data = self._convert_results(data, **kwargs)
class_types = self.__group_data__(data, **kwargs)
# generate classes and add attributes to the data
self._generate_classes(class_types, self.non_defined, **kwargs)
# add triples to the dataset
for triple in data:
self.add_triple(sub=triple, **kwargs)
log.debug("Dataload completed in '%s'",
(datetime.datetime.now() - start)) | python | def load_data(self, data, **kwargs):
"""
Bulk adds rdf data to the class
args:
data: the data to be loaded
kwargs:
strip_orphans: True or False - remove triples that have an
orphan blanknode as the object
obj_method: "list", or None: if "list" the object of a method
will be in the form of a list.
"""
self.__set_map__(**kwargs)
start = datetime.datetime.now()
log.debug("Dataload stated")
if isinstance(data, list):
data = self._convert_results(data, **kwargs)
class_types = self.__group_data__(data, **kwargs)
# generate classes and add attributes to the data
self._generate_classes(class_types, self.non_defined, **kwargs)
# add triples to the dataset
for triple in data:
self.add_triple(sub=triple, **kwargs)
log.debug("Dataload completed in '%s'",
(datetime.datetime.now() - start)) | [
"def",
"load_data",
"(",
"self",
",",
"data",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"__set_map__",
"(",
"*",
"*",
"kwargs",
")",
"start",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"log",
".",
"debug",
"(",
"\"Dataload stated\"",
")",
"if",
"isinstance",
"(",
"data",
",",
"list",
")",
":",
"data",
"=",
"self",
".",
"_convert_results",
"(",
"data",
",",
"*",
"*",
"kwargs",
")",
"class_types",
"=",
"self",
".",
"__group_data__",
"(",
"data",
",",
"*",
"*",
"kwargs",
")",
"# generate classes and add attributes to the data",
"self",
".",
"_generate_classes",
"(",
"class_types",
",",
"self",
".",
"non_defined",
",",
"*",
"*",
"kwargs",
")",
"# add triples to the dataset",
"for",
"triple",
"in",
"data",
":",
"self",
".",
"add_triple",
"(",
"sub",
"=",
"triple",
",",
"*",
"*",
"kwargs",
")",
"log",
".",
"debug",
"(",
"\"Dataload completed in '%s'\"",
",",
"(",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"-",
"start",
")",
")"
] | Bulk adds rdf data to the class
args:
data: the data to be loaded
kwargs:
strip_orphans: True or False - remove triples that have an
orphan blanknode as the object
obj_method: "list", or None: if "list" the object of a method
will be in the form of a list. | [
"Bulk",
"adds",
"rdf",
"data",
"to",
"the",
"class"
] | 9ec32dcc4bed51650a4b392cc5c15100fef7923a | https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/datasets/rdfdatasets.py#L211-L237 |
250,730 | KnowledgeLinks/rdfframework | rdfframework/datasets/rdfdatasets.py | RdfDataset.add_rmap_item | def add_rmap_item(self, subj, pred, obj):
"""
adds a triple to the inverted dataset index
"""
def add_item(self, subj, pred, obj):
try:
self.rmap[obj][pred].append(subj)
except KeyError:
try:
self.rmap[obj][pred] = [subj]
except KeyError:
self.rmap[obj] = {pred: [subj]}
if isinstance(obj, list):
for item in obj:
add_item(self, subj, pred, item)
else:
add_item(self, subj, pred, obj) | python | def add_rmap_item(self, subj, pred, obj):
"""
adds a triple to the inverted dataset index
"""
def add_item(self, subj, pred, obj):
try:
self.rmap[obj][pred].append(subj)
except KeyError:
try:
self.rmap[obj][pred] = [subj]
except KeyError:
self.rmap[obj] = {pred: [subj]}
if isinstance(obj, list):
for item in obj:
add_item(self, subj, pred, item)
else:
add_item(self, subj, pred, obj) | [
"def",
"add_rmap_item",
"(",
"self",
",",
"subj",
",",
"pred",
",",
"obj",
")",
":",
"def",
"add_item",
"(",
"self",
",",
"subj",
",",
"pred",
",",
"obj",
")",
":",
"try",
":",
"self",
".",
"rmap",
"[",
"obj",
"]",
"[",
"pred",
"]",
".",
"append",
"(",
"subj",
")",
"except",
"KeyError",
":",
"try",
":",
"self",
".",
"rmap",
"[",
"obj",
"]",
"[",
"pred",
"]",
"=",
"[",
"subj",
"]",
"except",
"KeyError",
":",
"self",
".",
"rmap",
"[",
"obj",
"]",
"=",
"{",
"pred",
":",
"[",
"subj",
"]",
"}",
"if",
"isinstance",
"(",
"obj",
",",
"list",
")",
":",
"for",
"item",
"in",
"obj",
":",
"add_item",
"(",
"self",
",",
"subj",
",",
"pred",
",",
"item",
")",
"else",
":",
"add_item",
"(",
"self",
",",
"subj",
",",
"pred",
",",
"obj",
")"
] | adds a triple to the inverted dataset index | [
"adds",
"a",
"triple",
"to",
"the",
"inverted",
"dataset",
"index"
] | 9ec32dcc4bed51650a4b392cc5c15100fef7923a | https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/datasets/rdfdatasets.py#L327-L344 |
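The try/except cascade in add_rmap_item() is the EAFP idiom for growing a two-level dict of lists. The same pattern, standalone and runnable:

    rmap = {}

    def add_item(subj, pred, obj):
        try:
            rmap[obj][pred].append(subj)
        except KeyError:
            try:
                rmap[obj][pred] = [subj]
            except KeyError:
                rmap[obj] = {pred: [subj]}

    add_item('ex:book1', 'schema:author', 'ex:melville')
    add_item('ex:book2', 'schema:author', 'ex:melville')
    print(rmap)  # {'ex:melville': {'schema:author': ['ex:book1', 'ex:book2']}}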
250,731 | KnowledgeLinks/rdfframework | rdfframework/datasets/rdfdatasets.py | RdfDataset._generate_classes | def _generate_classes(self, class_types, non_defined, **kwargs):
""" creates the class for each class in the data set
args:
class_types: list of class_types in the dataset
non_defined: list of subjects that have no defined class
"""
# kwargs['dataset'] = self
for class_type in class_types:
self[class_type[self.smap]] = self._get_rdfclass(class_type,
**kwargs)\
(class_type,
self,
**kwargs)
self.add_rmap_item(self[class_type[self.smap]],
class_type[self.pmap],
class_type[self.omap])
for class_type in non_defined:
self[class_type] = RdfClassBase(class_type, self, **kwargs)
self.add_rmap_item(self[class_type], __a__, None)
self.__set_classes__
try:
self.base_class = self[self.base_uri]
except KeyError:
self.base_class = None | python | def _generate_classes(self, class_types, non_defined, **kwargs):
""" creates the class for each class in the data set
args:
class_types: list of class_types in the dataset
non_defined: list of subjects that have no defined class
"""
# kwargs['dataset'] = self
for class_type in class_types:
self[class_type[self.smap]] = self._get_rdfclass(class_type,
**kwargs)\
(class_type,
self,
**kwargs)
self.add_rmap_item(self[class_type[self.smap]],
class_type[self.pmap],
class_type[self.omap])
for class_type in non_defined:
self[class_type] = RdfClassBase(class_type, self, **kwargs)
self.add_rmap_item(self[class_type], __a__, None)
self.__set_classes__
try:
self.base_class = self[self.base_uri]
except KeyError:
self.base_class = None | [
"def",
"_generate_classes",
"(",
"self",
",",
"class_types",
",",
"non_defined",
",",
"*",
"*",
"kwargs",
")",
":",
"# kwargs['dataset'] = self",
"for",
"class_type",
"in",
"class_types",
":",
"self",
"[",
"class_type",
"[",
"self",
".",
"smap",
"]",
"]",
"=",
"self",
".",
"_get_rdfclass",
"(",
"class_type",
",",
"*",
"*",
"kwargs",
")",
"(",
"class_type",
",",
"self",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"add_rmap_item",
"(",
"self",
"[",
"class_type",
"[",
"self",
".",
"smap",
"]",
"]",
",",
"class_type",
"[",
"self",
".",
"pmap",
"]",
",",
"class_type",
"[",
"self",
".",
"omap",
"]",
")",
"for",
"class_type",
"in",
"non_defined",
":",
"self",
"[",
"class_type",
"]",
"=",
"RdfClassBase",
"(",
"class_type",
",",
"self",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"add_rmap_item",
"(",
"self",
"[",
"class_type",
"]",
",",
"__a__",
",",
"None",
")",
"self",
".",
"__set_classes__",
"try",
":",
"self",
".",
"base_class",
"=",
"self",
"[",
"self",
".",
"base_uri",
"]",
"except",
"KeyError",
":",
"self",
".",
"base_class",
"=",
"None"
] | creates the class for each class in the data set
args:
class_types: list of class_types in the dataset
non_defined: list of subjects that have no defined class | [
"creates",
"the",
"class",
"for",
"each",
"class",
"in",
"the",
"data",
"set"
] | 9ec32dcc4bed51650a4b392cc5c15100fef7923a | https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/datasets/rdfdatasets.py#L346-L372 |
250,732 | KnowledgeLinks/rdfframework | rdfframework/datasets/rdfdatasets.py | RdfDataset._get_rdfclass | def _get_rdfclass(self, class_type, **kwargs):
""" returns the instanticated class from the class list
args:
class_type: dictionary with rdf_types
"""
def select_class(class_name):
""" finds the class in the rdfclass Module"""
try:
return getattr(MODULE.rdfclass, class_name.pyuri)
except AttributeError:
return RdfClassBase
if kwargs.get("def_load"):
return RdfClassBase
if isinstance(class_type[self.omap], list):
bases = [select_class(class_name)
for class_name in class_type[self.omap]]
bases = [base for base in bases if base != RdfClassBase]
if len(bases) == 0:
return RdfClassBase
elif len(bases) == 1:
return bases[0]
else:
bases = remove_parents(bases)
if len(bases) == 1:
return bases[0]
else:
name = "_".join(sorted(class_type[self.omap]))
# if the class has already been created return it
if hasattr(MODULE.rdfclass, name):
return getattr(MODULE.rdfclass, name)
new_class = type(name,
tuple(bases),
{})
new_class.hierarchy = list_hierarchy(class_type[self.omap][0],
bases)
new_class.class_names = sorted([base.__name__ \
for base in bases \
if base not in [RdfClassBase, dict]])
setattr(MODULE.rdfclass, name, new_class)
return new_class
else:
return select_class(class_type[self.omap]) | python | def _get_rdfclass(self, class_type, **kwargs):
""" returns the instanticated class from the class list
args:
class_type: dictionary with rdf_types
"""
def select_class(class_name):
""" finds the class in the rdfclass Module"""
try:
return getattr(MODULE.rdfclass, class_name.pyuri)
except AttributeError:
return RdfClassBase
if kwargs.get("def_load"):
return RdfClassBase
if isinstance(class_type[self.omap], list):
bases = [select_class(class_name)
for class_name in class_type[self.omap]]
bases = [base for base in bases if base != RdfClassBase]
if len(bases) == 0:
return RdfClassBase
elif len(bases) == 1:
return bases[0]
else:
bases = remove_parents(bases)
if len(bases) == 1:
return bases[0]
else:
name = "_".join(sorted(class_type[self.omap]))
# if the class has already been created return it
if hasattr(MODULE.rdfclass, name):
return getattr(MODULE.rdfclass, name)
new_class = type(name,
tuple(bases),
{})
new_class.hierarchy = list_hierarchy(class_type[self.omap][0],
bases)
new_class.class_names = sorted([base.__name__ \
for base in bases \
if base not in [RdfClassBase, dict]])
setattr(MODULE.rdfclass, name, new_class)
return new_class
else:
return select_class(class_type[self.omap]) | [
"def",
"_get_rdfclass",
"(",
"self",
",",
"class_type",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"select_class",
"(",
"class_name",
")",
":",
"\"\"\" finds the class in the rdfclass Module\"\"\"",
"try",
":",
"return",
"getattr",
"(",
"MODULE",
".",
"rdfclass",
",",
"class_name",
".",
"pyuri",
")",
"except",
"AttributeError",
":",
"return",
"RdfClassBase",
"if",
"kwargs",
".",
"get",
"(",
"\"def_load\"",
")",
":",
"return",
"RdfClassBase",
"if",
"isinstance",
"(",
"class_type",
"[",
"self",
".",
"omap",
"]",
",",
"list",
")",
":",
"bases",
"=",
"[",
"select_class",
"(",
"class_name",
")",
"for",
"class_name",
"in",
"class_type",
"[",
"self",
".",
"omap",
"]",
"]",
"bases",
"=",
"[",
"base",
"for",
"base",
"in",
"bases",
"if",
"base",
"!=",
"RdfClassBase",
"]",
"if",
"len",
"(",
"bases",
")",
"==",
"0",
":",
"return",
"RdfClassBase",
"elif",
"len",
"(",
"bases",
")",
"==",
"1",
":",
"return",
"bases",
"[",
"0",
"]",
"else",
":",
"bases",
"=",
"remove_parents",
"(",
"bases",
")",
"if",
"len",
"(",
"bases",
")",
"==",
"1",
":",
"return",
"bases",
"[",
"0",
"]",
"else",
":",
"name",
"=",
"\"_\"",
".",
"join",
"(",
"sorted",
"(",
"class_type",
"[",
"self",
".",
"omap",
"]",
")",
")",
"# if the the class has already been created return it",
"if",
"hasattr",
"(",
"MODULE",
".",
"rdfclass",
",",
"name",
")",
":",
"return",
"getattr",
"(",
"MODULE",
".",
"rdfclass",
",",
"name",
")",
"new_class",
"=",
"type",
"(",
"name",
",",
"tuple",
"(",
"bases",
")",
",",
"{",
"}",
")",
"new_class",
".",
"hierarchy",
"=",
"list_hierarchy",
"(",
"class_type",
"[",
"self",
".",
"omap",
"]",
"[",
"0",
"]",
",",
"bases",
")",
"new_class",
".",
"class_names",
"=",
"sorted",
"(",
"[",
"base",
".",
"__name__",
"for",
"base",
"in",
"bases",
"if",
"base",
"not",
"in",
"[",
"RdfClassBase",
",",
"dict",
"]",
"]",
")",
"setattr",
"(",
"MODULE",
".",
"rdfclass",
",",
"name",
",",
"new_class",
")",
"return",
"new_class",
"else",
":",
"return",
"select_class",
"(",
"class_type",
"[",
"self",
".",
"omap",
"]",
")"
] | returns the instantiated class from the class list
args:
class_type: dictionary with rdf_types | [
"returns",
"the",
"instanticated",
"class",
"from",
"the",
"class",
"list"
] | 9ec32dcc4bed51650a4b392cc5c15100fef7923a | https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/datasets/rdfdatasets.py#L374-L418 |
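The multi-type branch above builds a combined class at runtime with the three-argument type() call. A minimal standalone sketch of that mechanism, with two stand-in base classes:

    class Person(object):
        pass

    class Agent(object):
        pass

    # equivalent of the name/bases/type() dance in _get_rdfclass
    name = "_".join(sorted(['Agent', 'Person']))
    Combined = type(name, (Person, Agent), {})
    print(Combined.__name__)  # Agent_Person
    print([c.__name__ for c in Combined.__mro__])
    # ['Agent_Person', 'Person', 'Agent', 'object']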
250,733 | shaypal5/utilitime | utilitime/time_interval.py | TimeInterval.from_timedelta | def from_timedelta(cls, datetime_obj, duration):
"""Create a new TimeInterval object from a start point and a duration.
If duration is positive, datetime_obj is the start of the interval;
if duration is negative, datetime_obj is the end of the interval.
Parameters
----------
datetime_obj : datetime.datetime
duration : datetime.timedelta
Returns
-------
neutils.time.TimeInterval
"""
if duration.total_seconds() > 0:
return TimeInterval(datetime_obj, datetime_obj + duration)
else:
return TimeInterval(datetime_obj + duration, datetime_obj) | python | def from_timedelta(cls, datetime_obj, duration):
"""Create a new TimeInterval object from a start point and a duration.
If duration is positive, datetime_obj is the start of the interval;
if duration is negative, datetime_obj is the end of the interval.
Parameters
----------
datetime_obj : datetime.datetime
duration : datetime.timedelta
Returns
-------
neutils.time.TimeInterval
"""
if duration.total_seconds() > 0:
return TimeInterval(datetime_obj, datetime_obj + duration)
else:
return TimeInterval(datetime_obj + duration, datetime_obj) | [
"def",
"from_timedelta",
"(",
"cls",
",",
"datetime_obj",
",",
"duration",
")",
":",
"if",
"duration",
".",
"total_seconds",
"(",
")",
">",
"0",
":",
"return",
"TimeInterval",
"(",
"datetime_obj",
",",
"datetime_obj",
"+",
"duration",
")",
"else",
":",
"return",
"TimeInterval",
"(",
"datetime_obj",
"+",
"duration",
",",
"datetime_obj",
")"
] | Create a new TimeInterval object from a start point and a duration.
If duration is positive, datetime_obj is the start of the interval;
if duration is negative, datetime_obj is the end of the interval.
Parameters
----------
datetime_obj : datetime.datetime
duration : datetime.timedelta
Returns
-------
neutils.time.TimeInterval | [
"Create",
"a",
"new",
"TimeInterval",
"object",
"from",
"a",
"start",
"point",
"and",
"a",
"duration",
"."
] | 554ca05fa83c2dbf5d6cf9c9cfa6b03ee6cdb609 | https://github.com/shaypal5/utilitime/blob/554ca05fa83c2dbf5d6cf9c9cfa6b03ee6cdb609/utilitime/time_interval.py#L19-L37 |
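A usage sketch for from_timedelta(); a negative duration makes datetime_obj the end of the interval rather than the start. The import path follows the entry's path field but is not verified:

    import datetime
    from utilitime.time_interval import TimeInterval  # assumed import path

    noon = datetime.datetime(2017, 6, 1, 12, 0)
    ahead = TimeInterval.from_timedelta(noon, datetime.timedelta(hours=2))
    # spans 12:00 to 14:00
    behind = TimeInterval.from_timedelta(noon, datetime.timedelta(hours=-2))
    # spans 10:00 to 12:00, with noon as the end point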
250,734 | tomnor/channelpack | channelpack/pullxl.py | _get_startstop | def _get_startstop(sheet, startcell=None, stopcell=None):
"""
Return two StartStop objects, based on the sheet and startcell and
stopcell.
sheet: xlrd.sheet.Sheet instance
Ready for use.
startcell: str or None
If given, a spread sheet style notation of the cell where data
start, ("F9").
stopcell: str or None
A spread sheet style notation of the cell where data end,
("F9"). startcell and stopcell can be used in any combination.
"""
start = StartStop(0, 0) # row, col
stop = StartStop(sheet.nrows, sheet.ncols)
if startcell:
m = re.match(XLNOT_RX, startcell)
start.row = int(m.group(2)) - 1
start.col = letter2num(m.group(1), zbase=True)
if stopcell:
m = re.match(XLNOT_RX, stopcell)
stop.row = int(m.group(2))
# Stop number is exclusive
stop.col = letter2num(m.group(1), zbase=False)
return [start, stop] | python | def _get_startstop(sheet, startcell=None, stopcell=None):
"""
Return two StartStop objects, based on the sheet and startcell and
stopcell.
sheet: xlrd.sheet.Sheet instance
Ready for use.
startcell: str or None
If given, a spread sheet style notation of the cell where data
start, ("F9").
stopcell: str or None
A spread sheet style notation of the cell where data end,
("F9"). startcell and stopcell can be used in any combination.
"""
start = StartStop(0, 0) # row, col
stop = StartStop(sheet.nrows, sheet.ncols)
if startcell:
m = re.match(XLNOT_RX, startcell)
start.row = int(m.group(2)) - 1
start.col = letter2num(m.group(1), zbase=True)
if stopcell:
m = re.match(XLNOT_RX, stopcell)
stop.row = int(m.group(2))
# Stop number is exclusive
stop.col = letter2num(m.group(1), zbase=False)
return [start, stop] | [
"def",
"_get_startstop",
"(",
"sheet",
",",
"startcell",
"=",
"None",
",",
"stopcell",
"=",
"None",
")",
":",
"start",
"=",
"StartStop",
"(",
"0",
",",
"0",
")",
"# row, col",
"stop",
"=",
"StartStop",
"(",
"sheet",
".",
"nrows",
",",
"sheet",
".",
"ncols",
")",
"if",
"startcell",
":",
"m",
"=",
"re",
".",
"match",
"(",
"XLNOT_RX",
",",
"startcell",
")",
"start",
".",
"row",
"=",
"int",
"(",
"m",
".",
"group",
"(",
"2",
")",
")",
"-",
"1",
"start",
".",
"col",
"=",
"letter2num",
"(",
"m",
".",
"group",
"(",
"1",
")",
",",
"zbase",
"=",
"True",
")",
"if",
"stopcell",
":",
"m",
"=",
"re",
".",
"match",
"(",
"XLNOT_RX",
",",
"stopcell",
")",
"stop",
".",
"row",
"=",
"int",
"(",
"m",
".",
"group",
"(",
"2",
")",
")",
"# Stop number is exclusive",
"stop",
".",
"col",
"=",
"letter2num",
"(",
"m",
".",
"group",
"(",
"1",
")",
",",
"zbase",
"=",
"False",
")",
"return",
"[",
"start",
",",
"stop",
"]"
] | Return two StartStop objects, based on the sheet and startcell and
stopcell.
sheet: xlrd.sheet.Sheet instance
Ready for use.
startcell: str or None
If given, a spread sheet style notation of the cell where data
start, ("F9").
stopcell: str or None
A spread sheet style notation of the cell where data end,
("F9"). startcell and stopcell can be used in any combination. | [
"Return",
"two",
"StartStop",
"objects",
"based",
"on",
"the",
"sheet",
"and",
"startcell",
"and",
"stopcell",
"."
] | 9ad3cd11c698aed4c0fc178385b2ba38a7d0efae | https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pullxl.py#L88-L120 |
250,735 | tomnor/channelpack | channelpack/pullxl.py | prepread | def prepread(sheet, header=True, startcell=None, stopcell=None):
"""Return four StartStop objects, defining the outer bounds of
header row and data range, respectively. If header is False, the
first two items will be None.
--> [headstart, headstop, datstart, datstop]
sheet: xlrd.sheet.Sheet instance
Ready for use.
header: bool or str
True if the defined data range includes a header with field
names. Else False - the whole range is data. If a string, it is
spread sheet style notation of the startcell for the header
("F9"). The "width" of this record is the same as for the data.
startcell: str or None
If given, a spread sheet style notation of the cell where reading
start, ("F9").
stopcell: str or None
A spread sheet style notation of the cell where data end,
("F9").
startcell and stopcell can both be None, either one specified or
both specified.
Note to self: consider making it possible to specify headers in a column.
"""
datstart, datstop = _get_startstop(sheet, startcell, stopcell)
headstart, headstop = StartStop(0, 0), StartStop(0, 0) # Holders
def typicalprep():
headstart.row, headstart.col = datstart.row, datstart.col
headstop.row, headstop.col = datstart.row + 1, datstop.col
# Tick the data start row by 1:
datstart.row += 1
def offsetheaderprep():
headstart.row, headstart.col = headrow, headcol
headstop.row = headrow + 1
headstop.col = headcol + (datstop.col - datstart.col) # stop > start
if header is True: # Simply the toprow of the table.
typicalprep()
return [headstart, headstop, datstart, datstop]
elif header: # Then it is a string if not False. ("F9")
m = re.match(XLNOT_RX, header)
headrow = int(m.group(2)) - 1
headcol = letter2num(m.group(1), zbase=True)
if headrow == datstart.row and headcol == datstart.col:
typicalprep()
return [headstart, headstop, datstart, datstop]
elif headrow == datstart.row:
typicalprep()
offsetheaderprep()
return [headstart, headstop, datstart, datstop]
else:
offsetheaderprep()
return [headstart, headstop, datstart, datstop]
else: # header is False
return [None, None, datstart, datstop] | python | def prepread(sheet, header=True, startcell=None, stopcell=None):
"""Return four StartStop objects, defining the outer bounds of
header row and data range, respectively. If header is False, the
first two items will be None.
--> [headstart, headstop, datstart, datstop]
sheet: xlrd.sheet.Sheet instance
Ready for use.
header: bool or str
True if the defined data range includes a header with field
names. Else False - the whole range is data. If a string, it is
spread sheet style notation of the startcell for the header
("F9"). The "width" of this record is the same as for the data.
startcell: str or None
If given, a spread sheet style notation of the cell where reading
start, ("F9").
stopcell: str or None
A spread sheet style notation of the cell where data end,
("F9").
startcell and stopcell can both be None, either one specified or
both specified.
Note to self: consider making it possible to specify headers in a column.
"""
datstart, datstop = _get_startstop(sheet, startcell, stopcell)
headstart, headstop = StartStop(0, 0), StartStop(0, 0) # Holders
def typicalprep():
headstart.row, headstart.col = datstart.row, datstart.col
headstop.row, headstop.col = datstart.row + 1, datstop.col
# Tick the data start row by 1:
datstart.row += 1
def offsetheaderprep():
headstart.row, headstart.col = headrow, headcol
headstop.row = headrow + 1
headstop.col = headcol + (datstop.col - datstart.col) # stop > start
if header is True: # Simply the toprow of the table.
typicalprep()
return [headstart, headstop, datstart, datstop]
elif header: # Then it is a string if not False. ("F9")
m = re.match(XLNOT_RX, header)
headrow = int(m.group(2)) - 1
headcol = letter2num(m.group(1), zbase=True)
if headrow == datstart.row and headcol == datstart.col:
typicalprep()
return [headstart, headstop, datstart, datstop]
elif headrow == datstart.row:
typicalprep()
offsetheaderprep()
return [headstart, headstop, datstart, datstop]
else:
offsetheaderprep()
return [headstart, headstop, datstart, datstop]
else: # header is False
return [None, None, datstart, datstop] | [
"def",
"prepread",
"(",
"sheet",
",",
"header",
"=",
"True",
",",
"startcell",
"=",
"None",
",",
"stopcell",
"=",
"None",
")",
":",
"datstart",
",",
"datstop",
"=",
"_get_startstop",
"(",
"sheet",
",",
"startcell",
",",
"stopcell",
")",
"headstart",
",",
"headstop",
"=",
"StartStop",
"(",
"0",
",",
"0",
")",
",",
"StartStop",
"(",
"0",
",",
"0",
")",
"# Holders",
"def",
"typicalprep",
"(",
")",
":",
"headstart",
".",
"row",
",",
"headstart",
".",
"col",
"=",
"datstart",
".",
"row",
",",
"datstart",
".",
"col",
"headstop",
".",
"row",
",",
"headstop",
".",
"col",
"=",
"datstart",
".",
"row",
"+",
"1",
",",
"datstop",
".",
"col",
"# Tick the data start row by 1:",
"datstart",
".",
"row",
"+=",
"1",
"def",
"offsetheaderprep",
"(",
")",
":",
"headstart",
".",
"row",
",",
"headstart",
".",
"col",
"=",
"headrow",
",",
"headcol",
"headstop",
".",
"row",
"=",
"headrow",
"+",
"1",
"headstop",
".",
"col",
"=",
"headcol",
"+",
"(",
"datstop",
".",
"col",
"-",
"datstart",
".",
"col",
")",
"# stop > start",
"if",
"header",
"is",
"True",
":",
"# Simply the toprow of the table.",
"typicalprep",
"(",
")",
"return",
"[",
"headstart",
",",
"headstop",
",",
"datstart",
",",
"datstop",
"]",
"elif",
"header",
":",
"# Then it is a string if not False. (\"F9\")",
"m",
"=",
"re",
".",
"match",
"(",
"XLNOT_RX",
",",
"header",
")",
"headrow",
"=",
"int",
"(",
"m",
".",
"group",
"(",
"2",
")",
")",
"-",
"1",
"headcol",
"=",
"letter2num",
"(",
"m",
".",
"group",
"(",
"1",
")",
",",
"zbase",
"=",
"True",
")",
"if",
"headrow",
"==",
"datstart",
".",
"row",
"and",
"headcol",
"==",
"datstart",
".",
"col",
":",
"typicalprep",
"(",
")",
"return",
"[",
"headstart",
",",
"headstop",
",",
"datstart",
",",
"datstop",
"]",
"elif",
"headrow",
"==",
"datstart",
".",
"row",
":",
"typicalprep",
"(",
")",
"offsetheaderprep",
"(",
")",
"return",
"[",
"headstart",
",",
"headstop",
",",
"datstart",
",",
"datstop",
"]",
"else",
":",
"offsetheaderprep",
"(",
")",
"return",
"[",
"headstart",
",",
"headstop",
",",
"datstart",
",",
"datstop",
"]",
"else",
":",
"# header is False",
"return",
"[",
"None",
",",
"None",
",",
"datstart",
",",
"datstop",
"]"
] | Return four StartStop objects, defining the outer bounds of
header row and data range, respectively. If header is False, the
first two items will be None.
--> [headstart, headstop, datstart, datstop]
sheet: xlrd.sheet.Sheet instance
Ready for use.
header: bool or str
True if the defined data range includes a header with field
names. Else False - the whole range is data. If a string, it is
spread sheet style notation of the startcell for the header
("F9"). The "width" of this record is the same as for the data.
startcell: str or None
If given, a spread sheet style notation of the cell where reading
start, ("F9").
stopcell: str or None
A spread sheet style notation of the cell where data end,
("F9").
startcell and stopcell can both be None, either one specified or
both specified.
Note to self: consider making it possible to specify headers in a column. | [
"Return",
"four",
"StartStop",
"objects",
"defining",
"the",
"outer",
"bounds",
"of",
"header",
"row",
"and",
"data",
"range",
"respectively",
".",
"If",
"header",
"is",
"False",
"the",
"first",
"two",
"items",
"will",
"be",
"None",
"."
] | 9ad3cd11c698aed4c0fc178385b2ba38a7d0efae | https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pullxl.py#L123-L185 |
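A usage sketch for prepread() against an xlrd sheet; the file name and cell addresses are made up, and the import path follows the entry's path field but is not verified:

    import xlrd  # third-party dependency used throughout this module
    from channelpack.pullxl import prepread  # assumed import path

    book = xlrd.open_workbook('measurements.xls')  # hypothetical file
    sheet = book.sheet_by_index(0)
    # header in the top row of the range, data from B2 down to E100
    headstart, headstop, datstart, datstop = prepread(
        sheet, header=True, startcell='B2', stopcell='E100')
    # datstart.row has been ticked one past the header row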
250,736 | tomnor/channelpack | channelpack/pullxl.py | sheetheader | def sheetheader(sheet, startstops, usecols=None):
"""Return the channel names in a list suitable as an argument to
ChannelPack's `set_channel_names` method. Return None if first two
StartStops are None.
This function is slightly confusing, because it shall be called with
the same parameters as sheet_asdict. But knowing that, it should be
convenient.
sheet: xlrd.sheet.Sheet instance
Ready for use.
startstops: list
Four StartStop objects defining the data to read. See
:func:`~channelpack.pullxl.prepread`, returning such a list.
usecols: str or sequence of ints or None
The columns to use, 0-based. 0 is the spread sheet column
"A". Can be given as a string also - 'C:E, H' for columns C, D,
E and H.
"""
headstart, headstop, dstart, dstop = startstops
if headstart is None:
return None
assert headstop.row - headstart.row == 1, ('Field names must be in '
'same row so far. Or '
'this is a bug')
header = []
# One needs to make the same offsets within start and stop as in usecols:
usecols = _sanitize_usecols(usecols)
cols = usecols or range(dstart.col, dstop.col)
headcols = [c + (headstart.col - dstart.col) for c in cols]
for col in headcols:
fieldname = sheet.cell(headstart.row, col).value
header.append(unicode(fieldname))
return header | python | def sheetheader(sheet, startstops, usecols=None):
"""Return the channel names in a list suitable as an argument to
ChannelPack's `set_channel_names` method. Return None if first two
StartStops are None.
This function is slightly confusing, because it shall be called with
the same parameters as sheet_asdict. But knowing that, it should be
convenient.
sheet: xlrd.sheet.Sheet instance
Ready for use.
startstops: list
Four StartStop objects defining the data to read. See
:func:`~channelpack.pullxl.prepread`, returning such a list.
usecols: str or sequence of ints or None
The columns to use, 0-based. 0 is the spread sheet column
"A". Can be given as a string also - 'C:E, H' for columns C, D,
E and H.
"""
headstart, headstop, dstart, dstop = startstops
if headstart is None:
return None
assert headstop.row - headstart.row == 1, ('Field names must be in '
'same row so far. Or '
'this is a bug')
header = []
# One needs to make the same offsets within start and stop as in usecols:
usecols = _sanitize_usecols(usecols)
cols = usecols or range(dstart.col, dstop.col)
headcols = [c + (headstart.col - dstart.col) for c in cols]
for col in headcols:
fieldname = sheet.cell(headstart.row, col).value
header.append(unicode(fieldname))
return header | [
"def",
"sheetheader",
"(",
"sheet",
",",
"startstops",
",",
"usecols",
"=",
"None",
")",
":",
"headstart",
",",
"headstop",
",",
"dstart",
",",
"dstop",
"=",
"startstops",
"if",
"headstart",
"is",
"None",
":",
"return",
"None",
"assert",
"headstop",
".",
"row",
"-",
"headstart",
".",
"row",
"==",
"1",
",",
"(",
"'Field names must be in '",
"'same row so far. Or '",
"'this is a bug'",
")",
"header",
"=",
"[",
"]",
"# One need to make same offsets within start and stop as in usecols:",
"usecols",
"=",
"_sanitize_usecols",
"(",
"usecols",
")",
"cols",
"=",
"usecols",
"or",
"range",
"(",
"dstart",
".",
"col",
",",
"dstop",
".",
"col",
")",
"headcols",
"=",
"[",
"c",
"+",
"(",
"headstart",
".",
"col",
"-",
"dstart",
".",
"col",
")",
"for",
"c",
"in",
"cols",
"]",
"for",
"col",
"in",
"headcols",
":",
"fieldname",
"=",
"sheet",
".",
"cell",
"(",
"headstart",
".",
"row",
",",
"col",
")",
".",
"value",
"header",
".",
"append",
"(",
"unicode",
"(",
"fieldname",
")",
")",
"return",
"header"
] | Return the channel names in a list suitable as an argument to
ChannelPack's `set_channel_names` method. Return None if first two
StartStops are None.
This function is slightly confusing, because it shall be called with
the same parameters as sheet_asdict. But knowing that, it should be
convenient.
sheet: xlrd.sheet.Sheet instance
Ready for use.
startstops: list
Four StartStop objects defining the data to read. See
:func:`~channelpack.pullxl.prepread`, returning such a list.
usecols: str or sequence of ints or None
The columns to use, 0-based. 0 is the spread sheet column
"A". Can be given as a string also - 'C:E, H' for columns C, D,
E and H. | [
"Return",
"the",
"channel",
"names",
"in",
"a",
"list",
"suitable",
"as",
"an",
"argument",
"to",
"ChannelPack",
"s",
"set_channel_names",
"method",
".",
"Return",
"None",
"if",
"first",
"two",
"StartStops",
"are",
"None",
"."
] | 9ad3cd11c698aed4c0fc178385b2ba38a7d0efae | https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pullxl.py#L188-L226 |
250,737 | tomnor/channelpack | channelpack/pullxl.py | _sanitize_usecols | def _sanitize_usecols(usecols):
"""Make a tuple of sorted integers and return it. Return None if
usecols is None"""
if usecols is None:
return None
try:
pats = usecols.split(',')
pats = [p.strip() for p in pats if p]
except AttributeError:
usecols = [int(c) for c in usecols] # Raises an error on a mixed sequence.
usecols.sort()
return tuple(usecols) # Assume sane sequence of integers.
cols = []
for pat in pats:
if ':' in pat:
c1, c2 = pat.split(':')
n1 = letter2num(c1, zbase=True)
n2 = letter2num(c2, zbase=False)
cols += range(n1, n2)
else:
cols += [letter2num(pat, zbase=True)]
# Remove duplicates:
cols = list(set(cols))
cols.sort()
return tuple(cols) | python | def _sanitize_usecols(usecols):
"""Make a tuple of sorted integers and return it. Return None if
usecols is None"""
if usecols is None:
return None
try:
pats = usecols.split(',')
pats = [p.strip() for p in pats if p]
except AttributeError:
usecols = [int(c) for c in usecols] # Raises an error on a mixed sequence.
usecols.sort()
return tuple(usecols) # Assume sane sequence of integers.
cols = []
for pat in pats:
if ':' in pat:
c1, c2 = pat.split(':')
n1 = letter2num(c1, zbase=True)
n2 = letter2num(c2, zbase=False)
cols += range(n1, n2)
else:
cols += [letter2num(pat, zbase=True)]
# Remove duplicates:
cols = list(set(cols))
cols.sort()
return tuple(cols) | [
"def",
"_sanitize_usecols",
"(",
"usecols",
")",
":",
"if",
"usecols",
"is",
"None",
":",
"return",
"None",
"try",
":",
"pats",
"=",
"usecols",
".",
"split",
"(",
"','",
")",
"pats",
"=",
"[",
"p",
".",
"strip",
"(",
")",
"for",
"p",
"in",
"pats",
"if",
"p",
"]",
"except",
"AttributeError",
":",
"usecols",
"=",
"[",
"int",
"(",
"c",
")",
"for",
"c",
"in",
"usecols",
"]",
"# Make error if mix.",
"usecols",
".",
"sort",
"(",
")",
"return",
"tuple",
"(",
"usecols",
")",
"# Assume sane sequence of integers.",
"cols",
"=",
"[",
"]",
"for",
"pat",
"in",
"pats",
":",
"if",
"':'",
"in",
"pat",
":",
"c1",
",",
"c2",
"=",
"pat",
".",
"split",
"(",
"':'",
")",
"n1",
"=",
"letter2num",
"(",
"c1",
",",
"zbase",
"=",
"True",
")",
"n2",
"=",
"letter2num",
"(",
"c2",
",",
"zbase",
"=",
"False",
")",
"cols",
"+=",
"range",
"(",
"n1",
",",
"n2",
")",
"else",
":",
"cols",
"+=",
"[",
"letter2num",
"(",
"pat",
",",
"zbase",
"=",
"True",
")",
"]",
"# Remove duplicates:",
"cols",
"=",
"list",
"(",
"set",
"(",
"cols",
")",
")",
"cols",
".",
"sort",
"(",
")",
"return",
"tuple",
"(",
"cols",
")"
] | Make a tuple of sorted integers and return it. Return None if
usecols is None | [
"Make",
"a",
"tuple",
"of",
"sorted",
"integers",
"and",
"return",
"it",
".",
"Return",
"None",
"if",
"usecols",
"is",
"None"
] | 9ad3cd11c698aed4c0fc178385b2ba38a7d0efae | https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pullxl.py#L353-L381 |
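The string form accepted by _sanitize_usecols() maps spreadsheet column letters to zero-based indices, end-inclusive for ranges. Expected behaviour, given the letter2num() semantics in the next entry (the function is module-private, so the bare call assumes it is in scope):

    print(_sanitize_usecols('C:E, H'))  # (2, 3, 4, 7)
    print(_sanitize_usecols([7, 2]))    # (2, 7), sequences are sorted
    print(_sanitize_usecols(None))      # None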
250,738 | tomnor/channelpack | channelpack/pullxl.py | letter2num | def letter2num(letters, zbase=False):
"""A = 1, C = 3 and so on. Convert spreadsheet style column
enumeration to a number.
Answers:
A = 1, Z = 26, AA = 27, AZ = 52, ZZ = 702, AMJ = 1024
>>> from channelpack.pullxl import letter2num
>>> letter2num('A') == 1
True
>>> letter2num('Z') == 26
True
>>> letter2num('AZ') == 52
True
>>> letter2num('ZZ') == 702
True
>>> letter2num('AMJ') == 1024
True
>>> letter2num('AMJ', zbase=True) == 1023
True
>>> letter2num('A', zbase=True) == 0
True
"""
letters = letters.upper()
res = 0
weight = len(letters) - 1
assert weight >= 0, letters
for i, c in enumerate(letters):
assert 65 <= ord(c) <= 90, c # A-Z
res += (ord(c) - 64) * 26**(weight - i)
if not zbase:
return res
return res - 1 | python | def letter2num(letters, zbase=False):
"""A = 1, C = 3 and so on. Convert spreadsheet style column
enumeration to a number.
Answers:
A = 1, Z = 26, AA = 27, AZ = 52, ZZ = 702, AMJ = 1024
>>> from channelpack.pullxl import letter2num
>>> letter2num('A') == 1
True
>>> letter2num('Z') == 26
True
>>> letter2num('AZ') == 52
True
>>> letter2num('ZZ') == 702
True
>>> letter2num('AMJ') == 1024
True
>>> letter2num('AMJ', zbase=True) == 1023
True
>>> letter2num('A', zbase=True) == 0
True
"""
letters = letters.upper()
res = 0
weight = len(letters) - 1
assert weight >= 0, letters
for i, c in enumerate(letters):
assert 65 <= ord(c) <= 90, c # A-Z
res += (ord(c) - 64) * 26**(weight - i)
if not zbase:
return res
return res - 1 | [
"def",
"letter2num",
"(",
"letters",
",",
"zbase",
"=",
"False",
")",
":",
"letters",
"=",
"letters",
".",
"upper",
"(",
")",
"res",
"=",
"0",
"weight",
"=",
"len",
"(",
"letters",
")",
"-",
"1",
"assert",
"weight",
">=",
"0",
",",
"letters",
"for",
"i",
",",
"c",
"in",
"enumerate",
"(",
"letters",
")",
":",
"assert",
"65",
"<=",
"ord",
"(",
"c",
")",
"<=",
"90",
",",
"c",
"# A-Z",
"res",
"+=",
"(",
"ord",
"(",
"c",
")",
"-",
"64",
")",
"*",
"26",
"**",
"(",
"weight",
"-",
"i",
")",
"if",
"not",
"zbase",
":",
"return",
"res",
"return",
"res",
"-",
"1"
] | A = 1, C = 3 and so on. Convert spreadsheet style column
enumeration to a number.
Answers:
A = 1, Z = 26, AA = 27, AZ = 52, ZZ = 702, AMJ = 1024
>>> from channelpack.pullxl import letter2num
>>> letter2num('A') == 1
True
>>> letter2num('Z') == 26
True
>>> letter2num('AZ') == 52
True
>>> letter2num('ZZ') == 702
True
>>> letter2num('AMJ') == 1024
True
>>> letter2num('AMJ', zbase=True) == 1023
True
>>> letter2num('A', zbase=True) == 0
True | [
"A",
"=",
"1",
"C",
"=",
"3",
"and",
"so",
"on",
".",
"Convert",
"spreadsheet",
"style",
"column",
"enumeration",
"to",
"a",
"number",
"."
] | 9ad3cd11c698aed4c0fc178385b2ba38a7d0efae | https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pullxl.py#L384-L419 |
250,739 | tomnor/channelpack | channelpack/pullxl.py | fromxldate | def fromxldate(xldate, datemode=1):
"""Return a python datetime object
xldate: float
The xl number.
datemode: int
0: 1900-based, 1: 1904-based. See xlrd documentation.
"""
t = xlrd.xldate_as_tuple(xldate, datemode)
return datetime.datetime(*t) | python | def fromxldate(xldate, datemode=1):
"""Return a python datetime object
xldate: float
The xl number.
datemode: int
0: 1900-based, 1: 1904-based. See xlrd documentation.
"""
t = xlrd.xldate_as_tuple(xldate, datemode)
return datetime.datetime(*t) | [
"def",
"fromxldate",
"(",
"xldate",
",",
"datemode",
"=",
"1",
")",
":",
"t",
"=",
"xlrd",
".",
"xldate_as_tuple",
"(",
"xldate",
",",
"datemode",
")",
"return",
"datetime",
".",
"datetime",
"(",
"*",
"t",
")"
] | Return a python datetime object
xldate: float
The xl number.
datemode: int
0: 1900-based, 1: 1904-based. See xlrd documentation. | [
"Return",
"a",
"python",
"datetime",
"object"
] | 9ad3cd11c698aed4c0fc178385b2ba38a7d0efae | https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pullxl.py#L435-L446 |
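A sketch of `fromxldate`, assuming the xlrd dependency is installed. Note that the function defaults to the 1904-based date system, so 1900-based workbooks need datemode=0 passed explicitly:

import datetime
from channelpack.pullxl import fromxldate

# Excel serial 43831.0 is 2020-01-01 in the 1900-based date system.
dt = fromxldate(43831.0, datemode=0)
assert dt == datetime.datetime(2020, 1, 1, 0, 0, 0)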
250,740 | kcolford/txt2boil | txt2boil/__init__.py | language | def language(fname, is_ext=False):
"""Return an instance of the language class that fname is suited for.
Searches through the module langs for the class that matches up
with fname. If is_ext is True then fname will be taken to be
the extension for a language.
"""
global _langmapping
# Normalize the fname so that it looks like an extension.
if is_ext:
fname = 'f.' + fname
_, ext = os.path.splitext(fname)
return _langmapping[ext]() | python | def language(fname, is_ext=False):
"""Return an instance of the language class that fname is suited for.
Searches through the module langs for the class that matches up
with fname. If is_ext is True then fname will be taken to be
the extension for a language.
"""
global _langmapping
# Normalize the fname so that it looks like an extension.
if is_ext:
fname = 'f.' + fname
_, ext = os.path.splitext(fname)
return _langmapping[ext]() | [
"def",
"language",
"(",
"fname",
",",
"is_ext",
"=",
"False",
")",
":",
"global",
"_langmapping",
"# Normalize the fname so that it looks like an extension.",
"if",
"is_ext",
":",
"fname",
"=",
"'.'",
"+",
"fname",
"_",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"fname",
")",
"return",
"_langmapping",
"[",
"ext",
"]",
"(",
")"
] | Return an instance of the language class that fname is suited for.
Searches through the module langs for the class that matches up
with fname. If is_ext is True then fname will be taken to be
the extension for a language. | [
"Return",
"an",
"instance",
"of",
"the",
"language",
"class",
"that",
"fname",
"is",
"suited",
"for",
"."
] | 853a47bb8db27c0224531f24dfd02839c983d027 | https://github.com/kcolford/txt2boil/blob/853a47bb8db27c0224531f24dfd02839c983d027/txt2boil/__init__.py#L42-L58 |
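A sketch of the two calling conventions of `language`. The contents of _langmapping are not shown in this record, so a registered '.py' entry is an assumption here (note the dummy 'f.' stem fix above: os.path.splitext treats a bare leading dot as no extension):

from txt2boil import language

lang = language('project/main.py')  # extension taken from the file name
same = language('py', is_ext=True)  # bare extension, normalized internally
# Both calls return a fresh instance of the class mapped to '.py':
assert type(lang) is type(same)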
250,741 | sys-git/certifiable | certifiable/core.py | certify_text | def certify_text(
value, min_length=None, max_length=None, nonprintable=True, required=True,
):
"""
Certifier for human readable string values.
:param unicode value:
The string to be certified.
:param int min_length:
The minimum length of the string.
:param int max_length:
The maximum acceptable length for the string. By default, the length is not checked.
:param nonprintable:
Whether the string can contain non-printable characters. Non-printable characters are
allowed by default.
:param bool required:
Whether the value can be `None`. Defaults to True.
:raises CertifierTypeError:
The type is invalid
:raises CertifierValueError:
The value is invalid
"""
certify_params(
(_certify_int_param, 'max_length', max_length, dict(negative=False, required=False)),
(_certify_int_param, 'min_length', min_length, dict(negative=False, required=False)),
(certify_bool, 'nonprintable', nonprintable),
)
if certify_required(
value=value,
required=required,
):
return
if not isinstance(value, six.text_type):
raise CertifierTypeError(
message="expected unicode string, but value is of type {cls!r}".format(
cls=value.__class__.__name__),
value=value,
required=required,
)
if min_length is not None and len(value) < min_length:
raise CertifierValueError(
message="{length} is shorter than minimum acceptable {min}".format(
length=len(value), min=min_length),
value=value,
required=required,
)
if max_length is not None and len(value) > max_length:
raise CertifierValueError(
message="{length} is longer than maximum acceptable {max}".format(
length=len(value), max=max_length),
value=value,
required=required,
)
_certify_printable(
value=value,
nonprintable=nonprintable,
required=required,
) | python | def certify_text(
value, min_length=None, max_length=None, nonprintable=True, required=True,
):
"""
Certifier for human readable string values.
:param unicode value:
The string to be certified.
:param int min_length:
The minimum length of the string.
:param int max_length:
The maximum acceptable length for the string. By default, the length is not checked.
:param nonprintable:
Whether the string can contain non-printable characters. Non-printable characters are
allowed by default.
:param bool required:
Whether the value can be `None`. Defaults to True.
:raises CertifierTypeError:
The type is invalid
:raises CertifierValueError:
The value is invalid
"""
certify_params(
(_certify_int_param, 'max_length', max_length, dict(negative=False, required=False)),
(_certify_int_param, 'min_length', min_length, dict(negative=False, required=False)),
(certify_bool, 'nonprintable', nonprintable),
)
if certify_required(
value=value,
required=required,
):
return
if not isinstance(value, six.text_type):
raise CertifierTypeError(
message="expected unicode string, but value is of type {cls!r}".format(
cls=value.__class__.__name__),
value=value,
required=required,
)
if min_length is not None and len(value) < min_length:
raise CertifierValueError(
message="{length} is shorter than minimum acceptable {min}".format(
length=len(value), min=min_length),
value=value,
required=required,
)
if max_length is not None and len(value) > max_length:
raise CertifierValueError(
message="{length} is longer than maximum acceptable {max}".format(
length=len(value), max=max_length),
value=value,
required=required,
)
_certify_printable(
value=value,
nonprintable=nonprintable,
required=required,
) | [
"def",
"certify_text",
"(",
"value",
",",
"min_length",
"=",
"None",
",",
"max_length",
"=",
"None",
",",
"nonprintable",
"=",
"True",
",",
"required",
"=",
"True",
",",
")",
":",
"certify_params",
"(",
"(",
"_certify_int_param",
",",
"'max_length'",
",",
"max_length",
",",
"dict",
"(",
"negative",
"=",
"False",
",",
"required",
"=",
"False",
")",
")",
",",
"(",
"_certify_int_param",
",",
"'min_length'",
",",
"min_length",
",",
"dict",
"(",
"negative",
"=",
"False",
",",
"required",
"=",
"False",
")",
")",
",",
"(",
"certify_bool",
",",
"'nonprintable'",
",",
"nonprintable",
")",
",",
")",
"if",
"certify_required",
"(",
"value",
"=",
"value",
",",
"required",
"=",
"required",
",",
")",
":",
"return",
"if",
"not",
"isinstance",
"(",
"value",
",",
"six",
".",
"text_type",
")",
":",
"raise",
"CertifierTypeError",
"(",
"message",
"=",
"\"expected unicode string, but value is of type {cls!r}\"",
".",
"format",
"(",
"cls",
"=",
"value",
".",
"__class__",
".",
"__name__",
")",
",",
"value",
"=",
"value",
",",
"required",
"=",
"required",
",",
")",
"if",
"min_length",
"is",
"not",
"None",
"and",
"len",
"(",
"value",
")",
"<",
"min_length",
":",
"raise",
"CertifierValueError",
"(",
"message",
"=",
"\"{length} is shorter than minimum acceptable {min}\"",
".",
"format",
"(",
"length",
"=",
"len",
"(",
"value",
")",
",",
"min",
"=",
"min_length",
")",
",",
"value",
"=",
"value",
",",
"required",
"=",
"required",
",",
")",
"if",
"max_length",
"is",
"not",
"None",
"and",
"len",
"(",
"value",
")",
">",
"max_length",
":",
"raise",
"CertifierValueError",
"(",
"message",
"=",
"\"{length} is longer than maximum acceptable {max}\"",
".",
"format",
"(",
"length",
"=",
"len",
"(",
"value",
")",
",",
"max",
"=",
"max_length",
")",
",",
"value",
"=",
"value",
",",
"required",
"=",
"required",
",",
")",
"_certify_printable",
"(",
"value",
"=",
"value",
",",
"nonprintable",
"=",
"nonprintable",
",",
"required",
"=",
"required",
",",
")"
] | Certifier for human readable string values.
:param unicode value:
The string to be certified.
:param int min_length:
The minimum length of the string.
:param int max_length:
The maximum acceptable length for the string. By default, the length is not checked.
:param nonprintable:
Whether the string can contain non-printable characters. Non-printable characters are
allowed by default.
:param bool required:
Whether the value can be `None`. Defaults to True.
:raises CertifierTypeError:
The type is invalid
:raises CertifierValueError:
The value is invalid | [
"Certifier",
"for",
"human",
"readable",
"string",
"values",
"."
] | a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8 | https://github.com/sys-git/certifiable/blob/a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8/certifiable/core.py#L74-L135 |
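A usage sketch for `certify_text`. It assumes the certifier names are re-exported at package level; if not, import them from certifiable.core as shown in these records:

from certifiable import certify_text, CertifierTypeError, CertifierValueError

certify_text(u'hello', min_length=1, max_length=10)  # passes, returns None
certify_text(None, required=False)                   # None is allowed here

try:
    certify_text(u'far too long for this limit', max_length=5)
except CertifierValueError:
    pass  # 27 characters is longer than maximum acceptable 5

try:
    certify_text(b'raw bytes')  # byte strings are rejected on Python 2 and 3
except CertifierTypeError:
    pass  # use certify_bytes for binary data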
250,742 | sys-git/certifiable | certifiable/core.py | certify_int | def certify_int(value, min_value=None, max_value=None, required=True):
"""
Certifier for integer values.
:param six.integer_types value:
The number to be certified.
:param int min_value:
The minimum acceptable value for the number.
:param int max_value:
The maximum acceptable value for the number.
:param bool required:
Whether the value can be `None`. Defaults to True.
:raises CertifierTypeError:
The type is invalid
:raises CertifierValueError:
The value is invalid
"""
certify_params(
(_certify_int_param, 'max_value', max_value, dict(negative=True, required=False)),
(_certify_int_param, 'min_value', min_value, dict(negative=True, required=False)),
)
if certify_required(
value=value,
required=required,
):
return
if not isinstance(value, six.integer_types):
raise CertifierTypeError(
message="expected integer, but value is of type {cls!r}".format(
cls=value.__class__.__name__),
value=value,
required=required,
)
if min_value is not None and value < min_value:
raise CertifierValueError(
message="{value} is less than minimum acceptable {min}".format(
value=value, min=min_value),
value=value,
required=required,
)
if max_value is not None and value > max_value:
raise CertifierValueError(
message="{value} is more than the maximum acceptable {max}".format(
value=value, max=max_value),
value=value,
required=required,
) | python | def certify_int(value, min_value=None, max_value=None, required=True):
"""
Certifier for integer values.
:param six.integer_types value:
The number to be certified.
:param int min_value:
The minimum acceptable value for the number.
:param int max_value:
The maximum acceptable value for the number.
:param bool required:
Whether the value can be `None`. Defaults to True.
:raises CertifierTypeError:
The type is invalid
:raises CertifierValueError:
The value is invalid
"""
certify_params(
(_certify_int_param, 'max_value', max_value, dict(negative=True, required=False)),
(_certify_int_param, 'min_value', min_value, dict(negative=True, required=False)),
)
if certify_required(
value=value,
required=required,
):
return
if not isinstance(value, six.integer_types):
raise CertifierTypeError(
message="expected integer, but value is of type {cls!r}".format(
cls=value.__class__.__name__),
value=value,
required=required,
)
if min_value is not None and value < min_value:
raise CertifierValueError(
message="{value} is less than minimum acceptable {min}".format(
value=value, min=min_value),
value=value,
required=required,
)
if max_value is not None and value > max_value:
raise CertifierValueError(
message="{value} is more than the maximum acceptable {max}".format(
value=value, max=max_value),
value=value,
required=required,
) | [
"def",
"certify_int",
"(",
"value",
",",
"min_value",
"=",
"None",
",",
"max_value",
"=",
"None",
",",
"required",
"=",
"True",
")",
":",
"certify_params",
"(",
"(",
"_certify_int_param",
",",
"'max_length'",
",",
"max_value",
",",
"dict",
"(",
"negative",
"=",
"True",
",",
"required",
"=",
"False",
")",
")",
",",
"(",
"_certify_int_param",
",",
"'min_length'",
",",
"min_value",
",",
"dict",
"(",
"negative",
"=",
"True",
",",
"required",
"=",
"False",
")",
")",
",",
")",
"if",
"certify_required",
"(",
"value",
"=",
"value",
",",
"required",
"=",
"required",
",",
")",
":",
"return",
"if",
"not",
"isinstance",
"(",
"value",
",",
"six",
".",
"integer_types",
")",
":",
"raise",
"CertifierTypeError",
"(",
"message",
"=",
"\"expected integer, but value is of type {cls!r}\"",
".",
"format",
"(",
"cls",
"=",
"value",
".",
"__class__",
".",
"__name__",
")",
",",
"value",
"=",
"value",
",",
"required",
"=",
"required",
",",
")",
"if",
"min_value",
"is",
"not",
"None",
"and",
"value",
"<",
"min_value",
":",
"raise",
"CertifierValueError",
"(",
"message",
"=",
"\"{value} is less than minimum acceptable {min}\"",
".",
"format",
"(",
"value",
"=",
"value",
",",
"min",
"=",
"min_value",
")",
",",
"value",
"=",
"value",
",",
"required",
"=",
"required",
",",
")",
"if",
"max_value",
"is",
"not",
"None",
"and",
"value",
">",
"max_value",
":",
"raise",
"CertifierValueError",
"(",
"message",
"=",
"\"{value} is more than the maximum acceptable {max}\"",
".",
"format",
"(",
"value",
"=",
"value",
",",
"max",
"=",
"max_value",
")",
",",
"value",
"=",
"value",
",",
"required",
"=",
"required",
",",
")"
] | Certifier for integer values.
:param six.integer_types value:
The number to be certified.
:param int min_value:
The minimum acceptable value for the number.
:param int max_value:
The maximum acceptable value for the number.
:param bool required:
Whether the value can be `None`. Defaults to True.
:raises CertifierTypeError:
The type is invalid
:raises CertifierValueError:
The value is invalid | [
"Certifier",
"for",
"integer",
"values",
"."
] | a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8 | https://github.com/sys-git/certifiable/blob/a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8/certifiable/core.py#L204-L253 |
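A sketch for `certify_int`, under the same import assumption as the certify_text sketch above:

from certifiable import certify_int, CertifierValueError

certify_int(5, min_value=0, max_value=10)  # in range, passes
certify_int(None, required=False)          # skipped when not required

try:
    certify_int(-1, min_value=0)
except CertifierValueError:
    pass  # -1 is less than minimum acceptable 0

# Caveat: bool is a subclass of int in Python, so True passes this check.
certify_int(True)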
250,743 | sys-git/certifiable | certifiable/core.py | certify_bool | def certify_bool(value, required=True):
"""
Certifier for boolean values.
:param value:
The value to be certified.
:param bool required:
Whether the value can be `None`. Defaults to True.
:raises CertifierTypeError:
The type is invalid
"""
if certify_required(
value=value,
required=required,
):
return
if not isinstance(value, bool):
raise CertifierTypeError(
message="expected bool, but value is of type {cls!r}".format(
cls=value.__class__.__name__),
value=value,
required=required,
) | python | def certify_bool(value, required=True):
"""
Certifier for boolean values.
:param value:
The value to be certified.
:param bool required:
Whether the value can be `None`. Defaults to True.
:raises CertifierTypeError:
The type is invalid
"""
if certify_required(
value=value,
required=required,
):
return
if not isinstance(value, bool):
raise CertifierTypeError(
message="expected bool, but value is of type {cls!r}".format(
cls=value.__class__.__name__),
value=value,
required=required,
) | [
"def",
"certify_bool",
"(",
"value",
",",
"required",
"=",
"True",
")",
":",
"if",
"certify_required",
"(",
"value",
"=",
"value",
",",
"required",
"=",
"required",
",",
")",
":",
"return",
"if",
"not",
"isinstance",
"(",
"value",
",",
"bool",
")",
":",
"raise",
"CertifierTypeError",
"(",
"message",
"=",
"\"expected bool, but value is of type {cls!r}\"",
".",
"format",
"(",
"cls",
"=",
"value",
".",
"__class__",
".",
"__name__",
")",
",",
"value",
"=",
"value",
",",
"required",
"=",
"required",
",",
")"
] | Certifier for boolean values.
:param value:
The value to be certified.
:param bool required:
Whether the value can be `None`. Defaults to True.
:raises CertifierTypeError:
The type is invalid | [
"Certifier",
"for",
"boolean",
"values",
"."
] | a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8 | https://github.com/sys-git/certifiable/blob/a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8/certifiable/core.py#L310-L333 |
250,744 | sys-git/certifiable | certifiable/core.py | certify_bytes | def certify_bytes(value, min_length=None, max_length=None, required=True):
"""
Certifier for bytestring values.
Should not be used for certifying human readable strings; please use `certify_string` instead.
:param bytes|str value:
The string to be certified.
:param int min_length:
The minimum length of the string.
:param int max_length:
The maximum acceptable length for the string. By default, the length is
not checked.
:param bool required:
Whether the value can be `None`. Defaults to True.
:raises CertifierTypeError:
The type is invalid
:raises CertifierValueError:
The value is invalid
"""
certify_params(
(_certify_int_param, 'min_length', min_length, dict(negative=False, required=False)),
(_certify_int_param, 'max_length', max_length, dict(negative=False, required=False)),
)
if certify_required(
value=value,
required=required,
):
return
if not isinstance(value, six.binary_type):
raise CertifierTypeError(
message="expected byte string, but value is of type {cls!r}".format(
cls=value.__class__.__name__),
value=value,
required=required,
)
if min_length is not None and len(value) < min_length:
raise CertifierValueError(
message="{length} is shorter than minimum acceptable {min}".format(
length=len(value), min=min_length),
value=value,
required=required,
)
if max_length is not None and len(value) > max_length:
raise CertifierValueError(
message="{length} is longer than maximum acceptable {max}".format(
length=len(value), max=max_length),
value=value,
required=required,
) | python | def certify_bytes(value, min_length=None, max_length=None, required=True):
"""
Certifier for bytestring values.
Should not be used for certifying human readable strings; please use `certify_string` instead.
:param bytes|str value:
The string to be certified.
:param int min_length:
The minimum length of the string.
:param int max_length:
The maximum acceptable length for the string. By default, the length is
not checked.
:param bool required:
Whether the value can be `None`. Defaults to True.
:raises CertifierTypeError:
The type is invalid
:raises CertifierValueError:
The value is invalid
"""
certify_params(
(_certify_int_param, 'min_length', min_length, dict(negative=False, required=False)),
(_certify_int_param, 'max_length', max_length, dict(negative=False, required=False)),
)
if certify_required(
value=value,
required=required,
):
return
if not isinstance(value, six.binary_type):
raise CertifierTypeError(
message="expected byte string, but value is of type {cls!r}".format(
cls=value.__class__.__name__),
value=value,
required=required,
)
if min_length is not None and len(value) < min_length:
raise CertifierValueError(
message="{length} is shorter than minimum acceptable {min}".format(
length=len(value), min=min_length),
value=value,
required=required,
)
if max_length is not None and len(value) > max_length:
raise CertifierValueError(
message="{length} is longer than maximum acceptable {max}".format(
length=len(value), max=max_length),
value=value,
required=required,
) | [
"def",
"certify_bytes",
"(",
"value",
",",
"min_length",
"=",
"None",
",",
"max_length",
"=",
"None",
",",
"required",
"=",
"True",
")",
":",
"certify_params",
"(",
"(",
"_certify_int_param",
",",
"'min_value'",
",",
"min_length",
",",
"dict",
"(",
"negative",
"=",
"False",
",",
"required",
"=",
"False",
")",
")",
",",
"(",
"_certify_int_param",
",",
"'max_value'",
",",
"max_length",
",",
"dict",
"(",
"negative",
"=",
"False",
",",
"required",
"=",
"False",
")",
")",
",",
")",
"if",
"certify_required",
"(",
"value",
"=",
"value",
",",
"required",
"=",
"required",
",",
")",
":",
"return",
"if",
"not",
"isinstance",
"(",
"value",
",",
"six",
".",
"binary_type",
")",
":",
"raise",
"CertifierTypeError",
"(",
"message",
"=",
"\"expected byte string, but value is of type {cls!r}\"",
".",
"format",
"(",
"cls",
"=",
"value",
".",
"__class__",
".",
"__name__",
")",
",",
"value",
"=",
"value",
",",
"required",
"=",
"required",
",",
")",
"if",
"min_length",
"is",
"not",
"None",
"and",
"len",
"(",
"value",
")",
"<",
"min_length",
":",
"raise",
"CertifierValueError",
"(",
"message",
"=",
"\"{length} is shorter than minimum acceptable {min}\"",
".",
"format",
"(",
"length",
"=",
"len",
"(",
"value",
")",
",",
"min",
"=",
"min_length",
")",
",",
"value",
"=",
"value",
",",
"required",
"=",
"required",
",",
")",
"if",
"max_length",
"is",
"not",
"None",
"and",
"len",
"(",
"value",
")",
">",
"max_length",
":",
"raise",
"CertifierValueError",
"(",
"message",
"=",
"\"{length} is longer than maximum acceptable {max}\"",
".",
"format",
"(",
"length",
"=",
"len",
"(",
"value",
")",
",",
"max",
"=",
"max_length",
")",
",",
"value",
"=",
"value",
",",
"required",
"=",
"required",
",",
")"
] | Certifier for bytestring values.
Should not be used for certifying human readable strings; please use `certify_string` instead.
:param bytes|str value:
The string to be certified.
:param int min_length:
The minimum length of the string.
:param int max_length:
The maximum acceptable length for the string. By default, the length is
not checked.
:param bool required:
Whether the value can be `None`. Defaults to True.
:raises CertifierTypeError:
The type is invalid
:raises CertifierValueError:
The value is invalid | [
"Certifier",
"for",
"bytestring",
"values",
"."
] | a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8 | https://github.com/sys-git/certifiable/blob/a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8/certifiable/core.py#L337-L390 |
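A sketch for `certify_bytes`, under the same import assumption:

from certifiable import certify_bytes, CertifierTypeError

certify_bytes(b'\x00\x01\x02', min_length=1, max_length=16)  # passes

try:
    certify_bytes(u'text')  # unicode is rejected on both Python 2 and 3
except CertifierTypeError:
    pass  # use certify_text for human readable strings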
250,745 | sys-git/certifiable | certifiable/core.py | certify_enum | def certify_enum(value, kind=None, required=True):
"""
Certifier for enum.
:param value:
The value to be certified.
:param kind:
The enum type that value should be an instance of.
:param bool required:
Whether the value can be `None`. Defaults to True.
:raises CertifierTypeError:
The type is invalid
"""
if certify_required(
value=value,
required=required,
):
return
if not isinstance(value, kind):
raise CertifierTypeError(
message="expected {expected!r}, but value is of type {actual!r}".format(
expected=kind.__name__, actual=value.__class__.__name__),
value=value,
required=required,
) | python | def certify_enum(value, kind=None, required=True):
"""
Certifier for enum.
:param value:
The value to be certified.
:param kind:
The enum type that value should be an instance of.
:param bool required:
Whether the value can be `None`. Defaults to True.
:raises CertifierTypeError:
The type is invalid
"""
if certify_required(
value=value,
required=required,
):
return
if not isinstance(value, kind):
raise CertifierTypeError(
message="expected {expected!r}, but value is of type {actual!r}".format(
expected=kind.__name__, actual=value.__class__.__name__),
value=value,
required=required,
) | [
"def",
"certify_enum",
"(",
"value",
",",
"kind",
"=",
"None",
",",
"required",
"=",
"True",
")",
":",
"if",
"certify_required",
"(",
"value",
"=",
"value",
",",
"required",
"=",
"required",
",",
")",
":",
"return",
"if",
"not",
"isinstance",
"(",
"value",
",",
"kind",
")",
":",
"raise",
"CertifierTypeError",
"(",
"message",
"=",
"\"expected {expected!r}, but value is of type {actual!r}\"",
".",
"format",
"(",
"expected",
"=",
"kind",
".",
"__name__",
",",
"actual",
"=",
"value",
".",
"__class__",
".",
"__name__",
")",
",",
"value",
"=",
"value",
",",
"required",
"=",
"required",
",",
")"
] | Certifier for enum.
:param value:
The value to be certified.
:param kind:
The enum type that value should be an instance of.
:param bool required:
Whether the value can be `None`. Defaults to True.
:raises CertifierTypeError:
The type is invalid | [
"Certifier",
"for",
"enum",
"."
] | a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8 | https://github.com/sys-git/certifiable/blob/a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8/certifiable/core.py#L394-L419 |
250,746 | sys-git/certifiable | certifiable/core.py | certify_enum_value | def certify_enum_value(value, kind=None, required=True):
"""
Certifier for enum values.
:param value:
The value to be certified.
:param kind:
The enum type that value should be an instance of.
:param bool required:
Whether the value can be `None`. Defaults to True.
:raises CertifierValueError:
The value is invalid
"""
if certify_required(
value=value,
required=required,
):
return
try:
kind(value)
except: # noqa
raise CertifierValueError(
message="value {value!r} is not a valid member of {enum!r}".format(
value=value, enum=kind.__name__),
value=value,
required=required,
) | python | def certify_enum_value(value, kind=None, required=True):
"""
Certifier for enum values.
:param value:
The value to be certified.
:param kind:
The enum type that value should be an instance of.
:param bool required:
Whether the value can be `None`. Defaults to True.
:raises CertifierValueError:
The value is invalid
"""
if certify_required(
value=value,
required=required,
):
return
try:
kind(value)
except: # noqa
raise CertifierValueError(
message="value {value!r} is not a valid member of {enum!r}".format(
value=value, enum=kind.__name__),
value=value,
required=required,
) | [
"def",
"certify_enum_value",
"(",
"value",
",",
"kind",
"=",
"None",
",",
"required",
"=",
"True",
")",
":",
"if",
"certify_required",
"(",
"value",
"=",
"value",
",",
"required",
"=",
"required",
",",
")",
":",
"return",
"try",
":",
"kind",
"(",
"value",
")",
"except",
":",
"# noqa",
"raise",
"CertifierValueError",
"(",
"message",
"=",
"\"value {value!r} is not a valid member of {enum!r}\"",
".",
"format",
"(",
"value",
"=",
"value",
",",
"enum",
"=",
"kind",
".",
"__name__",
")",
",",
"value",
"=",
"value",
",",
"required",
"=",
"required",
",",
")"
] | Certifier for enum values.
:param value:
The value to be certified.
:param kind:
The enum type that value should be an instance of.
:param bool required:
Whether the value can be `None`. Defaults to True.
:raises CertifierValueError:
The value is invalid | [
"Certifier",
"for",
"enum",
"values",
"."
] | a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8 | https://github.com/sys-git/certifiable/blob/a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8/certifiable/core.py#L423-L450 |
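A combined sketch for `certify_enum` (an isinstance check) and `certify_enum_value` (a constructor check), under the same import assumption:

import enum

from certifiable import (certify_enum, certify_enum_value,
                         CertifierTypeError, CertifierValueError)

class Color(enum.Enum):
    RED = 1
    GREEN = 2

certify_enum(Color.RED, kind=Color)  # isinstance(Color.RED, Color): passes
certify_enum_value(2, kind=Color)    # Color(2) succeeds: passes

try:
    certify_enum('RED', kind=Color)  # a str is not a Color member
except CertifierTypeError:
    pass

try:
    certify_enum_value(99, kind=Color)  # Color(99) raises, so this fails
except CertifierValueError:
    pass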
250,747 | sys-git/certifiable | certifiable/core.py | certify_object | def certify_object(value, kind=None, required=True):
"""
Certifier for class object.
:param object value:
The object to certify.
:param object kind:
The type of the model that the value is expected to evaluate to.
:param bool required:
Whether the value can be `None`. Defaults to True.
:raises CertifierTypeError:
The type is invalid
:raises CertifierValueError:
The value is invalid
"""
if certify_required(
value=value,
required=required,
):
return
if not isinstance(value, kind):
try:
name = value.__class__.__name__
except: # noqa # pragma: no cover
name = type(value).__name__
try:
expected = kind.__name__
except: # noqa # pragma: no cover
expected = type(kind).__name__
raise CertifierValueError(
message="Expected object {expected!r}, but got {actual!r}".format(
expected=expected, actual=name),
value=value,
required=required,
) | python | def certify_object(value, kind=None, required=True):
"""
Certifier for class object.
:param object value:
The object to certify.
:param object kind:
The type of the model that the value is expected to evaluate to.
:param bool required:
Whether the value can be `None`. Defaults to True.
:raises CertifierTypeError:
The type is invalid
:raises CertifierValueError:
The value is invalid
"""
if certify_required(
value=value,
required=required,
):
return
if not isinstance(value, kind):
try:
name = value.__class__.__name__
except: # noqa # pragma: no cover
name = type(value).__name__
try:
expected = kind.__name__
except: # noqa # pragma: no cover
expected = type(kind).__name__
raise CertifierValueError(
message="Expected object {expected!r}, but got {actual!r}".format(
expected=expected, actual=name),
value=value,
required=required,
) | [
"def",
"certify_object",
"(",
"value",
",",
"kind",
"=",
"None",
",",
"required",
"=",
"True",
")",
":",
"if",
"certify_required",
"(",
"value",
"=",
"value",
",",
"required",
"=",
"required",
",",
")",
":",
"return",
"if",
"not",
"isinstance",
"(",
"value",
",",
"kind",
")",
":",
"try",
":",
"name",
"=",
"value",
".",
"__class__",
".",
"__name__",
"except",
":",
"# noqa # pragma: no cover",
"name",
"=",
"type",
"(",
"value",
")",
".",
"__name__",
"try",
":",
"expected",
"=",
"kind",
".",
"__class__",
".",
"__name__",
"except",
":",
"# noqa # pragma: no cover",
"expected",
"=",
"type",
"(",
"kind",
")",
".",
"__name__",
"raise",
"CertifierValueError",
"(",
"message",
"=",
"\"Expected object {expected!r}, but got {actual!r}\"",
".",
"format",
"(",
"expected",
"=",
"expected",
",",
"actual",
"=",
"name",
")",
",",
"value",
"=",
"value",
",",
"required",
"=",
"required",
",",
")"
] | Certifier for class object.
:param object value:
The object to certify.
:param object kind:
The type of the model that the value is expected to evaluate to.
:param bool required:
Whether the value can be `None`. Defaults to True.
:raises CertifierTypeError:
The type is invalid
:raises CertifierValueError:
The value is invalid | [
"Certifier",
"for",
"class",
"object",
"."
] | a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8 | https://github.com/sys-git/certifiable/blob/a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8/certifiable/core.py#L454-L491 |
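A sketch for `certify_object`, under the same import assumption:

from decimal import Decimal

from certifiable import certify_object, CertifierValueError

certify_object(Decimal('1.5'), kind=Decimal)        # right type: passes
certify_object(None, kind=Decimal, required=False)  # optional and absent

try:
    certify_object(1.5, kind=Decimal)  # a float is not a Decimal
except CertifierValueError:
    pass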
250,748 | sys-git/certifiable | certifiable/core.py | certify_time | def certify_time(value, required=True):
"""
Certifier for datetime.time values.
:param value:
The value to be certified.
:param bool required:
Whether the value can be `None`. Defaults to True.
:raises CertifierTypeError:
The type is invalid
"""
if certify_required(
value=value,
required=required,
):
return
if not isinstance(value, time):
raise CertifierTypeError(
message="expected timestamp (time), but value is of type {cls!r}".format(
cls=value.__class__.__name__),
value=value,
required=required,
) | python | def certify_time(value, required=True):
"""
Certifier for datetime.time values.
:param value:
The value to be certified.
:param bool required:
Whether the value can be `None`. Defaults to True.
:raises CertifierTypeError:
The type is invalid
"""
if certify_required(
value=value,
required=required,
):
return
if not isinstance(value, time):
raise CertifierTypeError(
message="expected timestamp (time), but value is of type {cls!r}".format(
cls=value.__class__.__name__),
value=value,
required=required,
) | [
"def",
"certify_time",
"(",
"value",
",",
"required",
"=",
"True",
")",
":",
"if",
"certify_required",
"(",
"value",
"=",
"value",
",",
"required",
"=",
"required",
",",
")",
":",
"return",
"if",
"not",
"isinstance",
"(",
"value",
",",
"time",
")",
":",
"raise",
"CertifierTypeError",
"(",
"message",
"=",
"\"expected timestamp (time), but value is of type {cls!r}\"",
".",
"format",
"(",
"cls",
"=",
"value",
".",
"__class__",
".",
"__name__",
")",
",",
"value",
"=",
"value",
",",
"required",
"=",
"required",
",",
")"
] | Certifier for datetime.time values.
:param value:
The value to be certified.
:param bool required:
Whether the value can be `None`. Defaults to True.
:raises CertifierTypeError:
The type is invalid | [
"Certifier",
"for",
"datetime",
".",
"time",
"values",
"."
] | a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8 | https://github.com/sys-git/certifiable/blob/a3c33c0d4f3ac2c53be9eded3fae633fa5f697f8/certifiable/core.py#L549-L572 |
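A sketch for `certify_time`, under the same import assumption (certify_bool above follows the identical pattern with bool):

import datetime

from certifiable import certify_time, CertifierTypeError

certify_time(datetime.time(13, 30))  # a datetime.time instance: passes

try:
    certify_time(datetime.datetime.now())  # datetime subclasses date, not time
except CertifierTypeError:
    pass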
250,749 | jeremylow/pyshk | pyshk/models.py | User.AsDict | def AsDict(self, dt=True):
"""
A dict representation of this User instance.
The return value uses the same key names as the JSON representation.
Args:
dt (bool): If True, return dates as python datetime objects. If
False, return dates as ISO strings.
Return:
A dict representing this User instance
"""
data = {}
if self.name:
data['name'] = self.name
data['mlkshk_url'] = self.mlkshk_url
if self.profile_image_url:
data['profile_image_url'] = self.profile_image_url
if self.id:
data['id'] = self.id
if self.about:
data['about'] = self.about
if self.website:
data['website'] = self.website
if self.shakes:
data['shakes'] = [shk.AsDict(dt=dt) for shk in self.shakes]
data['shake_count'] = self.shake_count
return data | python | def AsDict(self, dt=True):
"""
A dict representation of this User instance.
The return value uses the same key names as the JSON representation.
Args:
dt (bool): If True, return dates as python datetime objects. If
False, return dates as ISO strings.
Return:
A dict representing this User instance
"""
data = {}
if self.name:
data['name'] = self.name
data['mlkshk_url'] = self.mlkshk_url
if self.profile_image_url:
data['profile_image_url'] = self.profile_image_url
if self.id:
data['id'] = self.id
if self.about:
data['about'] = self.about
if self.website:
data['website'] = self.website
if self.shakes:
data['shakes'] = [shk.AsDict(dt=dt) for shk in self.shakes]
data['shake_count'] = self.shake_count
return data | [
"def",
"AsDict",
"(",
"self",
",",
"dt",
"=",
"True",
")",
":",
"data",
"=",
"{",
"}",
"if",
"self",
".",
"name",
":",
"data",
"[",
"'name'",
"]",
"=",
"self",
".",
"name",
"data",
"[",
"'mlkshk_url'",
"]",
"=",
"self",
".",
"mlkshk_url",
"if",
"self",
".",
"profile_image_url",
":",
"data",
"[",
"'profile_image_url'",
"]",
"=",
"self",
".",
"profile_image_url",
"if",
"self",
".",
"id",
":",
"data",
"[",
"'id'",
"]",
"=",
"self",
".",
"id",
"if",
"self",
".",
"about",
":",
"data",
"[",
"'about'",
"]",
"=",
"self",
".",
"about",
"if",
"self",
".",
"website",
":",
"data",
"[",
"'website'",
"]",
"=",
"self",
".",
"website",
"if",
"self",
".",
"shakes",
":",
"data",
"[",
"'shakes'",
"]",
"=",
"[",
"shk",
".",
"AsDict",
"(",
"dt",
"=",
"dt",
")",
"for",
"shk",
"in",
"self",
".",
"shakes",
"]",
"data",
"[",
"'shake_count'",
"]",
"=",
"self",
".",
"shake_count",
"return",
"data"
] | A dict representation of this User instance.
The return value uses the same key names as the JSON representation.
Args:
dt (bool): If True, return dates as python datetime objects. If
False, return dates as ISO strings.
Return:
A dict representing this User instance | [
"A",
"dict",
"representation",
"of",
"this",
"User",
"instance",
"."
] | 3ab92f6706397cde7a18367266eba9e0f1ada868 | https://github.com/jeremylow/pyshk/blob/3ab92f6706397cde7a18367266eba9e0f1ada868/pyshk/models.py#L51-L79 |
250,750 | jeremylow/pyshk | pyshk/models.py | User.AsJsonString | def AsJsonString(self):
"""A JSON string representation of this User instance.
Returns:
A JSON string representation of this User instance
"""
return json.dumps(self.AsDict(dt=False), sort_keys=True) | python | def AsJsonString(self):
"""A JSON string representation of this User instance.
Returns:
A JSON string representation of this User instance
"""
return json.dumps(self.AsDict(dt=False), sort_keys=True) | [
"def",
"AsJsonString",
"(",
"self",
")",
":",
"return",
"json",
".",
"dumps",
"(",
"self",
".",
"AsDict",
"(",
"dt",
"=",
"False",
")",
",",
"sort_keys",
"=",
"True",
")"
] | A JSON string representation of this User instance.
Returns:
A JSON string representation of this User instance | [
"A",
"JSON",
"string",
"representation",
"of",
"this",
"User",
"instance",
"."
] | 3ab92f6706397cde7a18367266eba9e0f1ada868 | https://github.com/jeremylow/pyshk/blob/3ab92f6706397cde7a18367266eba9e0f1ada868/pyshk/models.py#L81-L87 |
250,751 | jeremylow/pyshk | pyshk/models.py | User.NewFromJSON | def NewFromJSON(data):
"""
Create a new User instance from a JSON dict.
Args:
data (dict): JSON dictionary representing a user.
Returns:
A User instance.
"""
if data.get('shakes', None):
shakes = [Shake.NewFromJSON(shk) for shk in data.get('shakes')]
else:
shakes = None
return User(
id=data.get('id', None),
name=data.get('name', None),
profile_image_url=data.get('profile_image_url', None),
about=data.get('about', None),
website=data.get('website', None),
shakes=shakes) | python | def NewFromJSON(data):
"""
Create a new User instance from a JSON dict.
Args:
data (dict): JSON dictionary representing a user.
Returns:
A User instance.
"""
if data.get('shakes', None):
shakes = [Shake.NewFromJSON(shk) for shk in data.get('shakes')]
else:
shakes = None
return User(
id=data.get('id', None),
name=data.get('name', None),
profile_image_url=data.get('profile_image_url', None),
about=data.get('about', None),
website=data.get('website', None),
shakes=shakes) | [
"def",
"NewFromJSON",
"(",
"data",
")",
":",
"if",
"data",
".",
"get",
"(",
"'shakes'",
",",
"None",
")",
":",
"shakes",
"=",
"[",
"Shake",
".",
"NewFromJSON",
"(",
"shk",
")",
"for",
"shk",
"in",
"data",
".",
"get",
"(",
"'shakes'",
")",
"]",
"else",
":",
"shakes",
"=",
"None",
"return",
"User",
"(",
"id",
"=",
"data",
".",
"get",
"(",
"'id'",
",",
"None",
")",
",",
"name",
"=",
"data",
".",
"get",
"(",
"'name'",
",",
"None",
")",
",",
"profile_image_url",
"=",
"data",
".",
"get",
"(",
"'profile_image_url'",
",",
"None",
")",
",",
"about",
"=",
"data",
".",
"get",
"(",
"'about'",
",",
"None",
")",
",",
"website",
"=",
"data",
".",
"get",
"(",
"'website'",
",",
"None",
")",
",",
"shakes",
"=",
"shakes",
")"
] | Create a new User instance from a JSON dict.
Args:
data (dict): JSON dictionary representing a user.
Returns:
A User instance. | [
"Create",
"a",
"new",
"User",
"instance",
"from",
"a",
"JSON",
"dict",
"."
] | 3ab92f6706397cde7a18367266eba9e0f1ada868 | https://github.com/jeremylow/pyshk/blob/3ab92f6706397cde7a18367266eba9e0f1ada868/pyshk/models.py#L90-L111 |
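A round-trip sketch tying User.NewFromJSON to AsDict/AsJsonString. The field values are hypothetical sample data, and it assumes the constructors store their keyword arguments as attributes, which the AsDict methods above imply:

from pyshk.models import User

payload = {
    'id': 42,
    'name': 'example-user',
    'profile_image_url': 'http://example.com/avatar.jpg',
    'shakes': [{'id': 1, 'name': 'example shake', 'type': 'user'}],
}
user = User.NewFromJSON(payload)
print(user.shakes[0].name)  # nested dicts are parsed into Shake objects
print(user.AsJsonString())  # keys mirror the incoming JSON payload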
250,752 | jeremylow/pyshk | pyshk/models.py | Comment.AsDict | def AsDict(self, dt=True):
"""
A dict representation of this Comment instance.
The return value uses the same key names as the JSON representation.
Args:
dt (bool): If True, return dates as python datetime objects. If
False, return dates as ISO strings.
Return:
A dict representing this Comment instance
"""
data = {}
if self.body:
data['body'] = self.body
if self.posted_at:
data['posted_at'] = self.posted_at
if self.user:
data['user'] = self.user.AsDict()
return data | python | def AsDict(self, dt=True):
"""
A dict representation of this Comment instance.
The return value uses the same key names as the JSON representation.
Args:
dt (bool): If True, return dates as python datetime objects. If
False, return dates as ISO strings.
Return:
A dict representing this Comment instance
"""
data = {}
if self.body:
data['body'] = self.body
if self.posted_at:
data['posted_at'] = self.posted_at
if self.user:
data['user'] = self.user.AsDict()
return data | [
"def",
"AsDict",
"(",
"self",
",",
"dt",
"=",
"True",
")",
":",
"data",
"=",
"{",
"}",
"if",
"self",
".",
"body",
":",
"data",
"[",
"'body'",
"]",
"=",
"self",
".",
"body",
"if",
"self",
".",
"posted_at",
":",
"data",
"[",
"'posted_at'",
"]",
"=",
"self",
".",
"posted_at",
"if",
"self",
".",
"user",
":",
"data",
"[",
"'user'",
"]",
"=",
"self",
".",
"user",
".",
"AsDict",
"(",
")",
"return",
"data"
] | A dict representation of this Comment instance.
The return value uses the same key names as the JSON representation.
Args:
dt (bool): If True, return dates as python datetime objects. If
False, return dates as ISO strings.
Return:
A dict representing this Comment instance | [
"A",
"dict",
"representation",
"of",
"this",
"Comment",
"instance",
"."
] | 3ab92f6706397cde7a18367266eba9e0f1ada868 | https://github.com/jeremylow/pyshk/blob/3ab92f6706397cde7a18367266eba9e0f1ada868/pyshk/models.py#L159-L181 |
250,753 | jeremylow/pyshk | pyshk/models.py | Comment.NewFromJSON | def NewFromJSON(data):
"""
Create a new Comment instance from a JSON dict.
Args:
data (dict): JSON dictionary representing a Comment.
Returns:
A Comment instance.
"""
return Comment(
body=data.get('body', None),
posted_at=data.get('posted_at', None),
user=User.NewFromJSON(data.get('user', None))
) | python | def NewFromJSON(data):
"""
Create a new Comment instance from a JSON dict.
Args:
data (dict): JSON dictionary representing a Comment.
Returns:
A Comment instance.
"""
return Comment(
body=data.get('body', None),
posted_at=data.get('posted_at', None),
user=User.NewFromJSON(data.get('user', None))
) | [
"def",
"NewFromJSON",
"(",
"data",
")",
":",
"return",
"Comment",
"(",
"body",
"=",
"data",
".",
"get",
"(",
"'body'",
",",
"None",
")",
",",
"posted_at",
"=",
"data",
".",
"get",
"(",
"'posted_at'",
",",
"None",
")",
",",
"user",
"=",
"User",
".",
"NewFromJSON",
"(",
"data",
".",
"get",
"(",
"'user'",
",",
"None",
")",
")",
")"
] | Create a new Comment instance from a JSON dict.
Args:
data (dict): JSON dictionary representing a Comment.
Returns:
A Comment instance. | [
"Create",
"a",
"new",
"Comment",
"instance",
"from",
"a",
"JSON",
"dict",
"."
] | 3ab92f6706397cde7a18367266eba9e0f1ada868 | https://github.com/jeremylow/pyshk/blob/3ab92f6706397cde7a18367266eba9e0f1ada868/pyshk/models.py#L193-L207 |
250,754 | jeremylow/pyshk | pyshk/models.py | Shake.NewFromJSON | def NewFromJSON(data):
"""
Create a new Shake instance from a JSON dict.
Args:
data (dict): JSON dictionary representing a Shake.
Returns:
A Shake instance.
"""
s = Shake(
id=data.get('id', None),
name=data.get('name', None),
url=data.get('url', None),
thumbnail_url=data.get('thumbnail_url', None),
description=data.get('description', None),
type=data.get('type', None),
created_at=data.get('created_at', None),
updated_at=data.get('updated_at', None)
)
if data.get('owner', None):
s.owner = User.NewFromJSON(data.get('owner', None))
return s | python | def NewFromJSON(data):
"""
Create a new Shake instance from a JSON dict.
Args:
data (dict): JSON dictionary representing a Shake.
Returns:
A Shake instance.
"""
s = Shake(
id=data.get('id', None),
name=data.get('name', None),
url=data.get('url', None),
thumbnail_url=data.get('thumbnail_url', None),
description=data.get('description', None),
type=data.get('type', None),
created_at=data.get('created_at', None),
updated_at=data.get('updated_at', None)
)
if data.get('owner', None):
s.owner = User.NewFromJSON(data.get('owner', None))
return s | [
"def",
"NewFromJSON",
"(",
"data",
")",
":",
"s",
"=",
"Shake",
"(",
"id",
"=",
"data",
".",
"get",
"(",
"'id'",
",",
"None",
")",
",",
"name",
"=",
"data",
".",
"get",
"(",
"'name'",
",",
"None",
")",
",",
"url",
"=",
"data",
".",
"get",
"(",
"'url'",
",",
"None",
")",
",",
"thumbnail_url",
"=",
"data",
".",
"get",
"(",
"'thumbnail_url'",
",",
"None",
")",
",",
"description",
"=",
"data",
".",
"get",
"(",
"'description'",
",",
"None",
")",
",",
"type",
"=",
"data",
".",
"get",
"(",
"'type'",
",",
"None",
")",
",",
"created_at",
"=",
"data",
".",
"get",
"(",
"'created_at'",
",",
"None",
")",
",",
"updated_at",
"=",
"data",
".",
"get",
"(",
"'updated_at'",
",",
"None",
")",
")",
"if",
"data",
".",
"get",
"(",
"'owner'",
",",
"None",
")",
":",
"s",
".",
"owner",
"=",
"User",
".",
"NewFromJSON",
"(",
"data",
".",
"get",
"(",
"'owner'",
",",
"None",
")",
")",
"return",
"s"
] | Create a new Shake instance from a JSON dict.
Args:
data (dict): JSON dictionary representing a Shake.
Returns:
A Shake instance. | [
"Create",
"a",
"new",
"Shake",
"instance",
"from",
"a",
"JSON",
"dict",
"."
] | 3ab92f6706397cde7a18367266eba9e0f1ada868 | https://github.com/jeremylow/pyshk/blob/3ab92f6706397cde7a18367266eba9e0f1ada868/pyshk/models.py#L332-L354 |
250,755 | jeremylow/pyshk | pyshk/models.py | SharedFile.NewFromJSON | def NewFromJSON(data):
"""
Create a new SharedFile instance from a JSON dict.
Args:
data (dict): JSON dictionary representing a SharedFile.
Returns:
A SharedFile instance.
"""
return SharedFile(
sharekey=data.get('sharekey', None),
name=data.get('name', None),
user=User.NewFromJSON(data.get('user', None)),
title=data.get('title', None),
description=data.get('description', None),
posted_at=data.get('posted_at', None),
permalink=data.get('permalink', None),
width=data.get('width', None),
height=data.get('height', None),
views=data.get('views', 0),
likes=data.get('likes', 0),
saves=data.get('saves', 0),
comments=data.get('comments', None),
nsfw=data.get('nsfw', False),
image_url=data.get('image_url', None),
source_url=data.get('source_url', None),
saved=data.get('saved', False),
liked=data.get('liked', False),
) | python | def NewFromJSON(data):
"""
Create a new SharedFile instance from a JSON dict.
Args:
data (dict): JSON dictionary representing a SharedFile.
Returns:
A SharedFile instance.
"""
return SharedFile(
sharekey=data.get('sharekey', None),
name=data.get('name', None),
user=User.NewFromJSON(data.get('user', None)),
title=data.get('title', None),
description=data.get('description', None),
posted_at=data.get('posted_at', None),
permalink=data.get('permalink', None),
width=data.get('width', None),
height=data.get('height', None),
views=data.get('views', 0),
likes=data.get('likes', 0),
saves=data.get('saves', 0),
comments=data.get('comments', None),
nsfw=data.get('nsfw', False),
image_url=data.get('image_url', None),
source_url=data.get('source_url', None),
saved=data.get('saved', False),
liked=data.get('liked', False),
) | [
"def",
"NewFromJSON",
"(",
"data",
")",
":",
"return",
"SharedFile",
"(",
"sharekey",
"=",
"data",
".",
"get",
"(",
"'sharekey'",
",",
"None",
")",
",",
"name",
"=",
"data",
".",
"get",
"(",
"'name'",
",",
"None",
")",
",",
"user",
"=",
"User",
".",
"NewFromJSON",
"(",
"data",
".",
"get",
"(",
"'user'",
",",
"None",
")",
")",
",",
"title",
"=",
"data",
".",
"get",
"(",
"'title'",
",",
"None",
")",
",",
"description",
"=",
"data",
".",
"get",
"(",
"'description'",
",",
"None",
")",
",",
"posted_at",
"=",
"data",
".",
"get",
"(",
"'posted_at'",
",",
"None",
")",
",",
"permalink",
"=",
"data",
".",
"get",
"(",
"'permalink'",
",",
"None",
")",
",",
"width",
"=",
"data",
".",
"get",
"(",
"'width'",
",",
"None",
")",
",",
"height",
"=",
"data",
".",
"get",
"(",
"'height'",
",",
"None",
")",
",",
"views",
"=",
"data",
".",
"get",
"(",
"'views'",
",",
"0",
")",
",",
"likes",
"=",
"data",
".",
"get",
"(",
"'likes'",
",",
"0",
")",
",",
"saves",
"=",
"data",
".",
"get",
"(",
"'saves'",
",",
"0",
")",
",",
"comments",
"=",
"data",
".",
"get",
"(",
"'comments'",
",",
"None",
")",
",",
"nsfw",
"=",
"data",
".",
"get",
"(",
"'nsfw'",
",",
"False",
")",
",",
"image_url",
"=",
"data",
".",
"get",
"(",
"'image_url'",
",",
"None",
")",
",",
"source_url",
"=",
"data",
".",
"get",
"(",
"'source_url'",
",",
"None",
")",
",",
"saved",
"=",
"data",
".",
"get",
"(",
"'saved'",
",",
"False",
")",
",",
"liked",
"=",
"data",
".",
"get",
"(",
"'liked'",
",",
"False",
")",
",",
")"
] | Create a new SharedFile instance from a JSON dict.
Args:
data (dict): JSON dictionary representing a SharedFile.
Returns:
A SharedFile instance. | [
"Create",
"a",
"new",
"SharedFile",
"instance",
"from",
"a",
"JSON",
"dict",
"."
] | 3ab92f6706397cde7a18367266eba9e0f1ada868 | https://github.com/jeremylow/pyshk/blob/3ab92f6706397cde7a18367266eba9e0f1ada868/pyshk/models.py#L522-L551 |
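A sketch of SharedFile.NewFromJSON showing its fallback defaults; the sample data is hypothetical, and attribute access assumes the constructor stores its keyword arguments:

from pyshk.models import SharedFile

sf = SharedFile.NewFromJSON({
    'sharekey': 'ABC123',
    'title': 'a picture',
    'user': {'id': 7, 'name': 'poster'},
    'views': 10,
})
print(sf.title)  # 'a picture'
print(sf.likes)  # 0, counters default to 0 when absent from the JSON
print(sf.nsfw)   # False, flags default to False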
250,756 | dcramer/peek | peek/collector.py | Collector._start_tracer | def _start_tracer(self, origin):
"""
Start a new Tracer object, and store it in self.tracers.
"""
tracer = self._tracer_class(log=self.log)
tracer.data = self.data
fn = tracer.start(origin)
self.tracers.append(tracer)
return fn | python | def _start_tracer(self, origin):
"""
Start a new Tracer object, and store it in self.tracers.
"""
tracer = self._tracer_class(log=self.log)
tracer.data = self.data
fn = tracer.start(origin)
self.tracers.append(tracer)
return fn | [
"def",
"_start_tracer",
"(",
"self",
",",
"origin",
")",
":",
"tracer",
"=",
"self",
".",
"_tracer_class",
"(",
"log",
"=",
"self",
".",
"log",
")",
"tracer",
".",
"data",
"=",
"self",
".",
"data",
"fn",
"=",
"tracer",
".",
"start",
"(",
"origin",
")",
"self",
".",
"tracers",
".",
"append",
"(",
"tracer",
")",
"return",
"fn"
] | Start a new Tracer object, and store it in self.tracers. | [
"Start",
"a",
"new",
"Tracer",
"object",
"and",
"store",
"it",
"in",
"self",
".",
"tracers",
"."
] | da7c086660fc870c6632c4dc5ccb2ff9bfbee52e | https://github.com/dcramer/peek/blob/da7c086660fc870c6632c4dc5ccb2ff9bfbee52e/peek/collector.py#L22-L30 |
250,757 | dcramer/peek | peek/collector.py | Collector.start | def start(self):
"""
Start collecting trace information.
"""
origin = inspect.stack()[1][0]
self.reset()
# Install the tracer on this thread.
self._start_tracer(origin) | python | def start(self):
"""
Start collecting trace information.
"""
origin = inspect.stack()[1][0]
self.reset()
# Install the tracer on this thread.
self._start_tracer(origin) | [
"def",
"start",
"(",
"self",
")",
":",
"origin",
"=",
"inspect",
".",
"stack",
"(",
")",
"[",
"1",
"]",
"[",
"0",
"]",
"self",
".",
"reset",
"(",
")",
"# Install the tracer on this thread.",
"self",
".",
"_start_tracer",
"(",
"origin",
")"
] | Start collecting trace information. | [
"Start",
"collecting",
"trace",
"information",
"."
] | da7c086660fc870c6632c4dc5ccb2ff9bfbee52e | https://github.com/dcramer/peek/blob/da7c086660fc870c6632c4dc5ccb2ff9bfbee52e/peek/collector.py#L56-L65 |
250,758 | emilssolmanis/tapes | tapes/registry.py | Registry.gauge | def gauge(self, name, producer):
"""Creates or gets an existing gauge.
:param name: The name
:param producer: A callable invoked to produce the gauge's current value
:return: The created or existing gauge for the given name
"""
return self._get_or_add_stat(name, functools.partial(Gauge, producer)) | python | def gauge(self, name, producer):
"""Creates or gets an existing gauge.
:param name: The name
:param producer: A callable invoked to produce the gauge's current value
:return: The created or existing gauge for the given name
"""
return self._get_or_add_stat(name, functools.partial(Gauge, producer)) | [
"def",
"gauge",
"(",
"self",
",",
"name",
",",
"producer",
")",
":",
"return",
"self",
".",
"_get_or_add_stat",
"(",
"name",
",",
"functools",
".",
"partial",
"(",
"Gauge",
",",
"producer",
")",
")"
] | Creates or gets an existing gauge.
:param name: The name
:param producer: A callable invoked to produce the gauge's current value
:return: The created or existing gauge for the given name | [
"Creates",
"or",
"gets",
"an",
"existing",
"gauge",
"."
] | 7797fc9ebcb359cb1ba5085570e3cab5ebcd1d3c | https://github.com/emilssolmanis/tapes/blob/7797fc9ebcb359cb1ba5085570e3cab5ebcd1d3c/tapes/registry.py#L83-L89 |
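A sketch of Registry.gauge. Constructing Registry() with no arguments follows the get_stats doctest below; the Gauge class itself is not shown in these records, so the exact keys it reports are an assumption:

import tapes

backlog = ['job-1', 'job-2']
registry = tapes.Registry()

# The producer callable is evaluated when the gauge is read, so the
# reported value tracks the live length of backlog:
registry.gauge('queue.depth', lambda: len(backlog))

stats = registry.get_stats()
print(stats.queue.depth)  # a Dict of the gauge's values, per Gauge.get_values()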
250,759 | emilssolmanis/tapes | tapes/registry.py | Registry.get_stats | def get_stats(self):
"""Retrieves the current values of the metrics associated with this registry, formatted as a dict.
The metrics form a hierarchy, their names are split on '.'. The returned dict is an `addict`, so you can
use it as either a regular dict or via attributes, e.g.,
>>> import tapes
>>> registry = tapes.Registry()
>>> timer = registry.timer('my.timer')
>>> stats = registry.get_stats()
>>> print(stats['my']['timer']['count'])
0
>>> print(stats.my.timer.count)
0
:return: The values of the metrics associated with this registry
"""
def _get_value(stats):
try:
return Dict((k, _get_value(v)) for k, v in stats.items())
except AttributeError:
return Dict(stats.get_values())
return _get_value(self.stats) | python | def get_stats(self):
"""Retrieves the current values of the metrics associated with this registry, formatted as a dict.
The metrics form a hierarchy, their names are split on '.'. The returned dict is an `addict`, so you can
use it as either a regular dict or via attributes, e.g.,
>>> import tapes
>>> registry = tapes.Registry()
>>> timer = registry.timer('my.timer')
>>> stats = registry.get_stats()
>>> print(stats['my']['timer']['count'])
0
>>> print(stats.my.timer.count)
0
:return: The values of the metrics associated with this registry
"""
def _get_value(stats):
try:
return Dict((k, _get_value(v)) for k, v in stats.items())
except AttributeError:
return Dict(stats.get_values())
return _get_value(self.stats) | [
"def",
"get_stats",
"(",
"self",
")",
":",
"def",
"_get_value",
"(",
"stats",
")",
":",
"try",
":",
"return",
"Dict",
"(",
"(",
"k",
",",
"_get_value",
"(",
"v",
")",
")",
"for",
"k",
",",
"v",
"in",
"stats",
".",
"items",
"(",
")",
")",
"except",
"AttributeError",
":",
"return",
"Dict",
"(",
"stats",
".",
"get_values",
"(",
")",
")",
"return",
"_get_value",
"(",
"self",
".",
"stats",
")"
] | Retrieves the current values of the metrics associated with this registry, formatted as a dict.
The metrics form a hierarchy, their names are split on '.'. The returned dict is an `addict`, so you can
use it as either a regular dict or via attributes, e.g.,
>>> import tapes
>>> registry = tapes.Registry()
>>> timer = registry.timer('my.timer')
>>> stats = registry.get_stats()
>>> print(stats['my']['timer']['count'])
0
>>> print(stats.my.timer.count)
0
:return: The values of the metrics associated with this registry | [
"Retrieves",
"the",
"current",
"values",
"of",
"the",
"metrics",
"associated",
"with",
"this",
"registry",
"formatted",
"as",
"a",
"dict",
"."
] | 7797fc9ebcb359cb1ba5085570e3cab5ebcd1d3c | https://github.com/emilssolmanis/tapes/blob/7797fc9ebcb359cb1ba5085570e3cab5ebcd1d3c/tapes/registry.py#L107-L130 |
250,760 | FujiMakoto/IPS-Vagrant | ips_vagrant/downloaders/dev_tools.py | DevToolsManager._populate_ips_versions | def _populate_ips_versions(self):
"""
Populate IPS version data for mapping
@return:
"""
# Get a map of version ID's from our most recent IPS version
ips = IpsManager(self.ctx)
ips = ips.dev_version or ips.latest
with ZipFile(ips.filepath) as zip:
namelist = zip.namelist()
ips_versions_path = os.path.join(namelist[0], 'applications/core/data/versions.json')
if ips_versions_path not in namelist:
raise BadZipfile('Missing versions.json file')
self.ips_versions = json.loads(zip.read(ips_versions_path), object_pairs_hook=OrderedDict)
self.log.debug("%d version ID's loaded from latest IPS release", len(self.ips_versions)) | python | def _populate_ips_versions(self):
"""
Populate IPS version data for mapping
@return:
"""
# Get a map of version ID's from our most recent IPS version
ips = IpsManager(self.ctx)
ips = ips.dev_version or ips.latest
with ZipFile(ips.filepath) as zip:
namelist = zip.namelist()
ips_versions_path = os.path.join(namelist[0], 'applications/core/data/versions.json')
if ips_versions_path not in namelist:
raise BadZipfile('Missing versions.json file')
self.ips_versions = json.loads(zip.read(ips_versions_path), object_pairs_hook=OrderedDict)
self.log.debug("%d version ID's loaded from latest IPS release", len(self.ips_versions)) | [
"def",
"_populate_ips_versions",
"(",
"self",
")",
":",
"# Get a map of version ID's from our most recent IPS version",
"ips",
"=",
"IpsManager",
"(",
"self",
".",
"ctx",
")",
"ips",
"=",
"ips",
".",
"dev_version",
"or",
"ips",
".",
"latest",
"with",
"ZipFile",
"(",
"ips",
".",
"filepath",
")",
"as",
"zip",
":",
"namelist",
"=",
"zip",
".",
"namelist",
"(",
")",
"ips_versions_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"namelist",
"[",
"0",
"]",
",",
"'applications/core/data/versions.json'",
")",
"if",
"ips_versions_path",
"not",
"in",
"namelist",
":",
"raise",
"BadZipfile",
"(",
"'Missing versions.json file'",
")",
"self",
".",
"ips_versions",
"=",
"json",
".",
"loads",
"(",
"zip",
".",
"read",
"(",
"ips_versions_path",
")",
",",
"object_pairs_hook",
"=",
"OrderedDict",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"%d version ID's loaded from latest IPS release\"",
",",
"len",
"(",
"self",
".",
"ips_versions",
")",
")"
] | Populate IPS version data for mapping
@return: | [
"Populate",
"IPS",
"version",
"data",
"for",
"mapping"
] | 7b1d6d095034dd8befb026d9315ecc6494d52269 | https://github.com/FujiMakoto/IPS-Vagrant/blob/7b1d6d095034dd8befb026d9315ecc6494d52269/ips_vagrant/downloaders/dev_tools.py#L35-L50 |
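The method above digs versions.json out of the release zip by prefixing the archive's first top-level entry. A standalone sketch of the same lookup, assuming a local zip laid out like an IPS release archive (the path below is illustrative):

import json
import os
from collections import OrderedDict
from zipfile import ZipFile, BadZipfile

def load_versions(zip_path):
    # Release archives nest everything under one top-level directory,
    # so namelist()[0] gives the prefix for internal paths.
    with ZipFile(zip_path) as archive:
        namelist = archive.namelist()
        versions_path = os.path.join(namelist[0], 'applications/core/data/versions.json')
        if versions_path not in namelist:
            raise BadZipfile('Missing versions.json file')
        # OrderedDict preserves the order version IDs appear in the release.
        return json.loads(archive.read(versions_path), object_pairs_hook=OrderedDict)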
250,761 | marteinn/genres | genres/db.py | Db.load | def load(data_path):
"""
Extract data from provided file and return it as a string.
"""
with open(data_path, "r") as data_file:
raw_data = data_file.read()
data_file.close()
return raw_data | python | def load(data_path):
"""
Extract data from provided file and return it as a string.
"""
with open(data_path, "r") as data_file:
raw_data = data_file.read()
data_file.close()
return raw_data | [
"def",
"load",
"(",
"data_path",
")",
":",
"with",
"open",
"(",
"data_path",
",",
"\"r\"",
")",
"as",
"data_file",
":",
"raw_data",
"=",
"data_file",
".",
"read",
"(",
")",
"data_file",
".",
"close",
"(",
")",
"return",
"raw_data"
] | Extract data from provided file and return it as a string. | [
"Extract",
"data",
"from",
"provided",
"file",
"and",
"return",
"it",
"as",
"a",
"string",
"."
] | 4bbc90f7c2c527631380c08b4d99a4e40abed955 | https://github.com/marteinn/genres/blob/4bbc90f7c2c527631380c08b4d99a4e40abed955/genres/db.py#L28-L36 |
250,762 | marteinn/genres | genres/db.py | Db.parse | def parse(self, data):
"""
Split and iterate through the datafile to extract genres, tags
and points.
"""
categories = data.split("\n\n")
reference = {}
reference_points = {}
genre_index = []
tag_index = []
for category in categories:
entries = category.strip().split("\n")
entry_category, entry_points = self._parse_entry(entries[0].lower())
if entry_category.startswith("#"):
continue
for entry in entries:
entry = entry.lower()
if not entry:
continue
# Comment, ignore
if entry.startswith("#"):
continue
# Handle genre
if not entry.startswith("-"):
genre, points = self._parse_entry(entry)
reference[genre] = entry_category
reference_points[genre] = points
genre_index.append(genre)
# Handle tag
else:
tag = entry[1:]
tag, points = self._parse_entry(tag, limit=9.5)
reference[tag] = entry_category
reference_points[tag] = points
tag_index.append(tag)
self.reference = reference
self.genres = genre_index
self.tags = tag_index
self.points = reference_points | python | def parse(self, data):
"""
Split and iterate through the datafile to extract genres, tags
and points.
"""
categories = data.split("\n\n")
reference = {}
reference_points = {}
genre_index = []
tag_index = []
for category in categories:
entries = category.strip().split("\n")
entry_category, entry_points = self._parse_entry(entries[0].lower())
if entry_category.startswith("#"):
continue
for entry in entries:
entry = entry.lower()
if not entry:
continue
# Comment, ignore
if entry.startswith("#"):
continue
# Handle genre
if not entry.startswith("-"):
genre, points = self._parse_entry(entry)
reference[genre] = entry_category
reference_points[genre] = points
genre_index.append(genre)
# Handle tag
else:
tag = entry[1:]
tag, points = self._parse_entry(tag, limit=9.5)
reference[tag] = entry_category
reference_points[tag] = points
tag_index.append(tag)
self.reference = reference
self.genres = genre_index
self.tags = tag_index
self.points = reference_points | [
"def",
"parse",
"(",
"self",
",",
"data",
")",
":",
"categories",
"=",
"data",
".",
"split",
"(",
"\"\\n\\n\"",
")",
"reference",
"=",
"{",
"}",
"reference_points",
"=",
"{",
"}",
"genre_index",
"=",
"[",
"]",
"tag_index",
"=",
"[",
"]",
"for",
"category",
"in",
"categories",
":",
"entries",
"=",
"category",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\"\\n\"",
")",
"entry_category",
",",
"entry_points",
"=",
"self",
".",
"_parse_entry",
"(",
"entries",
"[",
"0",
"]",
".",
"lower",
"(",
")",
")",
"if",
"entry_category",
".",
"startswith",
"(",
"\"#\"",
")",
":",
"continue",
"for",
"entry",
"in",
"entries",
":",
"entry",
"=",
"entry",
".",
"lower",
"(",
")",
"if",
"not",
"entry",
":",
"continue",
"# Comment, ignore",
"if",
"entry",
".",
"startswith",
"(",
"\"#\"",
")",
":",
"continue",
"# Handle genre",
"if",
"not",
"entry",
".",
"startswith",
"(",
"\"-\"",
")",
":",
"genre",
",",
"points",
"=",
"self",
".",
"_parse_entry",
"(",
"entry",
")",
"reference",
"[",
"genre",
"]",
"=",
"entry_category",
"reference_points",
"[",
"genre",
"]",
"=",
"points",
"genre_index",
".",
"append",
"(",
"genre",
")",
"# Handle tag",
"else",
":",
"tag",
"=",
"entry",
"[",
"1",
":",
"]",
"tag",
",",
"points",
"=",
"self",
".",
"_parse_entry",
"(",
"tag",
",",
"limit",
"=",
"9.5",
")",
"reference",
"[",
"tag",
"]",
"=",
"entry_category",
"reference_points",
"[",
"tag",
"]",
"=",
"points",
"tag_index",
".",
"append",
"(",
"tag",
")",
"self",
".",
"reference",
"=",
"reference",
"self",
".",
"genres",
"=",
"genre_index",
"self",
".",
"tags",
"=",
"tag_index",
"self",
".",
"points",
"=",
"reference_points"
] | Split and iterate through the datafile to extract genres, tags
and points. | [
"Split",
"and",
"iterate",
"through",
"the",
"datafile",
"to",
"extract",
"genres",
"tags",
"and",
"points",
"."
] | 4bbc90f7c2c527631380c08b4d99a4e40abed955 | https://github.com/marteinn/genres/blob/4bbc90f7c2c527631380c08b4d99a4e40abed955/genres/db.py#L38-L86 |
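parse expects categories separated by blank lines: the first line names the category (and is itself indexed as a genre), further bare lines are genres, lines prefixed with '-' are tags, and an optional fraction after a comma scales the points. A hypothetical datafile run through it (the names, weights, and the no-argument Db() constructor are assumptions):

sample = ("rock\n"
          "indie rock, 0.8\n"
          "-guitar\n"
          "-drums, 0.5\n"
          "\n"
          "jazz\n"
          "-saxophone")

db = Db()            # assumes the Db class above can be built without arguments
db.parse(sample)
# db.genres -> ['rock', 'indie rock', 'jazz']
# db.tags   -> ['guitar', 'drums', 'saxophone']
# db.points -> {'rock': 10, 'indie rock': 8, 'guitar': 9, 'drums': 4,
#               'jazz': 10, 'saxophone': 9}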
250,763 | marteinn/genres | genres/db.py | Db._parse_entry | def _parse_entry(entry, limit=10):
"""
Finds both label and if provided, the points for ranking.
"""
entry = entry.split(",")
label = entry[0]
points = limit
if len(entry) > 1:
proc = float(entry[1].strip())
points = limit * proc
return label, int(points) | python | def _parse_entry(entry, limit=10):
"""
Finds both label and if provided, the points for ranking.
"""
entry = entry.split(",")
label = entry[0]
points = limit
if len(entry) > 1:
proc = float(entry[1].strip())
points = limit * proc
return label, int(points) | [
"def",
"_parse_entry",
"(",
"entry",
",",
"limit",
"=",
"10",
")",
":",
"entry",
"=",
"entry",
".",
"split",
"(",
"\",\"",
")",
"label",
"=",
"entry",
"[",
"0",
"]",
"points",
"=",
"limit",
"if",
"len",
"(",
"entry",
")",
">",
"1",
":",
"proc",
"=",
"float",
"(",
"entry",
"[",
"1",
"]",
".",
"strip",
"(",
")",
")",
"points",
"=",
"limit",
"*",
"proc",
"return",
"label",
",",
"int",
"(",
"points",
")"
] | Finds both label and if provided, the points for ranking. | [
"Finds",
"both",
"label",
"and",
"if",
"provided",
"the",
"points",
"for",
"ranking",
"."
] | 4bbc90f7c2c527631380c08b4d99a4e40abed955 | https://github.com/marteinn/genres/blob/4bbc90f7c2c527631380c08b4d99a4e40abed955/genres/db.py#L89-L102 |
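The fraction after the comma scales against limit, and int() truncates the product. Illustrative calls (this assumes _parse_entry is a @staticmethod on Db, which its missing self parameter suggests):

Db._parse_entry("rock")                    # ("rock", 10)
Db._parse_entry("indie rock, 0.8")         # ("indie rock", 8)   int(10 * 0.8)
Db._parse_entry("drums, 0.5", limit=9.5)   # ("drums", 4)        int(9.5 * 0.5) == 4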
250,764 | minhhoit/yacms | yacms/utils/sites.py | has_site_permission | def has_site_permission(user):
"""
Checks if a staff user has staff-level access for the current site.
The actual permission lookup occurs in ``SitePermissionMiddleware``
which then marks the request with the ``has_site_permission`` flag,
so that we only query the db once per request, so this function
serves as the entry point for everything else to check access. We
also fall back to an ``is_staff`` check if the middleware is not
installed, to ease migration.
"""
mw = "yacms.core.middleware.SitePermissionMiddleware"
if mw not in get_middleware_setting():
from warnings import warn
warn(mw + " missing from settings.MIDDLEWARE - per site"
"permissions not applied")
return user.is_staff and user.is_active
return getattr(user, "has_site_permission", False) | python | def has_site_permission(user):
"""
Checks if a staff user has staff-level access for the current site.
The actual permission lookup occurs in ``SitePermissionMiddleware``
which then marks the request with the ``has_site_permission`` flag,
so that we only query the db once per request, so this function
serves as the entry point for everything else to check access. We
also fall back to an ``is_staff`` check if the middleware is not
installed, to ease migration.
"""
mw = "yacms.core.middleware.SitePermissionMiddleware"
if mw not in get_middleware_setting():
from warnings import warn
warn(mw + " missing from settings.MIDDLEWARE - per site"
"permissions not applied")
return user.is_staff and user.is_active
return getattr(user, "has_site_permission", False) | [
"def",
"has_site_permission",
"(",
"user",
")",
":",
"mw",
"=",
"\"yacms.core.middleware.SitePermissionMiddleware\"",
"if",
"mw",
"not",
"in",
"get_middleware_setting",
"(",
")",
":",
"from",
"warnings",
"import",
"warn",
"warn",
"(",
"mw",
"+",
"\" missing from settings.MIDDLEWARE - per site\"",
"\"permissions not applied\"",
")",
"return",
"user",
".",
"is_staff",
"and",
"user",
".",
"is_active",
"return",
"getattr",
"(",
"user",
",",
"\"has_site_permission\"",
",",
"False",
")"
] | Checks if a staff user has staff-level access for the current site.
The actual permission lookup occurs in ``SitePermissionMiddleware``
which then marks the request with the ``has_site_permission`` flag,
so that we only query the db once per request, so this function
serves as the entry point for everything else to check access. We
also fall back to an ``is_staff`` check if the middleware is not
installed, to ease migration. | [
"Checks",
"if",
"a",
"staff",
"user",
"has",
"staff",
"-",
"level",
"access",
"for",
"the",
"current",
"site",
".",
"The",
"actual",
"permission",
"lookup",
"occurs",
"in",
"SitePermissionMiddleware",
"which",
"then",
"marks",
"the",
"request",
"with",
"the",
"has_site_permission",
"flag",
"so",
"that",
"we",
"only",
"query",
"the",
"db",
"once",
"per",
"request",
"so",
"this",
"function",
"serves",
"as",
"the",
"entry",
"point",
"for",
"everything",
"else",
"to",
"check",
"access",
".",
"We",
"also",
"fall",
"back",
"to",
"an",
"is_staff",
"check",
"if",
"the",
"middleware",
"is",
"not",
"installed",
"to",
"ease",
"migration",
"."
] | 2921b706b7107c6e8c5f2bbf790ff11f85a2167f | https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/utils/sites.py#L80-L96 |
250,765 | minhhoit/yacms | yacms/utils/sites.py | host_theme_path | def host_theme_path():
"""
Returns the directory of the theme associated with the given host.
"""
# Set domain to None, which we'll then query for in the first
# iteration of HOST_THEMES. We use the current site_id rather
# than a request object here, as it may differ for admin users.
domain = None
for (host, theme) in settings.HOST_THEMES:
if domain is None:
domain = Site.objects.get(id=current_site_id()).domain
if host.lower() == domain.lower():
try:
__import__(theme)
module = sys.modules[theme]
except ImportError:
pass
else:
return os.path.dirname(os.path.abspath(module.__file__))
return "" | python | def host_theme_path():
"""
Returns the directory of the theme associated with the given host.
"""
# Set domain to None, which we'll then query for in the first
# iteration of HOST_THEMES. We use the current site_id rather
# than a request object here, as it may differ for admin users.
domain = None
for (host, theme) in settings.HOST_THEMES:
if domain is None:
domain = Site.objects.get(id=current_site_id()).domain
if host.lower() == domain.lower():
try:
__import__(theme)
module = sys.modules[theme]
except ImportError:
pass
else:
return os.path.dirname(os.path.abspath(module.__file__))
return "" | [
"def",
"host_theme_path",
"(",
")",
":",
"# Set domain to None, which we'll then query for in the first",
"# iteration of HOST_THEMES. We use the current site_id rather",
"# than a request object here, as it may differ for admin users.",
"domain",
"=",
"None",
"for",
"(",
"host",
",",
"theme",
")",
"in",
"settings",
".",
"HOST_THEMES",
":",
"if",
"domain",
"is",
"None",
":",
"domain",
"=",
"Site",
".",
"objects",
".",
"get",
"(",
"id",
"=",
"current_site_id",
"(",
")",
")",
".",
"domain",
"if",
"host",
".",
"lower",
"(",
")",
"==",
"domain",
".",
"lower",
"(",
")",
":",
"try",
":",
"__import__",
"(",
"theme",
")",
"module",
"=",
"sys",
".",
"modules",
"[",
"theme",
"]",
"except",
"ImportError",
":",
"pass",
"else",
":",
"return",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"module",
".",
"__file__",
")",
")",
"return",
"\"\""
] | Returns the directory of the theme associated with the given host. | [
"Returns",
"the",
"directory",
"of",
"the",
"theme",
"associated",
"with",
"the",
"given",
"host",
"."
] | 2921b706b7107c6e8c5f2bbf790ff11f85a2167f | https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/utils/sites.py#L99-L120 |
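host_theme_path walks settings.HOST_THEMES, a sequence of (hostname, theme package) pairs, and returns the directory of the first importable theme whose host matches the current site's domain. A hypothetical settings fragment in the shape it expects (domains and package names invented):

# settings.py
HOST_THEMES = [
    ("example.com", "example_theme"),            # value must be an importable package
    ("blog.example.com", "example_blog_theme"),
]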
250,766 | monkeython/scriba | scriba/schemes/data.py | read | def read(url, **args):
"""Loads an object from a data URI."""
info, data = url.path.split(',')
info = data_re.search(info).groupdict()
mediatype = info.setdefault('mediatype', 'text/plain;charset=US-ASCII')
if ';' in mediatype:
mimetype, params = mediatype.split(';', 1)
params = [p.split('=') for p in params.split(';')]
params = dict((k.strip(), v.strip()) for k, v in params)
else:
mimetype, params = mediatype, dict()
data = base64.b64decode(data) if info['base64'] else urllib.unquote(data)
return content_types.get(mimetype).parse(data, **params) | python | def read(url, **args):
"""Loads an object from a data URI."""
info, data = url.path.split(',')
info = data_re.search(info).groupdict()
mediatype = info.setdefault('mediatype', 'text/plain;charset=US-ASCII')
if ';' in mediatype:
mimetype, params = mediatype.split(';', 1)
params = [p.split('=') for p in params.split(';')]
params = dict((k.strip(), v.strip()) for k, v in params)
else:
mimetype, params = mediatype, dict()
data = base64.b64decode(data) if info['base64'] else urllib.unquote(data)
return content_types.get(mimetype).parse(data, **params) | [
"def",
"read",
"(",
"url",
",",
"*",
"*",
"args",
")",
":",
"info",
",",
"data",
"=",
"url",
".",
"path",
".",
"split",
"(",
"','",
")",
"info",
"=",
"data_re",
".",
"search",
"(",
"info",
")",
".",
"groupdict",
"(",
")",
"mediatype",
"=",
"info",
".",
"setdefault",
"(",
"'mediatype'",
",",
"'text/plain;charset=US-ASCII'",
")",
"if",
"';'",
"in",
"mediatype",
":",
"mimetype",
",",
"params",
"=",
"mediatype",
".",
"split",
"(",
"';'",
",",
"1",
")",
"params",
"=",
"[",
"p",
".",
"split",
"(",
"'='",
")",
"for",
"p",
"in",
"params",
".",
"split",
"(",
"';'",
")",
"]",
"params",
"=",
"dict",
"(",
"(",
"k",
".",
"strip",
"(",
")",
",",
"v",
".",
"strip",
"(",
")",
")",
"for",
"k",
",",
"v",
"in",
"params",
")",
"else",
":",
"mimetype",
",",
"params",
"=",
"mediatype",
",",
"dict",
"(",
")",
"data",
"=",
"base64",
".",
"b64decode",
"(",
"data",
")",
"if",
"info",
"[",
"'base64'",
"]",
"else",
"urllib",
".",
"unquote",
"(",
"data",
")",
"return",
"content_types",
".",
"get",
"(",
"mimetype",
")",
".",
"parse",
"(",
"data",
",",
"*",
"*",
"params",
")"
] | Loads an object from a data URI. | [
"Loads",
"an",
"object",
"from",
"a",
"data",
"URI",
"."
] | fb8e7636ed07c3d035433fdd153599ac8b24dfc4 | https://github.com/monkeython/scriba/blob/fb8e7636ed07c3d035433fdd153599ac8b24dfc4/scriba/schemes/data.py#L23-L35 |
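read splits the URI body at the comma, pulls the media type and optional base64 marker from the left half, and decodes the right half before dispatching on MIME type. A sketch of just the decoding step, in Python 2 to match the record's urllib.unquote (the URI is invented):

import base64
import urllib

path = "text/plain;charset=US-ASCII;base64,aGVsbG8="   # url.path, data: prefix already stripped
info, payload = path.split(',')
is_base64 = info.endswith(';base64')                    # simplified stand-in for data_re
data = base64.b64decode(payload) if is_base64 else urllib.unquote(payload)
# data == 'hello'; the charset parameter would be handed to the text/plain parser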
250,767 | monkeython/scriba | scriba/schemes/data.py | write | def write(url, object_, **args):
"""Writes an object to a data URI."""
default_content_type = ('text/plain', {'charset': 'US-ASCII'})
content_encoding = args.get('content_encoding', 'base64')
content_type, params = args.get('content_type', default_content_type)
data = content_types.get(content_type).format(object_, **params)
args['data'].write('data:{}'.format(content_type))
for param, value in params.items():
args['data'].write(';{}={}'.format(param, value))
if content_encoding == 'base64':
args['data'].write(';base64,{}'.format(base64.b64decode(data)))
else:
args['data'].write(',{}', urllib.quote(data))
args['data'].seek(0) | python | def write(url, object_, **args):
"""Writes an object to a data URI."""
default_content_type = ('text/plain', {'charset': 'US-ASCII'})
content_encoding = args.get('content_encoding', 'base64')
content_type, params = args.get('content_type', default_content_type)
data = content_types.get(content_type).format(object_, **params)
args['data'].write('data:{}'.format(content_type))
for param, value in params.items():
args['data'].write(';{}={}'.format(param, value))
if content_encoding == 'base64':
args['data'].write(';base64,{}'.format(base64.b64decode(data)))
else:
args['data'].write(',{}', urllib.quote(data))
args['data'].seek(0) | [
"def",
"write",
"(",
"url",
",",
"object_",
",",
"*",
"*",
"args",
")",
":",
"default_content_type",
"=",
"(",
"'text/plain'",
",",
"{",
"'charset'",
":",
"'US-ASCII'",
"}",
")",
"content_encoding",
"=",
"args",
".",
"get",
"(",
"'content_encoding'",
",",
"'base64'",
")",
"content_type",
",",
"params",
"=",
"args",
".",
"get",
"(",
"'content_type'",
",",
"default_content_type",
")",
"data",
"=",
"content_types",
".",
"get",
"(",
"content_type",
")",
".",
"format",
"(",
"object_",
",",
"*",
"*",
"params",
")",
"args",
"[",
"'data'",
"]",
".",
"write",
"(",
"'data:{}'",
".",
"format",
"(",
"content_type",
")",
")",
"for",
"param",
",",
"value",
"in",
"params",
".",
"items",
"(",
")",
":",
"args",
"[",
"'data'",
"]",
".",
"write",
"(",
"';{}={}'",
".",
"format",
"(",
"param",
",",
"value",
")",
")",
"if",
"content_encoding",
"==",
"'base64'",
":",
"args",
"[",
"'data'",
"]",
".",
"write",
"(",
"';base64,{}'",
".",
"format",
"(",
"base64",
".",
"b64decode",
"(",
"data",
")",
")",
")",
"else",
":",
"args",
"[",
"'data'",
"]",
".",
"write",
"(",
"',{}'",
",",
"urllib",
".",
"quote",
"(",
"data",
")",
")",
"args",
"[",
"'data'",
"]",
".",
"seek",
"(",
"0",
")"
] | Writes an object to a data URI. | [
"Writes",
"an",
"object",
"to",
"a",
"data",
"URI",
"."
] | fb8e7636ed07c3d035433fdd153599ac8b24dfc4 | https://github.com/monkeython/scriba/blob/fb8e7636ed07c3d035433fdd153599ac8b24dfc4/scriba/schemes/data.py#L38-L51 |
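As recorded, write calls base64.b64decode where an encode looks intended, and the percent-encoded branch passes the payload as a stray positional argument instead of formatting it. A corrected sketch of the payload serialization, under my reading that encoding was meant (not a fix confirmed upstream):

import base64
import urllib

def encode_payload(data, content_encoding='base64'):
    # Same branch structure as the method above, but encoding outward
    # and actually interpolating the percent-encoded payload.
    if content_encoding == 'base64':
        return ';base64,{}'.format(base64.b64encode(data))
    return ',{}'.format(urllib.quote(data))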
250,768 | kodexlab/reliure | reliure/utils/__init__.py | deprecated | def deprecated(new_fct_name, logger=None):
""" Decorator to notify that a fct is deprecated
"""
if logger is None:
logger = logging.getLogger("kodex")
nfct_name = new_fct_name
def aux_deprecated(func):
"""This is a decorator which can be used to mark functions
        as deprecated. It will result in a warning being emitted
when the function is used."""
def newFunc(*args, **kwargs):
msg = "DeprecationWarning: use '%s' instead of '%s'." % (new_fct_name, func.__name__)
logger.warning(msg)
warnings.warn(msg, category=DeprecationWarning)
return func(*args, **kwargs)
newFunc.__name__ = func.__name__
newFunc.__doc__ = func.__doc__
newFunc.__dict__.update(func.__dict__)
return newFunc
return aux_deprecated | python | def deprecated(new_fct_name, logger=None):
""" Decorator to notify that a fct is deprecated
"""
if logger is None:
logger = logging.getLogger("kodex")
nfct_name = new_fct_name
def aux_deprecated(func):
"""This is a decorator which can be used to mark functions
        as deprecated. It will result in a warning being emitted
when the function is used."""
def newFunc(*args, **kwargs):
msg = "DeprecationWarning: use '%s' instead of '%s'." % (new_fct_name, func.__name__)
logger.warning(msg)
warnings.warn(msg, category=DeprecationWarning)
return func(*args, **kwargs)
newFunc.__name__ = func.__name__
newFunc.__doc__ = func.__doc__
newFunc.__dict__.update(func.__dict__)
return newFunc
return aux_deprecated | [
"def",
"deprecated",
"(",
"new_fct_name",
",",
"logger",
"=",
"None",
")",
":",
"if",
"logger",
"is",
"None",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"\"kodex\"",
")",
"nfct_name",
"=",
"new_fct_name",
"def",
"aux_deprecated",
"(",
"func",
")",
":",
"\"\"\"This is a decorator which can be used to mark functions\n as deprecated. It will result in a warning being emmitted\n when the function is used.\"\"\"",
"def",
"newFunc",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"msg",
"=",
"\"DeprecationWarning: use '%s' instead of '%s'.\"",
"%",
"(",
"new_fct_name",
",",
"func",
".",
"__name__",
")",
"logger",
".",
"warning",
"(",
"msg",
")",
"warnings",
".",
"warn",
"(",
"msg",
",",
"category",
"=",
"DeprecationWarning",
")",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"newFunc",
".",
"__name__",
"=",
"func",
".",
"__name__",
"newFunc",
".",
"__doc__",
"=",
"func",
".",
"__doc__",
"newFunc",
".",
"__dict__",
".",
"update",
"(",
"func",
".",
"__dict__",
")",
"return",
"newFunc",
"return",
"aux_deprecated"
] | Decorator to notify that a fct is deprecated | [
"Decorator",
"to",
"notify",
"that",
"a",
"fct",
"is",
"deprecated"
] | 0450c7a9254c5c003162738458bbe0c49e777ba5 | https://github.com/kodexlab/reliure/blob/0450c7a9254c5c003162738458bbe0c49e777ba5/reliure/utils/__init__.py#L52-L71 |
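Typical use wraps the old name so each call logs a pointer to the replacement. A small usage sketch (function names invented):

import logging
logging.basicConfig()

def new_tokenize(text):
    return text.split()

@deprecated("new_tokenize")        # assumes the decorator above is importable
def tokenize(text):
    return new_tokenize(text)

tokenize("a b c")
# logs: DeprecationWarning: use 'new_tokenize' instead of 'tokenize'.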
250,769 | gfranxman/utinypass | utinypass/crypto.py | blockgen | def blockgen(bytes, block_size=16):
''' a block generator for pprp '''
for i in range(0, len(bytes), block_size):
block = bytes[i:i + block_size]
block_len = len(block)
if block_len > 0:
yield block
if block_len < block_size:
break | python | def blockgen(bytes, block_size=16):
''' a block generator for pprp '''
for i in range(0, len(bytes), block_size):
block = bytes[i:i + block_size]
block_len = len(block)
if block_len > 0:
yield block
if block_len < block_size:
break | [
"def",
"blockgen",
"(",
"bytes",
",",
"block_size",
"=",
"16",
")",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"bytes",
")",
",",
"block_size",
")",
":",
"block",
"=",
"bytes",
"[",
"i",
":",
"i",
"+",
"block_size",
"]",
"block_len",
"=",
"len",
"(",
"block",
")",
"if",
"block_len",
">",
"0",
":",
"yield",
"block",
"if",
"block_len",
"<",
"block_size",
":",
"break"
] | a block generator for pprp | [
"a",
"block",
"generator",
"for",
"pprp"
] | c49cff25ae408dbbb58ec98d1c87894474011cdf | https://github.com/gfranxman/utinypass/blob/c49cff25ae408dbbb58ec98d1c87894474011cdf/utinypass/crypto.py#L78-L86 |
250,770 | kodexlab/reliure | reliure/utils/log.py | get_basic_logger | def get_basic_logger(level=logging.WARN, scope='reliure'):
""" return a basic logger that print on stdout msg from reliure lib
"""
logger = logging.getLogger(scope)
logger.setLevel(level)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(level)
# create formatter and add it to the handlers
formatter = ColorFormatter('%(asctime)s:%(levelname)s:%(name)s:%(message)s')
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(ch)
return logger | python | def get_basic_logger(level=logging.WARN, scope='reliure'):
""" return a basic logger that print on stdout msg from reliure lib
"""
logger = logging.getLogger(scope)
logger.setLevel(level)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(level)
# create formatter and add it to the handlers
formatter = ColorFormatter('%(asctime)s:%(levelname)s:%(name)s:%(message)s')
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(ch)
return logger | [
"def",
"get_basic_logger",
"(",
"level",
"=",
"logging",
".",
"WARN",
",",
"scope",
"=",
"'reliure'",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"scope",
")",
"logger",
".",
"setLevel",
"(",
"level",
")",
"# create console handler with a higher log level",
"ch",
"=",
"logging",
".",
"StreamHandler",
"(",
")",
"ch",
".",
"setLevel",
"(",
"level",
")",
"# create formatter and add it to the handlers",
"formatter",
"=",
"ColorFormatter",
"(",
"'%(asctime)s:%(levelname)s:%(name)s:%(message)s'",
")",
"ch",
".",
"setFormatter",
"(",
"formatter",
")",
"# add the handlers to the logger",
"logger",
".",
"addHandler",
"(",
"ch",
")",
"return",
"logger"
] | return a basic logger that print on stdout msg from reliure lib | [
"return",
"a",
"basic",
"logger",
"that",
"print",
"on",
"stdout",
"msg",
"from",
"reliure",
"lib"
] | 0450c7a9254c5c003162738458bbe0c49e777ba5 | https://github.com/kodexlab/reliure/blob/0450c7a9254c5c003162738458bbe0c49e777ba5/reliure/utils/log.py#L20-L33 |
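Usage is a one-liner; scope controls which logger subtree gets the stream handler:

import logging
logger = get_basic_logger(level=logging.INFO)   # assumes the helper above is importable
logger.info("pipeline started")                 # colorised line on stdout via ColorFormatter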
250,771 | af/turrentine | turrentine/admin.py | ChangeableContentForm.save | def save(self, *args, **kwargs):
"""
Save the created_by and last_modified_by fields based on the current admin user.
"""
if not self.instance.id:
self.instance.created_by = self.user
self.instance.last_modified_by = self.user
return super(ChangeableContentForm, self).save(*args, **kwargs) | python | def save(self, *args, **kwargs):
"""
Save the created_by and last_modified_by fields based on the current admin user.
"""
if not self.instance.id:
self.instance.created_by = self.user
self.instance.last_modified_by = self.user
return super(ChangeableContentForm, self).save(*args, **kwargs) | [
"def",
"save",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"self",
".",
"instance",
".",
"id",
":",
"self",
".",
"instance",
".",
"created_by",
"=",
"self",
".",
"user",
"self",
".",
"instance",
".",
"last_modified_by",
"=",
"self",
".",
"user",
"return",
"super",
"(",
"ChangeableContentForm",
",",
"self",
")",
".",
"save",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Save the created_by and last_modified_by fields based on the current admin user. | [
"Save",
"the",
"created_by",
"and",
"last_modified_by",
"fields",
"based",
"on",
"the",
"current",
"admin",
"user",
"."
] | bbbd5139744ccc6264595cc8960784e5c308c009 | https://github.com/af/turrentine/blob/bbbd5139744ccc6264595cc8960784e5c308c009/turrentine/admin.py#L19-L26 |
250,772 | af/turrentine | turrentine/admin.py | PageAdmin.get_urls | def get_urls(self):
"""
Add our preview view to our urls.
"""
urls = super(PageAdmin, self).get_urls()
my_urls = patterns('',
(r'^add/preview$', self.admin_site.admin_view(PagePreviewView.as_view())),
(r'^(?P<id>\d+)/preview$', self.admin_site.admin_view(PagePreviewView.as_view())),
(r'^(?P<id>\d+)/history/(\d+)/preview$', self.admin_site.admin_view(PagePreviewView.as_view())),
)
return my_urls + urls | python | def get_urls(self):
"""
Add our preview view to our urls.
"""
urls = super(PageAdmin, self).get_urls()
my_urls = patterns('',
(r'^add/preview$', self.admin_site.admin_view(PagePreviewView.as_view())),
(r'^(?P<id>\d+)/preview$', self.admin_site.admin_view(PagePreviewView.as_view())),
(r'^(?P<id>\d+)/history/(\d+)/preview$', self.admin_site.admin_view(PagePreviewView.as_view())),
)
return my_urls + urls | [
"def",
"get_urls",
"(",
"self",
")",
":",
"urls",
"=",
"super",
"(",
"PageAdmin",
",",
"self",
")",
".",
"get_urls",
"(",
")",
"my_urls",
"=",
"patterns",
"(",
"''",
",",
"(",
"r'^add/preview$'",
",",
"self",
".",
"admin_site",
".",
"admin_view",
"(",
"PagePreviewView",
".",
"as_view",
"(",
")",
")",
")",
",",
"(",
"r'^(?P<id>\\d+)/preview$'",
",",
"self",
".",
"admin_site",
".",
"admin_view",
"(",
"PagePreviewView",
".",
"as_view",
"(",
")",
")",
")",
",",
"(",
"r'^(?P<id>\\d+)/history/(\\d+)/preview$'",
",",
"self",
".",
"admin_site",
".",
"admin_view",
"(",
"PagePreviewView",
".",
"as_view",
"(",
")",
")",
")",
",",
")",
"return",
"my_urls",
"+",
"urls"
] | Add our preview view to our urls. | [
"Add",
"our",
"preview",
"view",
"to",
"our",
"urls",
"."
] | bbbd5139744ccc6264595cc8960784e5c308c009 | https://github.com/af/turrentine/blob/bbbd5139744ccc6264595cc8960784e5c308c009/turrentine/admin.py#L101-L111 |
250,773 | af/turrentine | turrentine/admin.py | PagePreviewView.get_template_names | def get_template_names(self):
"""
Return the page's specified template name, or a fallback if one hasn't been chosen.
"""
posted_name = self.request.POST.get('template_name')
if posted_name:
return [posted_name,]
else:
return super(PagePreviewView, self).get_template_names() | python | def get_template_names(self):
"""
Return the page's specified template name, or a fallback if one hasn't been chosen.
"""
posted_name = self.request.POST.get('template_name')
if posted_name:
return [posted_name,]
else:
return super(PagePreviewView, self).get_template_names() | [
"def",
"get_template_names",
"(",
"self",
")",
":",
"posted_name",
"=",
"self",
".",
"request",
".",
"POST",
".",
"get",
"(",
"'template_name'",
")",
"if",
"posted_name",
":",
"return",
"[",
"posted_name",
",",
"]",
"else",
":",
"return",
"super",
"(",
"PagePreviewView",
",",
"self",
")",
".",
"get_template_names",
"(",
")"
] | Return the page's specified template name, or a fallback if one hasn't been chosen. | [
"Return",
"the",
"page",
"s",
"specified",
"template",
"name",
"or",
"a",
"fallback",
"if",
"one",
"hasn",
"t",
"been",
"chosen",
"."
] | bbbd5139744ccc6264595cc8960784e5c308c009 | https://github.com/af/turrentine/blob/bbbd5139744ccc6264595cc8960784e5c308c009/turrentine/admin.py#L135-L143 |
250,774 | af/turrentine | turrentine/admin.py | PagePreviewView.post | def post(self, request, *args, **kwargs):
"""
Accepts POST requests, and substitute the data in for the page's attributes.
"""
self.object = self.get_object()
self.object.content = request.POST['content']
self.object.title = request.POST['title']
self.object = self._mark_html_fields_as_safe(self.object)
context = self.get_context_data(object=self.object)
return self.render_to_response(context, content_type=self.get_mimetype()) | python | def post(self, request, *args, **kwargs):
"""
Accepts POST requests, and substitute the data in for the page's attributes.
"""
self.object = self.get_object()
self.object.content = request.POST['content']
self.object.title = request.POST['title']
self.object = self._mark_html_fields_as_safe(self.object)
context = self.get_context_data(object=self.object)
return self.render_to_response(context, content_type=self.get_mimetype()) | [
"def",
"post",
"(",
"self",
",",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"object",
"=",
"self",
".",
"get_object",
"(",
")",
"self",
".",
"object",
".",
"content",
"=",
"request",
".",
"POST",
"[",
"'content'",
"]",
"self",
".",
"object",
".",
"title",
"=",
"request",
".",
"POST",
"[",
"'title'",
"]",
"self",
".",
"object",
"=",
"self",
".",
"_mark_html_fields_as_safe",
"(",
"self",
".",
"object",
")",
"context",
"=",
"self",
".",
"get_context_data",
"(",
"object",
"=",
"self",
".",
"object",
")",
"return",
"self",
".",
"render_to_response",
"(",
"context",
",",
"content_type",
"=",
"self",
".",
"get_mimetype",
"(",
")",
")"
] | Accepts POST requests, and substitute the data in for the page's attributes. | [
"Accepts",
"POST",
"requests",
"and",
"substitute",
"the",
"data",
"in",
"for",
"the",
"page",
"s",
"attributes",
"."
] | bbbd5139744ccc6264595cc8960784e5c308c009 | https://github.com/af/turrentine/blob/bbbd5139744ccc6264595cc8960784e5c308c009/turrentine/admin.py#L145-L155 |
250,775 | hitchtest/hitchserve | hitchserve/service_bundle.py | ServiceBundle.redirect_stdout | def redirect_stdout(self):
"""Redirect stdout to file so that it can be tailed and aggregated with the other logs."""
self.hijacked_stdout = sys.stdout
self.hijacked_stderr = sys.stderr
# 0 must be set as the buffer, otherwise lines won't get logged in time.
sys.stdout = open(self.hitch_dir.driverout(), "ab", 0)
sys.stderr = open(self.hitch_dir.drivererr(), "ab", 0) | python | def redirect_stdout(self):
"""Redirect stdout to file so that it can be tailed and aggregated with the other logs."""
self.hijacked_stdout = sys.stdout
self.hijacked_stderr = sys.stderr
# 0 must be set as the buffer, otherwise lines won't get logged in time.
sys.stdout = open(self.hitch_dir.driverout(), "ab", 0)
sys.stderr = open(self.hitch_dir.drivererr(), "ab", 0) | [
"def",
"redirect_stdout",
"(",
"self",
")",
":",
"self",
".",
"hijacked_stdout",
"=",
"sys",
".",
"stdout",
"self",
".",
"hijacked_stderr",
"=",
"sys",
".",
"stderr",
"# 0 must be set as the buffer, otherwise lines won't get logged in time.",
"sys",
".",
"stdout",
"=",
"open",
"(",
"self",
".",
"hitch_dir",
".",
"driverout",
"(",
")",
",",
"\"ab\"",
",",
"0",
")",
"sys",
".",
"stderr",
"=",
"open",
"(",
"self",
".",
"hitch_dir",
".",
"drivererr",
"(",
")",
",",
"\"ab\"",
",",
"0",
")"
] | Redirect stdout to file so that it can be tailed and aggregated with the other logs. | [
"Redirect",
"stdout",
"to",
"file",
"so",
"that",
"it",
"can",
"be",
"tailed",
"and",
"aggregated",
"with",
"the",
"other",
"logs",
"."
] | a2def19979264186d283e76f7f0c88f3ed97f2e0 | https://github.com/hitchtest/hitchserve/blob/a2def19979264186d283e76f7f0c88f3ed97f2e0/hitchserve/service_bundle.py#L202-L208 |
250,776 | hitchtest/hitchserve | hitchserve/service_bundle.py | ServiceBundle.unredirect_stdout | def unredirect_stdout(self):
"""Redirect stdout and stderr back to screen."""
if hasattr(self, 'hijacked_stdout') and hasattr(self, 'hijacked_stderr'):
sys.stdout = self.hijacked_stdout
sys.stderr = self.hijacked_stderr | python | def unredirect_stdout(self):
"""Redirect stdout and stderr back to screen."""
if hasattr(self, 'hijacked_stdout') and hasattr(self, 'hijacked_stderr'):
sys.stdout = self.hijacked_stdout
sys.stderr = self.hijacked_stderr | [
"def",
"unredirect_stdout",
"(",
"self",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"'hijacked_stdout'",
")",
"and",
"hasattr",
"(",
"self",
",",
"'hijacked_stderr'",
")",
":",
"sys",
".",
"stdout",
"=",
"self",
".",
"hijacked_stdout",
"sys",
".",
"stderr",
"=",
"self",
".",
"hijacked_stderr"
] | Redirect stdout and stderr back to screen. | [
"Redirect",
"stdout",
"and",
"stderr",
"back",
"to",
"screen",
"."
] | a2def19979264186d283e76f7f0c88f3ed97f2e0 | https://github.com/hitchtest/hitchserve/blob/a2def19979264186d283e76f7f0c88f3ed97f2e0/hitchserve/service_bundle.py#L210-L214 |
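Together the two methods swap stdout and stderr for unbuffered log files and back. The core move, as a Python 2 sketch matching the record's open(..., 'ab', 0) idiom (the path is invented):

import sys

out = open('/tmp/driver.out', 'ab', 0)   # buffer of 0 so tailed lines appear immediately
saved_stdout, sys.stdout = sys.stdout, out
print('this line lands in the log file')
sys.stdout = saved_stdout                # restore, as unredirect_stdout does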
250,777 | hitchtest/hitchserve | hitchserve/service_bundle.py | ServiceBundle.time_travel | def time_travel(self, datetime=None, timedelta=None, seconds=0, minutes=0, hours=0, days=0):
"""Mock moving forward or backward in time by shifting the system clock fed to the services tested.
Note that all of these arguments can be used together, individually or not at all. The time
traveled to will be the sum of all specified time deltas from datetime. If no datetime is specified,
the deltas will be added to the current time.
Args:
datetime (Optional[datetime]): Time travel to specific datetime.
timedelta (Optional[timedelta]): Time travel to 'timedelta' from now.
seconds (Optional[number]): Time travel 'seconds' seconds from now.
minutes (Optional[number]): Time travel 'minutes' minutes from now.
hours (Optional[number]): Time travel 'hours' hours from now.
days (Optional[number]): Time travel 'days' days from now.
"""
if datetime is not None:
self.timedelta = datetime - python_datetime.now()
if timedelta is not None:
self.timedelta = self.timedelta + timedelta
self.timedelta = self.timedelta + python_timedelta(seconds=seconds)
self.timedelta = self.timedelta + python_timedelta(minutes=minutes)
self.timedelta = self.timedelta + python_timedelta(hours=hours)
self.timedelta = self.timedelta + python_timedelta(days=days)
log("Time traveling to {}\n".format(humanize.naturaltime(self.now())))
faketime.change_time(self.hitch_dir.faketime(), self.now()) | python | def time_travel(self, datetime=None, timedelta=None, seconds=0, minutes=0, hours=0, days=0):
"""Mock moving forward or backward in time by shifting the system clock fed to the services tested.
Note that all of these arguments can be used together, individually or not at all. The time
traveled to will be the sum of all specified time deltas from datetime. If no datetime is specified,
the deltas will be added to the current time.
Args:
datetime (Optional[datetime]): Time travel to specific datetime.
timedelta (Optional[timedelta]): Time travel to 'timedelta' from now.
seconds (Optional[number]): Time travel 'seconds' seconds from now.
minutes (Optional[number]): Time travel 'minutes' minutes from now.
hours (Optional[number]): Time travel 'hours' hours from now.
days (Optional[number]): Time travel 'days' days from now.
"""
if datetime is not None:
self.timedelta = datetime - python_datetime.now()
if timedelta is not None:
self.timedelta = self.timedelta + timedelta
self.timedelta = self.timedelta + python_timedelta(seconds=seconds)
self.timedelta = self.timedelta + python_timedelta(minutes=minutes)
self.timedelta = self.timedelta + python_timedelta(hours=hours)
self.timedelta = self.timedelta + python_timedelta(days=days)
log("Time traveling to {}\n".format(humanize.naturaltime(self.now())))
faketime.change_time(self.hitch_dir.faketime(), self.now()) | [
"def",
"time_travel",
"(",
"self",
",",
"datetime",
"=",
"None",
",",
"timedelta",
"=",
"None",
",",
"seconds",
"=",
"0",
",",
"minutes",
"=",
"0",
",",
"hours",
"=",
"0",
",",
"days",
"=",
"0",
")",
":",
"if",
"datetime",
"is",
"not",
"None",
":",
"self",
".",
"timedelta",
"=",
"datetime",
"-",
"python_datetime",
".",
"now",
"(",
")",
"if",
"timedelta",
"is",
"not",
"None",
":",
"self",
".",
"timedelta",
"=",
"self",
".",
"timedelta",
"+",
"timedelta",
"self",
".",
"timedelta",
"=",
"self",
".",
"timedelta",
"+",
"python_timedelta",
"(",
"seconds",
"=",
"seconds",
")",
"self",
".",
"timedelta",
"=",
"self",
".",
"timedelta",
"+",
"python_timedelta",
"(",
"minutes",
"=",
"minutes",
")",
"self",
".",
"timedelta",
"=",
"self",
".",
"timedelta",
"+",
"python_timedelta",
"(",
"hours",
"=",
"hours",
")",
"self",
".",
"timedelta",
"=",
"self",
".",
"timedelta",
"+",
"python_timedelta",
"(",
"days",
"=",
"days",
")",
"log",
"(",
"\"Time traveling to {}\\n\"",
".",
"format",
"(",
"humanize",
".",
"naturaltime",
"(",
"self",
".",
"now",
"(",
")",
")",
")",
")",
"faketime",
".",
"change_time",
"(",
"self",
".",
"hitch_dir",
".",
"faketime",
"(",
")",
",",
"self",
".",
"now",
"(",
")",
")"
] | Mock moving forward or backward in time by shifting the system clock fed to the services tested.
Note that all of these arguments can be used together, individually or not at all. The time
traveled to will be the sum of all specified time deltas from datetime. If no datetime is specified,
the deltas will be added to the current time.
Args:
datetime (Optional[datetime]): Time travel to specific datetime.
timedelta (Optional[timedelta]): Time travel to 'timedelta' from now.
seconds (Optional[number]): Time travel 'seconds' seconds from now.
minutes (Optional[number]): Time travel 'minutes' minutes from now.
hours (Optional[number]): Time travel 'hours' hours from now.
days (Optional[number]): Time travel 'days' days from now. | [
"Mock",
"moving",
"forward",
"or",
"backward",
"in",
"time",
"by",
"shifting",
"the",
"system",
"clock",
"fed",
"to",
"the",
"services",
"tested",
"."
] | a2def19979264186d283e76f7f0c88f3ed97f2e0 | https://github.com/hitchtest/hitchserve/blob/a2def19979264186d283e76f7f0c88f3ed97f2e0/hitchserve/service_bundle.py#L261-L285 |
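Because each call adds its deltas to the stored offset, the arguments compose across calls; passing datetime resets the offset to that point before the deltas are applied. Illustrative calls against a hypothetical running bundle:

bundle.time_travel(hours=2)              # mocked clock moves 2 hours forward
bundle.time_travel(days=1, minutes=30)   # a further day and a half hour on top
bundle.time_travel(datetime=target_dt,   # reset offset to target_dt (a datetime),
                   minutes=10)           # then add 10 minutes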
250,778 | hitchtest/hitchserve | hitchserve/service_bundle.py | ServiceBundle.wait_for_ipykernel | def wait_for_ipykernel(self, service_name, timeout=10):
"""Wait for an IPython kernel-nnnn.json filename message to appear in log."""
kernel_line = self._services[service_name].logs.tail.until(
lambda line: "--existing" in line[1], timeout=10, lines_back=5
)
return kernel_line.replace("--existing", "").strip() | python | def wait_for_ipykernel(self, service_name, timeout=10):
"""Wait for an IPython kernel-nnnn.json filename message to appear in log."""
kernel_line = self._services[service_name].logs.tail.until(
lambda line: "--existing" in line[1], timeout=10, lines_back=5
)
return kernel_line.replace("--existing", "").strip() | [
"def",
"wait_for_ipykernel",
"(",
"self",
",",
"service_name",
",",
"timeout",
"=",
"10",
")",
":",
"kernel_line",
"=",
"self",
".",
"_services",
"[",
"service_name",
"]",
".",
"logs",
".",
"tail",
".",
"until",
"(",
"lambda",
"line",
":",
"\"--existing\"",
"in",
"line",
"[",
"1",
"]",
",",
"timeout",
"=",
"10",
",",
"lines_back",
"=",
"5",
")",
"return",
"kernel_line",
".",
"replace",
"(",
"\"--existing\"",
",",
"\"\"",
")",
".",
"strip",
"(",
")"
] | Wait for an IPython kernel-nnnn.json filename message to appear in log. | [
"Wait",
"for",
"an",
"IPython",
"kernel",
"-",
"nnnn",
".",
"json",
"filename",
"message",
"to",
"appear",
"in",
"log",
"."
] | a2def19979264186d283e76f7f0c88f3ed97f2e0 | https://github.com/hitchtest/hitchserve/blob/a2def19979264186d283e76f7f0c88f3ed97f2e0/hitchserve/service_bundle.py#L294-L299 |
250,779 | hitchtest/hitchserve | hitchserve/service_bundle.py | ServiceBundle.connect_to_ipykernel | def connect_to_ipykernel(self, service_name, timeout=10):
"""Connect to an IPython kernel as soon as its message is logged."""
kernel_json_file = self.wait_for_ipykernel(service_name, timeout=10)
self.start_interactive_mode()
subprocess.check_call([
sys.executable, "-m", "IPython", "console", "--existing", kernel_json_file
])
self.stop_interactive_mode() | python | def connect_to_ipykernel(self, service_name, timeout=10):
"""Connect to an IPython kernel as soon as its message is logged."""
kernel_json_file = self.wait_for_ipykernel(service_name, timeout=10)
self.start_interactive_mode()
subprocess.check_call([
sys.executable, "-m", "IPython", "console", "--existing", kernel_json_file
])
self.stop_interactive_mode() | [
"def",
"connect_to_ipykernel",
"(",
"self",
",",
"service_name",
",",
"timeout",
"=",
"10",
")",
":",
"kernel_json_file",
"=",
"self",
".",
"wait_for_ipykernel",
"(",
"service_name",
",",
"timeout",
"=",
"10",
")",
"self",
".",
"start_interactive_mode",
"(",
")",
"subprocess",
".",
"check_call",
"(",
"[",
"sys",
".",
"executable",
",",
"\"-m\"",
",",
"\"IPython\"",
",",
"\"console\"",
",",
"\"--existing\"",
",",
"kernel_json_file",
"]",
")",
"self",
".",
"stop_interactive_mode",
"(",
")"
] | Connect to an IPython kernel as soon as its message is logged. | [
"Connect",
"to",
"an",
"IPython",
"kernel",
"as",
"soon",
"as",
"its",
"message",
"is",
"logged",
"."
] | a2def19979264186d283e76f7f0c88f3ed97f2e0 | https://github.com/hitchtest/hitchserve/blob/a2def19979264186d283e76f7f0c88f3ed97f2e0/hitchserve/service_bundle.py#L301-L308 |
250,780 | EventTeam/beliefs | src/beliefs/referent.py | TaxonomyCell.build_class_graph | def build_class_graph(modules, klass=None, graph=None):
""" Builds up a graph of the DictCell subclass structure """
if klass is None:
class_graph = nx.DiGraph()
for name, classmember in inspect.getmembers(modules, inspect.isclass):
if issubclass(classmember, Referent) and classmember is not Referent:
TaxonomyCell.build_class_graph(modules, classmember, class_graph)
return class_graph
else:
parents = getattr(klass, '__bases__')
for parent in parents:
if parent != Referent:
graph.add_edge(parent.__name__, klass.__name__)
# store pointer to classes in property 'class'
graph.node[parent.__name__]['class'] = parent
graph.node[klass.__name__]['class'] = klass
if issubclass(parent, Referent):
TaxonomyCell.build_class_graph(modules, parent, graph) | python | def build_class_graph(modules, klass=None, graph=None):
""" Builds up a graph of the DictCell subclass structure """
if klass is None:
class_graph = nx.DiGraph()
for name, classmember in inspect.getmembers(modules, inspect.isclass):
if issubclass(classmember, Referent) and classmember is not Referent:
TaxonomyCell.build_class_graph(modules, classmember, class_graph)
return class_graph
else:
parents = getattr(klass, '__bases__')
for parent in parents:
if parent != Referent:
graph.add_edge(parent.__name__, klass.__name__)
# store pointer to classes in property 'class'
graph.node[parent.__name__]['class'] = parent
graph.node[klass.__name__]['class'] = klass
if issubclass(parent, Referent):
TaxonomyCell.build_class_graph(modules, parent, graph) | [
"def",
"build_class_graph",
"(",
"modules",
",",
"klass",
"=",
"None",
",",
"graph",
"=",
"None",
")",
":",
"if",
"klass",
"is",
"None",
":",
"class_graph",
"=",
"nx",
".",
"DiGraph",
"(",
")",
"for",
"name",
",",
"classmember",
"in",
"inspect",
".",
"getmembers",
"(",
"modules",
",",
"inspect",
".",
"isclass",
")",
":",
"if",
"issubclass",
"(",
"classmember",
",",
"Referent",
")",
"and",
"classmember",
"is",
"not",
"Referent",
":",
"TaxonomyCell",
".",
"build_class_graph",
"(",
"modules",
",",
"classmember",
",",
"class_graph",
")",
"return",
"class_graph",
"else",
":",
"parents",
"=",
"getattr",
"(",
"klass",
",",
"'__bases__'",
")",
"for",
"parent",
"in",
"parents",
":",
"if",
"parent",
"!=",
"Referent",
":",
"graph",
".",
"add_edge",
"(",
"parent",
".",
"__name__",
",",
"klass",
".",
"__name__",
")",
"# store pointer to classes in property 'class'",
"graph",
".",
"node",
"[",
"parent",
".",
"__name__",
"]",
"[",
"'class'",
"]",
"=",
"parent",
"graph",
".",
"node",
"[",
"klass",
".",
"__name__",
"]",
"[",
"'class'",
"]",
"=",
"klass",
"if",
"issubclass",
"(",
"parent",
",",
"Referent",
")",
":",
"TaxonomyCell",
".",
"build_class_graph",
"(",
"modules",
",",
"parent",
",",
"graph",
")"
] | Builds up a graph of the DictCell subclass structure | [
"Builds",
"up",
"a",
"graph",
"of",
"the",
"DictCell",
"subclass",
"structure"
] | c07d22b61bebeede74a72800030dde770bf64208 | https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/referent.py#L45-L62 |
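The classmethod wires a networkx DiGraph with parent -> subclass edges for every Referent subclass it finds. A toy reconstruction of the traversal, independent of the beliefs package (class names invented):

import networkx as nx

class Referent(object):        # stand-in for beliefs' Referent base
    pass

class Thing(Referent):
    pass

class Gadget(Thing):
    pass

graph = nx.DiGraph()
for klass in (Thing, Gadget):
    for parent in klass.__bases__:
        if parent is not Referent:         # Referent itself stays out of the taxonomy
            graph.add_edge(parent.__name__, klass.__name__)

list(graph.edges())                        # [('Thing', 'Gadget')]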
250,781 | EventTeam/beliefs | src/beliefs/referent.py | Referent.cells_from_defaults | def cells_from_defaults(clz, jsonobj):
""" Creates a referent instance of type `json.kind` and
initializes it to default values.
"""
# convert strings to dicts
if isinstance(jsonobj, (str, unicode)):
jsonobj = json.loads(jsonobj)
assert 'cells' in jsonobj, "No cells in object"
domain = TaxonomyCell.get_domain()
cells = []
for num, cell_dna in enumerate(jsonobj['cells']):
assert 'kind' in cell_dna, "No type definition"
classgenerator = domain.node[cell_dna['kind']]['class']
cell = classgenerator()
cell['num'].merge(num)
for attr, val in cell_dna.items():
if not attr in ['kind']:
cell[attr].merge(val)
cells.append(cell)
return cells | python | def cells_from_defaults(clz, jsonobj):
""" Creates a referent instance of type `json.kind` and
initializes it to default values.
"""
# convert strings to dicts
if isinstance(jsonobj, (str, unicode)):
jsonobj = json.loads(jsonobj)
assert 'cells' in jsonobj, "No cells in object"
domain = TaxonomyCell.get_domain()
cells = []
for num, cell_dna in enumerate(jsonobj['cells']):
assert 'kind' in cell_dna, "No type definition"
classgenerator = domain.node[cell_dna['kind']]['class']
cell = classgenerator()
cell['num'].merge(num)
for attr, val in cell_dna.items():
if not attr in ['kind']:
cell[attr].merge(val)
cells.append(cell)
return cells | [
"def",
"cells_from_defaults",
"(",
"clz",
",",
"jsonobj",
")",
":",
"# convert strings to dicts",
"if",
"isinstance",
"(",
"jsonobj",
",",
"(",
"str",
",",
"unicode",
")",
")",
":",
"jsonobj",
"=",
"json",
".",
"loads",
"(",
"jsonobj",
")",
"assert",
"'cells'",
"in",
"jsonobj",
",",
"\"No cells in object\"",
"domain",
"=",
"TaxonomyCell",
".",
"get_domain",
"(",
")",
"cells",
"=",
"[",
"]",
"for",
"num",
",",
"cell_dna",
"in",
"enumerate",
"(",
"jsonobj",
"[",
"'cells'",
"]",
")",
":",
"assert",
"'kind'",
"in",
"cell_dna",
",",
"\"No type definition\"",
"classgenerator",
"=",
"domain",
".",
"node",
"[",
"cell_dna",
"[",
"'kind'",
"]",
"]",
"[",
"'class'",
"]",
"cell",
"=",
"classgenerator",
"(",
")",
"cell",
"[",
"'num'",
"]",
".",
"merge",
"(",
"num",
")",
"for",
"attr",
",",
"val",
"in",
"cell_dna",
".",
"items",
"(",
")",
":",
"if",
"not",
"attr",
"in",
"[",
"'kind'",
"]",
":",
"cell",
"[",
"attr",
"]",
".",
"merge",
"(",
"val",
")",
"cells",
".",
"append",
"(",
"cell",
")",
"return",
"cells"
] | Creates a referent instance of type `json.kind` and
initializes it to default values. | [
"Creates",
"a",
"referent",
"instance",
"of",
"type",
"json",
".",
"kind",
"and",
"initializes",
"it",
"to",
"default",
"values",
"."
] | c07d22b61bebeede74a72800030dde770bf64208 | https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/referent.py#L75-L96 |
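The classmethod builds one cell per entry in the cells list, looking the kind key up in the taxonomy graph and merging every other key as an attribute (num is filled in from the list position). A hypothetical payload in the shape it expects (kinds and attributes invented):

payload = '''{"cells": [
    {"kind": "Ball", "color": "red"},
    {"kind": "Cube", "size": "large"}
]}'''
# cells = SomeReferent.cells_from_defaults(payload)   # any Referent subclass works as clz
# cells[0]['num'] merges 0 and cells[0]['color'] merges "red";
# "kind" selects the class from TaxonomyCell.get_domain()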
250,782 | Ffisegydd/whatis | whatis/_iterable.py | get_element_types | def get_element_types(obj, **kwargs):
"""Get element types as a set."""
max_iterable_length = kwargs.get('max_iterable_length', 10000)
consume_generator = kwargs.get('consume_generator', False)
if not isiterable(obj):
return None
if isgenerator(obj) and not consume_generator:
return None
t = get_types(obj, **kwargs)
if not t['too_big']:
if t['types']:
return "Element types: {}".format(', '.join([extract_type(t) for t in t['types']]))
else:
return None
else:
return "Element types: {}".format(', '.join([extract_type(t) for t in t['types']])) + " (based on first {} elements.)".format(max_iterable_length) | python | def get_element_types(obj, **kwargs):
"""Get element types as a set."""
max_iterable_length = kwargs.get('max_iterable_length', 10000)
consume_generator = kwargs.get('consume_generator', False)
if not isiterable(obj):
return None
if isgenerator(obj) and not consume_generator:
return None
t = get_types(obj, **kwargs)
if not t['too_big']:
if t['types']:
return "Element types: {}".format(', '.join([extract_type(t) for t in t['types']]))
else:
return None
else:
return "Element types: {}".format(', '.join([extract_type(t) for t in t['types']])) + " (based on first {} elements.)".format(max_iterable_length) | [
"def",
"get_element_types",
"(",
"obj",
",",
"*",
"*",
"kwargs",
")",
":",
"max_iterable_length",
"=",
"kwargs",
".",
"get",
"(",
"'max_iterable_length'",
",",
"10000",
")",
"consume_generator",
"=",
"kwargs",
".",
"get",
"(",
"'consume_generator'",
",",
"False",
")",
"if",
"not",
"isiterable",
"(",
"obj",
")",
":",
"return",
"None",
"if",
"isgenerator",
"(",
"obj",
")",
"and",
"not",
"consume_generator",
":",
"return",
"None",
"t",
"=",
"get_types",
"(",
"obj",
",",
"*",
"*",
"kwargs",
")",
"if",
"not",
"t",
"[",
"'too_big'",
"]",
":",
"if",
"t",
"[",
"'types'",
"]",
":",
"return",
"\"Element types: {}\"",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"[",
"extract_type",
"(",
"t",
")",
"for",
"t",
"in",
"t",
"[",
"'types'",
"]",
"]",
")",
")",
"else",
":",
"return",
"None",
"else",
":",
"return",
"\"Element types: {}\"",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"[",
"extract_type",
"(",
"t",
")",
"for",
"t",
"in",
"t",
"[",
"'types'",
"]",
"]",
")",
")",
"+",
"\" (based on first {} elements.)\"",
".",
"format",
"(",
"max_iterable_length",
")"
] | Get element types as a set. | [
"Get",
"element",
"types",
"as",
"a",
"set",
"."
] | eef780ced61aae6d001aeeef7574e5e27e613583 | https://github.com/Ffisegydd/whatis/blob/eef780ced61aae6d001aeeef7574e5e27e613583/whatis/_iterable.py#L37-L57 |
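For small iterables the helper reports the distinct element types; generators pass through untouched unless consume_generator is set. Expected behaviour, assuming the get_types/extract_type helpers it leans on render plain type names:

get_element_types([1, 2.0, "a"])
# -> "Element types: int, float, str"   (set order may vary)
get_element_types(x for x in range(3))
# -> None, because consume_generator defaults to False
get_element_types((x for x in range(3)), consume_generator=True)
# -> "Element types: int"               (note: this consumes the generator)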
250,783 | xtrementl/focus | focus/plugin/modules/stats.py | Stats._setup_dir | def _setup_dir(self, base_dir):
""" Creates stats directory for storing stat files.
`base_dir`
Base directory.
"""
stats_dir = self._sdir(base_dir)
if not os.path.isdir(stats_dir):
try:
os.mkdir(stats_dir)
except OSError:
raise errors.DirectorySetupFail() | python | def _setup_dir(self, base_dir):
""" Creates stats directory for storing stat files.
`base_dir`
Base directory.
"""
stats_dir = self._sdir(base_dir)
if not os.path.isdir(stats_dir):
try:
os.mkdir(stats_dir)
except OSError:
raise errors.DirectorySetupFail() | [
"def",
"_setup_dir",
"(",
"self",
",",
"base_dir",
")",
":",
"stats_dir",
"=",
"self",
".",
"_sdir",
"(",
"base_dir",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"stats_dir",
")",
":",
"try",
":",
"os",
".",
"mkdir",
"(",
"stats_dir",
")",
"except",
"OSError",
":",
"raise",
"errors",
".",
"DirectorySetupFail",
"(",
")"
] | Creates stats directory for storing stat files.
`base_dir`
Base directory. | [
"Creates",
"stats",
"directory",
"for",
"storing",
"stat",
"files",
"."
] | cbbbc0b49a7409f9e0dc899de5b7e057f50838e4 | https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/plugin/modules/stats.py#L40-L52 |
250,784 | xtrementl/focus | focus/plugin/modules/stats.py | Stats._log_task | def _log_task(self, task):
""" Logs task record to file.
`task`
``Task`` instance.
"""
if not task.duration:
return
self._setup_dir(task.base_dir)
stats_dir = self._sdir(task.base_dir)
duration = task.duration
while duration > 0:
# build filename
date = (datetime.datetime.now() -
datetime.timedelta(minutes=duration))
date_str = date.strftime('%Y%m%d')
filename = os.path.join(stats_dir, '{0}.json'.format(date_str))
with open(filename, 'a+') as file_:
# fetch any existing data
try:
file_.seek(0)
data = json.loads(file_.read())
except (ValueError, OSError):
data = {}
if not task.name in data:
data[task.name] = 0
# how much total time for day
try:
total_time = sum(int(x) for x in data.values())
if total_time > MINS_IN_DAY:
total_time = MINS_IN_DAY
except ValueError:
total_time = 0
# constrain to single day
amount = duration
if amount + total_time > MINS_IN_DAY:
amount = MINS_IN_DAY - total_time
# invalid or broken state, bail
if amount <= 0:
break
data[task.name] += amount
duration -= amount
# write file
try:
file_.seek(0)
file_.truncate(0)
file_.write(json.dumps(data))
except (ValueError, OSError):
pass | python | def _log_task(self, task):
""" Logs task record to file.
`task`
``Task`` instance.
"""
if not task.duration:
return
self._setup_dir(task.base_dir)
stats_dir = self._sdir(task.base_dir)
duration = task.duration
while duration > 0:
# build filename
date = (datetime.datetime.now() -
datetime.timedelta(minutes=duration))
date_str = date.strftime('%Y%m%d')
filename = os.path.join(stats_dir, '{0}.json'.format(date_str))
with open(filename, 'a+') as file_:
# fetch any existing data
try:
file_.seek(0)
data = json.loads(file_.read())
except (ValueError, OSError):
data = {}
if not task.name in data:
data[task.name] = 0
# how much total time for day
try:
total_time = sum(int(x) for x in data.values())
if total_time > MINS_IN_DAY:
total_time = MINS_IN_DAY
except ValueError:
total_time = 0
# constrain to single day
amount = duration
if amount + total_time > MINS_IN_DAY:
amount = MINS_IN_DAY - total_time
# invalid or broken state, bail
if amount <= 0:
break
data[task.name] += amount
duration -= amount
# write file
try:
file_.seek(0)
file_.truncate(0)
file_.write(json.dumps(data))
except (ValueError, OSError):
pass | [
"def",
"_log_task",
"(",
"self",
",",
"task",
")",
":",
"if",
"not",
"task",
".",
"duration",
":",
"return",
"self",
".",
"_setup_dir",
"(",
"task",
".",
"base_dir",
")",
"stats_dir",
"=",
"self",
".",
"_sdir",
"(",
"task",
".",
"base_dir",
")",
"duration",
"=",
"task",
".",
"duration",
"while",
"duration",
">",
"0",
":",
"# build filename",
"date",
"=",
"(",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"-",
"datetime",
".",
"timedelta",
"(",
"minutes",
"=",
"duration",
")",
")",
"date_str",
"=",
"date",
".",
"strftime",
"(",
"'%Y%m%d'",
")",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"stats_dir",
",",
"'{0}.json'",
".",
"format",
"(",
"date_str",
")",
")",
"with",
"open",
"(",
"filename",
",",
"'a+'",
")",
"as",
"file_",
":",
"# fetch any existing data",
"try",
":",
"file_",
".",
"seek",
"(",
"0",
")",
"data",
"=",
"json",
".",
"loads",
"(",
"file_",
".",
"read",
"(",
")",
")",
"except",
"(",
"ValueError",
",",
"OSError",
")",
":",
"data",
"=",
"{",
"}",
"if",
"not",
"task",
".",
"name",
"in",
"data",
":",
"data",
"[",
"task",
".",
"name",
"]",
"=",
"0",
"# how much total time for day",
"try",
":",
"total_time",
"=",
"sum",
"(",
"int",
"(",
"x",
")",
"for",
"x",
"in",
"data",
".",
"values",
"(",
")",
")",
"if",
"total_time",
">",
"MINS_IN_DAY",
":",
"total_time",
"=",
"MINS_IN_DAY",
"except",
"ValueError",
":",
"total_time",
"=",
"0",
"# constrain to single day",
"amount",
"=",
"duration",
"if",
"amount",
"+",
"total_time",
">",
"MINS_IN_DAY",
":",
"amount",
"=",
"MINS_IN_DAY",
"-",
"total_time",
"# invalid or broken state, bail",
"if",
"amount",
"<=",
"0",
":",
"break",
"data",
"[",
"task",
".",
"name",
"]",
"+=",
"amount",
"duration",
"-=",
"amount",
"# write file",
"try",
":",
"file_",
".",
"seek",
"(",
"0",
")",
"file_",
".",
"truncate",
"(",
"0",
")",
"file_",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"data",
")",
")",
"except",
"(",
"ValueError",
",",
"OSError",
")",
":",
"pass"
] | Logs task record to file.
`task`
``Task`` instance. | [
"Logs",
"task",
"record",
"to",
"file",
"."
] | cbbbc0b49a7409f9e0dc899de5b7e057f50838e4 | https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/plugin/modules/stats.py#L54-L112 |
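_log_task books a task's minutes into the JSON file named for the day the task started; the while loop spills into a later day's file only once a file reaches the MINS_IN_DAY (1440 minute) cap. A hypothetical day file after two logged tasks (names and minutes invented):

# <stats_dir>/20140301.json
{"write-report": 95, "email": 20}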
250,785 | xtrementl/focus | focus/plugin/modules/stats.py | Stats._fuzzy_time_parse | def _fuzzy_time_parse(self, value):
""" Parses a fuzzy time value into a meaningful interpretation.
`value`
String value to parse.
"""
value = value.lower().strip()
today = datetime.date.today()
if value in ('today', 't'):
return today
else:
kwargs = {}
if value in ('y', 'yesterday'):
kwargs['days'] = -1
elif value in ('w', 'wk', 'week', 'last week'):
kwargs['days'] = -7
else:
# match days
match = re.match(r'(\d+)\s*(d|day|days)\s*(ago)?$', value)
if match:
kwargs['days'] = -int(match.groups(1)[0])
else:
# match weeks
match = re.match(r'(\d+)\s*(w|wk|week|weeks)\s*(ago)?$',
value)
if match:
kwargs['weeks'] = -int(match.groups(1)[0])
if kwargs:
return today + datetime.timedelta(**kwargs)
return None | python | def _fuzzy_time_parse(self, value):
""" Parses a fuzzy time value into a meaningful interpretation.
`value`
String value to parse.
"""
value = value.lower().strip()
today = datetime.date.today()
if value in ('today', 't'):
return today
else:
kwargs = {}
if value in ('y', 'yesterday'):
kwargs['days'] = -1
elif value in ('w', 'wk', 'week', 'last week'):
kwargs['days'] = -7
else:
# match days
match = re.match(r'(\d+)\s*(d|day|days)\s*(ago)?$', value)
if match:
kwargs['days'] = -int(match.groups(1)[0])
else:
# match weeks
match = re.match(r'(\d+)\s*(w|wk|week|weeks)\s*(ago)?$',
value)
if match:
kwargs['weeks'] = -int(match.groups(1)[0])
if kwargs:
return today + datetime.timedelta(**kwargs)
return None | [
"def",
"_fuzzy_time_parse",
"(",
"self",
",",
"value",
")",
":",
"value",
"=",
"value",
".",
"lower",
"(",
")",
".",
"strip",
"(",
")",
"today",
"=",
"datetime",
".",
"date",
".",
"today",
"(",
")",
"if",
"value",
"in",
"(",
"'today'",
",",
"'t'",
")",
":",
"return",
"today",
"else",
":",
"kwargs",
"=",
"{",
"}",
"if",
"value",
"in",
"(",
"'y'",
",",
"'yesterday'",
")",
":",
"kwargs",
"[",
"'days'",
"]",
"=",
"-",
"1",
"elif",
"value",
"in",
"(",
"'w'",
",",
"'wk'",
",",
"'week'",
",",
"'last week'",
")",
":",
"kwargs",
"[",
"'days'",
"]",
"=",
"-",
"7",
"else",
":",
"# match days",
"match",
"=",
"re",
".",
"match",
"(",
"r'(\\d+)\\s*(d|day|days)\\s*(ago)?$'",
",",
"value",
")",
"if",
"match",
":",
"kwargs",
"[",
"'days'",
"]",
"=",
"-",
"int",
"(",
"match",
".",
"groups",
"(",
"1",
")",
"[",
"0",
"]",
")",
"else",
":",
"# match weeks",
"match",
"=",
"re",
".",
"match",
"(",
"r'(\\d+)\\s*(w|wk|week|weeks)\\s*(ago)?$'",
",",
"value",
")",
"if",
"match",
":",
"kwargs",
"[",
"'weeks'",
"]",
"=",
"-",
"int",
"(",
"match",
".",
"groups",
"(",
"1",
")",
"[",
"0",
"]",
")",
"if",
"kwargs",
":",
"return",
"today",
"+",
"datetime",
".",
"timedelta",
"(",
"*",
"*",
"kwargs",
")",
"return",
"None"
] | Parses a fuzzy time value into a meaningful interpretation.
`value`
String value to parse. | [
"Parses",
"a",
"fuzzy",
"time",
"value",
"into",
"a",
"meaningful",
"interpretation",
"."
] | cbbbc0b49a7409f9e0dc899de5b7e057f50838e4 | https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/plugin/modules/stats.py#L114-L152 |
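The branches and regexes above admit a small fuzzy vocabulary; an illustration of inputs and the dates they resolve to, assuming `stats` is a `Stats` plugin instance and that today is 2013-05-15:

```python
# Accepted fuzzy forms (dates shown assume today is 2013-05-15):
stats._fuzzy_time_parse('today')       # datetime.date(2013, 5, 15)
stats._fuzzy_time_parse('yesterday')   # datetime.date(2013, 5, 14)
stats._fuzzy_time_parse('3 days ago')  # datetime.date(2013, 5, 12)
stats._fuzzy_time_parse('2w')          # datetime.date(2013, 5, 1)
stats._fuzzy_time_parse('gibberish')   # None, no pattern matched
```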
250,786 | xtrementl/focus | focus/plugin/modules/stats.py | Stats._get_stats | def _get_stats(self, task, start_date):
""" Fetches statistic information for given task and start range.
"""
stats = []
stats_dir = self._sdir(task.base_dir)
date = start_date
end_date = datetime.date.today()
delta = datetime.timedelta(days=1)
while date <= end_date:
date_str = date.strftime('%Y%m%d')
filename = os.path.join(stats_dir, '{0}.json'.format(date_str))
if os.path.exists(filename):
try:
# fetch stats content
with open(filename, 'r') as file_:
data = json.loads(file_.read())
# sort descending by time
stats.append((date, sorted(data.iteritems(),
key=lambda x: x[1],
reverse=True)))
except (json.JSONDecodeError, OSError):
pass
date += delta # next day
return stats | python | def _get_stats(self, task, start_date):
""" Fetches statistic information for given task and start range.
"""
stats = []
stats_dir = self._sdir(task.base_dir)
date = start_date
end_date = datetime.date.today()
delta = datetime.timedelta(days=1)
while date <= end_date:
date_str = date.strftime('%Y%m%d')
filename = os.path.join(stats_dir, '{0}.json'.format(date_str))
if os.path.exists(filename):
try:
# fetch stats content
with open(filename, 'r') as file_:
data = json.loads(file_.read())
# sort descending by time
stats.append((date, sorted(data.iteritems(),
key=lambda x: x[1],
reverse=True)))
except (json.JSONDecodeError, OSError):
pass
date += delta # next day
return stats | [
"def",
"_get_stats",
"(",
"self",
",",
"task",
",",
"start_date",
")",
":",
"stats",
"=",
"[",
"]",
"stats_dir",
"=",
"self",
".",
"_sdir",
"(",
"task",
".",
"base_dir",
")",
"date",
"=",
"start_date",
"end_date",
"=",
"datetime",
".",
"date",
".",
"today",
"(",
")",
"delta",
"=",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"1",
")",
"while",
"date",
"<=",
"end_date",
":",
"date_str",
"=",
"date",
".",
"strftime",
"(",
"'%Y%m%d'",
")",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"stats_dir",
",",
"'{0}.json'",
".",
"format",
"(",
"date_str",
")",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"try",
":",
"# fetch stats content",
"with",
"open",
"(",
"filename",
",",
"'r'",
")",
"as",
"file_",
":",
"data",
"=",
"json",
".",
"loads",
"(",
"file_",
".",
"read",
"(",
")",
")",
"# sort descending by time",
"stats",
".",
"append",
"(",
"(",
"date",
",",
"sorted",
"(",
"data",
".",
"iteritems",
"(",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"1",
"]",
",",
"reverse",
"=",
"True",
")",
")",
")",
"except",
"(",
"json",
".",
"JSONDecodeError",
",",
"OSError",
")",
":",
"pass",
"date",
"+=",
"delta",
"# next day",
"return",
"stats"
] | Fetches statistic information for given task and start range. | [
"Fetches",
"statistic",
"information",
"for",
"given",
"task",
"and",
"start",
"range",
"."
] | cbbbc0b49a7409f9e0dc899de5b7e057f50838e4 | https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/plugin/modules/stats.py#L154-L184 |
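The return shape follows from the append above: one `(date, tasks)` pair per day file found between `start_date` and today, with each day's tasks sorted by minutes in descending order. An illustrative value (data made up):

```python
import datetime

# Illustrative _get_stats return value; tasks within a day are ordered by
# minutes, descending, exactly as the sorted(..., reverse=True) call does.
stats = [
    (datetime.date(2013, 5, 1), [('deep-work', 90), ('email', 25)]),
    (datetime.date(2013, 5, 2), [('review', 45)]),
]
for date, tasks in stats:
    print(date, tasks)
```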
250,787 | xtrementl/focus | focus/plugin/modules/stats.py | Stats._print_stats | def _print_stats(self, env, stats):
""" Prints statistic information using io stream.
`env`
``Environment`` object.
`stats`
Tuple of task stats for each date.
"""
def _format_time(mins):
""" Generates formatted time string.
"""
mins = int(mins)
if mins < MINS_IN_HOUR:
time_str = '0:{0:02}'.format(mins)
else:
hours = mins // MINS_IN_HOUR
mins %= MINS_IN_HOUR
if mins > 0:
time_str = '{0}:{1:02}'.format(hours, mins)
else:
time_str = '{0}'.format(hours)
return time_str
if not stats:
env.io.write('No stats found.')
return
for date, tasks in stats:
env.io.write('')
total_mins = float(sum(v[1] for v in tasks))
env.io.write('[ {0} ]'.format(date.strftime('%Y-%m-%d')))
env.io.write('')
for name, mins in tasks:
# format time
time_str = _format_time(mins)
# generate stat line
line = ' {0:>5}'.format(time_str)
line += ' ({0:2.0f}%) - '.format(mins * 100.0 / total_mins)
if len(name) > 55:
name = name[:55] + '...'
line += name
env.io.write(line)
# generate total line
env.io.write('_' * len(line))
time_str = _format_time(total_mins)
env.io.write(' {0:>5} (total)'.format(time_str))
env.io.write('') | python | def _print_stats(self, env, stats):
""" Prints statistic information using io stream.
`env`
``Environment`` object.
`stats`
Tuple of task stats for each date.
"""
def _format_time(mins):
""" Generates formatted time string.
"""
mins = int(mins)
if mins < MINS_IN_HOUR:
time_str = '0:{0:02}'.format(mins)
else:
hours = mins // MINS_IN_HOUR
mins %= MINS_IN_HOUR
if mins > 0:
time_str = '{0}:{1:02}'.format(hours, mins)
else:
time_str = '{0}'.format(hours)
return time_str
if not stats:
env.io.write('No stats found.')
return
for date, tasks in stats:
env.io.write('')
total_mins = float(sum(v[1] for v in tasks))
env.io.write('[ {0} ]'.format(date.strftime('%Y-%m-%d')))
env.io.write('')
for name, mins in tasks:
# format time
time_str = _format_time(mins)
# generate stat line
line = ' {0:>5}'.format(time_str)
line += ' ({0:2.0f}%) - '.format(mins * 100.0 / total_mins)
if len(name) > 55:
name = name[:55] + '...'
line += name
env.io.write(line)
# generate total line
env.io.write('_' * len(line))
time_str = _format_time(total_mins)
env.io.write(' {0:>5} (total)'.format(time_str))
env.io.write('') | [
"def",
"_print_stats",
"(",
"self",
",",
"env",
",",
"stats",
")",
":",
"def",
"_format_time",
"(",
"mins",
")",
":",
"\"\"\" Generates formatted time string.\n \"\"\"",
"mins",
"=",
"int",
"(",
"mins",
")",
"if",
"mins",
"<",
"MINS_IN_HOUR",
":",
"time_str",
"=",
"'0:{0:02}'",
".",
"format",
"(",
"mins",
")",
"else",
":",
"hours",
"=",
"mins",
"//",
"MINS_IN_HOUR",
"mins",
"%=",
"MINS_IN_HOUR",
"if",
"mins",
">",
"0",
":",
"time_str",
"=",
"'{0}:{1:02}'",
".",
"format",
"(",
"hours",
",",
"mins",
")",
"else",
":",
"time_str",
"=",
"'{0}'",
".",
"format",
"(",
"hours",
")",
"return",
"time_str",
"if",
"not",
"stats",
":",
"env",
".",
"io",
".",
"write",
"(",
"'No stats found.'",
")",
"return",
"for",
"date",
",",
"tasks",
"in",
"stats",
":",
"env",
".",
"io",
".",
"write",
"(",
"''",
")",
"total_mins",
"=",
"float",
"(",
"sum",
"(",
"v",
"[",
"1",
"]",
"for",
"v",
"in",
"tasks",
")",
")",
"env",
".",
"io",
".",
"write",
"(",
"'[ {0} ]'",
".",
"format",
"(",
"date",
".",
"strftime",
"(",
"'%Y-%m-%d'",
")",
")",
")",
"env",
".",
"io",
".",
"write",
"(",
"''",
")",
"for",
"name",
",",
"mins",
"in",
"tasks",
":",
"# format time",
"time_str",
"=",
"_format_time",
"(",
"mins",
")",
"# generate stat line",
"line",
"=",
"' {0:>5}'",
".",
"format",
"(",
"time_str",
")",
"line",
"+=",
"' ({0:2.0f}%) - '",
".",
"format",
"(",
"mins",
"*",
"100.0",
"/",
"total_mins",
")",
"if",
"len",
"(",
"name",
")",
">",
"55",
":",
"name",
"=",
"name",
"[",
":",
"55",
"]",
"+",
"'...'",
"line",
"+=",
"name",
"env",
".",
"io",
".",
"write",
"(",
"line",
")",
"# generate total line",
"env",
".",
"io",
".",
"write",
"(",
"'_'",
"*",
"len",
"(",
"line",
")",
")",
"time_str",
"=",
"_format_time",
"(",
"total_mins",
")",
"env",
".",
"io",
".",
"write",
"(",
"' {0:>5} (total)'",
".",
"format",
"(",
"time_str",
")",
")",
"env",
".",
"io",
".",
"write",
"(",
"''",
")"
] | Prints statistic information using io stream.
`env`
``Environment`` object.
`stats`
Tuple of task stats for each date. | [
"Prints",
"statistic",
"information",
"using",
"io",
"stream",
"."
] | cbbbc0b49a7409f9e0dc899de5b7e057f50838e4 | https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/plugin/modules/stats.py#L186-L239 |
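The nested `_format_time` helper renders minutes as `H:MM` and collapses to bare hours on exact multiples of sixty. A standalone restatement with spot checks, assuming the module constant `MINS_IN_HOUR` is 60 as its name and usage suggest:

```python
MINS_IN_HOUR = 60  # assumed value of the module-level constant

def format_time(mins):
    # Standalone restatement of the nested _format_time helper above.
    mins = int(mins)
    if mins < MINS_IN_HOUR:
        return '0:{0:02}'.format(mins)
    hours, mins = divmod(mins, MINS_IN_HOUR)
    return '{0}:{1:02}'.format(hours, mins) if mins > 0 else '{0}'.format(hours)

assert format_time(5) == '0:05'
assert format_time(65) == '1:05'
assert format_time(120) == '2'  # exact hours drop the minutes part
```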
250,788 | rich-pixley/rain | rain/__init__.py | WorkSpace.create | def create(self):
"""called to create the work space"""
self.logger.log(logging.DEBUG, 'os.mkdir %s', self.name)
os.mkdir(self.name) | python | def create(self):
"""called to create the work space"""
self.logger.log(logging.DEBUG, 'os.mkdir %s', self.name)
os.mkdir(self.name) | [
"def",
"create",
"(",
"self",
")",
":",
"self",
".",
"logger",
".",
"log",
"(",
"logging",
".",
"DEBUG",
",",
"'os.mkdir %s'",
",",
"self",
".",
"name",
")",
"os",
".",
"mkdir",
"(",
"self",
".",
"name",
")"
] | called to create the work space | [
"called",
"to",
"create",
"the",
"work",
"space"
] | ed95aafc73002fbf0466be9a5eaa1e6ed3990a6d | https://github.com/rich-pixley/rain/blob/ed95aafc73002fbf0466be9a5eaa1e6ed3990a6d/rain/__init__.py#L40-L43 |
250,789 | ryanjdillon/pyotelem | pyotelem/physio_seal.py | bodycomp | def bodycomp(mass, tbw, method='reilly', simulate=False, n_rand=1000):
'''Create dataframe with derived body composition values
Args
----
mass: ndarray
Mass of the seal (kg)
tbw: ndarray
Total body water (kg)
method: str
name of method used to derive composition values
simulate: bool
switch for generating values with random noise
n_rand: int
number of density values to simulate
Returns
-------
bc: pandas.DataFrame
dataframe containing columns for each body composition value
References
----------
Reilly, J.J., Fedak, M.A., 1990. Measurement of the body composition of
living gray seals by hydrogen isotope dilution. Journal of Applied
Physiology 69, 885–891.
Gales, R., Renouf, D., Noseworthy, E., 1994. Body composition of harp
seals. Canadian journal of zoology 72, 545–551.
'''
import numpy
import pandas
if len(mass) != len(tbw):
raise SystemError('`mass` and `tbw` arrays must be the same length')
bc = pandas.DataFrame(index=range(len(mass)))
rnorm = lambda n, mu, sigma: numpy.random.normal(mu, sigma, n)
if method == 'reilly':
if simulate is True:
bc['ptbw'] = 100 * (tbw / mass)
bc['ptbf'] = 105.1 - (1.47 * bc['ptbw']) + rnorm(n_rand, 0, 1.1)
bc['ptbp'] = (0.42 * bc['ptbw']) - 4.75 + rnorm(n_rand, 0, 0.8)
bc['tbf'] = mass * (bc['ptbf'] / 100)
bc['tbp'] = mass * (bc['ptbp'] / 100)
bc['tba'] = 0.1 - (0.008 * mass) + \
(0.05 * tbw) + rnorm(0, 0.3, n_rand)
bc['tbge'] = (40.8 * mass) - (48.5 * tbw) - \
0.4 + rnorm(0, 17.2, n_rand)
else:
bc['ptbw'] = 100 * (tbw / mass)
bc['ptbf'] = 105.1 - (1.47 * bc['ptbw'])
bc['ptbp'] = (0.42 * bc['ptbw']) - 4.75
bc['tbf'] = mass * (bc['ptbf'] / 100)
bc['tbp'] = mass * (bc['ptbp'] / 100)
bc['tba'] = 0.1 - (0.008 * mass) + (0.05 * tbw)
bc['tbge'] = (40.8 * mass) - (48.5 * tbw) - 0.4
elif method == 'gales':
if simulate is True:
raise ValueError('Random error simulation is currently only '
'implemented for `method` `reilly`. `simulate` must be passed '
'as `False` when using `method` `gales`.')
else:
bc['ptbw'] = 100 * (tbw / mass)
bc['tbf'] = mass - (1.37 * tbw)
bc['tbp'] = 0.27 * (mass - bc['tbf'])
bc['tbge'] = (40.8 * mass) - (48.5 * tbw) - 0.4
bc['ptbf'] = 100 * (bc['tbf'] / mass)
bc['ptbp'] = 100 * (bc['tbp'] / mass)
else:
raise ValueError('`method` must be either `reilly` or `gales`, not '
'`{}`'.format(method))
return bc | python | def bodycomp(mass, tbw, method='reilly', simulate=False, n_rand=1000):
'''Create dataframe with derived body composition values
Args
----
mass: ndarray
Mass of the seal (kg)
tbw: ndarray
Total body water (kg)
method: str
name of method used to derive composition values
simulate: bool
switch for generating values with random noise
n_rand: int
number of density values to simulate
Returns
-------
bc: pandas.DataFrame
dataframe containing columns for each body composition value
References
----------
Reilly, J.J., Fedak, M.A., 1990. Measurement of the body composition of
living gray seals by hydrogen isotope dilution. Journal of Applied
Physiology 69, 885–891.
Gales, R., Renouf, D., Noseworthy, E., 1994. Body composition of harp
seals. Canadian journal of zoology 72, 545–551.
'''
import numpy
import pandas
if len(mass) != len(tbw):
raise SystemError('`mass` and `tbw` arrays must be the same length')
bc = pandas.DataFrame(index=range(len(mass)))
rnorm = lambda n, mu, sigma: numpy.random.normal(mu, sigma, n)
if method == 'reilly':
if simulate is True:
bc['ptbw'] = 100 * (tbw / mass)
bc['ptbf'] = 105.1 - (1.47 * bc['ptbw']) + rnorm(n_rand, 0, 1.1)
bc['ptbp'] = (0.42 * bc['ptbw']) - 4.75 + rnorm(n_rand, 0, 0.8)
bc['tbf'] = mass * (bc['ptbf'] / 100)
bc['tbp'] = mass * (bc['ptbp'] / 100)
bc['tba'] = 0.1 - (0.008 * mass) + \
(0.05 * tbw) + rnorm(0, 0.3, n_rand)
bc['tbge'] = (40.8 * mass) - (48.5 * tbw) - \
0.4 + rnorm(0, 17.2, n_rand)
else:
bc['ptbw'] = 100 * (tbw / mass)
bc['ptbf'] = 105.1 - (1.47 * bc['ptbw'])
bc['ptbp'] = (0.42 * bc['ptbw']) - 4.75
bc['tbf'] = mass * (bc['ptbf'] / 100)
bc['tbp'] = mass * (bc['ptbp'] / 100)
bc['tba'] = 0.1 - (0.008 * mass) + (0.05 * tbw)
bc['tbge'] = (40.8 * mass) - (48.5 * tbw) - 0.4
elif method == 'gales':
if simulate is True:
raise ValueError('Random error simulation is currently only '
'implemented for `method` `reilly`. `simulate` must be passed '
'as `False` when using `method` `gales`.')
else:
bc['ptbw'] = 100 * (tbw / mass)
bc['tbf'] = mass - (1.37 * tbw)
bc['tbp'] = 0.27 * (mass - bc['tbf'])
bc['tbge'] = (40.8 * mass) - (48.5 * tbw) - 0.4
bc['ptbf'] = 100 * (bc['tbf'] / mass)
bc['ptbp'] = 100 * (bc['tbp'] / mass)
else:
raise ValueError('`method` must be either `reilly` or `gales`, not '
'`{}`'.format(method))
return bc | [
"def",
"bodycomp",
"(",
"mass",
",",
"tbw",
",",
"method",
"=",
"'reilly'",
",",
"simulate",
"=",
"False",
",",
"n_rand",
"=",
"1000",
")",
":",
"import",
"numpy",
"import",
"pandas",
"if",
"len",
"(",
"mass",
")",
"!=",
"len",
"(",
"tbw",
")",
":",
"raise",
"SystemError",
"(",
"'`mass` and `tbw` arrays must be the same length'",
")",
"bc",
"=",
"pandas",
".",
"DataFrame",
"(",
"index",
"=",
"range",
"(",
"len",
"(",
"mass",
")",
")",
")",
"rnorm",
"=",
"lambda",
"n",
",",
"mu",
",",
"sigma",
":",
"numpy",
".",
"random",
".",
"normal",
"(",
"mu",
",",
"sigma",
",",
"n",
")",
"if",
"method",
"==",
"'reilly'",
":",
"if",
"simulate",
"is",
"True",
":",
"bc",
"[",
"'ptbw'",
"]",
"=",
"100",
"*",
"(",
"tbw",
"/",
"mass",
")",
"bc",
"[",
"'ptbf'",
"]",
"=",
"105.1",
"-",
"(",
"1.47",
"*",
"bc",
"[",
"'ptbw'",
"]",
")",
"+",
"rnorm",
"(",
"n_rand",
",",
"0",
",",
"1.1",
")",
"bc",
"[",
"'ptbp'",
"]",
"=",
"(",
"0.42",
"*",
"bc",
"[",
"'ptbw'",
"]",
")",
"-",
"4.75",
"+",
"rnorm",
"(",
"n_rand",
",",
"0",
",",
"0.8",
")",
"bc",
"[",
"'tbf'",
"]",
"=",
"mass",
"*",
"(",
"bc",
"[",
"'ptbf'",
"]",
"/",
"100",
")",
"bc",
"[",
"'tbp'",
"]",
"=",
"mass",
"*",
"(",
"bc",
"[",
"'ptbp'",
"]",
"/",
"100",
")",
"bc",
"[",
"'tba'",
"]",
"=",
"0.1",
"-",
"(",
"0.008",
"*",
"mass",
")",
"+",
"(",
"0.05",
"*",
"tbw",
")",
"+",
"rnorm",
"(",
"0",
",",
"0.3",
",",
"n_rand",
")",
"bc",
"[",
"'tbge'",
"]",
"=",
"(",
"40.8",
"*",
"mass",
")",
"-",
"(",
"48.5",
"*",
"tbw",
")",
"-",
"0.4",
"+",
"rnorm",
"(",
"0",
",",
"17.2",
",",
"n_rand",
")",
"else",
":",
"bc",
"[",
"'ptbw'",
"]",
"=",
"100",
"*",
"(",
"tbw",
"/",
"mass",
")",
"bc",
"[",
"'ptbf'",
"]",
"=",
"105.1",
"-",
"(",
"1.47",
"*",
"bc",
"[",
"'ptbw'",
"]",
")",
"bc",
"[",
"'ptbp'",
"]",
"=",
"(",
"0.42",
"*",
"bc",
"[",
"'ptbw'",
"]",
")",
"-",
"4.75",
"bc",
"[",
"'tbf'",
"]",
"=",
"mass",
"*",
"(",
"bc",
"[",
"'ptbf'",
"]",
"/",
"100",
")",
"bc",
"[",
"'tbp'",
"]",
"=",
"mass",
"*",
"(",
"bc",
"[",
"'ptbp'",
"]",
"/",
"100",
")",
"bc",
"[",
"'tba'",
"]",
"=",
"0.1",
"-",
"(",
"0.008",
"*",
"mass",
")",
"+",
"(",
"0.05",
"*",
"tbw",
")",
"bc",
"[",
"'tbge'",
"]",
"=",
"(",
"40.8",
"*",
"mass",
")",
"-",
"(",
"48.5",
"*",
"tbw",
")",
"-",
"0.4",
"elif",
"method",
"==",
"'gales'",
":",
"if",
"simulate",
"is",
"True",
":",
"raise",
"ValueError",
"(",
"'Random error simulation is currently only '",
"'implemented for `method` `reilly`. `simulate` must be passed '",
"'as `False` when using `method` `gales`.'",
")",
"else",
":",
"bc",
"[",
"'ptbw'",
"]",
"=",
"100",
"*",
"(",
"tbw",
"/",
"mass",
")",
"bc",
"[",
"'tbf'",
"]",
"=",
"mass",
"-",
"(",
"1.37",
"*",
"tbw",
")",
"bc",
"[",
"'tbp'",
"]",
"=",
"0.27",
"*",
"(",
"mass",
"-",
"bc",
"[",
"'tbf'",
"]",
")",
"bc",
"[",
"'tbge'",
"]",
"=",
"(",
"40.8",
"*",
"mass",
")",
"-",
"(",
"48.5",
"*",
"tbw",
")",
"-",
"0.4",
"bc",
"[",
"'ptbf'",
"]",
"=",
"100",
"*",
"(",
"bc",
"[",
"'tbf'",
"]",
"/",
"mass",
")",
"bc",
"[",
"'ptbp'",
"]",
"=",
"100",
"*",
"(",
"bc",
"[",
"'tbp'",
"]",
"/",
"mass",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'`method` must be either `reilly` or `gales`, not '",
"'`{}`'",
".",
"format",
"(",
"method",
")",
")",
"return",
"bc"
] | Create dataframe with derived body composition values
Args
----
mass: ndarray
Mass of the seal (kg)
tbw: ndarray
Total body water (kg)
method: str
name of method used to derive composition values
simulate: bool
switch for generating values with random noise
n_rand: int
number of density values to simulate
Returns
-------
bc: pandas.DataFrame
dataframe containing columns for each body composition value
References
----------
Reilly, J.J., Fedak, M.A., 1990. Measurement of the body composition of
living gray seals by hydrogen isotope dilution. Journal of Applied
Physiology 69, 885–891.
Gales, R., Renouf, D., Noseworthy, E., 1994. Body composition of harp
seals. Canadian journal of zoology 72, 545–551. | [
"Create",
"dataframe",
"with",
"derived",
"body",
"composition",
"values"
] | 816563a9c3feb3fa416f1c2921c6b75db34111ad | https://github.com/ryanjdillon/pyotelem/blob/816563a9c3feb3fa416f1c2921c6b75db34111ad/pyotelem/physio_seal.py#L49-L124 |
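A minimal call for the record above, assuming the `pyotelem` package is installed; masses and water values are illustrative:

```python
import numpy
from pyotelem.physio_seal import bodycomp  # assumes pyotelem is installed

mass = numpy.array([85.0, 120.0])  # body mass (kg), illustrative values
tbw = numpy.array([51.0, 70.0])    # total body water (kg), illustrative

# Deterministic Reilly & Fedak (1990) composition, no simulated noise
bc = bodycomp(mass, tbw, method='reilly', simulate=False)
print(bc[['ptbw', 'ptbf', 'ptbp', 'tbge']])
```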
250,790 | ryanjdillon/pyotelem | pyotelem/physio_seal.py | perc_bc_from_lipid | def perc_bc_from_lipid(perc_lipid, perc_water=None):
'''Calculate body composition component percentages based on % lipid
Calculation of percent protein and percent ash are based on those presented
in Reilly and Fedak (1990).
Args
----
perc_lipid: float or ndarray
1D array of percent lipid values from which to calculate body composition
perc_water: float or ndarray
1D array of percent water values from which to calculate body
composition (Default `None`). If no values are passed, calculations are
performed with values from Biuw et al. (2003).
Returns
-------
perc_water: float or ndarray
1D array of percent water values
perc_protein: float or ndarray
1D array of percent protein values
perc_ash: float or ndarray
1D array of percent ash values
References
----------
Biuw, M., 2003. Blubber and buoyancy: monitoring the body condition of
free-ranging seals using simple dive characteristics. Journal of
Experimental Biology 206, 3405–3423. doi:10.1242/jeb.00583
Reilly, J.J., Fedak, M.A., 1990. Measurement of the body composition of
living gray seals by hydrogen isotope dilution. Journal of Applied
Physiology 69, 885–891.
'''
import numpy
# Cast iterables to numpy arrays
if numpy.iterable(perc_lipid):
perc_lipid = numpy.asarray(perc_lipid)
if numpy.iterable(perc_water):
perc_water = numpy.asarray(perc_water)
if not perc_water:
# TODO check where `perc_water` values come from
perc_water = 71.4966 - (0.6802721 * perc_lipid)
perc_protein = (0.42 * perc_water) - 4.75
perc_ash = 100 - (perc_lipid + perc_water + perc_protein)
return perc_water, perc_protein, perc_ash | python | def perc_bc_from_lipid(perc_lipid, perc_water=None):
'''Calculate body composition component percentages based on % lipid
Calculation of percent protein and percent ash are based on those presented
in Reilly and Fedak (1990).
Args
----
perc_lipid: float or ndarray
1D array of percent lipid values from which to calculate body composition
perc_water: float or ndarray
1D array of percent water values from which to calculate body
composition (Default `None`). If no values are passed, calculations are
performed with values from Biuw et al. (2003).
Returns
-------
perc_water: float or ndarray
1D array of percent water values
perc_protein: float or ndarray
1D array of percent protein values
perc_ash: float or ndarray
1D array of percent ash values
References
----------
Biuw, M., 2003. Blubber and buoyancy: monitoring the body condition of
free-ranging seals using simple dive characteristics. Journal of
Experimental Biology 206, 3405–3423. doi:10.1242/jeb.00583
Reilly, J.J., Fedak, M.A., 1990. Measurement of the body composition of
living gray seals by hydrogen isotope dilution. Journal of Applied
Physiology 69, 885–891.
'''
import numpy
# Cast iterables to numpy arrays
if numpy.iterable(perc_lipid):
perc_lipid = numpy.asarray(perc_lipid)
if numpy.iterable(perc_water):
perc_water = numpy.asarray(perc_water)
if not perc_water:
# TODO check where `perc_water` values come from
perc_water = 71.4966 - (0.6802721 * perc_lipid)
perc_protein = (0.42 * perc_water) - 4.75
perc_ash = 100 - (perc_lipid + perc_water + perc_protein)
return perc_water, perc_protein, perc_ash | [
"def",
"perc_bc_from_lipid",
"(",
"perc_lipid",
",",
"perc_water",
"=",
"None",
")",
":",
"import",
"numpy",
"# Cast iterables to numpy arrays",
"if",
"numpy",
".",
"iterable",
"(",
"perc_lipid",
")",
":",
"perc_lipid",
"=",
"numpy",
".",
"asarray",
"(",
"perc_lipid",
")",
"if",
"numpy",
".",
"iterable",
"(",
"perc_water",
")",
":",
"perc_water",
"=",
"numpy",
".",
"asarray",
"(",
"perc_water",
")",
"if",
"not",
"perc_water",
":",
"# TODO check where `perc_water` values come from",
"perc_water",
"=",
"71.4966",
"-",
"(",
"0.6802721",
"*",
"perc_lipid",
")",
"perc_protein",
"=",
"(",
"0.42",
"*",
"perc_water",
")",
"-",
"4.75",
"perc_ash",
"=",
"100",
"-",
"(",
"perc_lipid",
"+",
"perc_water",
"+",
"perc_protein",
")",
"return",
"perc_water",
",",
"perc_protein",
",",
"perc_ash"
] | Calculate body composition component percentages based on % lipid
Calculation of percent protein and percent ash are based on those presented
in Reilly and Fedak (1990).
Args
----
perc_lipid: float or ndarray
1D array of percent lipid values from which to calculate body composition
perc_water: float or ndarray
1D array of percent water values from which to calculate body
composition (Default `None`). If no values are passed, calculations are
performed with values from Biuw et al. (2003).
Returns
-------
perc_water: float or ndarray
1D array of percent water values
perc_protein: float or ndarray
1D array of percent protein values
perc_ash: float or ndarray
1D array of percent ash values
References
----------
Biuw, M., 2003. Blubber and buoyancy: monitoring the body condition of
free-ranging seals using simple dive characteristics. Journal of
Experimental Biology 206, 3405–3423. doi:10.1242/jeb.00583
Reilly, J.J., Fedak, M.A., 1990. Measurement of the body composition of
living gray seals by hydrogen isotope dilution. Journal of Applied
Physiology 69, 885–891. | [
"Calculate",
"body",
"composition",
"component",
"percentages",
"based",
"on",
"%",
"lipid"
] | 816563a9c3feb3fa416f1c2921c6b75db34111ad | https://github.com/ryanjdillon/pyotelem/blob/816563a9c3feb3fa416f1c2921c6b75db34111ad/pyotelem/physio_seal.py#L127-L176 |
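A worked call, again assuming `pyotelem` is installed: with no `perc_water` supplied, the water fraction comes from the Biuw et al. (2003) regression, so 30% lipid gives roughly 51.1% water, 16.7% protein, and 2.2% ash:

```python
from pyotelem.physio_seal import perc_bc_from_lipid  # assumes pyotelem installed

# perc_water defaults to 71.4966 - 0.6802721 * perc_lipid when not given
water, protein, ash = perc_bc_from_lipid(30.0)
print(water, protein, ash)  # ~51.1, ~16.7, ~2.2 (percent)
```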
250,791 | ryanjdillon/pyotelem | pyotelem/physio_seal.py | lip2dens | def lip2dens(perc_lipid, dens_lipid=0.9007, dens_prot=1.34, dens_water=0.994,
dens_ash=2.3):
'''Derive tissue density from lipids
The equation calculating animal density is from Biuw et al. (2003), and
default values for component densities are from human studies collected in
the book by Moore et al. (1963).
Args
----
perc_lipid: float or ndarray
Percent lipid of body composition
dens_lipid: float
Density of lipid in animal (Default 0.9007 g/cm^3)
dens_prot: float
Density of protein in animal (Default 1.34 g/cm^3)
dens_water: float
Density of water in animal (Default 0.994 g/cm^3)
dens_ash: float
Density of ash in animal (Default 2.3 g/cm^3)
Returns
-------
dens_gcm3: float or ndarray
Density of seal calculated from percent compositions and densities of
components from Moore et al. (1963)
References
----------
Biuw, M., 2003. Blubber and buoyancy: monitoring the body condition of
free-ranging seals using simple dive characteristics. Journal of
Experimental Biology 206, 3405–3423. doi:10.1242/jeb.00583
Moore FD, Oleson KH, McMurrery JD, Parker HV, Ball MR, Boyden CM. The Body
Cell Mass and Its Supporting Environment - The Composition in Health and
Disease. Philadelphia: W.B. Saunders Company; 1963. 535 p.
ISBN:0-7216-6480-6
'''
import numpy
# Cast iterables to numpy array
if numpy.iterable(perc_lipid):
perc_lipid = numpy.asarray(perc_lipid)
perc_water, perc_protein, perc_ash = perc_bc_from_lipid(perc_lipid)
dens_gcm3 = (dens_lipid * (0.01 * perc_lipid)) + \
(dens_prot * (0.01 * perc_protein)) + \
(dens_water * (0.01 * perc_water)) + \
(dens_ash * (0.01 * perc_ash))
return dens_gcm3 | python | def lip2dens(perc_lipid, dens_lipid=0.9007, dens_prot=1.34, dens_water=0.994,
dens_ash=2.3):
'''Derive tissue density from lipids
The equation calculating animal density is from Biuw et al. (2003), and
default values for component densities are from human studies collected in
the book by Moore et al. (1963).
Args
----
perc_lipid: float or ndarray
Percent lipid of body composition
dens_lipid: float
Density of lipid in animal (Default 0.9007 g/cm^3)
dens_prot: float
Density of protein in animal (Default 1.34 g/cm^3)
dens_water: float
Density of water in animal (Default 0.994 g/cm^3)
dens_ash: float
Density of ash in animal (Default 2.3 g/cm^3)
Returns
-------
dens_gcm3: float or ndarray
Density of seal calculated from percent compositions and densities of
components from Moore et al. (1963)
References
----------
Biuw, M., 2003. Blubber and buoyancy: monitoring the body condition of
free-ranging seals using simple dive characteristics. Journal of
Experimental Biology 206, 3405–3423. doi:10.1242/jeb.00583
Moore FD, Oleson KH, McMurrery JD, Parker HV, Ball MR, Boyden CM. The Body
Cell Mass and Its Supporting Environment - The Composition in Health and
Disease. Philadelphia: W.B. Saunders Company; 1963. 535 p.
ISBN:0-7216-6480-6
'''
import numpy
# Cast iterables to numpy array
if numpy.iterable(perc_lipid):
perc_lipid = numpy.asarray(perc_lipid)
perc_water, perc_protein, perc_ash = perc_bc_from_lipid(perc_lipid)
dens_gcm3 = (dens_lipid * (0.01 * perc_lipid)) + \
(dens_prot * (0.01 * perc_protein)) + \
(dens_water * (0.01 * perc_water)) + \
(dens_ash * (0.01 * perc_ash))
return dens_gcm3 | [
"def",
"lip2dens",
"(",
"perc_lipid",
",",
"dens_lipid",
"=",
"0.9007",
",",
"dens_prot",
"=",
"1.34",
",",
"dens_water",
"=",
"0.994",
",",
"dens_ash",
"=",
"2.3",
")",
":",
"import",
"numpy",
"# Cast iterables to numpy array",
"if",
"numpy",
".",
"iterable",
"(",
"perc_lipid",
")",
":",
"perc_lipid",
"=",
"numpy",
".",
"asarray",
"(",
"perc_lipid",
")",
"perc_water",
",",
"perc_protein",
",",
"perc_ash",
"=",
"perc_bc_from_lipid",
"(",
"perc_lipid",
")",
"dens_gcm3",
"=",
"(",
"dens_lipid",
"*",
"(",
"0.01",
"*",
"perc_lipid",
")",
")",
"+",
"(",
"dens_prot",
"*",
"(",
"0.01",
"*",
"perc_protein",
")",
")",
"+",
"(",
"dens_water",
"*",
"(",
"0.01",
"*",
"perc_water",
")",
")",
"+",
"(",
"dens_ash",
"*",
"(",
"0.01",
"*",
"perc_ash",
")",
")",
"return",
"dens_gcm3"
] | Derive tissue density from lipids
The equation calculating animal density is from Biuw et al. (2003), and
default values for component densities are from human studies collected in
the book by Moore et al. (1963).
Args
----
perc_lipid: float or ndarray
Percent lipid of body composition
dens_lipid: float
Density of lipid in animal (Default 0.9007 g/cm^3)
dens_prot: float
Density of protein in animal (Default 1.34 g/cm^3)
dens_water: float
Density of water in animal (Default 0.994 g/cm^3)
dens_ash: float
Density of ash in animal (Default 2.3 g/cm^3)
Returns
-------
dens_gcm3: float or ndarray
Density of seal calculated from percent compositions and densities of
components from Moore et al. (1963)
References
----------
Biuw, M., 2003. Blubber and buoyancy: monitoring the body condition of
free-ranging seals using simple dive characteristics. Journal of
Experimental Biology 206, 3405–3423. doi:10.1242/jeb.00583
Moore FD, Oleson KH, McMurrery JD, Parker HV, Ball MR, Boyden CM. The Body
Cell Mass and Its Supporting Environment - The Composition in Health and
Disease. Philadelphia: W.B. Saunders Company; 1963. 535 p.
ISBN:0-7216-6480-6 | [
"Derive",
"tissue",
"density",
"from",
"lipids"
] | 816563a9c3feb3fa416f1c2921c6b75db34111ad | https://github.com/ryanjdillon/pyotelem/blob/816563a9c3feb3fa416f1c2921c6b75db34111ad/pyotelem/physio_seal.py#L203-L254 |
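The body density is the percent-weighted sum of component densities; at 30% lipid the terms work out to about 0.9007*0.30 + 1.34*0.167 + 0.994*0.511 + 2.3*0.022, roughly 1.05 g/cm^3. A sketch assuming `pyotelem` is installed:

```python
from pyotelem.physio_seal import lip2dens  # assumes pyotelem installed

print(lip2dens(30.0))  # ~1.05 g/cm^3 at default component densities
```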
250,792 | ryanjdillon/pyotelem | pyotelem/physio_seal.py | dens2lip | def dens2lip(dens_gcm3, dens_lipid=0.9007, dens_prot=1.34, dens_water=0.994,
dens_ash=2.3):
'''Get percent composition of animal from body density
The equation calculating animal density is from Biuw et al. (2003), and
default values for component densities are from human studies collected in
the book by Moore et al. (1963).
Args
----
dens_gcm3: float or ndarray
An array of seal densities (g/cm^3). The calculations only yield valid
percents with densities between 0.888-1.123 with other parameters left
as defaults.
dens_lipid: float
Density of lipid content in the animal (g/cm^3)
dens_prot: float
Density of protein content in the animal (g/cm^3)
dens_water: float
Density of water content in the animal (g/cm^3)
dens_ash: float
Density of ash content in the animal (g/cm^3)
Returns
-------
perc_lipid: float or ndarray
Percent lipid of body composition derived from the given density
References
----------
Biuw, M., 2003. Blubber and buoyancy: monitoring the body condition of
free-ranging seals using simple dive characteristics. Journal of
Experimental Biology 206, 3405–3423. doi:10.1242/jeb.00583
Moore FD, Oleson KH, McMurrery JD, Parker HV, Ball MR, Boyden CM. The Body
Cell Mass and Its Supporting Environment - The Composition in Health and
Disease. Philadelphia: W.B. Saunders Company; 1963. 535 p.
ISBN:0-7216-6480-6
'''
import numpy
# Cast iterables to numpy array
if numpy.iterable(dens_gcm3):
dens_gcm3 = numpy.asarray(dens_gcm3)
# Numerators
ad_num = -3.2248 * dens_ash
pd_num = -25.2786 * dens_prot
wd_num = -71.4966 * dens_water
# Denominators
ad_den = -0.034 * dens_ash
pd_den = -0.2857 * dens_prot
wd_den = -0.6803 * dens_water
perc_lipid = ((100 * dens_gcm3) + ad_num + pd_num + wd_num) / \
(dens_lipid + ad_den + pd_den + wd_den)
return perc_lipid | python | def dens2lip(dens_gcm3, dens_lipid=0.9007, dens_prot=1.34, dens_water=0.994,
dens_ash=2.3):
'''Get percent composition of animal from body density
The equation calculating animal density is from Biuw et al. (2003), and
default values for component densities are from human studies collected in
the book by Moore et al. (1963).
Args
----
dens_gcm3: float or ndarray
An array of seal densities (g/cm^3). The calculations only yield valid
percents with densities between 0.888-1.123 with other parameters left
as defaults.
dens_lipid: float
Density of lipid content in the animal (g/cm^3)
dens_prot: float
Density of protein content in the animal (g/cm^3)
dens_water: float
Density of water content in the animal (g/cm^3)
dens_ash: float
Density of ash content in the animal (g/cm^3)
Returns
-------
perc_lipid: float or ndarray
Percent lipid of body composition derived from the given density
References
----------
Biuw, M., 2003. Blubber and buoyancy: monitoring the body condition of
free-ranging seals using simple dive characteristics. Journal of
Experimental Biology 206, 3405–3423. doi:10.1242/jeb.00583
Moore FD, Oleson KH, McMurrery JD, Parker HV, Ball MR, Boyden CM. The Body
Cell Mass and Its Supporting Environment - The Composition in Health and
Disease. Philadelphia: W.B. Saunders Company; 1963. 535 p.
ISBN:0-7216-6480-6
'''
import numpy
# Cast iterables to numpy array
if numpy.iterable(dens_gcm3):
dens_gcm3 = numpy.asarray(dens_gcm3)
# Numerators
ad_num = -3.2248 * dens_ash
pd_num = -25.2786 * dens_prot
wd_num = -71.4966 * dens_water
# Denominators
ad_den = -0.034 * dens_ash
pd_den = -0.2857 * dens_prot
wd_den = -0.6803 * dens_water
perc_lipid = ((100 * dens_gcm3) + ad_num + pd_num + wd_num) / \
(dens_lipid + ad_den + pd_den + wd_den)
return perc_lipid | [
"def",
"dens2lip",
"(",
"dens_gcm3",
",",
"dens_lipid",
"=",
"0.9007",
",",
"dens_prot",
"=",
"1.34",
",",
"dens_water",
"=",
"0.994",
",",
"dens_ash",
"=",
"2.3",
")",
":",
"import",
"numpy",
"# Cast iterables to numpy array",
"if",
"numpy",
".",
"iterable",
"(",
"dens_gcm3",
")",
":",
"dens_gcm3",
"=",
"numpy",
".",
"asarray",
"(",
"dens_gcm3",
")",
"# Numerators",
"ad_num",
"=",
"-",
"3.2248",
"*",
"dens_ash",
"pd_num",
"=",
"-",
"25.2786",
"*",
"dens_prot",
"wd_num",
"=",
"-",
"71.4966",
"*",
"dens_water",
"# Denominators",
"ad_den",
"=",
"-",
"0.034",
"*",
"dens_ash",
"pd_den",
"=",
"-",
"0.2857",
"*",
"dens_prot",
"wd_den",
"=",
"-",
"0.6803",
"*",
"dens_water",
"perc_lipid",
"=",
"(",
"(",
"100",
"*",
"dens_gcm3",
")",
"+",
"ad_num",
"+",
"pd_num",
"+",
"wd_num",
")",
"/",
"(",
"dens_lipid",
"+",
"ad_den",
"+",
"pd_den",
"+",
"wd_den",
")",
"return",
"perc_lipid"
] | Get percent composition of animal from body density
The equation calculating animal density is from Biuw et al. (2003), and
default values for component densities are from human studies collected in
the book by Moore et al. (1963).
Args
----
dens_gcm3: float or ndarray
An array of seal densities (g/cm^3). The calculations only yield valid
percents with densities between 0.888-1.123 with other parameters left
as defaults.
dens_lipid: float
Density of lipid content in the animal (g/cm^3)
dens_prot: float
Density of protein content in the animal (g/cm^3)
dens_water: float
Density of water content in the animal (g/cm^3)
dens_ash: float
Density of ash content in the animal (g/cm^3)
Returns
-------
perc_lipid: float or ndarray
Percent lipid of body composition derived from the given density
References
----------
Biuw, M., 2003. Blubber and buoyancy: monitoring the body condition of
free-ranging seals using simple dive characteristics. Journal of
Experimental Biology 206, 3405–3423. doi:10.1242/jeb.00583
Moore FD, Oleson KH, McMurrery JD, Parker HV, Ball MR, Boyden CM. The Body
Cell Mass and Its Supporting Environment - The Composition in Health and
Disease. Philadelphia: W.B. Saunders Company; 1963. 535 p.
ISBN:0-7216-6480-6 | [
"Get",
"percent",
"composition",
"of",
"animal",
"from",
"body",
"density"
] | 816563a9c3feb3fa416f1c2921c6b75db34111ad | https://github.com/ryanjdillon/pyotelem/blob/816563a9c3feb3fa416f1c2921c6b75db34111ad/pyotelem/physio_seal.py#L257-L315 |
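`dens2lip` is the algebraic inverse of `lip2dens` within the stated valid density range; a round-trip sketch assuming `pyotelem` is installed:

```python
from pyotelem.physio_seal import dens2lip, lip2dens  # assumes pyotelem installed

perc_lipid = dens2lip(1.05)  # ~31 % lipid at default component densities
print(perc_lipid)
print(lip2dens(perc_lipid))  # ~1.05, recovering the input density
```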
250,793 | ryanjdillon/pyotelem | pyotelem/physio_seal.py | diff_speed | def diff_speed(sw_dens=1.028, dens_gcm3=1.053, seal_length=300, seal_girth=200,
Cd=0.09):
'''Calculate terminal velocity of animal with a body size
Args
----
sw_dens: float
Density of seawater (g/cm^3)
dens_gcm3: float
Density of animal (g/cm^3)
seal_length: float
Length of animal (cm)
seal_girth: float
Girth of animal (cm)
Cd: float
Drag coefficient of object in fluid, unitless
Returns
-------
Vt: float
Terminal velocity of animal with given body dimensions (m/s).
References
----------
Biuw, M., 2003. Blubber and buoyancy: monitoring the body condition of
free-ranging seals using simple dive characteristics. Journal of
Experimental Biology 206, 3405–3423. doi:10.1242/jeb.00583
Vogel, S., 1994. Life in Moving Fluids: The Physical Biology of Flow.
Princeton University Press.
'''
import numpy
surf, vol = surf_vol(seal_length, seal_girth)
Fb = buoyant_force(dens_gcm3, vol, sw_dens)
x = 2 * (Fb/(Cd * sw_dens * (surf*1000)))
if x >= 0:
Vt = numpy.sqrt(x)
else:
Vt = -numpy.sqrt(-x)
return Vt | python | def diff_speed(sw_dens=1.028, dens_gcm3=1.053, seal_length=300, seal_girth=200,
Cd=0.09):
'''Calculate terminal velocity of animal with a body size
Args
----
sw_dens: float
Density of seawater (g/cm^3)
dens_gcm3: float
Density of animal (g/cm^3)
seal_length: float
Length of animal (cm)
seal_girth: float
Girth of animal (cm)
Cd: float
Drag coefficient of object in fluid, unitless
Returns
-------
Vt: float
Terminal velocity of animal with given body dimensions (m/s).
References
----------
Biuw, M., 2003. Blubber and buoyancy: monitoring the body condition of
free-ranging seals using simple dive characteristics. Journal of
Experimental Biology 206, 3405–3423. doi:10.1242/jeb.00583
Vogel, S., 1994. Life in Moving Fluids: The Physical Biology of Flow.
Princeton University Press.
'''
import numpy
surf, vol = surf_vol(seal_length, seal_girth)
Fb = buoyant_force(dens_gcm3, vol, sw_dens)
x = 2 * (Fb/(Cd * sw_dens * (surf*1000)))
if x >= 0:
Vt = numpy.sqrt(x)
else:
Vt = -numpy.sqrt(-x)
return Vt | [
"def",
"diff_speed",
"(",
"sw_dens",
"=",
"1.028",
",",
"dens_gcm3",
"=",
"1.053",
",",
"seal_length",
"=",
"300",
",",
"seal_girth",
"=",
"200",
",",
"Cd",
"=",
"0.09",
")",
":",
"import",
"numpy",
"surf",
",",
"vol",
"=",
"surf_vol",
"(",
"seal_length",
",",
"seal_girth",
")",
"Fb",
"=",
"buoyant_force",
"(",
"dens_gcm3",
",",
"vol",
",",
"sw_dens",
")",
"x",
"=",
"2",
"*",
"(",
"Fb",
"/",
"(",
"Cd",
"*",
"sw_dens",
"*",
"(",
"surf",
"*",
"1000",
")",
")",
")",
"if",
"x",
">=",
"0",
":",
"Vt",
"=",
"numpy",
".",
"sqrt",
"(",
"x",
")",
"else",
":",
"Vt",
"=",
"-",
"numpy",
".",
"sqrt",
"(",
"-",
"x",
")",
"return",
"Vt"
] | Calculate terminal velocity of animal with a body size
Args
----
sw_dens: float
Density of seawater (g/cm^3)
dens_gcm3: float
Density of animal (g/cm^3)
seal_length: float
Length of animal (cm)
seal_girth: float
Girth of animal (cm)
Cd: float
Drag coefficient of object in fluid, unitless
Returns
-------
Vt: float
Terminal velocity of animal with given body dimensions (m/s).
References
----------
Biuw, M., 2003. Blubber and buoyancy: monitoring the body condition of
free-ranging seals using simple dive characteristics. Journal of
Experimental Biology 206, 3405–3423. doi:10.1242/jeb.00583
Vogel, S., 1994. Life in Moving Fluids: The Physical Biology of Flow.
Princeton University Press. | [
"Calculate",
"terminal",
"velocity",
"of",
"animal",
"with",
"a",
"body",
"size"
] | 816563a9c3feb3fa416f1c2921c6b75db34111ad | https://github.com/ryanjdillon/pyotelem/blob/816563a9c3feb3fa416f1c2921c6b75db34111ad/pyotelem/physio_seal.py#L344-L388 |
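A call with the documented defaults, assuming `pyotelem` is installed (the function also relies on `buoyant_force` from the same module): a positive density difference gives a positive sinking speed, and the `-sqrt(-x)` branch flips the sign for buoyant animals:

```python
from pyotelem.physio_seal import diff_speed  # assumes pyotelem installed

vt = diff_speed(sw_dens=1.028, dens_gcm3=1.053,
                seal_length=300, seal_girth=200, Cd=0.09)
print(vt)  # signed terminal speed (m/s); positive means sinking
```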
250,794 | ryanjdillon/pyotelem | pyotelem/physio_seal.py | surf_vol | def surf_vol(length, girth):
'''Calculate the surface volume of an animal from its length and girth
Args
----
length: float or ndarray
Length of animal (m)
girth: float or ndarray
Girth of animal (m)
Returns
-------
surf: float or ndarray
Surface area of animal (m^2)
vol: float or ndarray
Volume of animal (m^3)
'''
import numpy
a_r = 0.01 * girth / (2 * numpy.pi)
stl_l = 0.01 * length
c_r = stl_l / 2
e = numpy.sqrt(1-(a_r**2/c_r**2))
surf = ((2*numpy.pi * a_r**2) + \
(2*numpy.pi * ((a_r * c_r)/e)) * 1/(numpy.sin(e)))
vol = (((4/3) * numpy.pi)*(a_r**2) * c_r)
return surf, vol | python | def surf_vol(length, girth):
'''Calculate the surface volume of an animal from its length and girth
Args
----
length: float or ndarray
Length of animal (m)
girth: float or ndarray
Girth of animal (m)
Returns
-------
surf: float or ndarray
Surface area of animal (m^2)
vol: float or ndarray
Volume of animal (m^3)
'''
import numpy
a_r = 0.01 * girth / (2 * numpy.pi)
stl_l = 0.01 * length
c_r = stl_l / 2
e = numpy.sqrt(1-(a_r**2/c_r**2))
surf = ((2*numpy.pi * a_r**2) + \
(2*numpy.pi * ((a_r * c_r)/e)) * 1/(numpy.sin(e)))
vol = (((4/3) * numpy.pi)*(a_r**2) * c_r)
return surf, vol | [
"def",
"surf_vol",
"(",
"length",
",",
"girth",
")",
":",
"import",
"numpy",
"a_r",
"=",
"0.01",
"*",
"girth",
"/",
"(",
"2",
"*",
"numpy",
".",
"pi",
")",
"stl_l",
"=",
"0.01",
"*",
"length",
"c_r",
"=",
"stl_l",
"/",
"2",
"e",
"=",
"numpy",
".",
"sqrt",
"(",
"1",
"-",
"(",
"a_r",
"**",
"2",
"/",
"c_r",
"**",
"2",
")",
")",
"surf",
"=",
"(",
"(",
"2",
"*",
"numpy",
".",
"pi",
"*",
"a_r",
"**",
"2",
")",
"+",
"(",
"2",
"*",
"numpy",
".",
"pi",
"*",
"(",
"(",
"a_r",
"*",
"c_r",
")",
"/",
"e",
")",
")",
"*",
"1",
"/",
"(",
"numpy",
".",
"sin",
"(",
"e",
")",
")",
")",
"vol",
"=",
"(",
"(",
"(",
"4",
"/",
"3",
")",
"*",
"numpy",
".",
"pi",
")",
"*",
"(",
"a_r",
"**",
"2",
")",
"*",
"c_r",
")",
"return",
"surf",
",",
"vol"
] | Calculate the surface volume of an animal from its length and girth
Args
----
length: float or ndarray
Length of animal (m)
girth: float or ndarray
Girth of animal (m)
Returns
-------
surf: float or ndarray
Surface area of animal (m^2)
vol: float or ndarray
Volume of animal (m^3) | [
"Calculate",
"the",
"surface",
"volume",
"of",
"an",
"animal",
"from",
"its",
"length",
"and",
"girth"
] | 816563a9c3feb3fa416f1c2921c6b75db34111ad | https://github.com/ryanjdillon/pyotelem/blob/816563a9c3feb3fa416f1c2921c6b75db34111ad/pyotelem/physio_seal.py#L411-L440 |
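The geometry here is a prolate spheroid: the girth fixes the minor radius (girth divided by 2*pi) and half the length gives the major radius. Note the `1/(numpy.sin(e))` factor in the surface term; the textbook prolate-spheroid area uses `arcsin(e)`, so treat absolute surface values with some care. A sketch assuming `pyotelem` is installed:

```python
from pyotelem.physio_seal import surf_vol  # assumes pyotelem installed

surf, vol = surf_vol(length=300, girth=200)  # centimeters in
print(surf, vol)  # surface area (m^2) and volume (m^3)
```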
250,795 | ryanjdillon/pyotelem | pyotelem/physio_seal.py | calc_seal_volume | def calc_seal_volume(mass_kg, dens_kgm3, length=None, girth=None):
'''Calculate an animal's volume from mass and density or length and girth
Args
----
mass_kg: float or ndarray
Mass of animal (kg)
dens_kgm3: float or ndarray
Density of animal (kg/m^3)
length: float or None
Length of animal. Default `None` (m)
girth: float or None
Girth of animal. Default `None` (m)
Returns
-------
vol_kgm3: float or ndarray
Volume of animal (m^3)
'''
if (length is not None) and (girth is not None):
_, seal_vol = surf_vol(length, girth)
else:
seal_vol = mass_kg / dens_kgm3
return seal_vol | python | def calc_seal_volume(mass_kg, dens_kgm3, length=None, girth=None):
'''Calculate an animal's volume from mass and density or length and girth
Args
----
mass_kg: float or ndarray
Mass of animal (kg)
dens_kgm3: float or ndarray
Density of animal (kg/m^3)
length: float or None
Length of animal. Default `None` (m)
girth: float or None
Girth of animal. Default `None` (m)
Returns
-------
vol_kgm3: float or ndarray
Volume of animal (m^3)
'''
if (length is not None) and (girth is not None):
_, seal_vol = surf_vol(length, girth)
else:
seal_vol = mass_kg / dens_kgm3
return seal_vol | [
"def",
"calc_seal_volume",
"(",
"mass_kg",
",",
"dens_kgm3",
",",
"length",
"=",
"None",
",",
"girth",
"=",
"None",
")",
":",
"if",
"(",
"length",
"is",
"not",
"None",
")",
"and",
"(",
"girth",
"is",
"not",
"None",
")",
":",
"_",
",",
"seal_vol",
"=",
"surf_vol",
"(",
"length",
",",
"girth",
")",
"else",
":",
"seal_vol",
"=",
"mass_kg",
"/",
"dens_kgm3",
"return",
"seal_vol"
] | Calculate an animal's volume from mass and density or length and girth
Args
----
mass_kg: float or ndarray
Mass of animal (kg)
dens_kgm3: float or ndarray
Density of animal (kg/m^3)
length: float or None
Length of animal. Default `None` (m)
girth: float or None
Girth of animal. Default `None` (m)
Returns
-------
vol_kgm3: float or ndarray
Volume of animal (m^3) | [
"Calculate",
"an",
"animal",
"s",
"volume",
"from",
"mass",
"and",
"density",
"or",
"length",
"and",
"girth"
] | 816563a9c3feb3fa416f1c2921c6b75db34111ad | https://github.com/ryanjdillon/pyotelem/blob/816563a9c3feb3fa416f1c2921c6b75db34111ad/pyotelem/physio_seal.py#L443-L467 |
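Both input paths, assuming `pyotelem` is installed; when length and girth are given, the prolate-spheroid volume from `surf_vol` takes precedence over the mass/density ratio:

```python
from pyotelem.physio_seal import calc_seal_volume  # assumes pyotelem installed

print(calc_seal_volume(85.0, 1050.0))  # mass/density path: ~0.081 m^3
print(calc_seal_volume(85.0, 1050.0, length=150, girth=100))  # morphometric path
```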
250,796 | pschmitt/zhue | zhue/model/bridge.py | Bridge.__find_new | def __find_new(self, hueobjecttype):
'''
Starts a search for new Hue objects
'''
assert hueobjecttype in ['lights', 'sensors'], \
'Unsupported object type {}'.format(hueobjecttype)
url = '{}/{}'.format(self.API, hueobjecttype)
return self._request(
method='POST',
url=url
) | python | def __find_new(self, hueobjecttype):
'''
Starts a search for new Hue objects
'''
assert hueobjecttype in ['lights', 'sensors'], \
'Unsupported object type {}'.format(hueobjecttype)
url = '{}/{}'.format(self.API, hueobjecttype)
return self._request(
method='POST',
url=url
) | [
"def",
"__find_new",
"(",
"self",
",",
"hueobjecttype",
")",
":",
"assert",
"hueobjecttype",
"in",
"[",
"'lights'",
",",
"'sensors'",
"]",
",",
"'Unsupported object type {}'",
".",
"format",
"(",
"hueobjecttype",
")",
"url",
"=",
"'{}/{}'",
".",
"format",
"(",
"self",
".",
"API",
",",
"hueobjecttype",
")",
"return",
"self",
".",
"_request",
"(",
"method",
"=",
"'POST'",
",",
"url",
"=",
"url",
")"
] | Starts a search for new Hue objects | [
"Starts",
"a",
"search",
"for",
"new",
"Hue",
"objects"
] | 4a3f4ddf12ceeedcb2157f92d93ff1c6438a7d59 | https://github.com/pschmitt/zhue/blob/4a3f4ddf12ceeedcb2157f92d93ff1c6438a7d59/zhue/model/bridge.py#L311-L321 |
250,797 | pschmitt/zhue | zhue/model/bridge.py | Bridge.__get_new | def __get_new(self, hueobjecttype):
'''
Get a list of newly found Hue objects
'''
assert hueobjecttype in ['lights', 'sensors'], \
'Unsupported object type {}'.format(hueobjecttype)
url = '{}/{}/new'.format(self.API, hueobjecttype)
return self._request(url=url) | python | def __get_new(self, hueobjecttype):
'''
Get a list of newly found Hue objects
'''
assert hueobjecttype in ['lights', 'sensors'], \
'Unsupported object type {}'.format(hueobjecttype)
url = '{}/{}/new'.format(self.API, hueobjecttype)
return self._request(url=url) | [
"def",
"__get_new",
"(",
"self",
",",
"hueobjecttype",
")",
":",
"assert",
"hueobjecttype",
"in",
"[",
"'lights'",
",",
"'sensors'",
"]",
",",
"'Unsupported object type {}'",
".",
"format",
"(",
"hueobjecttype",
")",
"url",
"=",
"'{}/{}/new'",
".",
"format",
"(",
"self",
".",
"API",
",",
"hueobjecttype",
")",
"return",
"self",
".",
"_request",
"(",
"url",
"=",
"url",
")"
] | Get a list of newly found Hue objects | [
"Get",
"a",
"list",
"of",
"newly",
"found",
"Hue",
"object"
] | 4a3f4ddf12ceeedcb2157f92d93ff1c6438a7d59 | https://github.com/pschmitt/zhue/blob/4a3f4ddf12ceeedcb2157f92d93ff1c6438a7d59/zhue/model/bridge.py#L323-L330 |
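Both private helpers wrap the bridge's REST search endpoints. A hypothetical raw equivalent with `requests`, with the bridge address and API username made up; only the two endpoint paths are taken from the code above:

```python
import requests

API = 'http://192.168.1.2/api/myhueuser'  # hypothetical bridge IP and key

requests.post(API + '/lights')  # what __find_new('lights') issues: start a scan
new = requests.get(API + '/lights/new').json()  # what __get_new('lights') reads
print(new)  # typically a dict of newly found lights plus a last-scan timestamp
```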
250,798 | bogdan-kulynych/defaultcontext | defaultcontext/stack.py | DefaultStack.get_context_manager | def get_context_manager(self, default):
"""A context manager for manipulating a default stack."""
try:
self.stack.append(default)
yield default
finally:
if self.enforce_nesting:
if self.stack[-1] is not default:
raise AssertionError(
"Nesting violated for default stack of %s objects"
% type(default))
self.stack.pop()
else:
self.stack.remove(default) | python | def get_context_manager(self, default):
"""A context manager for manipulating a default stack."""
try:
self.stack.append(default)
yield default
finally:
if self.enforce_nesting:
if self.stack[-1] is not default:
raise AssertionError(
"Nesting violated for default stack of %s objects"
% type(default))
self.stack.pop()
else:
self.stack.remove(default) | [
"def",
"get_context_manager",
"(",
"self",
",",
"default",
")",
":",
"try",
":",
"self",
".",
"stack",
".",
"append",
"(",
"default",
")",
"yield",
"default",
"finally",
":",
"if",
"self",
".",
"enforce_nesting",
":",
"if",
"self",
".",
"stack",
"[",
"-",
"1",
"]",
"is",
"not",
"default",
":",
"raise",
"AssertionError",
"(",
"\"Nesting violated for default stack of %s objects\"",
"%",
"type",
"(",
"default",
")",
")",
"self",
".",
"stack",
".",
"pop",
"(",
")",
"else",
":",
"self",
".",
"stack",
".",
"remove",
"(",
"default",
")"
] | A context manager for manipulating a default stack. | [
"A",
"context",
"manager",
"for",
"manipulating",
"a",
"default",
"stack",
"."
] | ec9bb96552dfb3d42a1103da1772b024414dd801 | https://github.com/bogdan-kulynych/defaultcontext/blob/ec9bb96552dfb3d42a1103da1772b024414dd801/defaultcontext/stack.py#L229-L242 |
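A usage sketch, assuming the package is installed, that `DefaultStack()` takes no required arguments, and that the method is wrapped with `contextlib.contextmanager` as its docstring and `yield` suggest:

```python
from defaultcontext.stack import DefaultStack  # assumes defaultcontext installed

stack = DefaultStack()  # assumed no-argument constructor
outer, inner = object(), object()

with stack.get_context_manager(outer):
    with stack.get_context_manager(inner):
        assert stack.stack[-1] is inner  # innermost default wins
    assert stack.stack[-1] is outer      # inner popped again on exit (LIFO)
assert not stack.stack                   # empty once all contexts exit
```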
250,799 | rackerlabs/rackspace-python-neutronclient | neutronclient/neutron/v2_0/vpn/ipsec_site_connection.py | IPsecSiteConnectionMixin.args2body | def args2body(self, parsed_args, body=None):
"""Add in conditional args and then return all conn info."""
if body is None:
body = {}
if parsed_args.dpd:
vpn_utils.validate_dpd_dict(parsed_args.dpd)
body['dpd'] = parsed_args.dpd
if parsed_args.local_ep_group:
_local_epg = neutronv20.find_resourceid_by_name_or_id(
self.get_client(), 'endpoint_group',
parsed_args.local_ep_group)
body['local_ep_group_id'] = _local_epg
if parsed_args.peer_ep_group:
_peer_epg = neutronv20.find_resourceid_by_name_or_id(
self.get_client(), 'endpoint_group',
parsed_args.peer_ep_group)
body['peer_ep_group_id'] = _peer_epg
return {self.resource: body} | python | def args2body(self, parsed_args, body=None):
"""Add in conditional args and then return all conn info."""
if body is None:
body = {}
if parsed_args.dpd:
vpn_utils.validate_dpd_dict(parsed_args.dpd)
body['dpd'] = parsed_args.dpd
if parsed_args.local_ep_group:
_local_epg = neutronv20.find_resourceid_by_name_or_id(
self.get_client(), 'endpoint_group',
parsed_args.local_ep_group)
body['local_ep_group_id'] = _local_epg
if parsed_args.peer_ep_group:
_peer_epg = neutronv20.find_resourceid_by_name_or_id(
self.get_client(), 'endpoint_group',
parsed_args.peer_ep_group)
body['peer_ep_group_id'] = _peer_epg
return {self.resource: body} | [
"def",
"args2body",
"(",
"self",
",",
"parsed_args",
",",
"body",
"=",
"None",
")",
":",
"if",
"body",
"is",
"None",
":",
"body",
"=",
"{",
"}",
"if",
"parsed_args",
".",
"dpd",
":",
"vpn_utils",
".",
"validate_dpd_dict",
"(",
"parsed_args",
".",
"dpd",
")",
"body",
"[",
"'dpd'",
"]",
"=",
"parsed_args",
".",
"dpd",
"if",
"parsed_args",
".",
"local_ep_group",
":",
"_local_epg",
"=",
"neutronv20",
".",
"find_resourceid_by_name_or_id",
"(",
"self",
".",
"get_client",
"(",
")",
",",
"'endpoint_group'",
",",
"parsed_args",
".",
"local_ep_group",
")",
"body",
"[",
"'local_ep_group_id'",
"]",
"=",
"_local_epg",
"if",
"parsed_args",
".",
"peer_ep_group",
":",
"_peer_epg",
"=",
"neutronv20",
".",
"find_resourceid_by_name_or_id",
"(",
"self",
".",
"get_client",
"(",
")",
",",
"'endpoint_group'",
",",
"parsed_args",
".",
"peer_ep_group",
")",
"body",
"[",
"'peer_ep_group_id'",
"]",
"=",
"_peer_epg",
"return",
"{",
"self",
".",
"resource",
":",
"body",
"}"
] | Add in conditional args and then return all conn info. | [
"Add",
"in",
"conditional",
"args",
"and",
"then",
"return",
"all",
"conn",
"info",
"."
] | 5a5009a8fe078e3aa1d582176669f1b28ab26bef | https://github.com/rackerlabs/rackspace-python-neutronclient/blob/5a5009a8fe078e3aa1d582176669f1b28ab26bef/neutronclient/neutron/v2_0/vpn/ipsec_site_connection.py#L70-L88 |
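A sketch of the body the mixin assembles when DPD settings and endpoint-group names are supplied; the UUID fragments stand in for whatever `find_resourceid_by_name_or_id` resolves, and the DPD keys shown are illustrative:

```python
# Illustrative args2body result for an ipsec_site_connection resource;
# all identifiers below are made-up placeholders.
body = {
    'ipsec_site_connection': {
        'dpd': {'action': 'hold', 'interval': 30, 'timeout': 120},
        'local_ep_group_id': '3e1de702-...',  # resolved from --local-ep-group
        'peer_ep_group_id': '9b17f5da-...',   # resolved from --peer-ep-group
    }
}
print(body)
```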